diff --git a/docs/doc_examples/00ad41bde67beac991534ae0e04b1296.asciidoc b/docs/doc_examples/00ad41bde67beac991534ae0e04b1296.asciidoc new file mode 100644 index 000000000..aad48ff29 --- /dev/null +++ b/docs/doc_examples/00ad41bde67beac991534ae0e04b1296.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataStream({ + name: "my-data-stream", + filter_path: "data_streams.indices.index_name", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0f028f71f04c1d569fab402869565a84.asciidoc b/docs/doc_examples/0f028f71f04c1d569fab402869565a84.asciidoc new file mode 100644 index 000000000..007f558d8 --- /dev/null +++ b/docs/doc_examples/0f028f71f04c1d569fab402869565a84.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: ".reindexed-v9-ml-anomalies-custom-example", + settings: { + index: { + number_of_replicas: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/12adea5d76f73d94d80d42f53f67563f.asciidoc b/docs/doc_examples/12adea5d76f73d94d80d42f53f67563f.asciidoc new file mode 100644 index 000000000..83d87f9c6 --- /dev/null +++ b/docs/doc_examples/12adea5d76f73d94d80d42f53f67563f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.addBlock({ + index: ".ml-anomalies-custom-example", + block: "read_only", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ead35c954963e83f89872048dabdbe9.asciidoc b/docs/doc_examples/1ead35c954963e83f89872048dabdbe9.asciidoc new file mode 100644 index 000000000..347f3152e --- /dev/null +++ b/docs/doc_examples/1ead35c954963e83f89872048dabdbe9.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryRole({ + query: { + bool: { + must_not: { + term: { + "metadata._reserved": true, + }, + }, + }, + }, + sort: ["name"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc b/docs/doc_examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc new file mode 100644 index 000000000..5a65c9753 --- /dev/null +++ b/docs/doc_examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.addBlock({ + index: ".ml-anomalies-custom-example", + block: "write", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d29031409016b2b798148ef173a196ae.asciidoc b/docs/doc_examples/2afd49985950cbcccf727fa858d00067.asciidoc similarity index 83% rename from docs/doc_examples/d29031409016b2b798148ef173a196ae.asciidoc rename to docs/doc_examples/2afd49985950cbcccf727fa858d00067.asciidoc index fac02d172..38aa159a8 100644 --- a/docs/doc_examples/d29031409016b2b798148ef173a196ae.asciidoc +++ b/docs/doc_examples/2afd49985950cbcccf727fa858d00067.asciidoc @@ -6,13 +6,13 @@ const response = await client.indices.create({ index: "test-index", query: { - 
semantic: { - field: "my_semantic_field", + match: { + my_field: "Which country is Paris in?", }, }, highlight: { fields: { - my_semantic_field: { + my_field: { type: "semantic", number_of_fragments: 2, order: "score", diff --git a/docs/doc_examples/31832bd71c31c46a1ccf8d1c210d89d4.asciidoc b/docs/doc_examples/31832bd71c31c46a1ccf8d1c210d89d4.asciidoc new file mode 100644 index 000000000..e61007e9c --- /dev/null +++ b/docs/doc_examples/31832bd71c31c46a1ccf8d1c210d89d4.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-*", + query: { + bool: { + must: [ + { + match: { + "user.id": "kimchy", + }, + }, + ], + must_not: [ + { + terms: { + _index: ["my-index-01"], + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/32c8c86702ccd68eb70f1573409c2a1f.asciidoc b/docs/doc_examples/32c8c86702ccd68eb70f1573409c2a1f.asciidoc new file mode 100644 index 000000000..dcc8ff429 --- /dev/null +++ b/docs/doc_examples/32c8c86702ccd68eb70f1573409c2a1f.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_primary_shard_size: "50gb", + }, + searchable_snapshot: { + snapshot_repository: "backing_repo", + replicate_for: "14d", + }, + }, + }, + delete: { + min_age: "28d", + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc b/docs/doc_examples/36792c81c053e0555407d1e83e7e054f.asciidoc similarity index 94% rename from docs/doc_examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc rename to docs/doc_examples/36792c81c053e0555407d1e83e7e054f.asciidoc index 8651f44c6..2256f4c94 100644 --- a/docs/doc_examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc +++ b/docs/doc_examples/36792c81c053e0555407d1e83e7e054f.asciidoc @@ -9,10 +9,13 @@ const response = await client.search({ retriever: { rescorer: { rescore: { + window_size: 50, query: { - window_size: 50, rescore_query: { script_score: { + query: { + match_all: {}, + }, script: { source: "cosineSimilarity(params.queryVector, 'product-vector_final_stage') + 1.0", diff --git a/docs/doc_examples/3722dad876023e0757138dd5a6d3240e.asciidoc b/docs/doc_examples/3722dad876023e0757138dd5a6d3240e.asciidoc new file mode 100644 index 000000000..e071509a9 --- /dev/null +++ b/docs/doc_examples/3722dad876023e0757138dd5a6d3240e.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + settings: { + index: { + number_of_shards: 3, + "blocks.write": true, + }, + }, + mappings: { + properties: { + field1: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc b/docs/doc_examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc new file mode 100644 index 000000000..087b6dc1b --- /dev/null +++ b/docs/doc_examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the 
docs examples + +[source, js] +---- +const response = await client.search({ + index: ".ml-anomalies-custom-example", + size: 0, + aggs: { + job_ids: { + terms: { + field: "job_id", + size: 100, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc b/docs/doc_examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc new file mode 100644 index 000000000..929ab0ee8 --- /dev/null +++ b/docs/doc_examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc @@ -0,0 +1,61 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + linear: { + retrievers: [ + { + retriever: { + standard: { + query: { + function_score: { + query: { + term: { + topic: "ai", + }, + }, + functions: [ + { + script_score: { + script: { + source: "doc['timestamp'].value.millis", + }, + }, + }, + ], + boost_mode: "replace", + }, + }, + sort: { + timestamp: { + order: "asc", + }, + }, + }, + }, + weight: 2, + normalizer: "minmax", + }, + { + retriever: { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + weight: 1.5, + }, + ], + rank_window_size: 10, + }, + }, + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc b/docs/doc_examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc new file mode 100644 index 000000000..22100f235 --- /dev/null +++ b/docs/doc_examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "sparse_embedding", + inference_id: "elser-model-eis", + inference_config: { + service: "elastic", + service_settings: { + model_name: "elser", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/537bce129338d9227bccb6a0283dab45.asciidoc b/docs/doc_examples/537bce129338d9227bccb6a0283dab45.asciidoc new file mode 100644 index 000000000..cfeed0dff --- /dev/null +++ b/docs/doc_examples/537bce129338d9227bccb6a0283dab45.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "migrate.data_stream_reindex_max_request_per_second": 10000, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc b/docs/doc_examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc new file mode 100644 index 000000000..61ac89373 --- /dev/null +++ b/docs/doc_examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: ".reindexed-v9-ml-anomalies-custom-example", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc b/docs/doc_examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc new file mode 100644 index 000000000..41c42d206 --- /dev/null +++ b/docs/doc_examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test-index", + query: { + match: { + my_semantic_field: "Which country is Paris in?", + }, + }, + highlight: { + fields: { + my_semantic_field: { + number_of_fragments: 2, + order: "score", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc b/docs/doc_examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc new file mode 100644 index 000000000..fdd6ab8f3 --- /dev/null +++ b/docs/doc_examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: ".ml-anomalies-custom-example", + }, + dest: { + index: ".reindexed-v9-ml-anomalies-custom-example", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc b/docs/doc_examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc new file mode 100644 index 000000000..7cb4b44d1 --- /dev/null +++ b/docs/doc_examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: ".reindexed-v9-ml-anomalies-custom-example", + settings: { + index: { + number_of_replicas: 0, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/89f547649895176c246bb8c41313ff21.asciidoc b/docs/doc_examples/89f547649895176c246bb8c41313ff21.asciidoc new file mode 100644 index 000000000..571f64436 --- /dev/null +++ b/docs/doc_examples/89f547649895176c246bb8c41313ff21.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: + '\nFROM library\n| EVAL year = DATE_EXTRACT("year", release_date)\n| WHERE page_count > ? 
AND match(author, ?, {"minimum_should_match": ?})\n| LIMIT 5\n', + params: [300, "Frank Herbert", 2], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc b/docs/doc_examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc new file mode 100644 index 000000000..680f24481 --- /dev/null +++ b/docs/doc_examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getAlias({ + index: ".ml-anomalies-custom-example", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a46f566ca031375658c22f89b87dc6d2.asciidoc b/docs/doc_examples/a46f566ca031375658c22f89b87dc6d2.asciidoc new file mode 100644 index 000000000..f11302fa4 --- /dev/null +++ b/docs/doc_examples/a46f566ca031375658c22f89b87dc6d2.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.indices({ + index: ".ml-anomalies-custom-example", + v: "true", + h: "index,store.size", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a675fafa7c688cb3ea1be09bf887ebf0.asciidoc b/docs/doc_examples/a675fafa7c688cb3ea1be09bf887ebf0.asciidoc new file mode 100644 index 000000000..2837854a9 --- /dev/null +++ b/docs/doc_examples/a675fafa7c688cb3ea1be09bf887ebf0.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.get({ + index: ".migrated-ds-my-data-stream-2025.01.23-000001", + human: "true", + filter_path: "*.settings.index.version.created_string", +}); +console.log(response); +---- diff --git a/docs/doc_examples/357edc9d10e98ed776401c7a439a1a55.asciidoc b/docs/doc_examples/ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc similarity index 94% rename from docs/doc_examples/357edc9d10e98ed776401c7a439a1a55.asciidoc rename to docs/doc_examples/ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc index 088bda3bc..42c6d4763 100644 --- a/docs/doc_examples/357edc9d10e98ed776401c7a439a1a55.asciidoc +++ b/docs/doc_examples/ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc @@ -6,6 +6,7 @@ const response = await client.indices.resolveCluster({ name: "not-present,clust*:my-index*,oldcluster:*", ignore_unavailable: "false", + timeout: "5s", }); console.log(response); ---- diff --git a/docs/doc_examples/436d50b85fc8f0977d02059eec00719b.asciidoc b/docs/doc_examples/bcd1afb793240b1dddd9fa5d3f21192b.asciidoc similarity index 65% rename from docs/doc_examples/436d50b85fc8f0977d02059eec00719b.asciidoc rename to docs/doc_examples/bcd1afb793240b1dddd9fa5d3f21192b.asciidoc index d1a2f84de..5dc68e409 100644 --- a/docs/doc_examples/436d50b85fc8f0977d02059eec00719b.asciidoc +++ b/docs/doc_examples/bcd1afb793240b1dddd9fa5d3f21192b.asciidoc @@ -6,15 +6,11 @@ const response = await client.update({ index: "test", id: 1, - script: { - source: "ctx._source.counter += params.count", - lang: "painless", - params: { - count: 4, - }, + doc: { + product_price: 100, }, upsert: { - counter: 1, + product_price: 50, }, }); console.log(response); diff --git a/docs/doc_examples/c3b77e11b16e37e9e37e28dec922432e.asciidoc b/docs/doc_examples/c3b77e11b16e37e9e37e28dec922432e.asciidoc new file mode 100644 index 000000000..f80f1ac99 --- /dev/null +++ 
b/docs/doc_examples/c3b77e11b16e37e9e37e28dec922432e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: + '\nFROM library\n| WHERE match(author, "Frank Herbert", {"minimum_should_match": 2, "operator": "AND"})\n| LIMIT 5\n', +}); +console.log(response); +---- diff --git a/docs/doc_examples/d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc b/docs/doc_examples/d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc new file mode 100644 index 000000000..ff0c652e4 --- /dev/null +++ b/docs/doc_examples/d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.state({ + metric: "metadata", + filter_path: "metadata.indices.*.system", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc b/docs/doc_examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc new file mode 100644 index 000000000..46940cf06 --- /dev/null +++ b/docs/doc_examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + linear: { + retrievers: [ + { + retriever: { + standard: { + query: { + query_string: { + query: "(information retrieval) OR (artificial intelligence)", + default_field: "text", + }, + }, + }, + }, + weight: 2, + normalizer: "minmax", + }, + { + retriever: { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + weight: 1.5, + normalizer: "minmax", + }, + ], + rank_window_size: 10, + }, + }, + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d6a4548b29e939fb197189c20c7c016f.asciidoc b/docs/doc_examples/d6a4548b29e939fb197189c20c7c016f.asciidoc new file mode 100644 index 000000000..745cb7efe --- /dev/null +++ b/docs/doc_examples/d6a4548b29e939fb197189c20c7c016f.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "chat_completion", + inference_id: "chat-completion-endpoint", + inference_config: { + service: "elastic", + service_settings: { + model_id: "model-1", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc b/docs/doc_examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc new file mode 100644 index 000000000..c00660b74 --- /dev/null +++ b/docs/doc_examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc @@ -0,0 +1,57 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + add: { + index: ".reindexed-v9-ml-anomalies-custom-example", + alias: ".ml-anomalies-example1", + filter: { + term: { + job_id: { + value: "example1", + }, + }, + }, + is_hidden: true, + }, + }, + { + add: { + index: ".reindexed-v9-ml-anomalies-custom-example", + alias: ".ml-anomalies-example2", + filter: { + term: { + job_id: { + value: "example2", + }, 
+ }, + }, + is_hidden: true, + }, + }, + { + remove: { + index: ".ml-anomalies-custom-example", + aliases: ".ml-anomalies-*", + }, + }, + { + remove_index: { + index: ".ml-anomalies-custom-example", + }, + }, + { + add: { + index: ".reindexed-v9-ml-anomalies-custom-example", + alias: ".ml-anomalies-custom-example", + is_hidden: true, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8b144b3eb20872595fd7cbc6c245c7c8.asciidoc b/docs/doc_examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc similarity index 67% rename from docs/doc_examples/8b144b3eb20872595fd7cbc6c245c7c8.asciidoc rename to docs/doc_examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc index 3bccba38f..1b37d265b 100644 --- a/docs/doc_examples/8b144b3eb20872595fd7cbc6c245c7c8.asciidoc +++ b/docs/doc_examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc @@ -3,8 +3,8 @@ [source, js] ---- -const response = await client.security.queryRole({ - sort: ["name"], +const response = await client.migration.deprecations({ + index: ".ml-anomalies-*", }); console.log(response); ---- diff --git a/docs/doc_examples/9ad0864bcd665b63551e944653d32423.asciidoc b/docs/doc_examples/f994498dd6576be657dedce2822d2b9e.asciidoc similarity index 87% rename from docs/doc_examples/9ad0864bcd665b63551e944653d32423.asciidoc rename to docs/doc_examples/f994498dd6576be657dedce2822d2b9e.asciidoc index f553c8706..21a737450 100644 --- a/docs/doc_examples/9ad0864bcd665b63551e944653d32423.asciidoc +++ b/docs/doc_examples/f994498dd6576be657dedce2822d2b9e.asciidoc @@ -30,6 +30,13 @@ const response = await client.search({ ], }, }, + highlight: { + fields: { + semantic_text: { + number_of_fragments: 2, + }, + }, + }, }); console.log(response); ---- diff --git a/docs/doc_examples/681d24c2633f598fc43d6afff8996dbb.asciidoc b/docs/doc_examples/ffda10edaa7ce087703193c3cb95a426.asciidoc similarity index 92% rename from docs/doc_examples/681d24c2633f598fc43d6afff8996dbb.asciidoc rename to docs/doc_examples/ffda10edaa7ce087703193c3cb95a426.asciidoc index bfb21cf32..0ccb2c77a 100644 --- a/docs/doc_examples/681d24c2633f598fc43d6afff8996dbb.asciidoc +++ b/docs/doc_examples/ffda10edaa7ce087703193c3cb95a426.asciidoc @@ -28,6 +28,9 @@ const response = await client.indices.create({ topic: { type: "keyword", }, + timestamp: { + type: "date", + }, }, }, }); @@ -41,6 +44,7 @@ const response1 = await client.index({ text: "Large language models are revolutionizing information retrieval by boosting search precision, deepening contextual understanding, and reshaping user experiences in data-rich environments.", year: 2024, topic: ["llm", "ai", "information_retrieval"], + timestamp: "2021-01-01T12:10:30", }, }); console.log(response1); @@ -53,6 +57,7 @@ const response2 = await client.index({ text: "Artificial intelligence is transforming medicine, from advancing diagnostics and tailoring treatment plans to empowering predictive patient care for improved health outcomes.", year: 2023, topic: ["ai", "medicine"], + timestamp: "2022-01-01T12:10:30", }, }); console.log(response2); @@ -65,6 +70,7 @@ const response3 = await client.index({ text: "AI is redefining security by enabling advanced threat detection, proactive risk analysis, and dynamic defenses against increasingly sophisticated cyber threats.", year: 2024, topic: ["ai", "security"], + timestamp: "2023-01-01T12:10:30", }, }); console.log(response3); @@ -77,6 +83,7 @@ const response4 = await client.index({ text: "Elastic introduces Elastic AI Assistant, the open, generative AI sidekick powered by ESRE to democratize 
cybersecurity and enable users of every skill level.", year: 2023, topic: ["ai", "elastic", "assistant"], + timestamp: "2024-01-01T12:10:30", }, }); console.log(response4); @@ -89,6 +96,7 @@ const response5 = await client.index({ text: "Learn how to spin up a deployment of our hosted Elasticsearch Service and use Elastic Observability to gain deeper insight into the behavior of your applications and systems.", year: 2024, topic: ["documentation", "observability", "elastic"], + timestamp: "2025-01-01T12:10:30", }, }); console.log(response5); diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index a027e3341..f315a5cee 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -201,8 +201,8 @@ client.closePointInTime({ id }) Count search results. Get the number of documents matching a query. -The query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body. -The latter must be nested in a `query` key, which is the same as the search API. +The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. +The query is optional. When no query is provided, the API uses `match_all` to count all the documents. The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. @@ -220,7 +220,7 @@ client.count({ ... }) * *Request (object):* ** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. The query is optional, and when not provided, it will use `match_all` to count all the docs. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter. 
** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. ** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. ** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. @@ -234,7 +234,7 @@ client.count({ ... }) ** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, it is random. ** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. ** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. -** *`q` (Optional, string)*: The query in Lucene query string syntax. +** *`q` (Optional, string)*: The query in Lucene query string syntax. This parameter cannot be used with a request body. [discrete] === create @@ -542,7 +542,7 @@ client.deleteByQueryRethrottle({ task_id }) Delete a script or search template. Deletes a stored script or search template. -{ref}/modules-scripting.html[Endpoint documentation] +{ref}/delete-stored-script-api.html[Endpoint documentation] [source,ts] ---- client.deleteScript({ id }) @@ -551,9 +551,9 @@ client.deleteScript({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier for the stored script or search template. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`id` (string)*: The identifier for the stored script or search template. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. [discrete] === exists @@ -636,7 +636,8 @@ client.existsSource({ id, index }) [discrete] === explain Explain a document match result. -Returns information about why a specific document matches, or doesn’t match, a query. +Get information about why a specific document matches, or doesn't match, a query. +It computes a score explanation for a query and a specific document. 
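+
+For illustration, a minimal sketch of such a request (the index name `my-index-000001`, document ID `0`, and `message` field are hypothetical placeholders, not part of this change) might look like:
+
+[source,js]
+----
+// Hypothetical example: ask why document 0 does (or does not) match a match query.
+const response = await client.explain({
+  index: "my-index-000001",
+  id: "0",
+  query: {
+    match: { message: "elasticsearch" },
+  },
+});
+console.log(response);
+----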
{ref}/search-explain.html[Endpoint documentation] [source,ts] @@ -647,21 +648,21 @@ client.explain({ id, index }) ==== Arguments * *Request (object):* -** *`id` (string)*: Defines the document ID. -** *`index` (string)*: Index names used to limit the request. Only a single index name can be provided to this parameter. +** *`id` (string)*: The document identifier. +** *`index` (string)*: Index names that are used to limit the request. Only a single index name can be provided to this parameter. ** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. -** *`analyzer` (Optional, string)*: Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. -** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. -** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. -** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`_source` (Optional, boolean | string | string[])*: True or false to return the `_source` field or not, or a list of fields to return. -** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. -** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. +** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. 
It is random by default. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`_source` (Optional, boolean | string | string[])*: `True` or `false` to return the `_source` field or not or a list of fields to return. +** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. ** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return in the response. -** *`q` (Optional, string)*: Query in the Lucene query string syntax. +** *`q` (Optional, string)*: The query in the Lucene query string syntax. [discrete] === field_caps @@ -682,16 +683,16 @@ client.fieldCaps({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. -** *`fields` (Optional, string | string[])*: List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. -** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter indices if the provided query rewrites to match_none on every shard. -** *`runtime_mappings` (Optional, Record)*: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. +** *`fields` (Optional, string | string[])*: A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. 
+** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Filter indices if the provided query rewrites to `match_none` on every shard. IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document. +** *`runtime_mappings` (Optional, Record)*: Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. ** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. ** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. ** *`include_unmapped` (Optional, boolean)*: If true, unmapped fields are included in the response. -** *`filters` (Optional, string)*: An optional set of filters: can include +metadata,-metadata,-nested,-multifield,-parent -** *`types` (Optional, string[])*: Only return results for fields that have one of the types in the list +** *`filters` (Optional, string)*: A list of filters to apply to the response. +** *`types` (Optional, string[])*: A list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned. ** *`include_empty_fields` (Optional, boolean)*: If false, empty fields are not included in the response. 
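+
+As a hedged sketch of a typical call (the index pattern and field names below are hypothetical), capabilities for two fields could be fetched like this, using the `fields` parameter's list form described above:
+
+[source,js]
+----
+// Hypothetical example: fetch capabilities for two fields across an index pattern.
+const response = await client.fieldCaps({
+  index: "my-index-*",
+  fields: ["rating", "title"],
+});
+console.log(response);
+----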
[discrete] @@ -783,7 +784,7 @@ client.get({ id, index }) Get a script or search template. Retrieves a stored script or search template. -{ref}/modules-scripting.html[Endpoint documentation] +{ref}/get-stored-script-api.html[Endpoint documentation] [source,ts] ---- client.getScript({ id }) @@ -792,8 +793,8 @@ client.getScript({ id }) ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier for the stored script or search template. -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master +** *`id` (string)*: The identifier for the stored script or search template. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. [discrete] === get_script_context @@ -801,7 +802,7 @@ Get script contexts. Get a list of supported script contexts and their methods. -{painless}/painless-contexts.html[Endpoint documentation] +{ref}/get-script-contexts-api.html[Endpoint documentation] [source,ts] ---- client.getScriptContext() @@ -813,7 +814,7 @@ Get script languages. Get a list of available script types, languages, and contexts. -{ref}/modules-scripting.html[Endpoint documentation] +{ref}/get-script-languages-api.html[Endpoint documentation] [source,ts] ---- client.getScriptLanguages() @@ -1067,7 +1068,13 @@ This means the results returned are not always the true k closest neighbors. The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. -{ref}/search-search.html[Endpoint documentation] +A kNN search response has the exact same structure as a search API response. +However, certain sections have a meaning specific to kNN search: + +* The document `_score` is determined by the similarity between the query and document vector. +* The `hits.total` object contains the total number of nearest neighbor candidates considered, which is `num_candidates * num_shards`. The `hits.total.relation` will always be `eq`, indicating an exact value. + +{ref}/knn-search-api.html[Endpoint documentation] [source,ts] ---- client.knnSearch({ index, knn }) @@ -1076,14 +1083,14 @@ client.knnSearch({ index, knn }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: A list of index names to search; use `_all` or to perform the operation on all indices -** *`knn` ({ field, query_vector, k, num_candidates })*: kNN query to execute -** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. -** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: The request returns doc values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns. -** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. -** *`fields` (Optional, string | string[])*: The request returns values for field names matching these patterns in the hits.fields property of the response. 
Accepts wildcard (*) patterns. -** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. -** *`routing` (Optional, string)*: A list of specific routing values +** *`index` (string | string[])*: A list of index names to search; use `_all` or to perform the operation on all indices. +** *`knn` ({ field, query_vector, k, num_candidates })*: The kNN query to run. +** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These fields are returned in the `hits._source` property of the search response. +** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns. +** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. +** *`fields` (Optional, string | string[])*: The request returns values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns. 
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: A query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. +** *`routing` (Optional, string)*: A list of specific routing values. [discrete] === mget @@ -1093,6 +1100,18 @@ Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. +**Filter source fields** + +By default, the `_source` field is returned for every document (if stored). +Use the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document. +You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions. + +**Get stored fields** + +Use the `stored_fields` attribute to specify the set of stored fields you want to retrieve. +Any requested fields that are not stored are ignored. +You can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions. + {ref}/docs-multi-get.html[Endpoint documentation] [source,ts] ---- @@ -1164,7 +1183,21 @@ client.msearch({ ... }) === msearch_template Run multiple templated searches. -{ref}/search-multi-search.html[Endpoint documentation] +Run multiple templated searches with a single request. +If you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines. 
+For example: + +---- +$ cat requests +{ "index": "my-index" } +{ "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }} +{ "index": "my-other-index" } +{ "id": "my-other-search-template", "params": { "query_type": "match_all" }} + +$ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo +---- + +{ref}/multi-search-template.html[Endpoint documentation] [source,ts] ---- client.msearchTemplate({ ... }) @@ -1173,11 +1206,11 @@ client.msearchTemplate({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. ** *`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])* ** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips are minimized for cross-cluster search requests. -** *`max_concurrent_searches` (Optional, number)*: Maximum number of concurrent searches the API can run. -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. Available options: `query_then_fetch`, `dfs_query_then_fetch`. +** *`max_concurrent_searches` (Optional, number)*: The maximum number of concurrent searches the API can run. +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. ** *`rest_total_hits_as_int` (Optional, boolean)*: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. ** *`typed_keys` (Optional, boolean)*: If `true`, the response prefixes aggregation and suggester names with their respective types. @@ -1185,11 +1218,17 @@ client.msearchTemplate({ ... }) === mtermvectors Get multiple term vectors. +Get multiple term vectors with a single request. You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a `docs` array with all the fetched termvectors. Each element has the structure provided by the termvectors API. +**Artificial documents** + +You can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request. +The mapping used is determined by the specified `_index`. + {ref}/docs-multi-termvectors.html[Endpoint documentation] [source,ts] ---- @@ -1199,20 +1238,20 @@ client.mtermvectors({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string)*: Name of the index that contains the documents. 
-** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*: Array of existing or artificial documents. -** *`ids` (Optional, string[])*: Simplified syntax to specify documents by their ID if they're in the same index. -** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. +** *`index` (Optional, string)*: The name of the index that contains the documents. +** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*: An array of existing or artificial documents. +** *`ids` (Optional, string[])*: A simplified syntax to specify documents by their ID if they're in the same index. +** *`fields` (Optional, string | string[])*: A list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. ** *`field_statistics` (Optional, boolean)*: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. ** *`offsets` (Optional, boolean)*: If `true`, the response includes term offsets. ** *`payloads` (Optional, boolean)*: If `true`, the response includes term payloads. ** *`positions` (Optional, boolean)*: If `true`, the response includes term positions. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. ** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. ** *`term_statistics` (Optional, boolean)*: If true, the response includes term frequency and document frequency. ** *`version` (Optional, number)*: If `true`, returns the document version as part of a hit. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. [discrete] === open_point_in_time @@ -1277,7 +1316,7 @@ client.openPointInTime({ index, keep_alive }) Ping the cluster. Get information about whether the cluster is running. -{ref}/index.html[Endpoint documentation] +{ref}/cluster.html[Endpoint documentation] [source,ts] ---- client.ping() @@ -1288,7 +1327,7 @@ client.ping() Create or update a script or search template. Creates or updates a stored script or search template. -{ref}/modules-scripting.html[Endpoint documentation] +{ref}/create-stored-script-api.html[Endpoint documentation] [source,ts] ---- client.putScript({ id, script }) @@ -1297,11 +1336,11 @@ client.putScript({ id, script }) ==== Arguments * *Request (object):* -** *`id` (string)*: Identifier for the stored script or search template. Must be unique within the cluster. -** *`script` ({ lang, options, source })*: Contains the script or search template, its parameters, and its language. -** *`context` (Optional, string)*: Context in which the script or search template should run. 
To prevent errors, the API immediately compiles the script or template in this context. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`id` (string)*: The identifier for the stored script or search template. It must be unique within the cluster. +** *`script` ({ lang, options, source })*: The script or search template, its parameters, and its language. +** *`context` (Optional, string)*: The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. [discrete] === rank_eval @@ -1319,7 +1358,7 @@ client.rankEval({ requests }) * *Request (object):* ** *`requests` ({ id, request, ratings, template_id, params }[])*: A set of typical search requests, together with their provided ratings. -** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. +** *`index` (Optional, string | string[])*: A list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. ** *`metric` (Optional, { precision, recall, mean_reciprocal_rank, dcg, expected_reciprocal_rank })*: Definition of the evaluation metric to calculate. ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. @@ -1577,15 +1616,22 @@ client.renderSearchTemplate({ ... }) ==== Arguments * *Request (object):* -** *`id` (Optional, string)*: ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. +** *`id` (Optional, string)*: The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. ** *`file` (Optional, string)* ** *`params` (Optional, Record)*: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. 
-** *`source` (Optional, string)*: An inline search template. Supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. +** *`source` (Optional, string)*: An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. [discrete] === scripts_painless_execute Run a script. + Runs a script and returns a result. +Use this API to build and test scripts, such as when defining a script for a runtime field. +This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster. + +The API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is. + +Each context requires a script, but additional parameters depend on the context you're using for that script. {painless}/painless-execute-api.html[Endpoint documentation] [source,ts] @@ -1596,9 +1642,9 @@ client.scriptsPainlessExecute({ ... }) ==== Arguments * *Request (object):* -** *`context` (Optional, string)*: The context that the script should run in. -** *`context_setup` (Optional, { document, index, query })*: Additional parameters for the `context`. -** *`script` (Optional, { source, id, params, lang, options })*: The Painless script to execute. +** *`context` (Optional, Enum("painless_test" | "filter" | "score" | "boolean_field" | "date_field" | "double_field" | "geo_point_field" | "ip_field" | "keyword_field" | "long_field" | "composite_field"))*: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed. +** *`context_setup` (Optional, { document, index, query })*: Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`. +** *`script` (Optional, { source, id, params, lang, options })*: The Painless script to run. [discrete] === scroll @@ -1617,7 +1663,7 @@ You can also use the scroll API to specify a new scroll parameter that extends o IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests. -{ref}/search-request-body.html[Endpoint documentation] +{ref}/scroll-api.html[Endpoint documentation] [source,ts] ---- client.scroll({ scroll_id }) @@ -1626,8 +1672,8 @@ client.scroll({ scroll_id }) ==== Arguments * *Request (object):* -** *`scroll_id` (string)*: Scroll ID of the search. -** *`scroll` (Optional, string | -1 | 0)*: Period to retain the search context for scrolling. +** *`scroll_id` (string)*: The scroll ID of the search. +** *`scroll` (Optional, string | -1 | 0)*: The period to retain the search context for scrolling. ** *`rest_total_hits_as_int` (Optional, boolean)*: If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object. [discrete] @@ -1734,6 +1780,138 @@ client.search({ ... }) Search a vector tile. Search a vector tile for geospatial values. +Before using this API, you should be familiar with the Mapbox vector tile specification. +The API returns results as a binary mapbox vector tile. 
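+
+For example, the following is a minimal sketch of a vector tile search with this client; the index name `museums`, the geo field `location`, and the `13/4207/2692` tile coordinates are illustrative placeholders:
+
+[source,js]
+----
+// Hypothetical example: search the `location` field of the `museums`
+// index for the 13/4207/2692 vector tile.
+const response = await client.searchMvt({
+  index: "museums",
+  field: "location",
+  zoom: 13,
+  x: 4207,
+  y: 2692,
+});
+// The response body is a binary Mapbox vector tile (PBF), not JSON.
+console.log(response);
+----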
+
+Internally, Elasticsearch translates a vector tile search API request into a search containing:
+
+* A `geo_bounding_box` query on the ``. The query uses the `//` tile as a bounding box.
+* A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box.
+* Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`.
+* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.
+
+For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search:
+
+----
+GET my-index/_search
+{
+  "size": 10000,
+  "query": {
+    "geo_bounding_box": {
+      "my-geo-field": {
+        "top_left": {
+          "lat": -40.979898069620134,
+          "lon": -45
+        },
+        "bottom_right": {
+          "lat": -66.51326044311186,
+          "lon": 0
+        }
+      }
+    }
+  },
+  "aggregations": {
+    "grid": {
+      "geotile_grid": {
+        "field": "my-geo-field",
+        "precision": 11,
+        "size": 65536,
+        "bounds": {
+          "top_left": {
+            "lat": -40.979898069620134,
+            "lon": -45
+          },
+          "bottom_right": {
+            "lat": -66.51326044311186,
+            "lon": 0
+          }
+        }
+      }
+    },
+    "bounds": {
+      "geo_bounds": {
+        "field": "my-geo-field",
+        "wrap_longitude": false
+      }
+    }
+  }
+}
+----
+
+The API returns results as a binary Mapbox vector tile.
+Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:
+
+* A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query.
+* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.
+* A meta layer containing:
+  * A feature containing a bounding box. By default, this is the bounding box of the tile.
+  * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.
+  * Metadata for the search.
+
+The API only returns features that can display at its zoom level.
+For example, if a polygon feature has no area at its zoom level, the API omits it.
+The API returns errors as UTF-8 encoded JSON.
+
+IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter.
+If you specify both parameters, the query parameter takes precedence.
+
+**Grid precision for geotile**
+
+For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels.
+`grid_precision` represents the additional zoom levels available through these cells. The final precision is computed as follows: ` + grid_precision`.
+For example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15.
+The maximum final precision is 29.
+The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`.
+For example, a value of 8 divides the tile into a grid of 256 x 256 cells.
+The `aggs` layer only contains features for cells with matching data.
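+
+A quick sketch of this arithmetic in plain JavaScript (illustrative only, not an API call; `zoom` and `gridPrecision` mirror the example above):
+
+[source,js]
+----
+// Illustrative arithmetic only, not an Elasticsearch request.
+const zoom = 7; // the tile's zoom level path parameter
+const gridPrecision = 8; // the grid_precision request parameter
+
+const finalPrecision = Math.min(zoom + gridPrecision, 29); // 15
+const cellsPerSide = 2 ** gridPrecision; // 256
+
+console.log(`geotile_grid precision: ${finalPrecision}`);
+console.log(`grid: ${cellsPerSide} x ${cellsPerSide} cells`);
+----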
+ +**Grid precision for geohex** + +For a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`. + +This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation. +The following table maps the H3 resolution for each precision. +For example, if `` is 3 and `grid_precision` is 3, the precision is 6. +At a precision of 6, hexagonal cells have an H3 resolution of 2. +If `` is 3 and `grid_precision` is 4, the precision is 7. +At a precision of 7, hexagonal cells have an H3 resolution of 3. + +| Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | +| --------- | ---------------- | ------------- | ----------------| ----- | +| 1 | 4 | 0 | 122 | 30.5 | +| 2 | 16 | 0 | 122 | 7.625 | +| 3 | 64 | 1 | 842 | 13.15625 | +| 4 | 256 | 1 | 842 | 3.2890625 | +| 5 | 1024 | 2 | 5882 | 5.744140625 | +| 6 | 4096 | 2 | 5882 | 1.436035156 | +| 7 | 16384 | 3 | 41162 | 2.512329102 | +| 8 | 65536 | 3 | 41162 | 0.6280822754 | +| 9 | 262144 | 4 | 288122 | 1.099098206 | +| 10 | 1048576 | 4 | 288122 | 0.2747745514 | +| 11 | 4194304 | 5 | 2016842 | 0.4808526039 | +| 12 | 16777216 | 6 | 14117882 | 0.8414913416 | +| 13 | 67108864 | 6 | 14117882 | 0.2103728354 | +| 14 | 268435456 | 7 | 98825162 | 0.3681524172 | +| 15 | 1073741824 | 8 | 691776122 | 0.644266719 | +| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | +| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | +| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | +| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | +| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | +| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | +| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | +| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | +| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | +| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | +| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | +| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | +| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | +| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | + +Hexagonal cells don't align perfectly on a vector tile. +Some cells may intersect more than one vector tile. +To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. +Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density. {ref}/search-vector-tile-api.html[Endpoint documentation] [source,ts] @@ -1749,20 +1927,20 @@ client.searchMvt({ index, field, zoom, x, y }) ** *`zoom` (number)*: Zoom level for the vector tile to search ** *`x` (number)*: X coordinate for the vector tile to search ** *`y` (number)*: Y coordinate for the vector tile to search -** *`aggs` (Optional, Record)*: Sub-aggregations for the geotile_grid. Supports the following aggregation types: - avg - cardinality - max - min - sum -** *`buffer` (Optional, number)*: Size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. -** *`exact_bounds` (Optional, boolean)*: If false, the meta layer’s feature is the bounding box of the tile. If true, the meta layer’s feature is a bounding box resulting from a geo_bounds aggregation. 
The aggregation runs on values that intersect the // tile with wrap_longitude set to false. The resulting bounding box may be larger than the vector tile. -** *`extent` (Optional, number)*: Size, in pixels, of a side of the tile. Vector tiles are square with equal sides. -** *`fields` (Optional, string | string[])*: Fields to return in the `hits` layer. Supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results. -** *`grid_agg` (Optional, Enum("geotile" | "geohex"))*: Aggregation used to create a grid for the `field`. -** *`grid_precision` (Optional, number)*: Additional zoom levels available through the aggs layer. For example, if is 7 and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results don’t include the aggs layer. -** *`grid_type` (Optional, Enum("grid" | "point" | "centroid"))*: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a geotile_grid cell. If 'grid' each feature is a Polygon of the cells bounding box. If 'point' each feature is a Point that is the centroid of the cell. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query DSL used to filter documents for the search. +** *`aggs` (Optional, Record)*: Sub-aggregations for the geotile_grid. It supports the following aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count` The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. +** *`buffer` (Optional, number)*: The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. +** *`exact_bounds` (Optional, boolean)*: If `false`, the meta layer's feature is the bounding box of the tile. If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. The aggregation runs on values that intersect the `//` tile with `wrap_longitude` set to `false`. The resulting bounding box may be larger than the vector tile. +** *`extent` (Optional, number)*: The size, in pixels, of a side of the tile. Vector tiles are square with equal sides. +** *`fields` (Optional, string | string[])*: The fields to return in the `hits` layer. It supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results. +** *`grid_agg` (Optional, Enum("geotile" | "geohex"))*: The aggregation used to create a grid for the `field`. +** *`grid_precision` (Optional, number)*: Additional zoom levels available through the aggs layer. 
For example, if `` is `7` and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer.
+** *`grid_type` (Optional, Enum("grid" | "point" | "centroid"))*: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a `geotile_grid` cell. If `grid`, each feature is a polygon of the cell's bounding box. If `point`, each feature is a Point that is the centroid of the cell.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The query DSL used to filter documents for the search.
** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name.
-** *`size` (Optional, number)*: Maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don’t include the hits layer.
-** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Sorts features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box’s diagonal length, from longest to shortest.
-** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query.
-** *`with_labels` (Optional, boolean)*: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features.
+** *`size` (Optional, number)*: The maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer.
+** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest.
+** *`track_total_hits` (Optional, boolean | number)*: The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query.
+** *`with_labels` (Optional, boolean)*: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree.
* `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * The aggregation results will provide one central point for each aggregation bucket. All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`.

[discrete]
=== search_shards
@@ -1770,7 +1948,9 @@
Get the search shards.
Get the indices and shards that a search request would be run against.
This information can be useful for working out issues or planning optimizations with routing and shard preferences.
-When filtered aliases are used, the filter is returned as part of the indices section.
+When filtered aliases are used, the filter is returned as part of the `indices` section.
+
+If the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias.

{ref}/search-shards.html[Endpoint documentation]
[source,ts]
----
client.searchShards({ ... })
----
@@ -1781,20 +1961,20 @@ client.searchShards({ ... })

==== Arguments

* *Request (object):*
-** *`index` (Optional, string | string[])*: Returns the indices and shards that a search request would be executed against.
+** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only.
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
-** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default.
-** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.
+** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default.
+** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard.

[discrete]
=== search_template
Run a search with a search template.

-{ref}/search-template.html[Endpoint documentation]
+{ref}/search-template-api.html[Endpoint documentation]
[source,ts]
----
client.searchTemplate({ ...
}) @@ -1803,22 +1983,22 @@ client.searchTemplate({ ... }) ==== Arguments * *Request (object):* -** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. Supports wildcards (*). -** *`explain` (Optional, boolean)*: If `true`, returns detailed information about score calculation as part of each hit. -** *`id` (Optional, string)*: ID of the search template to use. If no source is specified, this parameter is required. +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). +** *`explain` (Optional, boolean)*: If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter. +** *`id` (Optional, string)*: The ID of the search template to use. If no `source` is specified, this parameter is required. ** *`params` (Optional, Record)*: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. ** *`profile` (Optional, boolean)*: If `true`, the query execution is profiled. -** *`source` (Optional, string)*: An inline search template. Supports the same parameters as the search API's request body. Also supports Mustache variables. If no id is specified, this parameter is required. +** *`source` (Optional, string)*: An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required. ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. ** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips are minimized for cross-cluster search requests. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`ignore_throttled` (Optional, boolean)*: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. 
+** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. ** *`scroll` (Optional, string | -1 | 0)*: Specifies how long a consistent view of the index should be maintained for scrolled search. ** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. -** *`rest_total_hits_as_int` (Optional, boolean)*: If true, hits.total are rendered as an integer in the response. +** *`rest_total_hits_as_int` (Optional, boolean)*: If `true`, `hits.total` is rendered as an integer in the response. If `false`, it is rendered as an object. ** *`typed_keys` (Optional, boolean)*: If `true`, the response prefixes aggregation and suggester names with their respective types. [discrete] @@ -1826,12 +2006,10 @@ client.searchTemplate({ ... }) Get terms in an index. Discover terms that match a partial string in an index. -This "terms enum" API is designed for low-latency look-ups used in auto-complete scenarios. +This API is designed for low-latency look-ups used in auto-complete scenarios. -If the `complete` property in the response is false, the returned terms set may be incomplete and should be treated as approximate. -This can occur due to a few reasons, such as a request timeout or a node error. - -NOTE: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. +> info +> The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. {ref}/search-terms-enum.html[Endpoint documentation] [source,ts] @@ -1842,14 +2020,14 @@ client.termsEnum({ index, field }) ==== Arguments * *Request (object):* -** *`index` (string)*: List of data streams, indices, and index aliases to search. Wildcard (*) expressions are supported. +** *`index` (string)*: A list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*` or `_all`. ** *`field` (string)*: The string to match at the start of indexed terms. If not provided, all terms in the field are considered. -** *`size` (Optional, number)*: How many matching terms to return. -** *`timeout` (Optional, string | -1 | 0)*: The maximum length of time to spend collecting results. Defaults to "1s" (one second). If the timeout is exceeded the complete flag set to false in the response and the results may be partial or empty. -** *`case_insensitive` (Optional, boolean)*: When true the provided search string is matched against index terms without case sensitivity. 
-** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Allows to filter an index shard if the provided query rewrites to match_none.
-** *`string` (Optional, string)*: The string after which terms in the index should be returned. Allows for a form of pagination if the last result from one request is passed as the search_after parameter for a subsequent request.
-** *`search_after` (Optional, string)*
+** *`size` (Optional, number)*: The number of matching terms to return.
+** *`timeout` (Optional, string | -1 | 0)*: The maximum length of time to spend collecting results. If the timeout is exceeded, the `complete` flag is set to `false` in the response and the results may be partial or empty.
+** *`case_insensitive` (Optional, boolean)*: When `true`, the provided search string is matched against index terms without case sensitivity.
+** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Filter an index shard if the provided query rewrites to `match_none`.
+** *`string` (Optional, string)*: The string to match at the start of indexed terms. If it is not provided, all terms in the field are considered. > info > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766.
+** *`search_after` (Optional, string)*: The string after which terms in the index should be returned. It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request.

[discrete]
=== termvectors
@@ -1857,6 +2035,44 @@
Get term vector information.

Get information and statistics about terms in the fields of a particular document.

+You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.
+You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.
+For example:
+
+----
+GET /my-index-000001/_termvectors/1?fields=message
+----
+
+Fields can be specified using wildcards, similar to the multi match query.
+
+Term vectors are real-time by default, not near real-time.
+This can be changed by setting the `realtime` parameter to `false`.
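+
+The following is a sketch of an equivalent request with this client; the index name, document ID, and field mirror the example above, and the extra flags are optional:
+
+[source,js]
+----
+// Fetch term vectors for stored document 1, restricted to the
+// `message` field, with term statistics included.
+const response = await client.termvectors({
+  index: "my-index-000001",
+  id: "1",
+  fields: ["message"],
+  term_statistics: true,
+  realtime: false, // near-real-time instead of the default real-time behavior
+});
+console.log(response);
+----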
+
+You can request three types of values: _term information_, _term statistics_, and _field statistics_.
+By default, all term information and field statistics are returned for all fields but term statistics are excluded.
+
+**Term information**
+
+* term frequency in the field (always returned)
+* term positions (`positions: true`)
+* start and end offsets (`offsets: true`)
+* term payloads (`payloads: true`), as base64 encoded bytes
+
+If the requested information wasn't stored in the index, it will be computed on the fly if possible.
+Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.
+
+> warn
+> Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.
+
+**Behaviour**
+
+The term and field statistics are not accurate.
+Deleted documents are not taken into account.
+The information is only retrieved for the shard the requested document resides in.
+The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.
+By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.
+Use `routing` only to hit a particular shard.
+
{ref}/docs-termvectors.html[Endpoint documentation]
[source,ts]
----
client.termvectors({ index })
----
@@ -1866,22 +2082,22 @@ client.termvectors({ index })

==== Arguments

* *Request (object):*
-** *`index` (string)*: Name of the index that contains the document.
-** *`id` (Optional, string)*: Unique identifier of the document.
+** *`index` (string)*: The name of the index that contains the document.
+** *`id` (Optional, string)*: A unique identifier for the document.
** *`doc` (Optional, object)*: An artificial document (a document not present in the index) for which you want to retrieve term vectors.
-** *`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })*: Filter terms based on their tf-idf scores.
-** *`per_field_analyzer` (Optional, Record)*: Overrides the default per-field analyzer.
-** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters.
-** *`field_statistics` (Optional, boolean)*: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies.
+** *`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })*: Filter terms based on their tf-idf scores. This could be useful in order to find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query.
+** *`per_field_analyzer` (Optional, Record)*: Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated.
+** *`fields` (Optional, string | string[])*: A list or wildcard expressions of fields to include in the statistics.
It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. +** *`field_statistics` (Optional, boolean)*: If `true`, the response includes: * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field). ** *`offsets` (Optional, boolean)*: If `true`, the response includes term offsets. ** *`payloads` (Optional, boolean)*: If `true`, the response includes term payloads. ** *`positions` (Optional, boolean)*: If `true`, the response includes term positions. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. ** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`term_statistics` (Optional, boolean)*: If `true`, the response includes term frequency and document frequency. +** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard. +** *`term_statistics` (Optional, boolean)*: If `true`, the response includes: * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term). By default these values are not returned since term statistics can have a serious performance impact. ** *`version` (Optional, number)*: If `true`, returns the document version as part of a hit. -** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: Specific version type. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. [discrete] === update @@ -1941,6 +2157,87 @@ Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. +If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: + +* `read` +* `index` or `write` + +You can specify the query criteria in the request URI or the request body using the same syntax as the search API. + +When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. +When the versions match, the document is updated and the version number is incremented. +If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. +You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. +Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query. 
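+
+For example, a sketch of an update by query that opts to count version conflicts rather than abort; the index name, query, and script are hypothetical:
+
+[source,js]
+----
+// Increment a counter on matching documents; version conflicts are
+// counted in the response instead of failing the request.
+const response = await client.updateByQuery({
+  index: "my-index-000001",
+  conflicts: "proceed",
+  query: {
+    term: { "user.id": "kimchy" },
+  },
+  script: {
+    source: "ctx._source.count++",
+    lang: "painless",
+  },
+});
+console.log(response);
+----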
+
+NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.
+
+While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.
+A bulk update request is performed for each batch of matching documents.
+Any query or update failures cause the update by query request to fail and the failures are shown in the response.
+Any update requests that completed successfully still stick; they are not rolled back.
+
+**Throttling update requests**
+
+To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.
+This pads each batch with a wait time to throttle the rate.
+Set `requests_per_second` to `-1` to turn off throttling.
+
+Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.
+The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.
+By default the batch size is 1000, so if `requests_per_second` is set to `500`:
+
+----
+target_time = 1000 / 500 per second = 2 seconds
+wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+----
+
+Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.
+This is "bursty" instead of "smooth".
+
+**Slicing**
+
+Update by query supports sliced scroll to parallelize the update process.
+This can improve efficiency and provide a convenient way to break the request down into smaller parts.
+
+Setting `slices` to `auto` chooses a reasonable number for most data streams and indices.
+This setting will use one slice per shard, up to a certain limit.
+If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.
+
+Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:
+
+* You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices.
+* Fetching the status of the task for the request with `slices` only contains the status of completed slices.
+* These sub-requests are individually addressable for things like cancellation and rethrottling.
+* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.
+* Canceling the request with slices will cancel each sub-request.
+* Due to the nature of slices, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
+* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.
+* Each sub-request gets a slightly different snapshot of the source data stream or index, though these are all taken at approximately the same time.
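+
+As a rough sketch of the throttling and slicing options described above (the index name and values are illustrative):
+
+[source,js]
+----
+// Automatically sliced, throttled update by query. With `slices: "auto"`,
+// `requests_per_second` is divided proportionally among the sub-requests.
+const response = await client.updateByQuery({
+  index: "my-index-000001",
+  slices: "auto", // one slice per shard, up to a limit
+  requests_per_second: 500, // set to -1 to disable throttling
+  conflicts: "proceed",
+  query: {
+    match_all: {},
+  },
+});
+console.log(response);
+----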
+ +If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: + +* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. +* Update performance scales linearly across available resources with the number of slices. + +Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources. + +**Update the document source** + +Update by query supports scripts to update the document source. +As with the update API, you can set `ctx.op` to change the operation that is performed. + +Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes. +The update by query operation skips updating the document and increments the `noop` counter. + +Set `ctx.op = "delete"` if your script decides that the document should be deleted. +The update by query operation deletes the document and increments the `deleted` counter. + +Update by query supports only `index`, `noop`, and `delete`. +Setting `ctx.op` to anything else is an error. +Setting any other field in `ctx` is an error. +This API enables you to only modify the source of matching documents; you cannot move them. + {ref}/docs-update-by-query.html[Endpoint documentation] [source,ts] ---- @@ -1950,41 +2247,41 @@ client.updateByQuery({ index }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. +** *`index` (string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. ** *`max_docs` (Optional, number)*: The maximum number of documents to update. -** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies the documents to update using the Query DSL. 
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The documents to update using the Query DSL. ** *`script` (Optional, { source, id, params, lang, options })*: The script to run to update the document source or metadata when updating. ** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. -** *`conflicts` (Optional, Enum("abort" | "proceed"))*: What to do if update by query hits version conflicts: `abort` or `proceed`. +** *`conflicts` (Optional, Enum("abort" | "proceed"))*: The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. ** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. -** *`analyzer` (Optional, string)*: Analyzer to use for the query string. -** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. -** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. -** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. 
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`from` (Optional, number)*: Starting offset (default: 0) ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. -** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. -** *`pipeline` (Optional, string)*: ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. -** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. -** *`q` (Optional, string)*: Query in the Lucene query string syntax. -** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search. -** *`request_cache` (Optional, boolean)*: If `true`, the request cache is used for this request. +** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +** *`pipeline` (Optional, string)*: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. +** *`q` (Optional, string)*: A query in the Lucene query string syntax. +** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed. +** *`request_cache` (Optional, boolean)*: If `true`, the request cache is used for this request. It defaults to the index-level setting. ** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. -** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard. -** *`scroll` (Optional, string | -1 | 0)*: Period to retain the search context for scrolling. -** *`scroll_size` (Optional, number)*: Size of the scroll request that powers the operation. -** *`search_timeout` (Optional, string | -1 | 0)*: Explicit timeout for each search request. -** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. Available options: `query_then_fetch`, `dfs_query_then_fetch`. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`scroll` (Optional, string | -1 | 0)*: The period to retain the search context for scrolling. +** *`scroll_size` (Optional, number)*: The size of the scroll request that powers the operation. 
+** *`search_timeout` (Optional, string | -1 | 0)*: An explicit timeout for each search request. By default, there is no timeout. +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. ** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. ** *`sort` (Optional, string[])*: A list of : pairs. -** *`stats` (Optional, string[])*: Specific `tag` of the request for logging and statistical purposes. -** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. -** *`timeout` (Optional, string | -1 | 0)*: Period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. +** *`stats` (Optional, string[])*: The specific `tag` of the request for logging and statistical purposes. +** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. +** *`timeout` (Optional, string | -1 | 0)*: The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. ** *`version` (Optional, boolean)*: If `true`, returns the document version as part of a hit. ** *`version_type` (Optional, boolean)*: Should the document increment the version number (internal) on hit or not (reindex) -** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). -** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API. +** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. 
Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. [discrete] === update_by_query_rethrottle @@ -2003,7 +2300,7 @@ client.updateByQueryRethrottle({ task_id }) * *Request (object):* ** *`task_id` (string)*: The ID for the task. -** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. +** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. To turn off throttling, set it to `-1`. [discrete] === async_search @@ -2881,7 +3178,7 @@ Get task information. Get information about tasks currently running in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. -{ref}/tasks.html[Endpoint documentation] +{ref}/cat-tasks.html[Endpoint documentation] [source,ts] ---- client.cat.tasks({ ... }) @@ -3068,6 +3365,7 @@ client.ccr.followInfo({ index }) [discrete] ==== follow_stats Get follower stats. + Get cross-cluster replication follower stats. The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. @@ -3081,8 +3379,9 @@ client.ccr.followStats({ index }) ==== Arguments * *Request (object):* -** *`index` (string | string[])*: A list of index patterns; use `_all` to perform the operation on all indices -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`index` (string | string[])*: A comma-delimited list of index patterns. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== forget_follower @@ -3119,6 +3418,7 @@ client.ccr.forgetFollower({ index }) [discrete] ==== get_auto_follow_pattern Get auto-follow patterns. + Get cross-cluster replication auto-follow patterns. {ref}/ccr-get-auto-follow-pattern.html[Endpoint documentation] @@ -3131,12 +3431,16 @@ client.ccr.getAutoFollowPattern({ ... }) ==== Arguments * *Request (object):* -** *`name` (Optional, string)*: Specifies the auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`name` (Optional, string)*: The auto-follow pattern collection that you want to retrieve. +If you do not specify a name, the API returns information for all collections. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. [discrete] ==== pause_auto_follow_pattern Pause an auto-follow pattern. + Pause a cross-cluster replication auto-follow pattern. When the API returns, the auto-follow pattern is inactive. New indices that are created on the remote cluster and match the auto-follow patterns are ignored. @@ -3155,12 +3459,15 @@ client.ccr.pauseAutoFollowPattern({ name }) ==== Arguments * *Request (object):* -** *`name` (string)*: The name of the auto follow pattern that should pause discovering new indices to follow. 
-** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`name` (string)*: The name of the auto-follow pattern to pause. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. [discrete] ==== pause_follow Pause a follower. + Pause a cross-cluster replication follower index. The follower index will not fetch any additional operations from the leader index. You can resume following with the resume follower API. @@ -3176,8 +3483,10 @@ client.ccr.pauseFollow({ index }) ==== Arguments * *Request (object):* -** *`index` (string)*: The name of the follower index that should pause following its leader index. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`index` (string)*: The name of the follower index. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. [discrete] ==== put_auto_follow_pattern @@ -3220,6 +3529,7 @@ client.ccr.putAutoFollowPattern({ name, remote_cluster }) [discrete] ==== resume_auto_follow_pattern Resume an auto-follow pattern. + Resume a cross-cluster replication auto-follow pattern that was paused. The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. @@ -3234,8 +3544,10 @@ client.ccr.resumeAutoFollowPattern({ name }) ==== Arguments * *Request (object):* -** *`name` (string)*: The name of the auto follow pattern to resume discovering new indices to follow. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`name` (string)*: The name of the auto-follow pattern to resume. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. [discrete] ==== resume_follow @@ -3271,6 +3583,7 @@ client.ccr.resumeFollow({ index }) [discrete] ==== stats Get cross-cluster replication stats. + This API returns stats about auto-following and the same shard-level stats as the get follower stats API. {ref}/ccr-get-stats.html[Endpoint documentation] @@ -3283,17 +3596,21 @@ client.ccr.stats({ ... }) ==== Arguments * *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. 
+** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== unfollow Unfollow an index. + Convert a cross-cluster replication follower index to a regular index. The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API. -NOTE: Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. +> info +> Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. {ref}/ccr-post-unfollow.html[Endpoint documentation] [source,ts] @@ -3305,8 +3622,10 @@ client.ccr.unfollow({ index }) ==== Arguments * *Request (object):* -** *`index` (string)*: The name of the follower index that should be turned into a regular index. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`index` (string)*: The name of the follower index. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. [discrete] === cluster @@ -3659,8 +3978,15 @@ client.cluster.putSettings({ ... }) [discrete] ==== remote_info Get remote cluster information. -Get all of the configured remote cluster information. -This API returns connection and endpoint information keyed by the configured remote cluster alias. + +Get information about configured remote clusters. +The API returns connection and endpoint information keyed by the configured remote cluster alias. + +> info +> This API returns information that reflects current state on the local cluster. +> The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. +> Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. +> To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the [resolve cluster endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). {ref}/cluster-remote-info.html[Endpoint documentation] [source,ts] @@ -4529,7 +4855,7 @@ Delete an async EQL search. Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. -{ref}/eql-search-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-eql-delete[Endpoint documentation] [source,ts] ---- client.eql.delete({ id }) @@ -4608,8 +4934,12 @@ client.eql.search({ index, query }) ** *`keep_alive` (Optional, string | -1 | 0)* ** *`keep_on_completion` (Optional, boolean)* ** *`wait_for_completion_timeout` (Optional, string | -1 | 0)* -** *`allow_partial_search_results` (Optional, boolean)* -** *`allow_partial_sequence_results` (Optional, boolean)* +** *`allow_partial_search_results` (Optional, boolean)*: Allow query execution also in case of shard failures. 
+If true, the query will keep running and will return results based on the available shards. +For sequences, the behavior can be further refined using `allow_partial_sequence_results`. +** *`allow_partial_sequence_results` (Optional, boolean)*: This flag applies only to sequences and has an effect only if `allow_partial_search_results=true`. +If true, the sequence query will return results based on the available shards, ignoring the others. +If false, the sequence query will return successfully, but will always have empty results. ** *`size` (Optional, number)*: For basic queries, the maximum number of matching events to return. Defaults to 10 ** *`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. ** *`result_position` (Optional, Enum("tail" | "head"))* @@ -4720,6 +5050,29 @@ By default, the request waits for complete query results. If the request completes during the period specified in this parameter, complete query results are returned. Otherwise, the response returns an `is_running` value of `true` and no results. +[discrete] +==== async_query_stop +Stop async ES|QL query. + +This API interrupts the query execution and returns the results so far. +If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it. + +{ref}/esql-async-query-stop-api.html[Endpoint documentation] +[source,ts] +---- +client.esql.asyncQueryStop({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The unique identifier of the query. +A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. +A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. +** *`drop_null_columns` (Optional, boolean)*: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. +If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. + [discrete] ==== query Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query. @@ -4798,7 +5151,7 @@ To list the features that will be affected, use the get features API. IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes. -{ref}/modules-snapshots.html[Endpoint documentation] +{ref}/reset-features-api.html[Endpoint documentation] [source,ts] ---- client.features.resetFeatures({ ... }) ---- @@ -4814,7 +5167,10 @@ client.features.resetFeatures({ ... }) === fleet [discrete] ==== global_checkpoints -Returns the current global checkpoints for an index. This API is design for internal use by the fleet server project. +Get global checkpoints. + +Get the current global checkpoints for an index. +This API is designed for internal use by the Fleet server project. {ref}/get-global-checkpoints.html[Endpoint documentation] [source,ts] ---- client.fleet.globalCheckpoints({ index }) ---- @@ -4841,6 +5197,8 @@ will cause Elasticsearch to immediately return the current global checkpoints. [discrete] ==== msearch Executes several [fleet searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) with a single API request. 
The API follows the same structure as the [multi search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) API. However, similar to the fleet search API, it supports the wait_for_checkpoints parameter. + +{ref}/fleet-multi-search.html[Endpoint documentation] [source,ts] ---- client.fleet.msearch({ ... }) @@ -4874,6 +5232,8 @@ which is true by default. ==== search The purpose of the fleet search api is to provide a search api where the search will only be executed after provided checkpoint has been processed and is visible for searches inside of Elasticsearch. + +{ref}/fleet-search.html[Endpoint documentation] [source,ts] ---- client.fleet.search({ index }) @@ -5070,6 +5430,7 @@ client.ilm.getLifecycle({ ... }) [discrete] ==== get_status Get the ILM status. + Get the current index lifecycle management status. {ref}/ilm-get-status.html[Endpoint documentation] @@ -5247,7 +5608,9 @@ client.ilm.stop({ ... }) [discrete] ==== add_block Add an index block. -Limits the operations allowed on an index by blocking specific operation types. + +Add an index block to an index. +Index blocks limit the operations allowed on an index by blocking specific operation types. {ref}/index-modules-blocks.html[Endpoint documentation] [source,ts] @@ -5259,13 +5622,24 @@ client.indices.addBlock({ index, block }) ==== Arguments * *Request (object):* -** *`index` (string)*: A comma separated list of indices to add a block to -** *`block` (Enum("metadata" | "read" | "read_only" | "write"))*: The block to add (one of read, write, read_only or metadata) -** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. -** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) -** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master -** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout +** *`index` (string)*: A list or wildcard expression of index names used to limit the request. +By default, you must explicitly name the indices you are adding blocks to. +To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. +You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. +** *`block` (Enum("metadata" | "read" | "read_only" | "write"))*: The block type to add to the index. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. 
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +It supports a list of values, such as `open,hidden`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +It can also be set to `-1` to indicate that the request should never timeout. [discrete] ==== analyze @@ -5521,10 +5895,10 @@ Set to `all` or any positive integer up to the total number of shards in the ind [discrete] ==== create_data_stream Create a data stream. -Creates a data stream. + You must have a matching index template with data stream enabled. -{ref}/data-streams.html[Endpoint documentation] +{ref}/indices-create-data-stream.html[Endpoint documentation] [source,ts] ---- client.indices.createDataStream({ name }) @@ -5564,9 +5938,10 @@ client.indices.createFrom({ source, dest }) [discrete] ==== data_streams_stats Get data stream stats. -Retrieves statistics for one or more data streams. -{ref}/data-streams.html[Endpoint documentation] +Get statistics for one or more data streams. + +{ref}/data-stream-stats-api.html[Endpoint documentation] [source,ts] ---- client.indices.dataStreamsStats({ ... }) @@ -5667,7 +6042,7 @@ client.indices.deleteDataLifecycle({ name }) Delete data streams. Deletes one or more data streams and their backing indices. -{ref}/data-streams.html[Endpoint documentation] +{ref}/indices-delete-data-stream.html[Endpoint documentation] [source,ts] ---- client.indices.deleteDataStream({ name }) @@ -5813,9 +6188,10 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== exists_alias Check aliases. -Checks if one or more data stream or index aliases exist. -{ref}/indices-aliases.html[Endpoint documentation] +Check if one or more data stream or index aliases exist. + +https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-exists-alias[Endpoint documentation] [source,ts] ---- client.indices.existsAlias({ name }) @@ -5840,9 +6216,10 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== exists_index_template Check index templates. + Check whether index templates exist. -{ref}/index-templates.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-exists-index-template[Endpoint documentation] [source,ts] ---- client.indices.existsIndexTemplate({ name }) @@ -6109,7 +6486,8 @@ Valid values are: `all`, `open`, `closed`, `hidden`, `none`. [discrete] ==== get_data_lifecycle Get data stream lifecycles. -Retrieves the data stream lifecycle configuration of one or more data streams. + +Get the data stream lifecycle configuration of one or more data streams. {ref}/data-streams-get-lifecycle.html[Endpoint documentation] [source,ts] @@ -6145,9 +6523,10 @@ client.indices.getDataLifecycleStats() [discrete] ==== get_data_stream Get data streams. 
-Retrieves information about one or more data streams. -{ref}/data-streams.html[Endpoint documentation] +Get information about one or more data streams. + +{ref}/indices-get-data-stream.html[Endpoint documentation] [source,ts] ---- client.indices.getDataStream({ ... }) ---- @@ -6907,8 +7286,9 @@ client.indices.reloadSearchAnalyzers({ index }) [discrete] ==== resolve_cluster Resolve the cluster. -Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. -Multiple patterns and remote clusters are supported. + +Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included. +If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. @@ -6917,7 +7297,7 @@ Index and cluster exclusions are also supported with this endpoint. For each cluster in the index expression, information is returned about: -* Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope. +* Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint. * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, or data streams on that cluster that match the index expression. * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). @@ -6926,7 +7306,14 @@ For each cluster in the index expression, information is returned about: For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. -**Advantages of using this endpoint before a cross-cluster search** +## Note on backwards compatibility +The ability to query without an index expression was added in version 8.18, so when +querying remote clusters older than that, the local cluster will send the index +expression `dummy*` to those remote clusters. Thus, if any errors occur, you may see a reference +to that index expression even though you didn't request it. If it causes a problem, you can +instead include an index expression like `*:*` to bypass the issue. + +## Advantages of using this endpoint before a cross-cluster search You may want to exclude a cluster or index from a search when: @@ -6935,27 +7322,56 @@ You may want to exclude a cluster or index from a search when: * The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.) * A remote cluster is an older version that does not support the feature you want to use in your search. 
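+As a quick illustration, the `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` expression above maps onto a client call like the following (a minimal sketch; the index pattern and cluster alias are placeholders):
+
+[source,js]
+----
+const response = await client.indices.resolveCluster({
+  name: "my-index-*,cluster*:my-index-*",
+});
+console.log(response);
+----
+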
+## Test availability of remote clusters + +The `remote/info` endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not. +The remote cluster may be available, while the local cluster is not currently connected to it. + +You can use the `_resolve/cluster` API to attempt to reconnect to remote clusters. +For example, use `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. +The `connected` field in the response will indicate whether it was successful. +If a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status. + {ref}/indices-resolve-cluster-api.html[Endpoint documentation] [source,ts] ---- -client.indices.resolveCluster({ name }) +client.indices.resolveCluster({ ... }) ---- [discrete] ==== Arguments * *Request (object):* -** *`name` (string | string[])*: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. +** *`name` (Optional, string | string[])*: A list of names or index patterns for the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the `<cluster>:<name>` syntax. -** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing +Index and cluster exclusions (e.g., `-cluster1:*`) are also supported. +If no index expression is specified, information about all remote clusters configured on the local cluster +is returned without doing any index matching. +** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request -targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index +options to the `_resolve/cluster` API endpoint that takes no index expression. ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index +options to the `_resolve/cluster` API endpoint that takes no index expression. -** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded or aliased indices are ignored when frozen. Defaults to false. -** *`ignore_unavailable` (Optional, boolean)*: If false, the request returns an error if it targets a missing or closed index. Defaults to false. +** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded, or aliased indices are ignored when frozen. +NOTE: This option is only supported when specifying an index expression. 
You will get an error if you specify index +options to the `_resolve/cluster` API endpoint that takes no index expression. +** *`ignore_unavailable` (Optional, boolean)*: If false, the request returns an error if it targets a missing or closed index. +NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index +options to the `_resolve/cluster` API endpoint that takes no index expression. +** *`timeout` (Optional, string | -1 | 0)*: The maximum time to wait for remote clusters to respond. +If a remote cluster does not respond within this timeout period, the API response +will show the cluster as not connected and include an error message that the +request timed out. + +The default timeout is unset and the query can take +as long as the networking layer is configured to wait for remote clusters that are +not responding (typically 30 seconds). [discrete] ==== resolve_index @@ -7441,10 +7857,10 @@ client.inference.delete({ inference_id }) ==== Arguments * *Request (object):* -** *`inference_id` (string)*: The inference Id +** *`inference_id` (string)*: The inference identifier. ** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type -** *`dry_run` (Optional, boolean)*: When true, the endpoint is not deleted, and a list of ingest processors which reference this endpoint is returned -** *`force` (Optional, boolean)*: When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields +** *`dry_run` (Optional, boolean)*: When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned. +** *`force` (Optional, boolean)*: When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields. [discrete] ==== get @@ -7465,7 +7881,14 @@ client.inference.get({ ... }) [discrete] ==== inference -Perform inference on the service +Perform inference on the service. + +This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. +It returns a response with the results of the tasks. +The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API. + +> info +> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. {ref}/post-inference-api.html[Endpoint documentation] [source,ts] @@ -7477,14 +7900,18 @@ client.inference.inference({ inference_id, input }) ==== Arguments * *Request (object):* -** *`inference_id` (string)*: The inference Id -** *`input` (string | string[])*: Inference input. -Either a string or an array of strings. -** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type -** *`query` (Optional, string)*: Query input, required for rerank task. -Not required for other tasks. 
-** *`task_settings` (Optional, User-defined value)*: Optional task settings -** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the inference request to complete. +** *`inference_id` (string)*: The unique identifier for the inference endpoint. +** *`input` (string | string[])*: The text on which you want to perform the inference task. +It can be a single string or an array. + +> info +> Inference endpoints for the `completion` task type currently only support a single string as input. +** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The type of inference task that the model performs. +** *`query` (Optional, string)*: The query input, which is required only for the `rerank` task. +It is not required for other tasks. +** *`task_settings` (Optional, User-defined value)*: Task settings for the individual inference request. +These settings are specific to the task type you specified and override the task settings specified when initializing the service. +** *`timeout` (Optional, string | -1 | 0)*: The amount of time to wait for the inference request to complete. [discrete] ==== put @@ -7593,9 +8020,10 @@ client.inference.update({ inference_id }) [discrete] ==== delete_geoip_database Delete GeoIP database configurations. + Delete one or more IP geolocation database configurations. -{ref}/delete-geoip-database-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-delete-geoip-database[Endpoint documentation] [source,ts] ---- client.ingest.deleteGeoipDatabase({ id }) @@ -7606,9 +8034,9 @@ client.ingest.deleteGeoipDatabase({ id }) * *Request (object):* ** *`id` (string | string[])*: A list of geoip database configurations to delete -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== delete_ip_location_database @@ -7669,9 +8097,10 @@ client.ingest.geoIpStats() [discrete] ==== get_geoip_database Get GeoIP database configurations. + Get information about one or more IP geolocation database configurations. -{ref}/get-geoip-database-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-get-geoip-database[Endpoint documentation] [source,ts] ---- client.ingest.getGeoipDatabase({ ... }) @@ -7681,7 +8110,7 @@ client.ingest.getGeoipDatabase({ ... }) ==== Arguments * *Request (object):* -** *`id` (Optional, string | string[])*: List of database configuration IDs to retrieve. +** *`id` (Optional, string | string[])*: A list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. @@ -7709,6 +8138,7 @@ A value of `-1` indicates that the request should never time out. [discrete] ==== get_pipeline Get pipelines. + Get information about one or more ingest pipelines. 
This API returns a local reference of the pipeline. @@ -7746,9 +8176,10 @@ client.ingest.processorGrok() [discrete] ==== put_geoip_database Create or update a GeoIP database configuration. + Refer to the create or update IP geolocation database configuration API. -{ref}/put-geoip-database-api.html[Endpoint documentation] +https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-put-geoip-database[Endpoint documentation] [source,ts] ---- client.ingest.putGeoipDatabase({ id, name, maxmind }) @@ -7819,6 +8250,7 @@ When a deprecated ingest pipeline is referenced as the default or final pipeline [discrete] ==== simulate Simulate a pipeline. + Run an ingest pipeline against a set of provided documents. You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. @@ -7833,10 +8265,10 @@ client.ingest.simulate({ docs }) * *Request (object):* ** *`docs` ({ _id, _index, _source }[])*: Sample documents to test in the pipeline. -** *`id` (Optional, string)*: Pipeline to test. -If you don’t specify a `pipeline` in the request body, this parameter is required. -** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*: Pipeline to test. -If you don’t specify the `pipeline` request path parameter, this parameter is required. +** *`id` (Optional, string)*: The pipeline to test. +If you don't specify a `pipeline` in the request body, this parameter is required. +** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*: The pipeline to test. +If you don't specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. ** *`verbose` (Optional, boolean)*: If `true`, the response includes output data for each processor in the executed pipeline. @@ -7845,6 +8277,7 @@ If you specify both this and the request path parameter, the API only uses the r [discrete] ==== delete Delete the license. + When the license expires, your subscription level reverts to Basic. If the operator privileges feature is enabled, only operator users can use this API. @@ -7859,16 +8292,18 @@ client.license.delete({ ... }) ==== Arguments * *Request (object):* -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get Get license information. + Get information about your Elastic license including its type, its status, when it was issued, and when it expires. -NOTE: If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. -If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. +>info +> If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. +> If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. 
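+As a minimal sketch, the check described in the note above can simply be retried from the client (the call takes no required arguments):
+
+[source,js]
+----
+const response = await client.license.get();
+console.log(response);
+----
+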
{ref}/get-license.html[Endpoint documentation] [source,ts] @@ -7909,6 +8344,7 @@ client.license.getTrialStatus() [discrete] ==== post Update the license. + You can update your license at runtime without shutting down your nodes. License updates take effect immediately. If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. @@ -7930,12 +8366,13 @@ client.license.post({ ... }) ** *`license` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid })* ** *`licenses` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid }[])*: A sequence of one or more JSON documents containing the license information. ** *`acknowledge` (Optional, boolean)*: Specifies whether you acknowledge the license changes. -** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. -** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== post_start_basic Start a basic license. + Start an indefinite basic license, which gives access to all the basic features. NOTE: In order to start a basic license, you must not currently have a basic license. @@ -8100,6 +8537,7 @@ client.migration.postFeatureUpgrade() [discrete] ==== clear_trained_model_deployment_cache Clear trained model deployment cache. + Cache will be cleared on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. @@ -8120,6 +8558,7 @@ client.ml.clearTrainedModelDeploymentCache({ model_id }) [discrete] ==== close_job Close anomaly detection jobs. + A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data. If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request. @@ -8143,7 +8582,8 @@ client.ml.closeJob({ job_id }) [discrete] ==== delete_calendar Delete a calendar. -Removes all scheduled events from a calendar, then deletes it. + +Remove all scheduled events from a calendar, then delete it. 
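+A minimal sketch of the call (the calendar ID is a placeholder):
+
+[source,js]
+----
+const response = await client.ml.deleteCalendar({
+  calendar_id: "planned-outages",
+});
+console.log(response);
+----
+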
{ref}/ml-delete-calendar.html[Endpoint documentation] [source,ts] ---- client.ml.deleteCalendar({ calendar_id }) ---- @@ -8235,14 +8675,15 @@ stopping and deleting the datafeed. [discrete] ==== delete_expired_data Delete expired ML data. -Deletes all job results, model snapshots and forecast data that have exceeded + +Delete all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection -jobs by using _all, by specifying * as the , or by omitting the -. +jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the +`<job_id>`. {ref}/ml-delete-expired-data.html[Endpoint documentation] [source,ts] ---- client.ml.deleteExpiredData({ ... }) ---- [discrete] ==== Arguments * *Request (object):* ** *`job_id` (Optional, string)*: Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. ** *`requests_per_second` (Optional, float)*: The desired requests per second for the deletion processes. The default behavior is no throttling. ** *`timeout` (Optional, string | -1 | 0)*: How long can the underlying delete processes run until they are canceled. @@ -8263,6 +8704,7 @@ behavior is no throttling. [discrete] ==== delete_filter Delete a filter. + If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter. @@ -8281,6 +8723,7 @@ client.ml.deleteFilter({ filter_id }) [discrete] ==== delete_forecast Delete forecasts from a job. + By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more @@ -8311,6 +8754,7 @@ error. [discrete] ==== delete_job Delete an anomaly detection job. + All job configuration, model state and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. If you delete a job that has a datafeed, the request @@ -8340,6 +8784,7 @@ job deletion completes. [discrete] ==== delete_model_snapshot Delete a model snapshot. + You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. @@ -8360,6 +8805,7 @@ client.ml.deleteModelSnapshot({ job_id, snapshot_id }) [discrete] ==== delete_trained_model Delete an unreferenced trained model. + The request deletes a trained inference model that is not referenced by an ingest pipeline. {ref}/delete-trained-models.html[Endpoint documentation] @@ -8379,6 +8825,7 @@ client.ml.deleteTrainedModel({ model_id }) [discrete] ==== delete_trained_model_alias Delete a trained model alias. + This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error. @@ -8399,11 +8846,12 @@ client.ml.deleteTrainedModelAlias({ model_alias, model_id }) [discrete] ==== estimate_model_memory Estimate job model memory usage. -Makes an estimation of the memory usage for an anomaly detection job model. -It is based on analysis configuration details for the job and cardinality + +Make an estimation of the memory usage for an anomaly detection job model. +The estimate is based on analysis configuration details for the job and cardinality estimates for the fields it references. -{ref}/ml-apis.html[Endpoint documentation] +{ref}/ml-estimate-model-memory.html[Endpoint documentation] [source,ts] ---- client.ml.estimateModelMemory({ ... 
}) @@ -8431,6 +8879,7 @@ omitted from the request if no detectors have a `by_field_name`, [discrete] ==== evaluate_data_frame Evaluate data frame analytics. + The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth @@ -8453,6 +8902,7 @@ client.ml.evaluateDataFrame({ evaluation, index }) [discrete] ==== explain_data_frame_analytics Explain data frame analytics config. + This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided: @@ -9174,6 +9624,7 @@ client.ml.info() [discrete] ==== open_job Open anomaly detection jobs. + An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. @@ -9236,7 +9687,7 @@ client.ml.postData({ job_id }) [discrete] ==== preview_data_frame_analytics Preview features used by data frame analytics. -Previews the extracted features used by a data frame analytics config. +Preview the extracted features used by a data frame analytics config. {ref}/preview-dfanalytics.html[Endpoint documentation] [source,ts] @@ -9497,6 +9948,7 @@ Up to 10000 items are allowed in each filter. [discrete] ==== put_job Create an anomaly detection job. + If you include a `datafeed_config`, you must have read index privileges on the source index. If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. @@ -10163,7 +10615,7 @@ it will automatically be changed to a value less than the number of hardware thr [discrete] ==== upgrade_job_snapshot Upgrade a snapshot. -Upgrades an anomaly detection model snapshot to the latest major version. +Upgrade an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. @@ -10259,6 +10711,7 @@ before the timeout expires, the request fails and returns an error. [discrete] ==== info Get node information. + By default, the API returns all attributes and core settings for cluster nodes. {ref}/cluster-nodes-info.html[Endpoint documentation] @@ -10288,7 +10741,7 @@ When the Elasticsearch keystore is password protected and not simply obfuscated, Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password. -{ref}/secure-settings.html[Endpoint documentation] +{ref}/cluster-nodes-reload-secure-settings.html[Endpoint documentation] [source,ts] ---- client.nodes.reloadSecureSettings({ ... }) @@ -10793,6 +11246,7 @@ If set to `false`, the API returns immediately and the indexer is stopped asynch [discrete] ==== delete Delete a search application. + Remove a search application and its associated alias. Indices attached to the search application are not removed. 
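+For example, a minimal sketch of the call (the application name is a placeholder):
+
+[source,js]
+----
+const response = await client.searchApplication.delete({
+  name: "my-search-app",
+});
+console.log(response);
+----
+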
{ref}/delete-search-application.html[Endpoint documentation] @@ -10805,7 +11259,7 @@ client.searchApplication.delete({ name }) ==== Arguments * *Request (object):* -** *`name` (string)*: The name of the search application to delete +** *`name` (string)*: The name of the search application to delete. [discrete] ==== delete_behavioral_analytics @@ -10861,7 +11315,7 @@ client.searchApplication.getBehavioralAnalytics({ ... }) Get search applications. Get information about search applications. -{ref}/list-search-applications.html[Endpoint documentation] +{ref}/list-analytics-collection.html[Endpoint documentation] [source,ts] ---- client.searchApplication.list({ ... }) @@ -13094,7 +13548,7 @@ client.simulate.ingest({ docs }) ** *`index` (Optional, string)*: The index to simulate ingesting into. This value can be overridden by specifying an index on each document. If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. -** *`component_template_substitutions` (Optional, Record)*: A map of component template names to substitute component template definition objects. +** *`component_template_substitutions` (Optional, Record)*: A map of component template names to substitute component template definition objects. ** *`index_template_subtitutions` (Optional, Record)*: A map of index template names to substitute index template definition objects. ** *`mapping_addition` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })* ** *`pipeline_substitutions` (Optional, Record)*: Pipelines to test. @@ -14568,7 +15022,6 @@ Valid values are `disabled` and `v1`. [discrete] ==== delete_transform Delete a transform. -Deletes a transform. {ref}/delete-transform.html[Endpoint documentation] [source,ts] @@ -14599,7 +15052,7 @@ client.transform.getNodeStats() [discrete] ==== get_transform Get transforms. -Retrieves configuration information for transforms. +Get configuration information for transforms. {ref}/get-transform.html[Endpoint documentation] [source,ts] @@ -14632,7 +15085,8 @@ be retrieved and then added to another cluster. [discrete] ==== get_transform_stats Get transform stats. -Retrieves usage information for transforms. + +Get usage information for transforms. {ref}/get-transform-stats.html[Endpoint documentation] [source,ts] @@ -14761,7 +15215,7 @@ the exception of privilege checks. [discrete] ==== reset_transform Reset a transform. -Resets a transform. + Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. @@ -14784,11 +15238,11 @@ must be stopped before it can be reset. [discrete] ==== schedule_now_transform Schedule a transform to start now. -Instantly runs a transform to process data. -If you _schedule_now a transform, it will process the new data instantly, -without waiting for the configured frequency interval. After _schedule_now API is called, -the transform will be processed again at now + frequency unless _schedule_now API +Instantly run a transform to process data. +If you run this API, the transform will process the new data instantly, +without waiting for the configured frequency interval. After the API is called, +the transform will be processed again at `now + frequency` unless the API is called again in the meantime. 
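+A minimal sketch of the call (the transform ID is a placeholder):
+
+[source,js]
+----
+const response = await client.transform.scheduleNowTransform({
+  transform_id: "my-transform",
+});
+console.log(response);
+----
+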
{ref}/schedule-now-transform.html[Endpoint documentation] @@ -14807,7 +15261,6 @@ client.transform.scheduleNowTransform({ transform_id }) [discrete] ==== start_transform Start a transform. -Starts a transform. When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping @@ -14915,6 +15368,7 @@ timeout expires, the request fails and returns an error. [discrete] ==== upgrade_transforms Upgrade all transforms. + Transforms are compatible across minor versions and between supported major versions. However, over time, the format of transform configuration information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index a013e291a..491a40a5c 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -846,7 +846,7 @@ export default class Cat { /** * Get task information. Get information about tasks currently running in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/tasks.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/cat-tasks.html | Elasticsearch API documentation} */ async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise async tasks (this: That, params?: T.CatTasksRequest | TB.CatTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts index c5d952b55..6c82f3c5e 100644 --- a/src/api/api/ccr.ts +++ b/src/api/api/ccr.ts @@ -483,7 +483,7 @@ export default class Ccr { } /** - * Unfollow an index. Convert a cross-cluster replication follower index to a regular index. The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API. NOTE: Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. + * Unfollow an index. Convert a cross-cluster replication follower index to a regular index. The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API. > info > Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. 
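+ * @example
+ * // A minimal usage sketch; the follower index name is a placeholder and the
+ * // index must already be paused and closed before it can be unfollowed.
+ * const response = await client.ccr.unfollow({ index: 'follower-index' })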
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/ccr-post-unfollow.html | Elasticsearch API documentation} */ async unfollow (this: That, params: T.CcrUnfollowRequest | TB.CcrUnfollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index 0ad4f7e58..2b0143164 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -469,7 +469,7 @@ export default class Cluster { } /** - * Get remote cluster information. Get all of the configured remote cluster information. This API returns connection and endpoint information keyed by the configured remote cluster alias. + * Get remote cluster information. Get information about configured remote clusters. The API returns connection and endpoint information keyed by the configured remote cluster alias. > info > This API returns information that reflects current state on the local cluster. > The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. > Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. > To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the [resolve cluster endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/cluster-remote-info.html | Elasticsearch API documentation} */ async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest | TB.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/count.ts b/src/api/api/count.ts index fb3cdcc6c..3519ab237 100644 --- a/src/api/api/count.ts +++ b/src/api/api/count.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Count search results. Get the number of documents matching a query. The query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body. The latter must be nested in a `query` key, which is the same as the search API. The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count. + * Count search results. Get the number of documents matching a query. The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. The query is optional. When no query is provided, the API uses `match_all` to count all the documents. The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count. 
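+ * @example
+ * // A minimal sketch of both styles; the index name and query values are placeholders.
+ * const viaQueryString = await client.count({ index: 'my-index', q: 'user.id:kimchy' })
+ * const viaQueryDsl = await client.count({ index: 'my-index', query: { match: { 'user.id': 'kimchy' } } })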
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/search-count.html | Elasticsearch API documentation} */ export default async function CountApi (this: That, params?: T.CountRequest | TB.CountRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/delete_script.ts b/src/api/api/delete_script.ts index 76f7ad4e3..defc9ca40 100644 --- a/src/api/api/delete_script.ts +++ b/src/api/api/delete_script.ts @@ -40,7 +40,7 @@ interface That { transport: Transport } /** * Delete a script or search template. Deletes a stored script or search template. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/modules-scripting.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/delete-stored-script-api.html | Elasticsearch API documentation} */ export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest | TB.DeleteScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts index 87b4d38e7..8991a4b80 100644 --- a/src/api/api/eql.ts +++ b/src/api/api/eql.ts @@ -46,7 +46,7 @@ export default class Eql { /** * Delete an async EQL search. Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/eql-search-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-eql-delete | Elasticsearch API documentation} */ async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.EqlDeleteRequest | TB.EqlDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts index f965ba4d3..2c3144cba 100644 --- a/src/api/api/esql.ts +++ b/src/api/api/esql.ts @@ -149,6 +149,38 @@ export default class Esql { return await this.transport.request({ path, method, querystring, body, meta }, options) } + /** + * Stop async ES|QL query. This API interrupts the query execution and returns the results so far. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it. 
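A rough usage sketch for the new `asyncQueryStop` method; the ES|QL query is illustrative, and it assumes the async query response carries an `id` while the query is still running:

```js
// Submit an async ES|QL query and return immediately.
const submitted = await client.esql.asyncQuery({
  query: "FROM my-index | STATS count = COUNT(*)",
  wait_for_completion_timeout: "0s",
});

// If the query has not finished, stop it and take the partial results.
if (submitted.id != null) {
  const partial = await client.esql.asyncQueryStop({ id: submitted.id });
  console.log(partial);
}
```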
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/esql-async-query-stop-api.html | Elasticsearch API documentation} */ + async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest | TB.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EsqlAsyncQueryStopResponse> + async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest | TB.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EsqlAsyncQueryStopResponse, unknown>> + async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest | TB.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise<T.EsqlAsyncQueryStopResponse> + async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest | TB.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise<any> { + const acceptedPath: string[] = ['id'] + const querystring: Record<string, any> = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_query/async/${encodeURIComponent(params.id.toString())}/stop` + const meta: TransportRequestMetadata = { + name: 'esql.async_query_stop', + pathParts: { + id: params.id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + /** * Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/esql-rest.html | Elasticsearch API documentation} diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts index d16beb8b0..74a5dff82 100644 --- a/src/api/api/explain.ts +++ b/src/api/api/explain.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Explain a document match result. Returns information about why a specific document matches, or doesn’t match, a query. + * Explain a document match result. Get information about why a specific document matches, or doesn't match, a query. It computes a score explanation for a query and a specific document. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/search-explain.html | Elasticsearch API documentation} */ export default async function ExplainApi<TDocument = unknown> (this: That, params: T.ExplainRequest | TB.ExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ExplainResponse<TDocument>> diff --git a/src/api/api/features.ts b/src/api/api/features.ts index 58ca1bffd..055e0c3b9 100644 --- a/src/api/api/features.ts +++ b/src/api/api/features.ts @@ -76,7 +76,7 @@ export default class Features { /** * Reset the features. Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices. WARNING: Intended for development and testing use only. Do not reset features on a production cluster. Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. This deletes all state information stored in system indices. The response code is HTTP 200 if the state is successfully reset for all features. It is HTTP 500 if the reset operation failed for any feature. Note that select features might provide a way to reset particular system indices. Using this API resets all features, both those that are built-in and implemented as plugins. To list the features that will be affected, use the get features API.
IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/modules-snapshots.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/reset-features-api.html | Elasticsearch API documentation} */ async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest | TB.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts index 106cb3679..76cdc6dad 100644 --- a/src/api/api/fleet.ts +++ b/src/api/api/fleet.ts @@ -107,7 +107,7 @@ export default class Fleet { } /** - * Returns the current global checkpoints for an index. This API is design for internal use by the fleet server project. + * Get global checkpoints. Get the current global checkpoints for an index. This API is designed for internal use by the Fleet server project. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/get-global-checkpoints.html | Elasticsearch API documentation} */ async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest | TB.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -140,6 +140,7 @@ export default class Fleet { /** * Executes several [fleet searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) with a single API request. The API follows the same structure as the [multi search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) API. However, similar to the fleet search API, it supports the wait_for_checkpoints parameter. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/fleet-multi-search.html | Elasticsearch API documentation} */ async msearch (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async msearch (this: That, params: T.FleetMsearchRequest | TB.FleetMsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> @@ -211,6 +212,7 @@ export default class Fleet { /** * The purpose of the fleet search api is to provide a search api where the search will only be executed after provided checkpoint has been processed and is visible for searches inside of Elasticsearch. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/fleet-search.html | Elasticsearch API documentation} */ async search (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async search (this: That, params: T.FleetSearchRequest | TB.FleetSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts index badc40939..d84a178a4 100644 --- a/src/api/api/get_script.ts +++ b/src/api/api/get_script.ts @@ -40,7 +40,7 @@ interface That { transport: Transport } /** * Get a script or search template. Retrieves a stored script or search template. 
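A minimal sketch for retrieving a stored script (the script id is a placeholder):

```js
const response = await client.getScript({ id: "my-stored-script" });
// The stored script body, if found, is under response.script.
console.log(response.found, response.script?.source);
```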
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/modules-scripting.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/get-stored-script-api.html | Elasticsearch API documentation} */ export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function GetScriptApi (this: That, params: T.GetScriptRequest | TB.GetScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts index 08aae116b..78607764f 100644 --- a/src/api/api/get_script_context.ts +++ b/src/api/api/get_script_context.ts @@ -40,7 +40,7 @@ interface That { transport: Transport } /** * Get script contexts. Get a list of supported script contexts and their methods. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/8.x/painless-contexts.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/get-script-contexts-api.html | Elasticsearch API documentation} */ export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest | TB.GetScriptContextRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts index 0407a0372..e138d7210 100644 --- a/src/api/api/get_script_languages.ts +++ b/src/api/api/get_script_languages.ts @@ -40,7 +40,7 @@ interface That { transport: Transport } /** * Get script languages. Get a list of available script types, languages, and contexts. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/modules-scripting.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/get-script-languages-api.html | Elasticsearch API documentation} */ export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest | TB.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index c1f58bfd0..59eb8f1ff 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -45,8 +45,8 @@ export default class Indices { } /** - * Add an index block. Limits the operations allowed on an index by blocking specific operation types. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/index-modules-blocks.html | Elasticsearch API documentation} + * Add an index block. Add an index block to an index. Index blocks limit the operations allowed on an index by blocking specific operation types. 
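A hedged sketch of adding an index block (the index name is illustrative; the API accepts the block types `metadata`, `read`, `read_only`, and `write`):

```js
// Disallow metadata changes on the index, such as closing it.
const response = await client.indices.addBlock({
  index: "my-index-000001",
  block: "metadata",
});
console.log(response.acknowledged);
```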
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/index-modules-blocks.html#add-index-block | Elasticsearch API documentation} */ async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptionsWithOutMeta): Promise async addBlock (this: That, params: T.IndicesAddBlockRequest | TB.IndicesAddBlockRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -323,8 +323,8 @@ export default class Indices { } /** - * Create a data stream. Creates a data stream. You must have a matching index template with data stream enabled. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/data-streams.html | Elasticsearch API documentation} + * Create a data stream. You must have a matching index template with data stream enabled. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/indices-create-data-stream.html | Elasticsearch API documentation} */ async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest | TB.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -393,8 +393,8 @@ export default class Indices { } /** - * Get data stream stats. Retrieves statistics for one or more data streams. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/data-streams.html | Elasticsearch API documentation} + * Get data stream stats. Get statistics for one or more data streams. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/data-stream-stats-api.html | Elasticsearch API documentation} */ async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest | TB.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -538,7 +538,7 @@ export default class Indices { /** * Delete data streams. Deletes one or more data streams and their backing indices. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/data-streams.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/indices-delete-data-stream.html | Elasticsearch API documentation} */ async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest | TB.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -735,8 +735,8 @@ export default class Indices { } /** - * Check aliases. Checks if one or more data stream or index aliases exist. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/indices-aliases.html | Elasticsearch API documentation} + * Check aliases. Check if one or more data stream or index aliases exist. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-exists-alias | Elasticsearch API documentation} */ async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async existsAlias (this: That, params: T.IndicesExistsAliasRequest | TB.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -776,7 +776,7 @@ export default class Indices { /** * Check index templates. Check whether index templates exist. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/index-templates.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-exists-index-template | Elasticsearch API documentation} */ async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest | TB.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1062,7 +1062,7 @@ export default class Indices { } /** - * Get data stream lifecycles. Retrieves the data stream lifecycle configuration of one or more data streams. + * Get data stream lifecycles. Get the data stream lifecycle configuration of one or more data streams. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/data-streams-get-lifecycle.html | Elasticsearch API documentation} */ async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest | TB.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1124,8 +1124,8 @@ export default class Indices { } /** - * Get data streams. Retrieves information about one or more data streams. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/data-streams.html | Elasticsearch API documentation} + * Get data streams. Get information about one or more data streams. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/indices-get-data-stream.html | Elasticsearch API documentation} */ async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest | TB.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -1951,17 +1951,18 @@ export default class Indices { } /** - * Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint. For each cluster in the index expression, information is returned about: * Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope. * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, or data streams on that cluster that match the index expression. 
* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. **Advantages of using this endpoint before a cross-cluster search** You may want to exclude a cluster or index from a search when: * A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail. * A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results from that cluster if you include it in a cross-cluster search. * The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.) * A remote cluster is an older version that does not support the feature you want to use in your search. + * Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included. If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint. For each cluster in the index expression, information is returned about: * Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint. * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, or data streams on that cluster that match the index expression. * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. 
## Note on backwards compatibility The ability to query without an index expression was added in version 8.18, so when querying remote clusters older than that, the local cluster will send the index expression `dummy*` to those remote clusters. Thus, if any errors occur, you may see a reference to that index expression even though you didn't request it. If it causes a problem, you can instead include an index expression like `*:*` to bypass the issue. ## Advantages of using this endpoint before a cross-cluster search You may want to exclude a cluster or index from a search when: * A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail. * A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, no results will be returned from that cluster if you include it in a cross-cluster search. * The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.) * A remote cluster is an older version that does not support the feature you want to use in your search. ## Test availability of remote clusters The `remote/info` endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not. The remote cluster may be available, while the local cluster is not currently connected to it. You can use the `_resolve/cluster` API to attempt to reconnect to remote clusters. For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. The `connected` field in the response will indicate whether it was successful. If a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status.
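A sketch of both request shapes enabled by the signature change below (the index expression is illustrative):

```js
// No index expression: report on all configured remote clusters and
// actively attempt to (re)connect to them.
const allRemotes = await client.indices.resolveCluster();

// With an index expression: also report whether matching indices,
// aliases, or data streams exist on each cluster.
const scoped = await client.indices.resolveCluster({
  name: "my-index-*,cluster*:my-index-*",
});
console.log(allRemotes, scoped);
```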
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/indices-resolve-cluster-api.html | Elasticsearch API documentation} */ - async resolveCluster (this: That, params: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesResolveClusterResponse> - async resolveCluster (this: That, params: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesResolveClusterResponse, unknown>> - async resolveCluster (this: That, params: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise<T.IndicesResolveClusterResponse> - async resolveCluster (this: That, params: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise<any> { + async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesResolveClusterResponse> + async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesResolveClusterResponse, unknown>> + async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise<T.IndicesResolveClusterResponse> + async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest | TB.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise<any> { const acceptedPath: string[] = ['name'] const querystring: Record<string, any> = {} const body = undefined + params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue @@ -1971,8 +1972,15 @@ } } - const method = 'GET' - const path = `/_resolve/cluster/${encodeURIComponent(params.name.toString())}` + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_resolve/cluster/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_resolve/cluster' + } const meta: TransportRequestMetadata = { name: 'indices.resolve_cluster', pathParts: { diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index 0ecbd491f..35cfc383a 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -129,7 +129,7 @@ export default class Inference { } /** - * Perform inference on the service + * Perform inference on the service. This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. It returns a response with the results of the tasks. The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API. > info > The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
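A minimal sketch, assuming an inference endpoint named `my-inference-endpoint` was created earlier with the create inference API:

```js
// The task performed (e.g. text embedding, sparse embedding, rerank)
// was fixed when the endpoint was created.
const response = await client.inference.inference({
  inference_id: "my-inference-endpoint",
  input: "The quick brown fox jumps over the lazy dog",
});
console.log(response);
```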
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/post-inference-api.html | Elasticsearch API documentation} */ async inference (this: That, params: T.InferenceInferenceRequest | TB.InferenceInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts index dd67abcfc..9033633b5 100644 --- a/src/api/api/ingest.ts +++ b/src/api/api/ingest.ts @@ -46,7 +46,7 @@ export default class Ingest { /** * Delete GeoIP database configurations. Delete one or more IP geolocation database configurations. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/delete-geoip-database-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-delete-geoip-database | Elasticsearch API documentation} */ async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest | TB.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -172,7 +172,7 @@ export default class Ingest { /** * Get GeoIP database configurations. Get information about one or more IP geolocation database configurations. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/get-geoip-database-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-get-geoip-database | Elasticsearch API documentation} */ async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest | TB.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -322,7 +322,7 @@ export default class Ingest { /** * Create or update a GeoIP database configuration. Refer to the create or update IP geolocation database configuration API. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/put-geoip-database-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-ingest-put-geoip-database | Elasticsearch API documentation} */ async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest | TB.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts index bdd3a0275..1343470db 100644 --- a/src/api/api/knn_search.ts +++ b/src/api/api/knn_search.ts @@ -39,8 +39,8 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. Elasticsearch uses the HNSW algorithm to support efficient kNN search. 
Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. This means the results returned are not always the true k closest neighbors. The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/search-search.html | Elasticsearch API documentation} + * Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. Elasticsearch uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. This means the results returned are not always the true k closest neighbors. The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. A kNN search response has the exact same structure as a search API response. However, certain sections have a meaning specific to kNN search: * The document `_score` is determined by the similarity between the query and document vector. * The `hits.total` object contains the total number of nearest neighbor candidates considered, which is `num_candidates * num_shards`. The `hits.total.relation` will always be `eq`, indicating an exact value. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/knn-search-api.html | Elasticsearch API documentation} */ export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.KnnSearchResponse<TDocument>> export default async function KnnSearchApi<TDocument = unknown> (this: That, params: T.KnnSearchRequest | TB.KnnSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.KnnSearchResponse<TDocument>, unknown>> diff --git a/src/api/api/license.ts b/src/api/api/license.ts index 03656621d..de7a4d8e0 100644 --- a/src/api/api/license.ts +++ b/src/api/api/license.ts @@ -75,7 +75,7 @@ export default class License { } /** - * Get license information. Get information about your Elastic license including its type, its status, when it was issued, and when it expires. NOTE: If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. + * Get license information. Get information about your Elastic license including its type, its status, when it was issued, and when it expires. > info > If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. > If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.
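A minimal sketch:

```js
const response = await client.license.get();
// Per the note above, a 404 right after cluster startup may be transient; retry shortly.
console.log(response.license.type, response.license.status);
```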
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/get-license.html | Elasticsearch API documentation} */ async get (this: That, params?: T.LicenseGetRequest | TB.LicenseGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.LicenseGetResponse> diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts index 4a64fd66d..c5be2c343 100644 --- a/src/api/api/mget.ts +++ b/src/api/api/mget.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Get multiple documents. Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. + * Get multiple documents. Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. **Filter source fields** By default, the `_source` field is returned for every document (if stored). Use the `_source` and `_source_includes` or `_source_excludes` attributes to filter what fields are returned for a particular document. You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions. **Get stored fields** Use the `stored_fields` attribute to specify the set of stored fields you want to retrieve. Any requested fields that are not stored are ignored. You can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/docs-multi-get.html | Elasticsearch API documentation} */ export default async function MgetApi<TDocument = unknown> (this: That, params?: T.MgetRequest | TB.MgetRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MgetResponse<TDocument>> diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 7acdf854c..b59fa7ca2 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -121,7 +121,7 @@ export default class Ml { } /** - * Delete a calendar. Removes all scheduled events from a calendar, then deletes it. + * Delete a calendar. Remove all scheduled events from a calendar, then delete it. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/ml-delete-calendar.html | Elasticsearch API documentation} */ async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteCalendarResponse> @@ -283,7 +283,7 @@ } /** - * Delete expired ML data. Deletes all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using _all, by specifying * as the <job_id>, or by omitting the <job_id>. + * Delete expired ML data. Delete all job results, model snapshots and forecast data that have exceeded their retention days period.
Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/ml-delete-expired-data.html | Elasticsearch API documentation} */ async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest | TB.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteExpiredDataResponse> @@ -537,8 +537,8 @@ } /** - * Estimate job model memory usage. Makes an estimation of the memory usage for an anomaly detection job model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/ml-apis.html | Elasticsearch API documentation} + * Estimate job model memory usage. Make an estimation of the memory usage for an anomaly detection job model. The estimate is based on analysis configuration details for the job and cardinality estimates for the fields it references. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/ml-estimate-model-memory.html | Elasticsearch API documentation} */ async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlEstimateModelMemoryResponse> async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlEstimateModelMemoryResponse, unknown>> @@ -621,7 +621,7 @@ /** * Explain data frame analytics config. This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided: * which fields are included or not in the analysis and why, * how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. - * @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/8.x/explain-dfanalytics.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/explain-dfanalytics.html | Elasticsearch API documentation} */ async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlExplainDataFrameAnalyticsResponse> async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest | TB.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlExplainDataFrameAnalyticsResponse, unknown>> @@ -1764,8 +1764,8 @@ } /** - * Preview features used by data frame analytics. Previews the extracted features used by a data frame analytics config. - * @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/8.x/preview-dfanalytics.html | Elasticsearch API documentation} + * Preview features used by data frame analytics. Preview the extracted features used by a data frame analytics config.
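A minimal sketch (the job id is a placeholder; a config can also be supplied inline instead of an id):

```js
const response = await client.ml.previewDataFrameAnalytics({
  id: "my-dfa-job",
});
// The extracted feature values the analysis would use as input.
console.log(response.feature_values);
```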
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/preview-dfanalytics.html | Elasticsearch API documentation} */ async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> @@ -2875,7 +2875,7 @@ export default class Ml { } /** - * Upgrade a snapshot. Upgrades an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. + * Upgrade a snapshot. Upgrade an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/ml-upgrade-job-model-snapshot.html | Elasticsearch API documentation} */ async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest | TB.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2951,7 +2951,7 @@ export default class Ml { /** * Validate an anomaly detection job. - * @see {@link https://www.elastic.co/guide/en/machine-learning/8.x/ml-jobs.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8 | Elasticsearch API documentation} */ async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptionsWithOutMeta): Promise async validateDetector (this: That, params: T.MlValidateDetectorRequest | TB.MlValidateDetectorRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts index b584a8d1b..8a10dcea9 100644 --- a/src/api/api/monitoring.ts +++ b/src/api/api/monitoring.ts @@ -46,7 +46,7 @@ export default class Monitoring { /** * Send monitoring data. This API is used by the monitoring features to send monitoring data. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/monitor-elasticsearch-cluster.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v8 | Elasticsearch API documentation} */ async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise async bulk (this: That, params: T.MonitoringBulkRequest | TB.MonitoringBulkRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/msearch_template.ts b/src/api/api/msearch_template.ts index 038cc6316..f77fcaa44 100644 --- a/src/api/api/msearch_template.ts +++ b/src/api/api/msearch_template.ts @@ -39,8 +39,8 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Run multiple templated searches. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/search-multi-search.html | Elasticsearch API documentation} + * Run multiple templated searches. Run multiple templated searches with a single request. If you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines. For example: ``` $ cat requests { "index": "my-index" } { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }} { "index": "my-other-index" } { "id": "my-other-search-template", "params": { "query_type": "match_all" }} $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo ``` + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/multi-search-template.html | Elasticsearch API documentation} */ export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest | TB.MsearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/mtermvectors.ts b/src/api/api/mtermvectors.ts index 64fb70d84..fa483319e 100644 --- a/src/api/api/mtermvectors.ts +++ b/src/api/api/mtermvectors.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Get multiple term vectors. You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a `docs` array with all the fetched termvectors. Each element has the structure provided by the termvectors API. + * Get multiple term vectors. Get multiple term vectors with a single request. You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a `docs` array with all the fetched termvectors. Each element has the structure provided by the termvectors API. **Artificial documents** You can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request. The mapping used is determined by the specified `_index`. 
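A sketch mixing an existing document with an artificial one (index, id, and field are illustrative):

```js
const response = await client.mtermvectors({
  index: "my-index",
  docs: [
    { _id: "1" }, // an existing document in my-index
    // An artificial document, analyzed with the mapping of my-index.
    { doc: { message: "How is the weather in Berlin?" } },
  ],
});
console.log(response.docs);
```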
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/docs-multi-termvectors.html | Elasticsearch API documentation} */ export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest | TB.MtermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts index 07c7bb38c..2ba3f899b 100644 --- a/src/api/api/nodes.ts +++ b/src/api/api/nodes.ts @@ -198,7 +198,7 @@ export default class Nodes { /** * Reload the keystore on nodes in the cluster. Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. That is, you can change them on disk and reload them without restarting any nodes in the cluster. When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node. When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/secure-settings.html#reloadable-secure-settings | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/cluster-nodes-reload-secure-settings.html | Elasticsearch API documentation} */ async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest | TB.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts index d853b8064..bbdc56a90 100644 --- a/src/api/api/ping.ts +++ b/src/api/api/ping.ts @@ -40,7 +40,7 @@ interface That { transport: Transport } /** * Ping the cluster. Get information about whether the cluster is running. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/index.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/cluster.html | Elasticsearch API documentation} */ export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function PingApi (this: That, params?: T.PingRequest | TB.PingRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts index 0e0045f84..9645e9ba5 100644 --- a/src/api/api/put_script.ts +++ b/src/api/api/put_script.ts @@ -40,7 +40,7 @@ interface That { transport: Transport } /** * Create or update a script or search template. Creates or updates a stored script or search template. 
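A minimal sketch storing a Painless script (id and source are illustrative):

```js
const response = await client.putScript({
  id: "my-calc-score",
  script: {
    lang: "painless",
    source: "Math.log(_score * 2) + params['my_modifier']",
  },
});
console.log(response.acknowledged);
```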
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/modules-scripting.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/create-stored-script-api.html | Elasticsearch API documentation} */ export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.PutScriptResponse> export default async function PutScriptApi (this: That, params: T.PutScriptRequest | TB.PutScriptRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.PutScriptResponse, unknown>> diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts index 7299c2927..a8da4a4ad 100644 --- a/src/api/api/render_search_template.ts +++ b/src/api/api/render_search_template.ts @@ -46,8 +46,8 @@ export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.RenderSearchTemplateResponse> export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.RenderSearchTemplateResponse, unknown>> export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise<T.RenderSearchTemplateResponse> export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest | TB.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise<any> { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['file', 'params', 'source'] + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['id', 'file', 'params', 'source'] const querystring: Record<string, any> = {} // @ts-expect-error const userBody: any = params?.body diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts index f854c93e1..323402e5d 100644 --- a/src/api/api/scripts_painless_execute.ts +++ b/src/api/api/scripts_painless_execute.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Run a script. Runs a script and returns a result. + * Run a script. Runs a script and returns a result. Use this API to build and test scripts, such as when defining a script for a runtime field. This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster. The API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is. Each context requires a script, but additional parameters depend on the context you're using for that script. * @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/8.x/painless-execute-api.html | Elasticsearch API documentation} */ export default async function ScriptsPainlessExecuteApi<TResult = unknown> (this: That, params?: T.ScriptsPainlessExecuteRequest | TB.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ScriptsPainlessExecuteResponse<TResult>> diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts index 418415d00..ee06cbe41 100644 --- a/src/api/api/scroll.ts +++ b/src/api/api/scroll.ts @@ -40,7 +40,7 @@ interface That { transport: Transport } /** * Run a scrolling search. IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT). The scroll API gets large sets of results from a single scrolling search request.
To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. The search response returns a scroll ID in the `_scroll_id` response body parameter. You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search. You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context. IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/search-request-body.html#request-body-search-scroll | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/scroll-api.html | Elasticsearch API documentation} */ export default async function ScrollApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ScrollResponse<TDocument, TAggregations>> export default async function ScrollApi<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.ScrollRequest | TB.ScrollRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ScrollResponse<TDocument, TAggregations>, unknown>> diff --git a/src/api/api/search_application.ts b/src/api/api/search_application.ts index a555e78d3..97ad1d6b9 100644 --- a/src/api/api/search_application.ts +++ b/src/api/api/search_application.ts @@ -182,7 +182,7 @@ export default class SearchApplication { /** * Get search applications. Get information about search applications. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/list-search-applications.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/list-analytics-collection.html | Elasticsearch API documentation} */ async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationListResponse> async list (this: That, params?: T.SearchApplicationListRequest | TB.SearchApplicationListRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationListResponse, unknown>> diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts index c20a4699d..0a3628e40 100644 --- a/src/api/api/search_mvt.ts +++ b/src/api/api/search_mvt.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Search a vector tile. Search a vector tile for geospatial values. + * Search a vector tile. Search a vector tile for geospatial values. Before using this API, you should be familiar with the Mapbox vector tile specification. The API returns results as a binary mapbox vector tile. Internally, Elasticsearch translates a vector tile search API request into a search containing: * A `geo_bounding_box` query on the `<field>`. The query uses the `<zoom>/<x>/<y>` tile as a bounding box. * A `geotile_grid` or `geohex_grid` aggregation on the `<field>`. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `<zoom>/<x>/<y>` tile as a bounding box. * Optionally, a `geo_bounds` aggregation on the `<field>`. The search only includes this aggregation if the `exact_bounds` parameter is `true`.
* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label. For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search: ``` GET my-index/_search { "size": 10000, "query": { "geo_bounding_box": { "my-geo-field": { "top_left": { "lat": -40.979898069620134, "lon": -45 }, "bottom_right": { "lat": -66.51326044311186, "lon": 0 } } } }, "aggregations": { "grid": { "geotile_grid": { "field": "my-geo-field", "precision": 11, "size": 65536, "bounds": { "top_left": { "lat": -40.979898069620134, "lon": -45 }, "bottom_right": { "lat": -66.51326044311186, "lon": 0 } } } }, "bounds": { "geo_bounds": { "field": "my-geo-field", "wrap_longitude": false } } } } ``` The API returns results as a binary Mapbox vector tile. Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers: * A `hits` layer containing a feature for each `<field>` value matching the `geo_bounding_box` query. * An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data. * A meta layer containing: * A feature containing a bounding box. By default, this is the bounding box of the tile. * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`. * Metadata for the search. The API only returns features that can display at its zoom level. For example, if a polygon feature has no area at its zoom level, the API omits it. The API returns errors as UTF-8 encoded JSON. IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. If you specify both parameters, the query parameter takes precedence. **Grid precision for geotile** For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels. `grid_precision` represents the additional zoom levels available through these cells. The final precision is computed as follows: `<zoom> + grid_precision`. For example, if `<zoom>` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15. The maximum final precision is 29. The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`. For example, a value of 8 divides the tile into a grid of 256 x 256 cells. The `aggs` layer only contains features for cells with matching data. **Grid precision for geohex** For a `grid_agg` of `geohex`, Elasticsearch uses `<zoom>` and `grid_precision` to calculate a final precision as follows: `<zoom> + grid_precision`. This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation. The following table maps the H3 resolution for each precision. For example, if `<zoom>` is 3 and `grid_precision` is 3, the precision is 6. At a precision of 6, hexagonal cells have an H3 resolution of 2. If `<zoom>` is 3 and `grid_precision` is 4, the precision is 7. At a precision of 7, hexagonal cells have an H3 resolution of 3. 
| Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | | --------- | ---------------- | ------------- | ----------------| ----- | | 1 | 4 | 0 | 122 | 30.5 | | 2 | 16 | 0 | 122 | 7.625 | | 3 | 64 | 1 | 842 | 13.15625 | | 4 | 256 | 1 | 842 | 3.2890625 | | 5 | 1024 | 2 | 5882 | 5.744140625 | | 6 | 4096 | 2 | 5882 | 1.436035156 | | 7 | 16384 | 3 | 41162 | 2.512329102 | | 8 | 65536 | 3 | 41162 | 0.6280822754 | | 9 | 262144 | 4 | 288122 | 1.099098206 | | 10 | 1048576 | 4 | 288122 | 0.2747745514 | | 11 | 4194304 | 5 | 2016842 | 0.4808526039 | | 12 | 16777216 | 6 | 14117882 | 0.8414913416 | | 13 | 67108864 | 6 | 14117882 | 0.2103728354 | | 14 | 268435456 | 7 | 98825162 | 0.3681524172 | | 15 | 1073741824 | 8 | 691776122 | 0.644266719 | | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | Hexagonal cells don't align perfectly on a vector tile. Some cells may intersect more than one vector tile. To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/search-vector-tile-api.html | Elasticsearch API documentation} */ export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest | TB.SearchMvtRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/search_shards.ts b/src/api/api/search_shards.ts index e97edb45f..254e37714 100644 --- a/src/api/api/search_shards.ts +++ b/src/api/api/search_shards.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Get the search shards. Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the indices section. + * Get the search shards. Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the `indices` section. If the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias. 
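To make the `geotile` precision arithmetic above concrete, a sketch of a `searchMvt` call in the style of the generated examples; the index, field, and tile coordinates are hypothetical. With `<zoom>` 7 and `grid_precision` 8, the internal `geotile_grid` aggregation uses precision 15:

[source, js]
----
// Fetch the tile at zoom 7, x=37, y=48. The response body is a
// binary Mapbox vector tile (PBF), not JSON.
const response = await client.searchMvt({
  index: "my-index",
  field: "my-geo-field",
  zoom: 7,
  x: 37,
  y: 48,
  grid_agg: "geotile",
  grid_precision: 8,
});
console.log(response);
----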
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/search-shards.html | Elasticsearch API documentation} */ export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest | TB.SearchShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts index e7919043f..06d2e1889 100644 --- a/src/api/api/search_template.ts +++ b/src/api/api/search_template.ts @@ -40,7 +40,7 @@ interface That { transport: Transport } /** * Run a search with a search template. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/search-template.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/search-template-api.html | Elasticsearch API documentation} */ export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest | TB.SearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts index 65fbacf26..fbf04e9f1 100644 --- a/src/api/api/terms_enum.ts +++ b/src/api/api/terms_enum.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Get terms in an index. Discover terms that match a partial string in an index. This "terms enum" API is designed for low-latency look-ups used in auto-complete scenarios. If the `complete` property in the response is false, the returned terms set may be incomplete and should be treated as approximate. This can occur due to a few reasons, such as a request timeout or a node error. NOTE: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. + * Get terms in an index. Discover terms that match a partial string in an index. This API is designed for low-latency look-ups used in auto-complete scenarios. > info > The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/search-terms-enum.html | Elasticsearch API documentation} */ export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest | TB.TermsEnumRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts index 4d1891713..56165c91c 100644 --- a/src/api/api/termvectors.ts +++ b/src/api/api/termvectors.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Get term vector information. Get information and statistics about terms in the fields of a particular document. + * Get term vector information. Get information and statistics about terms in the fields of a particular document. You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request. 
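Looping back to the `terms_enum` change above, a brief sketch of the low-latency auto-complete lookup it describes; the index, field, and prefix string are illustrative only:

[source, js]
----
// Discover terms starting with "kiba" in the "tags" field.
// If response.complete is false, treat the terms as approximate.
const response = await client.termsEnum({
  index: "stackoverflow",
  field: "tags",
  string: "kiba",
});
console.log(response.terms);
----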
You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body. For example: ``` GET /my-index-000001/_termvectors/1?fields=message ``` Fields can be specified using wildcards, similar to the multi match query. Term vectors are real-time by default, not near real-time. This can be changed by setting the `realtime` parameter to `false`. You can request three types of values: _term information_, _term statistics_, and _field statistics_. By default, all term information and field statistics are returned for all fields but term statistics are excluded. **Term information** * term frequency in the field (always returned) * term positions (`positions: true`) * start and end offsets (`offsets: true`) * term payloads (`payloads: true`), as base64 encoded bytes If the requested information wasn't stored in the index, it will be computed on the fly if possible. Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user. > warn > Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16. **Behaviour** The term and field statistics are not accurate. Deleted documents are not taken into account. The information is only retrieved for the shard the requested document resides in. The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. Use `routing` only to hit a particular shard. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/docs-termvectors.html | Elasticsearch API documentation} */ export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest | TB.TermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts index e29db39ef..cf409567f 100644 --- a/src/api/api/transform.ts +++ b/src/api/api/transform.ts @@ -45,7 +45,7 @@ export default class Transform { } /** - * Delete a transform. Deletes a transform. + * Delete a transform. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/delete-transform.html | Elasticsearch API documentation} */ async deleteTransform (this: That, params: T.TransformDeleteTransformRequest | TB.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -106,7 +106,7 @@ export default class Transform { } /** - * Get transforms. Retrieves configuration information for transforms. + * Get transforms. Get configuration information for transforms. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/get-transform.html | Elasticsearch API documentation} */ async getTransform (this: That, params?: T.TransformGetTransformRequest | TB.TransformGetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -146,7 +146,7 @@ export default class Transform { } /** - * Get transform stats. Retrieves usage information for transforms. + * Get transform stats. Get usage information for transforms. 
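The `_termvectors` request embedded in the termvectors docstring above maps to the client as in the following sketch; `term_statistics` is shown explicitly because it is excluded by default:

[source, js]
----
// Real-time term vectors for document 1, limited to the "message"
// field, with term statistics switched on.
const response = await client.termvectors({
  index: "my-index-000001",
  id: "1",
  fields: ["message"],
  term_statistics: true,
});
console.log(response);
----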
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/get-transform-stats.html | Elasticsearch API documentation} */ async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest | TB.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -274,7 +274,7 @@ export default class Transform { } /** - * Reset a transform. Resets a transform. Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. + * Reset a transform. Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/reset-transform.html | Elasticsearch API documentation} */ async resetTransform (this: That, params: T.TransformResetTransformRequest | TB.TransformResetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -306,7 +306,7 @@ export default class Transform { } /** - * Schedule a transform to start now. Instantly runs a transform to process data. If you _schedule_now a transform, it will process the new data instantly, without waiting for the configured frequency interval. After _schedule_now API is called, the transform will be processed again at now + frequency unless _schedule_now API is called again in the meantime. + * Schedule a transform to start now. Instantly run a transform to process data. If you run this API, the transform will process the new data instantly, without waiting for the configured frequency interval. After the API is called, the transform will be processed again at `now + frequency` unless the API is called again in the meantime. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/schedule-now-transform.html | Elasticsearch API documentation} */ async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest | TB.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -338,7 +338,7 @@ export default class Transform { } /** - * Start a transform. Starts a transform. When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform. When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you created the transform, they occur when you start the transform—with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. 
If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. + * Start a transform. When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform. When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you created the transform, they occur when you start the transform—with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/start-transform.html | Elasticsearch API documentation} */ async startTransform (this: That, params: T.TransformStartTransformRequest | TB.TransformStartTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts index dcbe48cea..2caa84fce 100644 --- a/src/api/api/update_by_query.ts +++ b/src/api/api/update_by_query.ts @@ -39,7 +39,7 @@ import * as TB from '../typesWithBodyKey' interface That { transport: Transport } /** - * Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. + * Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `index` or `write` You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. When the versions match, the document is updated and the version number is incremented. If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. 
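Given the mapping-deduction behavior described in the start transform docstring above, a minimal sketch of starting a transform through the client; the transform ID is hypothetical:

[source, js]
----
// Starting the transform creates the destination index if needed,
// using deduced (pivot) or dynamic (latest) mappings as described.
const response = await client.transform.startTransform({
  transform_id: "ecommerce-transform",
});
console.log(response);
----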
Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query. NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number. While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. A bulk update request is performed for each batch of matching documents. Any query or update failures cause the update by query request to fail and the failures are shown in the response. Any update requests that completed successfully still stick; they are not rolled back. **Throttling update requests** To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to turn off throttling. Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default, the batch size is 1000, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". **Slicing** Update by query supports sliced scroll to parallelize the update process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. Setting `slices` to `auto` chooses a reasonable number for most data streams and indices. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks: * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with `slices` only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with slices will cancel each sub-request. * Due to the nature of slices, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated. 
* Each sub-request gets a slightly different snapshot of the source data stream or index, though these are all taken at approximately the same time. If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurt performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. * Update performance scales linearly across available resources with the number of slices. Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources. **Update the document source** Update by query supports scripts to update the document source. As with the update API, you can set `ctx.op` to change the operation that is performed. Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes. The update by query operation skips updating the document and increments the `noop` counter. Set `ctx.op = "delete"` if your script decides that the document should be deleted. The update by query operation deletes the document and increments the `deleted` counter. Update by query supports only `index`, `noop`, and `delete`. Setting `ctx.op` to anything else is an error. Setting any other field in `ctx` is an error. This API enables you to only modify the source of matching documents; you cannot move them. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/docs-update-by-query.html | Elasticsearch API documentation} */ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest | TB.UpdateByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/update_by_query_rethrottle.ts b/src/api/api/update_by_query_rethrottle.ts index 0ead66e3d..9161c3c42 100644 --- a/src/api/api/update_by_query_rethrottle.ts +++ b/src/api/api/update_by_query_rethrottle.ts @@ -40,7 +40,7 @@ interface That { transport: Transport } /** * Throttle an update by query operation. Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts. 
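Pulling together the update by query options discussed above (`conflicts: "proceed"`, automatic slicing, and a `ctx`-based script), a rough sketch in the style of the generated examples; the index, query, and script are illustrative:

[source, js]
----
// Increment a counter on every matching document. Version conflicts
// are counted rather than failing the request, and "auto" lets
// Elasticsearch pick one slice per shard (up to a limit).
const response = await client.updateByQuery({
  index: "my-index-000001",
  conflicts: "proceed",
  slices: "auto",
  query: {
    term: { "user.id": "kimchy" },
  },
  script: {
    source: "ctx._source.count++",
    lang: "painless",
  },
});
console.log(response);
----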
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/docs-update-by-query.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/8.x/docs-update-by-query.html#docs-update-by-query-rethrottle | Elasticsearch API documentation} */ export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest | TB.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> diff --git a/src/api/types.ts b/src/api/types.ts index 50e060835..c04052ea4 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -1119,6 +1119,8 @@ export interface RenderSearchTemplateResponse { template_output: Record } +export type ScriptsPainlessExecutePainlessContext = 'painless_test' | 'filter' | 'score' | 'boolean_field' | 'date_field' | 'double_field' | 'geo_point_field' | 'ip_field' | 'keyword_field' | 'long_field' | 'composite_field' + export interface ScriptsPainlessExecutePainlessContextSetup { document: any index: IndexName @@ -1126,7 +1128,7 @@ export interface ScriptsPainlessExecutePainlessContextSetup { } export interface ScriptsPainlessExecuteRequest extends RequestBase { - context?: string + context?: ScriptsPainlessExecutePainlessContext context_setup?: ScriptsPainlessExecutePainlessContextSetup script?: Script | string } @@ -2243,7 +2245,7 @@ export interface ErrorResponseBase { status: integer } -export type EsqlColumns = ArrayBuffer +export type EsqlResult = ArrayBuffer export type ExpandWildcard = 'all' | 'open' | 'closed' | 'hidden' | 'none' @@ -5663,6 +5665,7 @@ export interface MappingPropertyBase { ignore_above?: integer dynamic?: MappingDynamicMapping fields?: Record + synthetic_source_keep?: MappingSyntheticSourceKeepEnum } export interface MappingRangePropertyBase extends MappingDocValuesPropertyBase { @@ -5769,6 +5772,8 @@ export interface MappingSuggestContext { precision?: integer | string } +export type MappingSyntheticSourceKeepEnum = 'none' | 'arrays' | 'all' + export type MappingTermVectorOption = 'no' | 'yes' | 'with_offsets' | 'with_positions' | 'with_positions_offsets' | 'with_positions_offsets_payloads' | 'with_positions_payloads' export interface MappingTextIndexPrefixes { @@ -8933,6 +8938,7 @@ export interface ClusterComponentTemplateNode { template: ClusterComponentTemplateSummary version?: VersionNumber _meta?: Metadata + deprecated?: boolean } export interface ClusterComponentTemplateSummary { @@ -9249,6 +9255,7 @@ export interface ClusterRemoteInfoClusterRemoteProxyInfo { server_name: string num_proxy_sockets_connected: integer max_proxy_socket_connections: integer + cluster_credentials?: string } export interface ClusterRemoteInfoClusterRemoteSniffInfo { @@ -10343,8 +10350,6 @@ export type EqlSearchResponse = EqlEqlSearchResponseBase> } -export interface EsqlAsyncQueryResponse { - columns?: EsqlColumns - id?: string - is_running: boolean -} +export type EsqlAsyncQueryResponse = EsqlResult export interface EsqlAsyncQueryDeleteRequest extends RequestBase { id: Id @@ -10395,13 +10396,19 @@ export interface EsqlAsyncQueryGetRequest extends RequestBase { wait_for_completion_timeout?: Duration } -export interface EsqlAsyncQueryGetResponse { - columns?: EsqlColumns - is_running: boolean +export type EsqlAsyncQueryGetResponse = EsqlResult + +export 
interface EsqlAsyncQueryStopRequest extends RequestBase { + id: Id + drop_null_columns?: boolean } +export type EsqlAsyncQueryStopResponse = EsqlResult + +export type EsqlQueryEsqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' | 'arrow' + export interface EsqlQueryRequest extends RequestBase { - format?: EsqlEsqlFormat + format?: EsqlQueryEsqlFormat delimiter?: string drop_null_columns?: boolean columnar?: boolean @@ -10413,7 +10420,7 @@ export interface EsqlQueryRequest extends RequestBase { tables?: Record> } -export type EsqlQueryResponse = EsqlColumns +export type EsqlQueryResponse = EsqlResult export interface FeaturesFeature { name: string @@ -11173,7 +11180,8 @@ export interface IndicesMappingLimitSettings { nested_objects?: IndicesMappingLimitSettingsNestedObjects field_name_length?: IndicesMappingLimitSettingsFieldNameLength dimension_fields?: IndicesMappingLimitSettingsDimensionFields - ignore_malformed?: boolean + source?: IndicesMappingLimitSettingsSourceFields + ignore_malformed?: boolean | string } export interface IndicesMappingLimitSettingsDepth { @@ -11196,6 +11204,10 @@ export interface IndicesMappingLimitSettingsNestedObjects { limit?: long } +export interface IndicesMappingLimitSettingsSourceFields { + mode: IndicesSourceMode +} + export interface IndicesMappingLimitSettingsTotalFields { limit?: long | string ignore_dynamic_beyond_limit?: boolean | string @@ -11323,6 +11335,8 @@ export interface IndicesSoftDeletes { retention_lease?: IndicesRetentionLease } +export type IndicesSourceMode = 'DISABLED' | 'STORED' | 'SYNTHETIC' + export interface IndicesStorage { type: IndicesStorageType allow_mmap?: boolean @@ -12260,11 +12274,12 @@ export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult export interface IndicesResolveClusterRequest extends RequestBase { - name: Names + name?: Names allow_no_indices?: boolean expand_wildcards?: ExpandWildcards ignore_throttled?: boolean ignore_unavailable?: boolean + timeout?: Duration } export interface IndicesResolveClusterResolveClusterInfo { @@ -13759,6 +13774,8 @@ export interface MigrationDeprecationsResponse { data_streams: Record node_settings: MigrationDeprecationsDeprecation[] ml_settings: MigrationDeprecationsDeprecation[] + templates: Record + ilm_policies: Record } export interface MigrationGetFeatureUpgradeStatusMigrationFeature { @@ -19503,12 +19520,7 @@ export interface SnapshotRepositoryAnalyzeDetailsInfo { write_elapsed_nanos: DurationValue write_throttled: Duration write_throttled_nanos: DurationValue - writer_node: SnapshotRepositoryAnalyzeNodeInfo -} - -export interface SnapshotRepositoryAnalyzeNodeInfo { - id: Id - name: Name + writer_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo } export interface SnapshotRepositoryAnalyzeReadBlobDetails { @@ -19518,7 +19530,7 @@ export interface SnapshotRepositoryAnalyzeReadBlobDetails { first_byte_time?: Duration first_byte_time_nanos: DurationValue found: boolean - node: SnapshotRepositoryAnalyzeNodeInfo + node: SnapshotRepositoryAnalyzeSnapshotNodeInfo throttled?: Duration throttled_nanos?: DurationValue } @@ -19557,7 +19569,7 @@ export interface SnapshotRepositoryAnalyzeResponse { blob_count: integer blob_path: string concurrency: integer - coordinating_node: SnapshotRepositoryAnalyzeNodeInfo + coordinating_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo delete_elapsed: Duration delete_elapsed_nanos: DurationValue details: 
SnapshotRepositoryAnalyzeDetailsInfo @@ -19576,6 +19588,11 @@ export interface SnapshotRepositoryAnalyzeResponse { summary: SnapshotRepositoryAnalyzeSummaryInfo } +export interface SnapshotRepositoryAnalyzeSnapshotNodeInfo { + id: Id + name: Name +} + export interface SnapshotRepositoryAnalyzeSummaryInfo { read: SnapshotRepositoryAnalyzeReadSummaryInfo write: SnapshotRepositoryAnalyzeWriteSummaryInfo @@ -20463,15 +20480,15 @@ export type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursd export interface WatcherEmail { id?: Id - bcc?: string[] + bcc?: string | string[] body?: WatcherEmailBody - cc?: string[] + cc?: string | string[] from?: string priority?: WatcherEmailPriority - reply_to?: string[] + reply_to?: string | string[] sent_date?: DateTime subject: string - to: string[] + to: string | string[] attachments?: Record } diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index 5f44fcb67..0d2a557ad 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -1157,6 +1157,7 @@ export interface RenderSearchTemplateRequest extends RequestBase { id?: Id /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */ body?: { + id?: Id file?: string params?: Record source?: string @@ -1167,6 +1168,8 @@ export interface RenderSearchTemplateResponse { template_output: Record } +export type ScriptsPainlessExecutePainlessContext = 'painless_test' | 'filter' | 'score' | 'boolean_field' | 'date_field' | 'double_field' | 'geo_point_field' | 'ip_field' | 'keyword_field' | 'long_field' | 'composite_field' + export interface ScriptsPainlessExecutePainlessContextSetup { document: any index: IndexName @@ -1176,7 +1179,7 @@ export interface ScriptsPainlessExecutePainlessContextSetup { export interface ScriptsPainlessExecuteRequest extends RequestBase { /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. 
*/ body?: { - context?: string + context?: ScriptsPainlessExecutePainlessContext context_setup?: ScriptsPainlessExecutePainlessContextSetup script?: Script | string } @@ -2319,7 +2322,7 @@ export interface ErrorResponseBase { status: integer } -export type EsqlColumns = ArrayBuffer +export type EsqlResult = ArrayBuffer export type ExpandWildcard = 'all' | 'open' | 'closed' | 'hidden' | 'none' @@ -5739,6 +5742,7 @@ export interface MappingPropertyBase { ignore_above?: integer dynamic?: MappingDynamicMapping fields?: Record + synthetic_source_keep?: MappingSyntheticSourceKeepEnum } export interface MappingRangePropertyBase extends MappingDocValuesPropertyBase { @@ -5845,6 +5849,8 @@ export interface MappingSuggestContext { precision?: integer | string } +export type MappingSyntheticSourceKeepEnum = 'none' | 'arrays' | 'all' + export type MappingTermVectorOption = 'no' | 'yes' | 'with_offsets' | 'with_positions' | 'with_positions_offsets' | 'with_positions_offsets_payloads' | 'with_positions_payloads' export interface MappingTextIndexPrefixes { @@ -9025,6 +9031,7 @@ export interface ClusterComponentTemplateNode { template: ClusterComponentTemplateSummary version?: VersionNumber _meta?: Metadata + deprecated?: boolean } export interface ClusterComponentTemplateSummary { @@ -9350,6 +9357,7 @@ export interface ClusterRemoteInfoClusterRemoteProxyInfo { server_name: string num_proxy_sockets_connected: integer max_proxy_socket_connections: integer + cluster_credentials?: string } export interface ClusterRemoteInfoClusterRemoteSniffInfo { @@ -10513,8 +10521,6 @@ export type EqlSearchResponse = EqlEqlSearchResponseBase node_settings: MigrationDeprecationsDeprecation[] ml_settings: MigrationDeprecationsDeprecation[] + templates: Record + ilm_policies: Record } export interface MigrationGetFeatureUpgradeStatusMigrationFeature { @@ -20028,12 +20046,7 @@ export interface SnapshotRepositoryAnalyzeDetailsInfo { write_elapsed_nanos: DurationValue write_throttled: Duration write_throttled_nanos: DurationValue - writer_node: SnapshotRepositoryAnalyzeNodeInfo -} - -export interface SnapshotRepositoryAnalyzeNodeInfo { - id: Id - name: Name + writer_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo } export interface SnapshotRepositoryAnalyzeReadBlobDetails { @@ -20043,7 +20056,7 @@ export interface SnapshotRepositoryAnalyzeReadBlobDetails { first_byte_time?: Duration first_byte_time_nanos: DurationValue found: boolean - node: SnapshotRepositoryAnalyzeNodeInfo + node: SnapshotRepositoryAnalyzeSnapshotNodeInfo throttled?: Duration throttled_nanos?: DurationValue } @@ -20082,7 +20095,7 @@ export interface SnapshotRepositoryAnalyzeResponse { blob_count: integer blob_path: string concurrency: integer - coordinating_node: SnapshotRepositoryAnalyzeNodeInfo + coordinating_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo delete_elapsed: Duration delete_elapsed_nanos: DurationValue details: SnapshotRepositoryAnalyzeDetailsInfo @@ -20101,6 +20114,11 @@ export interface SnapshotRepositoryAnalyzeResponse { summary: SnapshotRepositoryAnalyzeSummaryInfo } +export interface SnapshotRepositoryAnalyzeSnapshotNodeInfo { + id: Id + name: Name +} + export interface SnapshotRepositoryAnalyzeSummaryInfo { read: SnapshotRepositoryAnalyzeReadSummaryInfo write: SnapshotRepositoryAnalyzeWriteSummaryInfo @@ -21022,15 +21040,15 @@ export type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursd export interface WatcherEmail { id?: Id - bcc?: string[] + bcc?: string | string[] body?: WatcherEmailBody - cc?: string[] + cc?: 
string | string[] from?: string priority?: WatcherEmailPriority - reply_to?: string[] + reply_to?: string | string[] sent_date?: DateTime subject: string - to: string[] + to: string | string[] attachments?: Record }
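The ES|QL type changes above (`EsqlQueryEsqlFormat`, `EsqlResult`) affect how `esql.query` responses are typed. A small sketch with a hypothetical index; `"csv"` is one of the members of the new format union:

[source, js]
----
// Run an ES|QL query and ask for CSV output. Per the type change,
// the response body is typed as EsqlResult (an ArrayBuffer).
const response = await client.esql.query({
  query: "FROM my-index | LIMIT 10",
  format: "csv",
});
console.log(response);
----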