From 61218d06549db29ea72a748afa37c97ac7066b78 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Mon, 3 Feb 2025 23:41:24 +0400 Subject: [PATCH 1/2] Add include_source_on_error to bulk API --- output/openapi/elasticsearch-openapi.json | 22 +++++++++++++++++++ .../elasticsearch-serverless-openapi.json | 22 +++++++++++++++++++ output/schema/schema-serverless.json | 15 ++++++++++++- output/schema/schema.json | 15 ++++++++++++- output/typescript/types.ts | 1 + specification/_global/bulk/BulkRequest.ts | 5 +++++ specification/_json_spec/bulk.json | 4 ++++ 7 files changed, 82 insertions(+), 2 deletions(-) diff --git a/output/openapi/elasticsearch-openapi.json b/output/openapi/elasticsearch-openapi.json index 4f256bf8dd..1c05b8c9e0 100644 --- a/output/openapi/elasticsearch-openapi.json +++ b/output/openapi/elasticsearch-openapi.json @@ -689,6 +689,9 @@ "description": "Perform multiple `index`, `create`, `delete`, and `update` actions in a single request.\nThis reduces overhead and can greatly increase indexing speed.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. 
Data streams support only the `create` action.\n* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.\n* To use the `delete` action, you must have the `delete` or `write` index privilege.\n* To use the `update` action, you must have the `index` or `write` index privilege.\n* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe actions are specified in the request body using a newline delimited JSON (NDJSON) structure:\n\n```\naction_and_meta_data\\n\noptional_source\\n\naction_and_meta_data\\n\noptional_source\\n\n....\naction_and_meta_data\\n\noptional_source\\n\n```\n\nThe `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API.\nA `create` action fails if a document with the same ID already exists in the target\nAn `index` action adds or replaces a document as necessary.\n\nNOTE: Data streams support only the `create` action.\nTo update or delete a document in a data stream, you must target the backing index containing the document.\n\nAn `update` action expects that the partial doc, upsert, and script and its options are specified on the next line.\n\nA `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.\n\nNOTE: The final line of data must end with a newline character (`\\n`).\nEach newline character may be preceded by a carriage return (`\\r`).\nWhen sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.\nBecause this format uses literal newline characters 
(`\\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.\n\nIf you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.\n\nA note on the format: the idea here is to make processing as fast as possible.\nAs some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.\n\nClient libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible.\n\nThere is no \"correct\" number of actions to perform in a single bulk request.\nExperiment with different settings to find the optimal size for your particular workload.\nNote that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size.\nIt is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.\nFor instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.\n\n**Client suppport for bulk requests**\n\nSome of the officially supported clients provide helpers to assist with bulk requests and reindexing:\n\n* Go: Check out `esutil.BulkIndexer`\n* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`\n* Python: Check out `elasticsearch.helpers.*`\n* JavaScript: Check out `client.helpers.*`\n* .NET: Check out `BulkAllObservable`\n* PHP: Check out bulk indexing.\n\n**Submitting bulk requests with cURL**\n\nIf you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.\nThe latter doesn't preserve newlines. 
For example:\n\n```\n$ cat requests\n{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n$ curl -s -H \"Content-Type: application/x-ndjson\" -XPOST localhost:9200/_bulk --data-binary \"@requests\"; echo\n{\"took\":7, \"errors\": false, \"items\":[{\"index\":{\"_index\":\"test\",\"_id\":\"1\",\"_version\":1,\"result\":\"created\",\"forced_refresh\":false}}]}\n```\n\n**Optimistic concurrency control**\n\nEach `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.\nThe `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.\n\n**Versioning**\n\nEach bulk item can include the version value using the `version` field.\nIt automatically follows the behavior of the index or delete operation based on the `_version` mapping.\nIt also support the `version_type`.\n\n**Routing**\n\nEach bulk item can include the routing value using the `routing` field.\nIt automatically follows the behavior of the index or delete operation based on the `_routing` mapping.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Wait for active shards**\n\nWhen making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.\n\n**Refresh**\n\nControl when the changes made by this request are visible to search.\n\nNOTE: Only the shards that receive the bulk request will be affected by refresh.\nImagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.\nThe request will only wait for those three shards to refresh.\nThe other two shards that make up the 
index do not participate in the `_bulk` request at all.", "operationId": "bulk-1", "parameters": [ + { + "$ref": "#/components/parameters/bulk#include_source_on_error" + }, { "$ref": "#/components/parameters/bulk#list_executed_pipelines" }, @@ -740,6 +743,9 @@ "description": "Perform multiple `index`, `create`, `delete`, and `update` actions in a single request.\nThis reduces overhead and can greatly increase indexing speed.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action.\n* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.\n* To use the `delete` action, you must have the `delete` or `write` index privilege.\n* To use the `update` action, you must have the `index` or `write` index privilege.\n* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe actions are specified in the request body using a newline delimited JSON (NDJSON) structure:\n\n```\naction_and_meta_data\\n\noptional_source\\n\naction_and_meta_data\\n\noptional_source\\n\n....\naction_and_meta_data\\n\noptional_source\\n\n```\n\nThe `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API.\nA `create` action fails if a document with the same ID already exists in the target\nAn `index` action adds or replaces a document as necessary.\n\nNOTE: Data streams support only the 
`create` action.\nTo update or delete a document in a data stream, you must target the backing index containing the document.\n\nAn `update` action expects that the partial doc, upsert, and script and its options are specified on the next line.\n\nA `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.\n\nNOTE: The final line of data must end with a newline character (`\\n`).\nEach newline character may be preceded by a carriage return (`\\r`).\nWhen sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.\nBecause this format uses literal newline characters (`\\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.\n\nIf you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.\n\nA note on the format: the idea here is to make processing as fast as possible.\nAs some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.\n\nClient libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible.\n\nThere is no \"correct\" number of actions to perform in a single bulk request.\nExperiment with different settings to find the optimal size for your particular workload.\nNote that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size.\nIt is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.\nFor instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to 
Elasticsearch.\n\n**Client suppport for bulk requests**\n\nSome of the officially supported clients provide helpers to assist with bulk requests and reindexing:\n\n* Go: Check out `esutil.BulkIndexer`\n* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`\n* Python: Check out `elasticsearch.helpers.*`\n* JavaScript: Check out `client.helpers.*`\n* .NET: Check out `BulkAllObservable`\n* PHP: Check out bulk indexing.\n\n**Submitting bulk requests with cURL**\n\nIf you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.\nThe latter doesn't preserve newlines. For example:\n\n```\n$ cat requests\n{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n$ curl -s -H \"Content-Type: application/x-ndjson\" -XPOST localhost:9200/_bulk --data-binary \"@requests\"; echo\n{\"took\":7, \"errors\": false, \"items\":[{\"index\":{\"_index\":\"test\",\"_id\":\"1\",\"_version\":1,\"result\":\"created\",\"forced_refresh\":false}}]}\n```\n\n**Optimistic concurrency control**\n\nEach `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.\nThe `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. 
See Optimistic concurrency control for more details.\n\n**Versioning**\n\nEach bulk item can include the version value using the `version` field.\nIt automatically follows the behavior of the index or delete operation based on the `_version` mapping.\nIt also support the `version_type`.\n\n**Routing**\n\nEach bulk item can include the routing value using the `routing` field.\nIt automatically follows the behavior of the index or delete operation based on the `_routing` mapping.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Wait for active shards**\n\nWhen making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.\n\n**Refresh**\n\nControl when the changes made by this request are visible to search.\n\nNOTE: Only the shards that receive the bulk request will be affected by refresh.\nImagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.\nThe request will only wait for those three shards to refresh.\nThe other two shards that make up the index do not participate in the `_bulk` request at all.", "operationId": "bulk", "parameters": [ + { + "$ref": "#/components/parameters/bulk#include_source_on_error" + }, { "$ref": "#/components/parameters/bulk#list_executed_pipelines" }, @@ -796,6 +802,9 @@ { "$ref": "#/components/parameters/bulk#index" }, + { + "$ref": "#/components/parameters/bulk#include_source_on_error" + }, { "$ref": "#/components/parameters/bulk#list_executed_pipelines" }, @@ -850,6 +859,9 @@ { "$ref": "#/components/parameters/bulk#index" }, + { + "$ref": "#/components/parameters/bulk#include_source_on_error" + }, { "$ref": "#/components/parameters/bulk#list_executed_pipelines" }, @@ -98594,6 +98606,16 @@ }, "style": "simple" }, + "bulk#include_source_on_error": 
{ + "in": "query", + "name": "include_source_on_error", + "description": "True or false if to include the document source in the error message in case of parsing errors.", + "deprecated": false, + "schema": { + "type": "boolean" + }, + "style": "form" + }, "bulk#list_executed_pipelines": { "in": "query", "name": "list_executed_pipelines", diff --git a/output/openapi/elasticsearch-serverless-openapi.json b/output/openapi/elasticsearch-serverless-openapi.json index 66335414f0..77d4c2bb8e 100644 --- a/output/openapi/elasticsearch-serverless-openapi.json +++ b/output/openapi/elasticsearch-serverless-openapi.json @@ -467,6 +467,9 @@ "description": "Perform multiple `index`, `create`, `delete`, and `update` actions in a single request.\nThis reduces overhead and can greatly increase indexing speed.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. 
Data streams support only the `create` action.\n* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.\n* To use the `delete` action, you must have the `delete` or `write` index privilege.\n* To use the `update` action, you must have the `index` or `write` index privilege.\n* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe actions are specified in the request body using a newline delimited JSON (NDJSON) structure:\n\n```\naction_and_meta_data\\n\noptional_source\\n\naction_and_meta_data\\n\noptional_source\\n\n....\naction_and_meta_data\\n\noptional_source\\n\n```\n\nThe `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API.\nA `create` action fails if a document with the same ID already exists in the target\nAn `index` action adds or replaces a document as necessary.\n\nNOTE: Data streams support only the `create` action.\nTo update or delete a document in a data stream, you must target the backing index containing the document.\n\nAn `update` action expects that the partial doc, upsert, and script and its options are specified on the next line.\n\nA `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.\n\nNOTE: The final line of data must end with a newline character (`\\n`).\nEach newline character may be preceded by a carriage return (`\\r`).\nWhen sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.\nBecause this format uses literal newline characters 
(`\\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.\n\nIf you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.\n\nA note on the format: the idea here is to make processing as fast as possible.\nAs some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.\n\nClient libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible.\n\nThere is no \"correct\" number of actions to perform in a single bulk request.\nExperiment with different settings to find the optimal size for your particular workload.\nNote that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size.\nIt is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.\nFor instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.\n\n**Client suppport for bulk requests**\n\nSome of the officially supported clients provide helpers to assist with bulk requests and reindexing:\n\n* Go: Check out `esutil.BulkIndexer`\n* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`\n* Python: Check out `elasticsearch.helpers.*`\n* JavaScript: Check out `client.helpers.*`\n* .NET: Check out `BulkAllObservable`\n* PHP: Check out bulk indexing.\n\n**Submitting bulk requests with cURL**\n\nIf you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.\nThe latter doesn't preserve newlines. 
For example:\n\n```\n$ cat requests\n{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n$ curl -s -H \"Content-Type: application/x-ndjson\" -XPOST localhost:9200/_bulk --data-binary \"@requests\"; echo\n{\"took\":7, \"errors\": false, \"items\":[{\"index\":{\"_index\":\"test\",\"_id\":\"1\",\"_version\":1,\"result\":\"created\",\"forced_refresh\":false}}]}\n```\n\n**Optimistic concurrency control**\n\nEach `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.\nThe `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.\n\n**Versioning**\n\nEach bulk item can include the version value using the `version` field.\nIt automatically follows the behavior of the index or delete operation based on the `_version` mapping.\nIt also support the `version_type`.\n\n**Routing**\n\nEach bulk item can include the routing value using the `routing` field.\nIt automatically follows the behavior of the index or delete operation based on the `_routing` mapping.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Wait for active shards**\n\nWhen making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.\n\n**Refresh**\n\nControl when the changes made by this request are visible to search.\n\nNOTE: Only the shards that receive the bulk request will be affected by refresh.\nImagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.\nThe request will only wait for those three shards to refresh.\nThe other two shards that make up the 
index do not participate in the `_bulk` request at all.", "operationId": "bulk-1", "parameters": [ + { + "$ref": "#/components/parameters/bulk#include_source_on_error" + }, { "$ref": "#/components/parameters/bulk#list_executed_pipelines" }, @@ -518,6 +521,9 @@ "description": "Perform multiple `index`, `create`, `delete`, and `update` actions in a single request.\nThis reduces overhead and can greatly increase indexing speed.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action.\n* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.\n* To use the `delete` action, you must have the `delete` or `write` index privilege.\n* To use the `update` action, you must have the `index` or `write` index privilege.\n* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe actions are specified in the request body using a newline delimited JSON (NDJSON) structure:\n\n```\naction_and_meta_data\\n\noptional_source\\n\naction_and_meta_data\\n\noptional_source\\n\n....\naction_and_meta_data\\n\noptional_source\\n\n```\n\nThe `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API.\nA `create` action fails if a document with the same ID already exists in the target\nAn `index` action adds or replaces a document as necessary.\n\nNOTE: Data streams support only the 
`create` action.\nTo update or delete a document in a data stream, you must target the backing index containing the document.\n\nAn `update` action expects that the partial doc, upsert, and script and its options are specified on the next line.\n\nA `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.\n\nNOTE: The final line of data must end with a newline character (`\\n`).\nEach newline character may be preceded by a carriage return (`\\r`).\nWhen sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.\nBecause this format uses literal newline characters (`\\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.\n\nIf you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.\n\nA note on the format: the idea here is to make processing as fast as possible.\nAs some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.\n\nClient libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible.\n\nThere is no \"correct\" number of actions to perform in a single bulk request.\nExperiment with different settings to find the optimal size for your particular workload.\nNote that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size.\nIt is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.\nFor instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to 
Elasticsearch.\n\n**Client suppport for bulk requests**\n\nSome of the officially supported clients provide helpers to assist with bulk requests and reindexing:\n\n* Go: Check out `esutil.BulkIndexer`\n* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`\n* Python: Check out `elasticsearch.helpers.*`\n* JavaScript: Check out `client.helpers.*`\n* .NET: Check out `BulkAllObservable`\n* PHP: Check out bulk indexing.\n\n**Submitting bulk requests with cURL**\n\nIf you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.\nThe latter doesn't preserve newlines. For example:\n\n```\n$ cat requests\n{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n$ curl -s -H \"Content-Type: application/x-ndjson\" -XPOST localhost:9200/_bulk --data-binary \"@requests\"; echo\n{\"took\":7, \"errors\": false, \"items\":[{\"index\":{\"_index\":\"test\",\"_id\":\"1\",\"_version\":1,\"result\":\"created\",\"forced_refresh\":false}}]}\n```\n\n**Optimistic concurrency control**\n\nEach `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.\nThe `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. 
See Optimistic concurrency control for more details.\n\n**Versioning**\n\nEach bulk item can include the version value using the `version` field.\nIt automatically follows the behavior of the index or delete operation based on the `_version` mapping.\nIt also support the `version_type`.\n\n**Routing**\n\nEach bulk item can include the routing value using the `routing` field.\nIt automatically follows the behavior of the index or delete operation based on the `_routing` mapping.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Wait for active shards**\n\nWhen making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.\n\n**Refresh**\n\nControl when the changes made by this request are visible to search.\n\nNOTE: Only the shards that receive the bulk request will be affected by refresh.\nImagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.\nThe request will only wait for those three shards to refresh.\nThe other two shards that make up the index do not participate in the `_bulk` request at all.", "operationId": "bulk", "parameters": [ + { + "$ref": "#/components/parameters/bulk#include_source_on_error" + }, { "$ref": "#/components/parameters/bulk#list_executed_pipelines" }, @@ -574,6 +580,9 @@ { "$ref": "#/components/parameters/bulk#index" }, + { + "$ref": "#/components/parameters/bulk#include_source_on_error" + }, { "$ref": "#/components/parameters/bulk#list_executed_pipelines" }, @@ -628,6 +637,9 @@ { "$ref": "#/components/parameters/bulk#index" }, + { + "$ref": "#/components/parameters/bulk#include_source_on_error" + }, { "$ref": "#/components/parameters/bulk#list_executed_pipelines" }, @@ -58505,6 +58517,16 @@ }, "style": "simple" }, + "bulk#include_source_on_error": 
{ + "in": "query", + "name": "include_source_on_error", + "description": "True or false if to include the document source in the error message in case of parsing errors.", + "deprecated": false, + "schema": { + "type": "boolean" + }, + "style": "form" + }, "bulk#list_executed_pipelines": { "in": "query", "name": "list_executed_pipelines", diff --git a/output/schema/schema-serverless.json b/output/schema/schema-serverless.json index 5b54d28ea9..dd32dfb246 100644 --- a/output/schema/schema-serverless.json +++ b/output/schema/schema-serverless.json @@ -11954,6 +11954,19 @@ } ], "query": [ + { + "description": "True or false if to include the document source in the error message in case of parsing errors.", + "name": "include_source_on_error", + "required": false, + "serverDefault": true, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, { + "description": "If `true`, the response will include the ingest pipelines that were run for each index or create.", "name": "list_executed_pipelines", @@ -12093,7 +12106,7 @@ } } ], - "specLocation": "_global/bulk/BulkRequest.ts#L32-L242" + "specLocation": "_global/bulk/BulkRequest.ts#L32-L247" }, { "body": { diff --git a/output/schema/schema.json b/output/schema/schema.json index 1a0df1b9c3..195fabbd6e 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ -22781,6 +22781,19 @@ } ], "query": [ + { + "description": "True or false if to include the document source in the error message in case of parsing errors.", + "name": "include_source_on_error", + "required": false, + "serverDefault": true, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, { "description": "If `true`, the response will include the ingest pipelines that were run for each index or create.", "name": "list_executed_pipelines", @@ -22920,7 +22933,7 @@ } } ], - "specLocation": "_global/bulk/BulkRequest.ts#L32-L242" + 
"specLocation": "_global/bulk/BulkRequest.ts#L32-L247" }, { "kind": "response", diff --git a/output/typescript/types.ts b/output/typescript/types.ts index c9453d8e8d..98ab0b27e9 100644 --- a/output/typescript/types.ts +++ b/output/typescript/types.ts @@ -47,6 +47,7 @@ export type BulkOperationType = 'index' | 'create' | 'update' | 'delete' export interface BulkRequest extends RequestBase { index?: IndexName + include_source_on_error?: boolean list_executed_pipelines?: boolean pipeline?: string refresh?: Refresh diff --git a/specification/_global/bulk/BulkRequest.ts b/specification/_global/bulk/BulkRequest.ts index cd493230be..3d8b2aa997 100644 --- a/specification/_global/bulk/BulkRequest.ts +++ b/specification/_global/bulk/BulkRequest.ts @@ -165,6 +165,11 @@ export interface Request extends RequestBase { index?: IndexName } query_parameters: { + /** + * True or false if to include the document source in the error message in case of parsing errors. + * @server_default true + */ + include_source_on_error?: boolean /** * If `true`, the response will include the ingest pipelines that were run for each index or create. * @server_default false diff --git a/specification/_json_spec/bulk.json b/specification/_json_spec/bulk.json index 3444e1c017..8e35c3fbf1 100644 --- a/specification/_json_spec/bulk.json +++ b/specification/_json_spec/bulk.json @@ -73,6 +73,10 @@ "list_executed_pipelines": { "type": "boolean", "description": "Sets list_executed_pipelines for all incoming documents. Defaults to unset (false)" + }, + "include_source_on_error": { + "type": "boolean", + "description": "True or false if to include the document source in the error message in case of parsing errors. Defaults to true." 
} }, "body": { From bd17f6b403771f589560879435cfce2195f9813e Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Thu, 6 Feb 2025 11:45:45 +0400 Subject: [PATCH 2/2] Add create, index and update --- output/openapi/elasticsearch-openapi.json | 45 +++++++++++++++++++ .../elasticsearch-serverless-openapi.json | 45 +++++++++++++++++++ output/schema/schema-serverless.json | 45 +++++++++++++++++-- output/schema/schema.json | 45 +++++++++++++++++-- output/typescript/types.ts | 3 ++ specification/_global/create/CreateRequest.ts | 5 +++ specification/_global/index/IndexRequest.ts | 5 +++ specification/_global/update/UpdateRequest.ts | 5 +++ specification/_json_spec/create.json | 4 ++ specification/_json_spec/index.json | 4 ++ specification/_json_spec/update.json | 4 ++ 11 files changed, 204 insertions(+), 6 deletions(-) diff --git a/output/openapi/elasticsearch-openapi.json b/output/openapi/elasticsearch-openapi.json index 1c05b8c9e0..f2f08b8861 100644 --- a/output/openapi/elasticsearch-openapi.json +++ b/output/openapi/elasticsearch-openapi.json @@ -6752,6 +6752,9 @@ { "$ref": "#/components/parameters/create#id" }, + { + "$ref": "#/components/parameters/create#include_source_on_error" + }, { "$ref": "#/components/parameters/create#pipeline" }, @@ -6801,6 +6804,9 @@ { "$ref": "#/components/parameters/create#id" }, + { + "$ref": "#/components/parameters/create#include_source_on_error" + }, { "$ref": "#/components/parameters/create#pipeline" }, @@ -7178,6 +7184,9 @@ { "$ref": "#/components/parameters/index#if_seq_no" }, + { + "$ref": "#/components/parameters/index#include_source_on_error" + }, { "$ref": "#/components/parameters/index#op_type" }, @@ -7238,6 +7247,9 @@ { "$ref": "#/components/parameters/index#if_seq_no" }, + { + "$ref": "#/components/parameters/index#include_source_on_error" + }, { "$ref": "#/components/parameters/index#op_type" }, @@ -11276,6 +11288,9 @@ { "$ref": "#/components/parameters/index#if_seq_no" }, + { + "$ref": 
"#/components/parameters/index#include_source_on_error" + }, { "$ref": "#/components/parameters/index#op_type" }, @@ -38399,6 +38414,16 @@ }, "style": "form" }, + { + "in": "query", + "name": "include_source_on_error", + "description": "True or false if to include the document source in the error message in case of parsing errors.", + "deprecated": false, + "schema": { + "type": "boolean" + }, + "style": "form" + }, { "in": "query", "name": "lang", @@ -100112,6 +100137,16 @@ }, "style": "simple" }, + "create#include_source_on_error": { + "in": "query", + "name": "include_source_on_error", + "description": "True or false if to include the document source in the error message in case of parsing errors.", + "deprecated": false, + "schema": { + "type": "boolean" + }, + "style": "form" + }, "create#pipeline": { "in": "query", "name": "pipeline", @@ -101259,6 +101294,16 @@ }, "style": "form" }, + "index#include_source_on_error": { + "in": "query", + "name": "include_source_on_error", + "description": "True or false if to include the document source in the error message in case of parsing errors.", + "deprecated": false, + "schema": { + "type": "boolean" + }, + "style": "form" + }, "index#op_type": { "in": "query", "name": "op_type", diff --git a/output/openapi/elasticsearch-serverless-openapi.json b/output/openapi/elasticsearch-serverless-openapi.json index 77d4c2bb8e..835043f090 100644 --- a/output/openapi/elasticsearch-serverless-openapi.json +++ b/output/openapi/elasticsearch-serverless-openapi.json @@ -3482,6 +3482,9 @@ { "$ref": "#/components/parameters/create#id" }, + { + "$ref": "#/components/parameters/create#include_source_on_error" + }, { "$ref": "#/components/parameters/create#pipeline" }, @@ -3531,6 +3534,9 @@ { "$ref": "#/components/parameters/create#id" }, + { + "$ref": "#/components/parameters/create#include_source_on_error" + }, { "$ref": "#/components/parameters/create#pipeline" }, @@ -3732,6 +3738,9 @@ { "$ref": "#/components/parameters/index#if_seq_no" 
}, + { + "$ref": "#/components/parameters/index#include_source_on_error" + }, { "$ref": "#/components/parameters/index#op_type" }, @@ -3792,6 +3801,9 @@ { "$ref": "#/components/parameters/index#if_seq_no" }, + { + "$ref": "#/components/parameters/index#include_source_on_error" + }, { "$ref": "#/components/parameters/index#op_type" }, @@ -5937,6 +5949,9 @@ { "$ref": "#/components/parameters/index#if_seq_no" }, + { + "$ref": "#/components/parameters/index#include_source_on_error" + }, { "$ref": "#/components/parameters/index#op_type" }, @@ -19870,6 +19885,16 @@ }, "style": "form" }, + { + "in": "query", + "name": "include_source_on_error", + "description": "True or false if to include the document source in the error message in case of parsing errors.", + "deprecated": false, + "schema": { + "type": "boolean" + }, + "style": "form" + }, { "in": "query", "name": "lang", @@ -59393,6 +59418,16 @@ }, "style": "simple" }, + "create#include_source_on_error": { + "in": "query", + "name": "include_source_on_error", + "description": "True or false if to include the document source in the error message in case of parsing errors.", + "deprecated": false, + "schema": { + "type": "boolean" + }, + "style": "form" + }, "create#pipeline": { "in": "query", "name": "pipeline", @@ -59882,6 +59917,16 @@ }, "style": "form" }, + "index#include_source_on_error": { + "in": "query", + "name": "include_source_on_error", + "description": "True or false if to include the document source in the error message in case of parsing errors.", + "deprecated": false, + "schema": { + "type": "boolean" + }, + "style": "form" + }, "index#op_type": { "in": "query", "name": "op_type", diff --git a/output/schema/schema-serverless.json b/output/schema/schema-serverless.json index dd32dfb246..6de411e263 100644 --- a/output/schema/schema-serverless.json +++ b/output/schema/schema-serverless.json @@ -16324,6 +16324,19 @@ } ], "query": [ + { + "description": "True or false if to include the document source in the 
error message in case of parsing errors.", + "name": "include_source_on_error", + "required": false, + "serverDefault": true, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, { "description": "The ID of the pipeline to use to preprocess incoming documents.\nIf the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request.\nIf a final pipeline is configured, it will always run regardless of the value of this parameter.", "name": "pipeline", @@ -16412,7 +16425,7 @@ } } ], - "specLocation": "_global/create/CreateRequest.ts#L32-L184" + "specLocation": "_global/create/CreateRequest.ts#L32-L189" }, { "body": { @@ -20376,6 +20389,19 @@ } } }, + { + "description": "True or false if to include the document source in the error message in case of parsing errors.", + "name": "include_source_on_error", + "required": false, + "serverDefault": true, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, { "description": "Set to `create` to only index the document if it does not already exist (put if absent).\nIf a document with the specified `_id` already exists, the indexing operation will fail.\nThe behavior is the same as using the `/_create` endpoint.\nIf a document ID is specified, this paramater defaults to `index`.\nOtherwise, it defaults to `create`.\nIf the request targets a data stream, an `op_type` of `create` is required.", "name": "op_type", @@ -20489,7 +20515,7 @@ } } ], - "specLocation": "_global/index/IndexRequest.ts#L35-L263" + "specLocation": "_global/index/IndexRequest.ts#L35-L268" }, { "body": { @@ -45344,6 +45370,19 @@ } } }, + { + "description": "True or false if to include the document source in the error message in case of parsing errors.", + "name": "include_source_on_error", + "required": false, + "serverDefault": true, + "type": { + "kind": "instance_of", + "type":
{ + "name": "boolean", + "namespace": "_builtins" + } + } + }, { "description": "The script language.", "name": "lang", @@ -45472,7 +45511,7 @@ } } ], - "specLocation": "_global/update/UpdateRequest.ts#L38-L189" + "specLocation": "_global/update/UpdateRequest.ts#L38-L194" }, { "body": { diff --git a/output/schema/schema.json b/output/schema/schema.json index 195fabbd6e..3b94eb2ccc 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ -23948,6 +23948,19 @@ } ], "query": [ + { + "description": "True or false if to include the document source in the error message in case of parsing errors.", + "name": "include_source_on_error", + "required": false, + "serverDefault": true, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, { "description": "The ID of the pipeline to use to preprocess incoming documents.\nIf the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request.\nIf a final pipeline is configured, it will always run regardless of the value of this parameter.", "name": "pipeline", @@ -24036,7 +24049,7 @@ } } ], - "specLocation": "_global/create/CreateRequest.ts#L32-L184" + "specLocation": "_global/create/CreateRequest.ts#L32-L189" }, { "kind": "response", @@ -28883,6 +28896,19 @@ } } }, + { + "description": "True or false if to include the document source in the error message in case of parsing errors.", + "name": "include_source_on_error", + "required": false, + "serverDefault": true, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, { "description": "Set to `create` to only index the document if it does not already exist (put if absent).\nIf a document with the specified `_id` already exists, the indexing operation will fail.\nThe behavior is the same as using the `/_create` endpoint.\nIf a document ID is specified, this paramater defaults to
`index`.\nOtherwise, it defaults to `create`.\nIf the request targets a data stream, an `op_type` of `create` is required.", "name": "op_type", @@ -28996,7 +29022,7 @@ } } ], - "specLocation": "_global/index/IndexRequest.ts#L35-L263" + "specLocation": "_global/index/IndexRequest.ts#L35-L268" }, { "kind": "response", @@ -43708,6 +43734,19 @@ } } }, + { + "description": "True or false if to include the document source in the error message in case of parsing errors.", + "name": "include_source_on_error", + "required": false, + "serverDefault": true, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, { "description": "The script language.", "name": "lang", @@ -43836,7 +43875,7 @@ } } ], - "specLocation": "_global/update/UpdateRequest.ts#L38-L189" + "specLocation": "_global/update/UpdateRequest.ts#L38-L194" }, { "kind": "response", diff --git a/output/typescript/types.ts b/output/typescript/types.ts index 98ab0b27e9..3bc9ab18a2 100644 --- a/output/typescript/types.ts +++ b/output/typescript/types.ts @@ -156,6 +156,7 @@ export interface CountResponse { export interface CreateRequest extends RequestBase { id: Id index: IndexName + include_source_on_error?: boolean pipeline?: string refresh?: Refresh routing?: Routing @@ -654,6 +655,7 @@ export interface IndexRequest extends RequestBase { index: IndexName if_primary_term?: long if_seq_no?: SequenceNumber + include_source_on_error?: boolean op_type?: OpType pipeline?: string refresh?: Refresh @@ -2059,6 +2061,7 @@ export interface UpdateRequest index: IndexName if_primary_term?: long if_seq_no?: SequenceNumber + include_source_on_error?: boolean lang?: string refresh?: Refresh require_alias?: boolean diff --git a/specification/_global/create/CreateRequest.ts b/specification/_global/create/CreateRequest.ts index 9613d0abfa..596c47d552 100644 --- a/specification/_global/create/CreateRequest.ts +++ b/specification/_global/create/CreateRequest.ts @@ -131,6 +131,11 @@
export interface Request extends RequestBase { index: IndexName } query_parameters: { + /** + * True or false if to include the document source in the error message in case of parsing errors. + * @server_default true + */ + include_source_on_error?: boolean /** * The ID of the pipeline to use to preprocess incoming documents. * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. diff --git a/specification/_global/index/IndexRequest.ts b/specification/_global/index/IndexRequest.ts index 7fc9250337..0a937425cb 100644 --- a/specification/_global/index/IndexRequest.ts +++ b/specification/_global/index/IndexRequest.ts @@ -196,6 +196,11 @@ export interface Request extends RequestBase { * @ext_doc_id optimistic-concurrency */ if_seq_no?: SequenceNumber + /** + * True or false if to include the document source in the error message in case of parsing errors. + * @server_default true + */ + include_source_on_error?: boolean /** * Set to `create` to only index the document if it does not already exist (put if absent). * If a document with the specified `_id` already exists, the indexing operation will fail. diff --git a/specification/_global/update/UpdateRequest.ts b/specification/_global/update/UpdateRequest.ts index d61706c18f..97ca3eb94d 100644 --- a/specification/_global/update/UpdateRequest.ts +++ b/specification/_global/update/UpdateRequest.ts @@ -91,6 +91,11 @@ export interface Request extends RequestBase { * @ext_doc_id optimistic-concurrency */ if_seq_no?: SequenceNumber + /** + * True or false if to include the document source in the error message in case of parsing errors. + * @server_default true + */ + include_source_on_error?: boolean /** * The script language.
* @server_default painless diff --git a/specification/_json_spec/create.json b/specification/_json_spec/create.json index 0b5f6dda34..ffa5e0f26e 100644 --- a/specification/_json_spec/create.json +++ b/specification/_json_spec/create.json @@ -58,6 +58,10 @@ "pipeline": { "type": "string", "description": "The pipeline id to preprocess incoming documents with" + }, + "include_source_on_error": { + "type": "boolean", + "description": "True or false if to include the document source in the error message in case of parsing errors. Defaults to true." } }, "body": { diff --git a/specification/_json_spec/index.json b/specification/_json_spec/index.json index e6a900e9d8..3c3df4f67a 100644 --- a/specification/_json_spec/index.json +++ b/specification/_json_spec/index.json @@ -89,6 +89,10 @@ "require_data_stream": { "type": "boolean", "description": "When true, requires the destination to be a data stream (existing or to-be-created). Default is false" + }, + "include_source_on_error": { + "type": "boolean", + "description": "True or false if to include the document source in the error message in case of parsing errors. Defaults to true." } }, "body": { diff --git a/specification/_json_spec/update.json b/specification/_json_spec/update.json index c95c11e8af..de6ce04872 100644 --- a/specification/_json_spec/update.json +++ b/specification/_json_spec/update.json @@ -77,6 +77,10 @@ "require_alias": { "type": "boolean", "description": "When true, requires destination is an alias. Default is false" + }, + "include_source_on_error": { + "type": "boolean", + "description": "True or false if to include the document source in the error message in case of parsing errors. Defaults to true." } }, "body": {