diff --git a/compiler-rs/Cargo.lock b/compiler-rs/Cargo.lock index abb9dc0567..96cf6aec4a 100644 --- a/compiler-rs/Cargo.lock +++ b/compiler-rs/Cargo.lock @@ -239,6 +239,7 @@ dependencies = [ "clients_schema_to_openapi", "console_error_panic_hook", "serde_json", + "tracing", "tracing-wasm", "wasm-bindgen", "wasm-bindgen-test", diff --git a/compiler-rs/clients_schema_to_openapi/src/components.rs b/compiler-rs/clients_schema_to_openapi/src/components.rs index 5826890f9f..973cdf8d81 100644 --- a/compiler-rs/clients_schema_to_openapi/src/components.rs +++ b/compiler-rs/clients_schema_to_openapi/src/components.rs @@ -17,7 +17,7 @@ use clients_schema::TypeName; use openapiv3::{Components, Parameter, ReferenceOr, RequestBody, Response, Schema, StatusCode}; - +use crate::Configuration; use crate::utils::SchemaName; // Separator used to combine parts of a component path. @@ -29,13 +29,14 @@ use crate::utils::SchemaName; pub const SEPARATOR: char = '-'; pub struct TypesAndComponents<'a> { + pub config: &'a Configuration, pub model: &'a clients_schema::IndexedModel, pub components: &'a mut Components, } impl<'a> TypesAndComponents<'a> { - pub fn new(model: &'a clients_schema::IndexedModel, components: &'a mut Components) -> TypesAndComponents<'a> { - TypesAndComponents { model, components } + pub fn new(config: &'a Configuration, model: &'a clients_schema::IndexedModel, components: &'a mut Components) -> TypesAndComponents<'a> { + TypesAndComponents { config, model, components } } pub fn add_request_body(&mut self, endpoint: &str, body: RequestBody) -> ReferenceOr { diff --git a/compiler-rs/clients_schema_to_openapi/src/lib.rs b/compiler-rs/clients_schema_to_openapi/src/lib.rs index 2f6048147e..34bbd5cf30 100644 --- a/compiler-rs/clients_schema_to_openapi/src/lib.rs +++ b/compiler-rs/clients_schema_to_openapi/src/lib.rs @@ -27,13 +27,27 @@ use openapiv3::{Components, OpenAPI}; use clients_schema::transform::ExpandConfig; use crate::components::TypesAndComponents; +pub struct 
Configuration { + pub flavor: Option, + pub lift_enum_descriptions: bool, +} + +impl Default for Configuration { + fn default() -> Self { + Self { + flavor: None, + lift_enum_descriptions: true, + } + } +} + /// Convert an API model into an OpenAPI v3 schema, optionally filtered for a given flavor -pub fn convert_schema(mut schema: IndexedModel, flavor: Option) -> anyhow::Result { +pub fn convert_schema(mut schema: IndexedModel, config: Configuration) -> anyhow::Result { // Expand generics schema = clients_schema::transform::expand_generics(schema, ExpandConfig::default())?; // Filter flavor - let filter: Option) -> bool> = match flavor { + let filter: Option) -> bool> = match config.flavor { None => None, Some(Flavor::Stack) => Some(|a| { // Generate only public items for Stack @@ -49,7 +63,7 @@ pub fn convert_schema(mut schema: IndexedModel, flavor: Option) -> anyho schema = clients_schema::transform::filter_availability(schema, filter)?; } - convert_expanded_schema(&schema) + convert_expanded_schema(&schema, &config) } /// Convert an API model into an OpenAPI v3 schema. The input model must have all generics expanded, conversion @@ -58,7 +72,7 @@ pub fn convert_schema(mut schema: IndexedModel, flavor: Option) -> anyho /// Note: there are ways to represent [generics in JSON Schema], but its unlikely that tooling will understand it. 
/// /// [generics in JSON Schema]: https://json-schema.org/blog/posts/dynamicref-and-generics -pub fn convert_expanded_schema(model: &IndexedModel) -> anyhow::Result { +pub fn convert_expanded_schema(model: &IndexedModel, config: &Configuration) -> anyhow::Result { let mut openapi = OpenAPI { openapi: "3.0.3".into(), info: info(model), @@ -87,7 +101,7 @@ pub fn convert_expanded_schema(model: &IndexedModel) -> anyhow::Result extensions: Default::default(), }; - let mut tac = TypesAndComponents::new(model, openapi.components.as_mut().unwrap()); + let mut tac = TypesAndComponents::new(config, model, openapi.components.as_mut().unwrap()); // Endpoints for endpoint in &model.endpoints { diff --git a/compiler-rs/clients_schema_to_openapi/src/main.rs b/compiler-rs/clients_schema_to_openapi/src/main.rs index 7f255be80a..2cf3193172 100644 --- a/compiler-rs/clients_schema_to_openapi/src/main.rs +++ b/compiler-rs/clients_schema_to_openapi/src/main.rs @@ -23,6 +23,7 @@ use clients_schema::Flavor; use tracing::Level; use tracing_subscriber::fmt::format::FmtSpan; use tracing_subscriber::FmtSubscriber; +use clients_schema_to_openapi::Configuration; fn main() -> anyhow::Result<()> { let cli = Cli::parse(); @@ -83,7 +84,12 @@ impl Cli { Some(SchemaFlavor::Serverless) => Some(Flavor::Serverless), }; - let openapi = clients_schema_to_openapi::convert_schema(model, flavor)?; + let config = Configuration { + flavor, + ..Default::default() + }; + + let openapi = clients_schema_to_openapi::convert_schema(model, config)?; let output: Box = { if let Some(output) = self.output { diff --git a/compiler-rs/clients_schema_to_openapi/src/paths.rs b/compiler-rs/clients_schema_to_openapi/src/paths.rs index 7ea9ed30c4..22cf4f7a4d 100644 --- a/compiler-rs/clients_schema_to_openapi/src/paths.rs +++ b/compiler-rs/clients_schema_to_openapi/src/paths.rs @@ -62,7 +62,7 @@ pub fn add_endpoint( fn parameter_data(prop: &Property, in_path: bool, tac: &mut TypesAndComponents) -> anyhow::Result { 
Ok(ParameterData { name: prop.name.clone(), - description: prop.description.clone(), + description: tac.property_description(prop)?, required: in_path || prop.required, // Path parameters are always required deprecated: Some(prop.deprecation.is_some()), format: ParameterSchemaOrContent::Schema(tac.convert_value_of(&prop.typ)?), diff --git a/compiler-rs/clients_schema_to_openapi/src/schemas.rs b/compiler-rs/clients_schema_to_openapi/src/schemas.rs index 210755056b..d7c47fabf0 100644 --- a/compiler-rs/clients_schema_to_openapi/src/schemas.rs +++ b/compiler-rs/clients_schema_to_openapi/src/schemas.rs @@ -15,18 +15,15 @@ // specific language governing permissions and limitations // under the License. +use std::fmt::Write; use anyhow::bail; -use clients_schema::{ - Body, Enum, Interface, LiteralValueValue, PropertiesBody, Property, Request, Response, TypeAlias, - TypeAliasVariants, TypeDefinition, TypeName, ValueOf, -}; +use clients_schema::{ArrayOf, Body, Enum, EnumMember, Interface, LiteralValueValue, PropertiesBody, Property, Request, Response, TypeAlias, TypeAliasVariants, TypeDefinition, TypeName, ValueOf}; use indexmap::IndexMap; use openapiv3::{ AdditionalProperties, ArrayType, Discriminator, ExternalDocumentation, NumberType, ObjectType, ReferenceOr, Schema, SchemaData, SchemaKind, StringType, Type, }; use openapiv3::SchemaKind::AnyOf; - use crate::components::TypesAndComponents; use crate::utils::{IntoSchema, ReferenceOrBoxed, SchemaName}; @@ -249,7 +246,7 @@ impl<'a> TypesAndComponents<'a> { let mut result = self.convert_value_of(&prop.typ)?; // TODO: how can we just wrap a reference so that we can add docs? if let ReferenceOr::Item(ref mut schema) = &mut result { - self.fill_data_with_prop(&mut schema.schema_data, prop); + self.fill_data_with_prop(&mut schema.schema_data, prop)?; } Ok(result) } @@ -468,15 +465,171 @@ impl<'a> TypesAndComponents<'a> { // TODO: base.codegen_names as extension? 
} - fn fill_data_with_prop(&self, data: &mut SchemaData, prop: &Property) { + fn fill_data_with_prop(&self, data: &mut SchemaData, prop: &Property) -> anyhow::Result<()> { data.external_docs = self.convert_external_docs(prop); data.deprecated = prop.deprecation.is_some(); - data.description = prop.description.clone(); + data.description = self.property_description(prop)?; data.extensions = crate::availability_as_extensions(&prop.availability); // TODO: prop.aliases as extensions // TODO: prop.server_default as extension // TODO: prop.doc_id as extension (new representation of since and stability) // TODO: prop.es_quirk as extension? // TODO: prop.codegen_name as extension? + + Ok(()) + } + + pub fn property_description(&self, prop: &Property) -> anyhow::Result> { + if self.config.lift_enum_descriptions { + Ok(lift_enum_descriptions(prop, &self.model)?.or_else(|| prop.description.clone())) + } else { + Ok(prop.description.clone()) + } + } +} + +/// Unwraps aliases from a value definition, recursively. +/// +/// Returns the end value definition of the alias chain or `None` if the value definition isn't an alias. +fn unwrap_alias<'a> (value: &ValueOf, model: &'a clients_schema::IndexedModel) -> anyhow::Result> { + let ValueOf::InstanceOf(io) = value else { + return Ok(None); + }; + + if io.typ.is_builtin() { + return Ok(None); + } + + let TypeDefinition::TypeAlias(alias) = model.get_type(&io.typ)? else { + return Ok(None); + }; + + // Try to unwrap further or else return the current alias + let result = match unwrap_alias(&alias.typ, model)? { + Some(alias_value) => Some(alias_value), + None => Some(&alias.typ), + }; + + Ok(result) +} + +/// Checks if a value_of is a lenient array definition (i.e. `Foo | Foo[]`) and +/// if successful, returns the value definition. 
+fn unwrap_lenient_array(value: &ValueOf) -> Option<&ValueOf> { + // Is this a union + let ValueOf::UnionOf(u) = value else { + return None + }; + + // of a value and array_of (in any order) + let (single_value, array_value) = match &u.items.as_slice() { + [v, ValueOf::ArrayOf(ao)] | + [ValueOf::ArrayOf(ao), v] => (v, &*ao.value), + _ => return None, + }; + + // and both value types are the same + if single_value == array_value { + return Some(single_value); + } + + None +} + +fn unwrap_array(value: &ValueOf) -> Option<&ValueOf> { + match value { + ValueOf::ArrayOf(ArrayOf { value }) => Some(value), + _ => None, + } +} + +/// If a property value is an enumeration (possibly via aliases and arrays) +fn lift_enum_descriptions(prop: &Property, model: &clients_schema::IndexedModel) -> anyhow::Result> { + + // FIXME: could be memoized on `prop.typ` as we'll redo this work every time we encounter the same value definition + let value = &prop.typ; + + // Maybe an alias pointing to an array or lenient array + let value = unwrap_alias(value, model)?.unwrap_or(value); + + // Unwrap lenient array + let (lenient_array, value) = match unwrap_lenient_array(value) { + Some(lenient_array) => (true, lenient_array), + None => (false, value), + }; + + // Unwrap array to get to the enum type + let value = unwrap_array(value).unwrap_or(value); + + // Unwrap aliases again, in case the array value was itself an alias + let value = unwrap_alias(value, model)?.unwrap_or(value); + + // Is this an enum? + let ValueOf::InstanceOf(inst) = value else { + return Ok(None); + }; + + if inst.typ.is_builtin() { + return Ok(None); } + + let TypeDefinition::Enum(enum_def) = model.get_type(&inst.typ)? else { + return Ok(None); + }; + + let mut result: String = match &prop.description { + Some(desc) => desc.clone(), + None => String::new(), + }; + + // Do we have at least one enum member description? 
+ if enum_def.members.iter().any(|m| m.description.is_some()) { + // Some descriptions: output a list with descriptions + + // Close description paragraph and add an empty line to start a new paragraph + writeln!(result)?; + writeln!(result)?; + + writeln!(result, "Supported values include:")?; + for member in &enum_def.members { + write!(result, " - ")?; + value_and_aliases(&mut result, member)?; + if let Some(desc) = &member.description { + write!(result, ": {}", desc)?; + } + writeln!(result)?; + } + writeln!(result)?; + + } else { + // No description: inline list of values, only if this wasn't a lenient array. + // Otherwise (enum or enum array), bump.sh will correctly output a list of possible values. + if !lenient_array { + return Ok(None); + } + + // Close description paragraph and add an empty line to start a new paragraph + writeln!(result)?; + writeln!(result)?; + + write!(result, "Supported values include: ")?; + for (idx, member) in enum_def.members.iter().enumerate() { + if idx > 0 { + write!(result, ", ")?; + } + value_and_aliases(&mut result, member)?; + } + write!(result, "\n\n")?; + } + + fn value_and_aliases(out: &mut String, member: &EnumMember) -> anyhow::Result<()> { + write!(out, "`{}`", member.name)?; + if !member.aliases.is_empty() { + write!(out, " (or `{}`)", member.aliases.join("`, `"))?; + } + + Ok(()) + } + + Ok(Some(result)) } diff --git a/compiler-rs/compiler-wasm-lib/Cargo.toml b/compiler-rs/compiler-wasm-lib/Cargo.toml index 873da66fe2..3d162ddf8c 100644 --- a/compiler-rs/compiler-wasm-lib/Cargo.toml +++ b/compiler-rs/compiler-wasm-lib/Cargo.toml @@ -17,6 +17,7 @@ clients_schema = {path="../clients_schema"} clients_schema_to_openapi = {path="../clients_schema_to_openapi"} serde_json = { workspace = true } anyhow = { workspace = true } +tracing = "0.1" console_error_panic_hook = { workspace = true, optional = true } tracing-wasm = "0.2.1" diff --git a/compiler-rs/compiler-wasm-lib/pkg/compiler_wasm_lib_bg.wasm 
b/compiler-rs/compiler-wasm-lib/pkg/compiler_wasm_lib_bg.wasm index 60822dfbc7..55d7248b48 100644 Binary files a/compiler-rs/compiler-wasm-lib/pkg/compiler_wasm_lib_bg.wasm and b/compiler-rs/compiler-wasm-lib/pkg/compiler_wasm_lib_bg.wasm differ diff --git a/compiler-rs/compiler-wasm-lib/src/lib.rs b/compiler-rs/compiler-wasm-lib/src/lib.rs index 4402fcf0da..62ed5d3987 100644 --- a/compiler-rs/compiler-wasm-lib/src/lib.rs +++ b/compiler-rs/compiler-wasm-lib/src/lib.rs @@ -18,6 +18,7 @@ use anyhow::bail; use clients_schema::{Flavor, IndexedModel}; use wasm_bindgen::prelude::*; +use clients_schema_to_openapi::Configuration; #[wasm_bindgen] pub fn convert_schema_to_openapi(json: &str, flavor: &str) -> Result { @@ -33,8 +34,12 @@ fn convert0(json: &str, flavor: &str) -> anyhow::Result { _ => bail!("Unknown flavor {}", flavor), }; + let config = Configuration { + flavor, + ..Default::default() + }; let schema = IndexedModel::from_reader(json.as_bytes())?; - let openapi = clients_schema_to_openapi::convert_schema(schema, flavor)?; + let openapi = clients_schema_to_openapi::convert_schema(schema, config)?; let result = serde_json::to_string_pretty(&openapi)?; Ok(result) } diff --git a/output/openapi/elasticsearch-openapi.json b/output/openapi/elasticsearch-openapi.json index 348b8368e4..52145a638a 100644 --- a/output/openapi/elasticsearch-openapi.json +++ b/output/openapi/elasticsearch-openapi.json @@ -4798,7 +4798,7 @@ { "in": "path", "name": "target", - "description": "Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest.", + "description": "Limits the information returned to the specific target. 
Supports a comma-separated list, such as http,ingest.\n\nSupported values include: `_all`, `http`, `ingest`, `thread_pool`, `script`\n\n", "required": true, "deprecated": false, "schema": { @@ -5930,7 +5930,7 @@ { "in": "query", "name": "job_type", - "description": "A comma-separated list of job types to fetch the sync jobs for", + "description": "A comma-separated list of job types to fetch the sync jobs for\n\nSupported values include: `full`, `incremental`, `access_control`\n\n", "deprecated": false, "schema": { "oneOf": [ @@ -7791,7 +7791,7 @@ { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of the stored document or if there is no existing document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" @@ -8048,7 +8048,7 @@ { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of 
the stored document or if there is no existing document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" @@ -8208,7 +8208,7 @@ { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of the stored document or if there is no existing document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" @@ -8279,7 +8279,7 @@ { "in": "query", "name": "conflicts", - "description": "What to do if delete by query hits version conflicts: `abort` or `proceed`.", + "description": "What to do if delete by query hits version conflicts: `abort` or `proceed`.\n\nSupported values include:\n - `abort`: Stop reindexing if there are conflicts.\n - `proceed`: Continue reindexing even if there are conflicts.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.Conflicts" @@ -8309,7 +8309,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this 
argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -8449,7 +8449,7 @@ { "in": "query", "name": "search_type", - "description": "The type of the search operation.\nAvailable options include `query_then_fetch` and `dfs_query_then_fetch`.", + "description": "The type of the search operation.\nAvailable options include `query_then_fetch` and `dfs_query_then_fetch`.\n\nSupported values include:\n - `query_then_fetch`: Documents are scored using local term and document frequencies for the shard. This is usually faster but less accurate.\n - `dfs_query_then_fetch`: Documents are scored using global term and document frequencies across all shards. 
This is usually slower but more accurate.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SearchType" @@ -9983,7 +9983,7 @@ { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of the stored document or if there is no existing document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" @@ -10120,7 +10120,7 @@ { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of the stored document or if there is no existing document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" 
@@ -12182,7 +12182,7 @@ { "in": "path", "name": "block", - "description": "The block type to add to the index.", + "description": "The block type to add to the index.\n\nSupported values include:\n - `metadata`: Disable metadata changes, such as closing the index.\n - `read`: Disable read operations.\n - `read_only`: Disable write operations and metadata changes.\n - `write`: Disable write operations. However, metadata changes are still allowed.\n\n", "required": true, "deprecated": false, "schema": { @@ -12203,7 +12203,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -12611,7 +12611,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -12731,7 +12731,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard expressions can match. If the request can target data streams, this argument\ndetermines whether wildcard expressions match hidden data streams. Supports comma-separated values,\nsuch as open,hidden.", + "description": "Type of index that wildcard expressions can match. If the request can target data streams, this argument\ndetermines whether wildcard expressions match hidden data streams. 
Supports comma-separated values,\nsuch as open,hidden.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -12791,7 +12791,7 @@ { "in": "query", "name": "features", - "description": "Return only information on specified index features", + "description": "Return only information on specified index features\n\nSupported values include: `aliases`, `mappings`, `settings`\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/indices.get.Features" @@ -12967,7 +12967,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. 
Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -13050,7 +13050,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -13226,7 +13226,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of data stream that wildcard patterns can match. Supports comma-separated values,such as `open,hidden`.", + "description": "Type of data stream that wildcard patterns can match. Supports comma-separated values,such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. 
Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -13613,7 +13613,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of data stream that wildcard patterns can match.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of data stream that wildcard patterns can match.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -13694,7 +13694,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of data stream that wildcard patterns can match.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `hidden`, `open`, `closed`, `none`.", + "description": "Type of data stream that wildcard patterns can match.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `hidden`, `open`, `closed`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -13796,7 +13796,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Whether wildcard expressions should get expanded to open or closed indices (default: open)", + "description": "Whether wildcard expressions should get expanded to open or closed indices (default: open)\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -14304,7 +14304,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -14593,7 +14593,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -15793,7 +15793,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -16236,7 +16236,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -21848,7 +21848,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument determines\nwhether wildcard expressions match hidden data streams. Supports comma-separated values.", + "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument determines\nwhether wildcard expressions match hidden data streams. 
Supports comma-separated values.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -22481,7 +22481,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument determines\nwhether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are:\n\n* `all`: Match any data stream or index, including hidden ones.\n* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both.\n* `none`: Wildcard patterns are not accepted.\n* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.", + "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument determines\nwhether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are:\n\n* `all`: Match any data stream or index, including hidden ones.\n* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both.\n* `none`: Wildcard patterns are not accepted.\n* `open`: Match open, non-hidden indices. 
Also matches any non-hidden data stream.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -26097,7 +26097,7 @@ { "in": "query", "name": "wait_for", - "description": "Specifies the allocation status to wait for before returning.", + "description": "Specifies the allocation status to wait for before returning.\n\nSupported values include:\n - `started`: The trained model is started on at least one node.\n - `starting`: Trained model deployment is starting but it is not yet deployed on any nodes.\n - `fully_allocated`: Trained model deployment has started on all valid nodes.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/ml._types.DeploymentAllocationState" @@ -26520,7 +26520,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument determines\nwhether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are:\n\n* `all`: Match any data stream or index, including hidden ones.\n* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both.\n* `none`: Wildcard patterns are not accepted.\n* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.", + "description": "Type of index that wildcard patterns can match. 
If the request can target data streams, this argument determines\nwhether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are:\n\n* `all`: Match any data stream or index, including hidden ones.\n* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both.\n* `none`: Wildcard patterns are not accepted.\n* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -28601,7 +28601,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. 
Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -37941,7 +37941,7 @@ { "in": "query", "name": "order", - "description": "The sort order.\nValid values are `asc` for ascending and `desc` for descending order.\nThe default behavior is ascending order.", + "description": "The sort order.\nValid values are `asc` for ascending and `desc` for descending order.\nThe default behavior is ascending order.\n\nSupported values include:\n - `asc`: Ascending (smallest to largest)\n - `desc`: Descending (largest to smallest)\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SortOrder" @@ -39918,7 +39918,7 @@ { "in": "query", "name": "group_by", - "description": "A key that is used to group tasks in the response.\nThe task lists can be grouped either by nodes or by parent tasks.", + "description": "A key that is used to group tasks in the response.\nThe task lists can be grouped either by nodes or by parent tasks.\n\nSupported values include:\n - `nodes`: Group tasks by node ID.\n - `parents`: Group tasks by parent task ID.\n - `none`: Do not group tasks.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/tasks._types.GroupBy" @@ -42242,7 +42242,7 @@ { "in": "query", "name": "conflicts", - "description": "The preferred behavior when update by query hits version conflicts: `abort` or `proceed`.", + "description": "The preferred behavior when update by query hits version conflicts: `abort` or `proceed`.\n\nSupported values include:\n - `abort`: Stop reindexing if there are conflicts.\n - `proceed`: Continue reindexing even if there are conflicts.\n\n", "deprecated": false, 
"schema": { "$ref": "#/components/schemas/_types.Conflicts" @@ -42272,7 +42272,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -42422,7 +42422,7 @@ { "in": "query", "name": "search_type", - "description": "The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`.", + "description": "The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`.\n\nSupported values include:\n - `query_then_fetch`: Documents are scored using local term and document frequencies for the shard. This is usually faster but less accurate.\n - `dfs_query_then_fetch`: Documents are scored using global term and document frequencies across all shards. 
This is usually slower but more accurate.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SearchType" @@ -52416,7 +52416,7 @@ "description": "Language value, such as _arabic_ or _thai_. Defaults to _english_.\nEach language value corresponds to a predefined list of stop words in Lucene. See Stop words by language for supported language values and their stop words.\nAlso accepts an array of stop words.", "oneOf": [ { - "type": "string" + "$ref": "#/components/schemas/_types.analysis.StopWordLanguage" }, { "type": "array", @@ -52426,6 +52426,48 @@ } ] }, + "_types.analysis.StopWordLanguage": { + "type": "string", + "enum": [ + "_arabic_", + "_armenian_", + "_basque_", + "_bengali_", + "_brazilian_", + "_bulgarian_", + "_catalan_", + "_cjk_", + "_czech_", + "_danish_", + "_dutch_", + "_english_", + "_estonian_", + "_finnish_", + "_french_", + "_galician_", + "_german_", + "_greek_", + "_hindi_", + "_hungarian_", + "_indonesian_", + "_irish_", + "_italian_", + "_latvian_", + "_lithuanian_", + "_norwegian_", + "_persian_", + "_portuguese_", + "_romanian_", + "_russian_", + "_serbian_", + "_sorani_", + "_spanish_", + "_swedish_", + "_thai_", + "_turkish_", + "_none_" + ] + }, "_types.query_dsl.MultiMatchQuery": { "allOf": [ { @@ -61029,6 +61071,7 @@ "$ref": "#/components/schemas/_types.Fields" }, "order": { + "description": "\n\nSupported values include: `asc` (or `ASC`), `desc` (or `DESC`)\n\n", "oneOf": [ { "$ref": "#/components/schemas/indices._types.SegmentSortOrder" @@ -61042,6 +61085,7 @@ ] }, "mode": { + "description": "\n\nSupported values include: `min` (or `MIN`), `max` (or `MAX`)\n\n", "oneOf": [ { "$ref": "#/components/schemas/indices._types.SegmentSortMode" @@ -61055,6 +61099,7 @@ ] }, "missing": { + "description": "\n\nSupported values include: `_last`, `_first`\n\n", "oneOf": [ { "$ref": "#/components/schemas/indices._types.SegmentSortMissing" @@ -61997,25 +62042,30 @@ "_types.analysis.SnowballLanguage": { "type": "string", 
"enum": [ + "Arabic", "Armenian", "Basque", "Catalan", "Danish", "Dutch", "English", + "Estonian", "Finnish", "French", "German", "German2", "Hungarian", "Italian", + "Irish", "Kp", + "Lithuanian", "Lovins", "Norwegian", "Porter", "Portuguese", "Romanian", "Russian", + "Serbian", "Spanish", "Swedish", "Turkish" @@ -63140,15 +63190,33 @@ "propertyName": "type" }, "oneOf": [ + { + "$ref": "#/components/schemas/_types.analysis.ApostropheTokenFilter" + }, + { + "$ref": "#/components/schemas/_types.analysis.ArabicNormalizationTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.AsciiFoldingTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.CjkBigramTokenFilter" + }, + { + "$ref": "#/components/schemas/_types.analysis.CjkWidthTokenFilter" + }, + { + "$ref": "#/components/schemas/_types.analysis.ClassicTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.CommonGramsTokenFilter" }, { "$ref": "#/components/schemas/_types.analysis.ConditionTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.DecimalDigitTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.DelimitedPayloadTokenFilter" }, @@ -63161,12 +63229,24 @@ { "$ref": "#/components/schemas/_types.analysis.FingerprintTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.FlattenGraphTokenFilter" + }, + { + "$ref": "#/components/schemas/_types.analysis.GermanNormalizationTokenFilter" + }, + { + "$ref": "#/components/schemas/_types.analysis.HindiNormalizationTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.HunspellTokenFilter" }, { "$ref": "#/components/schemas/_types.analysis.HyphenationDecompounderTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.IndicNormalizationTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.KeepTypesTokenFilter" }, @@ -63176,6 +63256,9 @@ { "$ref": "#/components/schemas/_types.analysis.KeywordMarkerTokenFilter" }, + { + "$ref": 
"#/components/schemas/_types.analysis.KeywordRepeatTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.KStemTokenFilter" }, @@ -63188,6 +63271,9 @@ { "$ref": "#/components/schemas/_types.analysis.LowercaseTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.MinHashTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.MultiplexerTokenFilter" }, @@ -63203,6 +63289,9 @@ { "$ref": "#/components/schemas/_types.analysis.PatternReplaceTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.PersianNormalizationTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.PorterStemTokenFilter" }, @@ -63215,12 +63304,24 @@ { "$ref": "#/components/schemas/_types.analysis.ReverseTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.ScandinavianFoldingTokenFilter" + }, + { + "$ref": "#/components/schemas/_types.analysis.ScandinavianNormalizationTokenFilter" + }, + { + "$ref": "#/components/schemas/_types.analysis.SerbianNormalizationTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.ShingleTokenFilter" }, { "$ref": "#/components/schemas/_types.analysis.SnowballTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.SoraniNormalizationTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.StemmerOverrideTokenFilter" }, @@ -63254,6 +63355,9 @@ { "$ref": "#/components/schemas/_types.analysis.WordDelimiterTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.JaStopTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.KuromojiStemmerTokenFilter" }, @@ -63283,7 +63387,7 @@ } ] }, - "_types.analysis.AsciiFoldingTokenFilter": { + "_types.analysis.ApostropheTokenFilter": { "allOf": [ { "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" @@ -63294,11 +63398,8 @@ "type": { "type": "string", "enum": [ - "asciifolding" + "apostrophe" ] - }, - "preserve_original": { - "$ref": "#/components/schemas/_spec_utils.Stringifiedboolean" } }, "required": [ 
@@ -63315,6 +63416,134 @@ } } }, + "_types.analysis.ArabicNormalizationTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "arabic_normalization" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.AsciiFoldingTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "asciifolding" + ] + }, + "preserve_original": { + "$ref": "#/components/schemas/_spec_utils.Stringifiedboolean" + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.CjkBigramTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "cjk_bigram" + ] + }, + "ignored_scripts": { + "description": "Array of character scripts for which to disable bigrams.", + "type": "array", + "items": { + "$ref": "#/components/schemas/_types.analysis.CjkBigramIgnoredScript" + } + }, + "output_unigrams": { + "description": "If `true`, emit tokens in both bigram and unigram form. If `false`, a CJK character is output in unigram form when it has no adjacent characters. 
Defaults to `false`.", + "type": "boolean" + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.CjkBigramIgnoredScript": { + "type": "string", + "enum": [ + "han", + "hangul", + "hiragana", + "katakana" + ] + }, + "_types.analysis.CjkWidthTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "cjk_width" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.ClassicTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "classic" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "_types.analysis.CommonGramsTokenFilter": { "allOf": [ { @@ -63330,18 +63559,22 @@ ] }, "common_words": { + "description": "A list of tokens. The filter generates bigrams for these tokens.\nEither this or the `common_words_path` parameter is required.", "type": "array", "items": { "type": "string" } }, "common_words_path": { + "description": "Path to a file containing a list of tokens. The filter generates bigrams for these tokens.\nThis path must be absolute or relative to the `config` location. The file must be UTF-8 encoded. Each token in the file must be separated by a line break.\nEither this or the `common_words` parameter is required.", "type": "string" }, "ignore_case": { + "description": "If `true`, matches for common words matching are case-insensitive. Defaults to `false`.", "type": "boolean" }, "query_mode": { + "description": "If `true`, the filter excludes the following tokens from the output:\n- Unigrams for common words\n- Unigrams for terms followed by common words\nDefaults to `false`. We recommend enabling this parameter for search analyzers.", "type": "boolean" } }, @@ -63366,6 +63599,7 @@ ] }, "filter": { + "description": "Array of token filters. 
If a token matches the predicate script in the `script` parameter, these filters are applied to the token in the order provided.", "type": "array", "items": { "type": "string" @@ -63383,6 +63617,27 @@ } ] }, + "_types.analysis.DecimalDigitTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "decimal_digit" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "_types.analysis.DelimitedPayloadTokenFilter": { "allOf": [ { @@ -63398,6 +63653,7 @@ ] }, "delimiter": { + "description": "Character used to separate tokens from payloads. Defaults to `|`.", "type": "string" }, "encoding": { @@ -63433,9 +63689,11 @@ ] }, "max_gram": { + "description": "Maximum character length of a gram. For custom token filters, defaults to `2`. For the built-in edge_ngram filter, defaults to `1`.", "type": "number" }, "min_gram": { + "description": "Minimum character length of a gram. Defaults to `1`.", "type": "number" }, "side": { @@ -63473,12 +63731,14 @@ ] }, "articles": { + "description": "List of elisions to remove.\nTo be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. Both the elision and apostrophe are removed.\nFor custom `elision` filters, either this parameter or `articles_path` must be specified.", "type": "array", "items": { "type": "string" } }, "articles_path": { + "description": "Path to a file that contains a list of elisions to remove.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each elision in the file must be separated by a line break.\nTo be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. 
Both the elision and apostrophe are removed.\nFor custom `elision` filters, either this parameter or `articles` must be specified.", "type": "string" }, "articles_case": { @@ -63506,9 +63766,11 @@ ] }, "max_output_size": { + "description": "Maximum character length, including whitespace, of the output token. Defaults to `255`. Concatenated tokens longer than this will result in no token output.", "type": "number" }, "separator": { + "description": "Character to use to concatenate the token stream input. Defaults to a space.", "type": "string" } }, @@ -63518,6 +63780,69 @@ } ] }, + "_types.analysis.FlattenGraphTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "flatten_graph" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.GermanNormalizationTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "german_normalization" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.HindiNormalizationTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "hindi_normalization" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "_types.analysis.HunspellTokenFilter": { "allOf": [ { @@ -63533,15 +63858,19 @@ ] }, "dedup": { + "description": "If `true`, duplicate tokens are removed from the filter’s output. 
Defaults to `true`.", "type": "boolean" }, "dictionary": { + "description": "One or more `.dic` files (e.g, `en_US.dic`, my_custom.dic) to use for the Hunspell dictionary.\nBy default, the `hunspell` filter uses all `.dic` files in the `<$ES_PATH_CONF>/hunspell/` directory specified using the `lang`, `language`, or `locale` parameter.", "type": "string" }, "locale": { + "description": "Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary.", "type": "string" }, "longest_only": { + "description": "If `true`, only the longest stemmed version of each token is included in the output. If `false`, all stemmed versions of the token are included. Defaults to `false`.", "type": "boolean" } }, @@ -63565,10 +63894,23 @@ "enum": [ "hyphenation_decompounder" ] + }, + "hyphenation_patterns_path": { + "description": "Path to an Apache FOP (Formatting Objects Processor) XML hyphenation pattern file.\nThis path must be absolute or relative to the `config` location. Only FOP v1.2 compatible files are supported.", + "type": "string" + }, + "no_sub_matches": { + "description": "If `true`, do not match sub tokens in tokens that are in the word list. Defaults to `false`.", + "type": "boolean" + }, + "no_overlapping_matches": { + "description": "If `true`, do not allow overlapping tokens. Defaults to `false`.", + "type": "boolean" } }, "required": [ - "type" + "type", + "hyphenation_patterns_path" ] } ] @@ -63581,34 +63923,58 @@ { "type": "object", "properties": { - "hyphenation_patterns_path": { - "type": "string" - }, "max_subword_size": { + "description": "Maximum subword character length. Longer subword tokens are excluded from the output. Defaults to `15`.", "type": "number" }, "min_subword_size": { + "description": "Minimum subword character length. Shorter subword tokens are excluded from the output. Defaults to `2`.", "type": "number" }, "min_word_size": { + "description": "Minimum word character length. 
Shorter word tokens are excluded from the output. Defaults to `5`.", "type": "number" }, "only_longest_match": { + "description": "If `true`, only include the longest matching subword. Defaults to `false`.", "type": "boolean" }, "word_list": { + "description": "A list of subwords to look for in the token stream. If found, the subword is included in the token output.\nEither this parameter or `word_list_path` must be specified.", "type": "array", "items": { "type": "string" } }, "word_list_path": { + "description": "Path to a file that contains a list of subwords to find in the token stream. If found, the subword is included in the token output.\nThis path must be absolute or relative to the config location, and the file must be UTF-8 encoded. Each token in the file must be separated by a line break.\nEither this parameter or `word_list` must be specified.", "type": "string" } } } ] }, + "_types.analysis.IndicNormalizationTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "indic_normalization" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "_types.analysis.KeepTypesTokenFilter": { "allOf": [ { @@ -63627,6 +63993,7 @@ "$ref": "#/components/schemas/_types.analysis.KeepTypesMode" }, "types": { + "description": "List of token types to keep or remove.", "type": "array", "items": { "type": "string" @@ -63634,7 +64001,8 @@ } }, "required": [ - "type" + "type", + "types" ] } ] @@ -63661,15 +64029,18 @@ ] }, "keep_words": { + "description": "List of words to keep. Only tokens that match words in this list are included in the output.\nEither this parameter or `keep_words_path` must be specified.", "type": "array", "items": { "type": "string" } }, "keep_words_case": { + "description": "If `true`, lowercase all keep words. 
Defaults to `false`.", "type": "boolean" }, "keep_words_path": { + "description": "Path to a file that contains a list of words to keep. Only tokens that match words in this list are included in the output.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each word in the file must be separated by a line break.\nEither this parameter or `keep_words` must be specified.", "type": "string" } }, @@ -63694,9 +64065,11 @@ ] }, "ignore_case": { + "description": "If `true`, matching for the `keywords` and `keywords_path` parameters ignores letter case. Defaults to `false`.", "type": "boolean" }, "keywords": { + "description": "Array of keywords. Tokens that match these keywords are not stemmed.\nThis parameter, `keywords_path`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`.", "oneOf": [ { "type": "string" @@ -63710,9 +64083,11 @@ ] }, "keywords_path": { + "description": "Path to a file that contains a list of keywords. Tokens that match these keywords are not stemmed.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each word in the file must be separated by a line break.\nThis parameter, `keywords`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`.", "type": "string" }, "keywords_pattern": { + "description": "Java regular expression used to match tokens. Tokens that match this expression are marked as keywords and not stemmed.\nThis parameter, `keywords`, or `keywords_path` must be specified. 
You cannot specify this parameter and `keywords` or `keywords_path`.", "type": "string" } }, @@ -63722,6 +64097,27 @@ } ] }, + "_types.analysis.KeywordRepeatTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "keyword_repeat" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "_types.analysis.KStemTokenFilter": { "allOf": [ { @@ -63758,9 +64154,11 @@ ] }, "max": { + "description": "Maximum character length of a token. Longer tokens are excluded from the output. Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`.", "type": "number" }, "min": { + "description": "Minimum character length of a token. Shorter tokens are excluded from the output. Defaults to `0`.", "type": "number" } }, @@ -63785,6 +64183,7 @@ ] }, "consume_all_tokens": { + "description": "If `true`, the limit filter exhausts the token stream, even if the `max_token_count` has already been reached. Defaults to `false`.", "type": "boolean" }, "max_token_count": { @@ -63812,7 +64211,52 @@ ] }, "language": { - "type": "string" + "$ref": "#/components/schemas/_types.analysis.LowercaseTokenFilterLanguages" + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.LowercaseTokenFilterLanguages": { + "type": "string", + "enum": [ + "greek", + "irish", + "turkish" + ] + }, + "_types.analysis.MinHashTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "min_hash" + ] + }, + "bucket_count": { + "description": "Number of buckets to which hashes are assigned. Defaults to `512`.", + "type": "number" + }, + "hash_count": { + "description": "Number of ways to hash each token in the stream. Defaults to `1`.", + "type": "number" + }, + "hash_set_size": { + "description": "Number of hashes to keep from each bucket. 
Defaults to `1`.\nHashes are retained by ascending size, starting with the bucket’s smallest hash first.", + "type": "number" + }, + "with_rotation": { + "description": "If `true`, the filter fills empty buckets with the value of the first non-empty bucket to its circular right if the `hash_set_size` is `1`. If the `bucket_count` argument is greater than 1, this parameter defaults to `true`. Otherwise, this parameter defaults to `false`.", + "type": "boolean" } }, "required": [ @@ -63836,6 +64280,7 @@ ] }, "filters": { + "description": "A list of token filters to apply to incoming tokens.", "type": "array", "items": { "type": "string" @@ -63867,9 +64312,11 @@ ] }, "max_gram": { + "description": "Maximum length of characters in a gram. Defaults to `2`.", "type": "number" }, "min_gram": { + "description": "Minimum length of characters in a gram. Defaults to `1`.", "type": "number" }, "preserve_original": { @@ -63897,6 +64344,7 @@ ] }, "stoptags": { + "description": "An array of part-of-speech tags that should be removed.", "type": "array", "items": { "type": "string" @@ -63924,6 +64372,7 @@ ] }, "patterns": { + "description": "A list of regular expressions to match.", "type": "array", "items": { "type": "string" @@ -63955,15 +64404,15 @@ ] }, "all": { + "description": "If `true`, all substrings matching the pattern parameter’s regular expression are replaced. If `false`, the filter replaces only the first matching substring in each token. Defaults to `true`.", "type": "boolean" }, - "flags": { - "type": "string" - }, "pattern": { + "description": "Regular expression, written in Java’s regular expression syntax. The filter replaces token substrings matching this pattern with the substring in the `replacement` parameter.", "type": "string" }, "replacement": { + "description": "Replacement substring. 
Defaults to an empty substring (`\"\"`).", "type": "string" } }, @@ -63974,6 +64423,27 @@ } ] }, + "_types.analysis.PersianNormalizationTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "persian_normalization" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "_types.analysis.PorterStemTokenFilter": { "allOf": [ { @@ -64062,6 +64532,69 @@ } ] }, + "_types.analysis.ScandinavianFoldingTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "scandinavian_folding" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.ScandinavianNormalizationTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "scandinavian_normalization" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.SerbianNormalizationTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "serbian_normalization" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "_types.analysis.ShingleTokenFilter": { "allOf": [ { @@ -64077,35 +64610,25 @@ ] }, "filler_token": { + "description": "String used in shingles as a replacement for empty positions that do not contain a token. This filler token is only used in shingles, not original unigrams. 
Defaults to an underscore (`_`).", "type": "string" }, "max_shingle_size": { - "oneOf": [ - { - "type": "number" - }, - { - "type": "string" - } - ] + "$ref": "#/components/schemas/_spec_utils.Stringifiedinteger" }, "min_shingle_size": { - "oneOf": [ - { - "type": "number" - }, - { - "type": "string" - } - ] + "$ref": "#/components/schemas/_spec_utils.Stringifiedinteger" }, "output_unigrams": { + "description": "If `true`, the output includes the original input tokens. If `false`, the output only includes shingles; the original input tokens are removed. Defaults to `true`.", "type": "boolean" }, "output_unigrams_if_no_shingles": { + "description": "If `true`, the output includes the original input tokens only if no shingles are produced; if shingles are produced, the output only includes shingles. Defaults to `false`.", "type": "boolean" }, "token_separator": { + "description": "Separator used to concatenate adjacent tokens to form a shingle. Defaults to a space (`\" \"`).", "type": "string" } }, @@ -64139,6 +64662,27 @@ } ] }, + "_types.analysis.SoraniNormalizationTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "sorani_normalization" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "_types.analysis.StemmerOverrideTokenFilter": { "allOf": [ { @@ -64154,12 +64698,14 @@ ] }, "rules": { + "description": "A list of mapping rules to use.", "type": "array", "items": { "type": "string" } }, "rules_path": { + "description": "A path (either relative to `config` location, or absolute) to a list of mappings.", "type": "string" } }, @@ -64208,15 +64754,18 @@ ] }, "ignore_case": { + "description": "If `true`, stop word matching is case insensitive. For example, if `true`, a stop word of `the` matches and removes `The`, `THE`, or `the`. 
Defaults to `false`.", "type": "boolean" }, "remove_trailing": { + "description": "If `true`, the last token of a stream is removed if it’s a stop word. Defaults to `true`.", "type": "boolean" }, "stopwords": { "$ref": "#/components/schemas/_types.analysis.StopWords" }, "stopwords_path": { + "description": "Path to a file that contains a list of stop words to remove.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each stop word in the file must be separated by a line break.", "type": "string" } }, @@ -64229,7 +64778,7 @@ "_types.analysis.SynonymGraphTokenFilter": { "allOf": [ { - "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + "$ref": "#/components/schemas/_types.analysis.SynonymTokenFilterBase" }, { "type": "object", @@ -64239,38 +64788,58 @@ "enum": [ "synonym_graph" ] - }, + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.SynonymTokenFilterBase": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { "expand": { + "description": "Expands definitions for equivalent synonym rules. Defaults to `true`.", "type": "boolean" }, "format": { "$ref": "#/components/schemas/_types.analysis.SynonymFormat" }, "lenient": { + "description": "If `true` ignores errors while parsing the synonym rules. It is important to note that only those synonym rules which cannot get parsed are ignored. Defaults to the value of the `updateable` setting.", "type": "boolean" }, "synonyms": { + "description": "Used to define inline synonyms.", "type": "array", "items": { "type": "string" } }, "synonyms_path": { + "description": "Used to provide a synonym file. 
This path must be absolute or relative to the `config` location.", "type": "string" }, "synonyms_set": { + "description": "Provide a synonym set created via Synonyms Management APIs.", "type": "string" }, "tokenizer": { + "deprecated": true, + "description": "Controls the tokenizers that will be used to tokenize the synonym, this parameter is for backwards compatibility for indices that created before 6.0.", "type": "string" }, "updateable": { + "description": "If `true` allows reloading search analyzers to pick up changes to synonym files. Only to be used for search analyzers. Defaults to `false`.", "type": "boolean" } - }, - "required": [ - "type" - ] + } } ] }, @@ -64284,7 +64853,7 @@ "_types.analysis.SynonymTokenFilter": { "allOf": [ { - "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + "$ref": "#/components/schemas/_types.analysis.SynonymTokenFilterBase" }, { "type": "object", @@ -64294,33 +64863,6 @@ "enum": [ "synonym" ] - }, - "expand": { - "type": "boolean" - }, - "format": { - "$ref": "#/components/schemas/_types.analysis.SynonymFormat" - }, - "lenient": { - "type": "boolean" - }, - "synonyms": { - "type": "array", - "items": { - "type": "string" - } - }, - "synonyms_path": { - "type": "string" - }, - "synonyms_set": { - "type": "string" - }, - "tokenizer": { - "type": "string" - }, - "updateable": { - "type": "boolean" } }, "required": [ @@ -64365,6 +64907,7 @@ ] }, "length": { + "description": "Character limit for each token. Tokens exceeding this limit are truncated. Defaults to `10`.", "type": "number" } }, @@ -64389,6 +64932,7 @@ ] }, "only_on_same_position": { + "description": "If `true`, only remove duplicate tokens in the same position. 
Defaults to `false`.", "type": "boolean" } }, @@ -64422,7 +64966,7 @@ "_types.analysis.WordDelimiterGraphTokenFilter": { "allOf": [ { - "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + "$ref": "#/components/schemas/_types.analysis.WordDelimiterTokenFilterBase" }, { "type": "object", @@ -64434,56 +64978,103 @@ ] }, "adjust_offsets": { + "description": "If `true`, the filter adjusts the offsets of split or catenated tokens to better reflect their actual position in the token stream. Defaults to `true`.", "type": "boolean" }, + "ignore_keywords": { + "description": "If `true`, the filter skips tokens with a keyword attribute of true. Defaults to `false`.", + "type": "boolean" + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.WordDelimiterTokenFilterBase": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { "catenate_all": { + "description": "If `true`, the filter produces catenated tokens for chains of alphanumeric characters separated by non-alphabetic delimiters. Defaults to `false`.", "type": "boolean" }, "catenate_numbers": { + "description": "If `true`, the filter produces catenated tokens for chains of numeric characters separated by non-alphabetic delimiters. Defaults to `false`.", "type": "boolean" }, "catenate_words": { + "description": "If `true`, the filter produces catenated tokens for chains of alphabetical characters separated by non-alphabetic delimiters. Defaults to `false`.", "type": "boolean" }, "generate_number_parts": { + "description": "If `true`, the filter includes tokens consisting of only numeric characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`.", "type": "boolean" }, "generate_word_parts": { - "type": "boolean" - }, - "ignore_keywords": { + "description": "If `true`, the filter includes tokens consisting of only alphabetical characters in the output. 
If `false`, the filter excludes these tokens from the output. Defaults to `true`.", "type": "boolean" }, "preserve_original": { "$ref": "#/components/schemas/_spec_utils.Stringifiedboolean" }, "protected_words": { + "description": "Array of tokens the filter won’t split.", "type": "array", "items": { "type": "string" } }, "protected_words_path": { + "description": "Path to a file that contains a list of tokens the filter won’t split.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each token in the file must be separated by a line break.", "type": "string" }, "split_on_case_change": { + "description": "If `true`, the filter splits tokens at letter case transitions. For example: camelCase -> [ camel, Case ]. Defaults to `true`.", "type": "boolean" }, "split_on_numerics": { + "description": "If `true`, the filter splits tokens at letter-number transitions. For example: j2se -> [ j, 2, se ]. Defaults to `true`.", "type": "boolean" }, "stem_english_possessive": { + "description": "If `true`, the filter removes the English possessive (`'s`) from the end of each token. For example: O'Neil's -> [ O, Neil ]. Defaults to `true`.", "type": "boolean" }, "type_table": { + "description": "Array of custom type mappings for characters. This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters.", "type": "array", "items": { "type": "string" } }, "type_table_path": { + "description": "Path to a file that contains custom type mappings for characters. 
This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters.", "type": "string" } + } + } + ] + }, + "_types.analysis.WordDelimiterTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.WordDelimiterTokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "word_delimiter" + ] + } }, "required": [ "type" @@ -64491,7 +65082,7 @@ } ] }, - "_types.analysis.WordDelimiterTokenFilter": { + "_types.analysis.JaStopTokenFilter": { "allOf": [ { "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" @@ -64502,53 +65093,11 @@ "type": { "type": "string", "enum": [ - "word_delimiter" + "ja_stop" ] }, - "catenate_all": { - "type": "boolean" - }, - "catenate_numbers": { - "type": "boolean" - }, - "catenate_words": { - "type": "boolean" - }, - "generate_number_parts": { - "type": "boolean" - }, - "generate_word_parts": { - "type": "boolean" - }, - "preserve_original": { - "$ref": "#/components/schemas/_spec_utils.Stringifiedboolean" - }, - "protected_words": { - "type": "array", - "items": { - "type": "string" - } - }, - "protected_words_path": { - "type": "string" - }, - "split_on_case_change": { - "type": "boolean" - }, - "split_on_numerics": { - "type": "boolean" - }, - "stem_english_possessive": { - "type": "boolean" - }, - "type_table": { - "type": "array", - "items": { - "type": "string" - } - }, - "type_table_path": { - "type": "string" + "stopwords": { + "$ref": "#/components/schemas/_types.analysis.StopWords" } }, "required": [ @@ -64826,6 +65375,7 @@ "$ref": "#/components/schemas/_types.analysis.PhoneticEncoder" }, "languageset": { + "description": "\n\nSupported values include: `any`, `common`, `cyrillic`, `english`, `french`, `german`, `hebrew`, `hungarian`, `polish`, `romanian`, `russian`, `spanish`\n\n", "oneOf": [ { "$ref": "#/components/schemas/_types.analysis.PhoneticLanguage" @@ -82406,7 +82956,7 @@ "type": "object", 
"properties": { "actions": { - "description": "The set of actions to be triggered when the rule applies. If more than one action is specified the effects of all actions are combined.", + "description": "The set of actions to be triggered when the rule applies. If more than one action is specified the effects of all actions are combined.\n\nSupported values include:\n - `skip_result`: The result will not be created. Unless you also specify `skip_model_update`, the model will be updated as usual with the corresponding series value.\n - `skip_model_update`: The value for that series will not be used to update the model. Unless you also specify `skip_result`, the results will be created as usual. This action is suitable when certain values are expected to be consistently anomalous and they affect the model in a way that negatively impacts the rest of the results.\n\n", "type": "array", "items": { "$ref": "#/components/schemas/ml._types.RuleAction" @@ -104449,7 +104999,7 @@ "async_search.submit-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.", + "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -104539,7 +105089,7 @@ "async_search.submit-search_type": { "in": "query", "name": "search_type", - "description": "Search operation type", + "description": "Search operation type\n\nSupported values include:\n - `query_then_fetch`: Documents are scored using local term and document frequencies for the shard. This is usually faster but less accurate.\n - `dfs_query_then_fetch`: Documents are scored using global term and document frequencies across all shards. This is usually slower but more accurate.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SearchType" @@ -104582,7 +105132,7 @@ "async_search.submit-suggest_mode": { "in": "query", "name": "suggest_mode", - "description": "Specify suggest mode", + "description": "Specify suggest mode\n\nSupported values include:\n - `missing`: Only generate suggestions for terms that are not in the shard.\n - `popular`: Only suggest terms that occur in more docs on the shard than the original term.\n - `always`: Suggest any matching suggestions based on terms in the suggest text.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SuggestMode" @@ -104934,7 +105484,7 @@ "cat.aliases-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.\n\nSupported values include:\n - `all`: 
Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -105169,7 +105719,7 @@ "cat.indices-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.", + "description": "The type of index that wildcard patterns can match.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -105179,7 +105729,7 @@ "cat.indices-health": { "in": "query", "name": "health", - "description": "The health status used to limit returned indices. By default, the response includes indices of any health status.", + "description": "The health status used to limit returned indices. By default, the response includes indices of any health status.\n\nSupported values include:\n - `green` (or `GREEN`): All shards are assigned.\n - `yellow` (or `YELLOW`): All primary shards are assigned, but one or more replica shards are unassigned. 
If a node in the cluster fails, some data could be unavailable until that node is repaired.\n - `red` (or `RED`): One or more primary shards are unassigned, so some data is unavailable. This can occur briefly during cluster startup as primary shards are assigned.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.HealthStatus" @@ -105280,7 +105830,7 @@ "cat.ml_data_frame_analytics-h": { "in": "query", "name": "h", - "description": "Comma-separated list of column names to display.", + "description": "Comma-separated list of column names to display.\n\nSupported values include:\n - `assignment_explanation` (or `ae`): Contains messages relating to the selection of a node.\n - `create_time` (or `ct`, `createTime`): The time when the data frame analytics job was created.\n - `description` (or `d`): A description of a job.\n - `dest_index` (or `di`, `destIndex`): Name of the destination index.\n - `failure_reason` (or `fr`, `failureReason`): Contains messages about the reason why a data frame analytics job failed.\n - `id`: Identifier for the data frame analytics job.\n - `model_memory_limit` (or `mml`, `modelMemoryLimit`): The approximate maximum amount of memory resources that are permitted for\nthe data frame analytics job.\n - `node.address` (or `na`, `nodeAddress`): The network address of the node that the data frame analytics job is\nassigned to.\n - `node.ephemeral_id` (or `ne`, `nodeEphemeralId`): The ephemeral ID of the node that the data frame analytics job is assigned\nto.\n - `node.id` (or `ni`, `nodeId`): The unique identifier of the node that the data frame analytics job is\nassigned to.\n - `node.name` (or `nn`, `nodeName`): The name of the node that the data frame analytics job is assigned to.\n - `progress` (or `p`): The progress report of the data frame analytics job by phase.\n - `source_index` (or `si`, `sourceIndex`): Name of the source index.\n - `state` (or `s`): Current state of the data frame analytics job.\n - `type` (or 
`t`): The type of analysis that the data frame analytics job performs.\n - `version` (or `v`): The Elasticsearch version number in which the data frame analytics job was\ncreated.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatDfaColumns" @@ -105290,7 +105840,7 @@ "cat.ml_data_frame_analytics-s": { "in": "query", "name": "s", - "description": "Comma-separated list of column names or column aliases used to sort the\nresponse.", + "description": "Comma-separated list of column names or column aliases used to sort the\nresponse.\n\nSupported values include:\n - `assignment_explanation` (or `ae`): Contains messages relating to the selection of a node.\n - `create_time` (or `ct`, `createTime`): The time when the data frame analytics job was created.\n - `description` (or `d`): A description of a job.\n - `dest_index` (or `di`, `destIndex`): Name of the destination index.\n - `failure_reason` (or `fr`, `failureReason`): Contains messages about the reason why a data frame analytics job failed.\n - `id`: Identifier for the data frame analytics job.\n - `model_memory_limit` (or `mml`, `modelMemoryLimit`): The approximate maximum amount of memory resources that are permitted for\nthe data frame analytics job.\n - `node.address` (or `na`, `nodeAddress`): The network address of the node that the data frame analytics job is\nassigned to.\n - `node.ephemeral_id` (or `ne`, `nodeEphemeralId`): The ephemeral ID of the node that the data frame analytics job is assigned\nto.\n - `node.id` (or `ni`, `nodeId`): The unique identifier of the node that the data frame analytics job is\nassigned to.\n - `node.name` (or `nn`, `nodeName`): The name of the node that the data frame analytics job is assigned to.\n - `progress` (or `p`): The progress report of the data frame analytics job by phase.\n - `source_index` (or `si`, `sourceIndex`): Name of the source index.\n - `state` (or `s`): Current state of the data frame analytics job.\n - `type` (or `t`): The 
type of analysis that the data frame analytics job performs.\n - `version` (or `v`): The Elasticsearch version number in which the data frame analytics job was\ncreated.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatDfaColumns" @@ -105331,7 +105881,7 @@ "cat.ml_datafeeds-h": { "in": "query", "name": "h", - "description": "Comma-separated list of column names to display.", + "description": "Comma-separated list of column names to display.\n\nSupported values include:\n - `ae` (or `assignment_explanation`): For started datafeeds only, contains messages relating to the selection of\na node.\n - `bc` (or `buckets.count`, `bucketsCount`): The number of buckets processed.\n - `id`: A numerical character string that uniquely identifies the datafeed.\n - `na` (or `node.address`, `nodeAddress`): For started datafeeds only, the network address of the node where the\ndatafeed is started.\n - `ne` (or `node.ephemeral_id`, `nodeEphemeralId`): For started datafeeds only, the ephemeral ID of the node where the\ndatafeed is started.\n - `ni` (or `node.id`, `nodeId`): For started datafeeds only, the unique identifier of the node where the\ndatafeed is started.\n - `nn` (or `node.name`, `nodeName`): For started datafeeds only, the name of the node where the datafeed is\nstarted.\n - `sba` (or `search.bucket_avg`, `searchBucketAvg`): The average search time per bucket, in milliseconds.\n - `sc` (or `search.count`, `searchCount`): The number of searches run by the datafeed.\n - `seah` (or `search.exp_avg_hour`, `searchExpAvgHour`): The exponential average search time per hour, in milliseconds.\n - `st` (or `search.time`, `searchTime`): The total time the datafeed spent searching, in milliseconds.\n - `s` (or `state`): The status of the datafeed: `starting`, `started`, `stopping`, or `stopped`.\nIf `starting`, the datafeed has been requested to start but has not yet\nstarted. If `started`, the datafeed is actively receiving data. 
If\n`stopping`, the datafeed has been requested to stop gracefully and is\ncompleting its final action. If `stopped`, the datafeed is stopped and will\nnot receive data until it is re-started.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatDatafeedColumns" @@ -105341,7 +105891,7 @@ "cat.ml_datafeeds-s": { "in": "query", "name": "s", - "description": "Comma-separated list of column names or column aliases used to sort the response.", + "description": "Comma-separated list of column names or column aliases used to sort the response.\n\nSupported values include:\n - `ae` (or `assignment_explanation`): For started datafeeds only, contains messages relating to the selection of\na node.\n - `bc` (or `buckets.count`, `bucketsCount`): The number of buckets processed.\n - `id`: A numerical character string that uniquely identifies the datafeed.\n - `na` (or `node.address`, `nodeAddress`): For started datafeeds only, the network address of the node where the\ndatafeed is started.\n - `ne` (or `node.ephemeral_id`, `nodeEphemeralId`): For started datafeeds only, the ephemeral ID of the node where the\ndatafeed is started.\n - `ni` (or `node.id`, `nodeId`): For started datafeeds only, the unique identifier of the node where the\ndatafeed is started.\n - `nn` (or `node.name`, `nodeName`): For started datafeeds only, the name of the node where the datafeed is\nstarted.\n - `sba` (or `search.bucket_avg`, `searchBucketAvg`): The average search time per bucket, in milliseconds.\n - `sc` (or `search.count`, `searchCount`): The number of searches run by the datafeed.\n - `seah` (or `search.exp_avg_hour`, `searchExpAvgHour`): The exponential average search time per hour, in milliseconds.\n - `st` (or `search.time`, `searchTime`): The total time the datafeed spent searching, in milliseconds.\n - `s` (or `state`): The status of the datafeed: `starting`, `started`, `stopping`, or `stopped`.\nIf `starting`, the datafeed has been requested to start but 
has not yet\nstarted. If `started`, the datafeed is actively receiving data. If\n`stopping`, the datafeed has been requested to stop gracefully and is\ncompleting its final action. If `stopped`, the datafeed is stopped and will\nnot receive data until it is re-started.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatDatafeedColumns" @@ -105392,7 +105942,7 @@ "cat.ml_jobs-h": { "in": "query", "name": "h", - "description": "Comma-separated list of column names to display.", + "description": "Comma-separated list of column names to display.\n\nSupported values include:\n - `assignment_explanation` (or `ae`): For open anomaly detection jobs only, contains messages relating to the\nselection of a node to run the job.\n - `buckets.count` (or `bc`, `bucketsCount`): The number of bucket results produced by the job.\n - `buckets.time.exp_avg` (or `btea`, `bucketsTimeExpAvg`): Exponential moving average of all bucket processing times, in milliseconds.\n - `buckets.time.exp_avg_hour` (or `bteah`, `bucketsTimeExpAvgHour`): Exponentially-weighted moving average of bucket processing times calculated\nin a 1 hour time window, in milliseconds.\n - `buckets.time.max` (or `btmax`, `bucketsTimeMax`): Maximum among all bucket processing times, in milliseconds.\n - `buckets.time.min` (or `btmin`, `bucketsTimeMin`): Minimum among all bucket processing times, in milliseconds.\n - `buckets.time.total` (or `btt`, `bucketsTimeTotal`): Sum of all bucket processing times, in milliseconds.\n - `data.buckets` (or `db`, `dataBuckets`): The number of buckets processed.\n - `data.earliest_record` (or `der`, `dataEarliestRecord`): The timestamp of the earliest chronologically input document.\n - `data.empty_buckets` (or `deb`, `dataEmptyBuckets`): The number of buckets which did not contain any data.\n - `data.input_bytes` (or `dib`, `dataInputBytes`): The number of bytes of input data posted to the anomaly detection job.\n - `data.input_fields` (or `dif`, 
`dataInputFields`): The total number of fields in input documents posted to the anomaly\ndetection job. This count includes fields that are not used in the analysis.\nHowever, be aware that if you are using a datafeed, it extracts only the\nrequired fields from the documents it retrieves before posting them to the job.\n - `data.input_records` (or `dir`, `dataInputRecords`): The number of input documents posted to the anomaly detection job.\n - `data.invalid_dates` (or `did`, `dataInvalidDates`): The number of input documents with either a missing date field or a date\nthat could not be parsed.\n - `data.last` (or `dl`, `dataLast`): The timestamp at which data was last analyzed, according to server time.\n - `data.last_empty_bucket` (or `dleb`, `dataLastEmptyBucket`): The timestamp of the last bucket that did not contain any data.\n - `data.last_sparse_bucket` (or `dlsb`, `dataLastSparseBucket`): The timestamp of the last bucket that was considered sparse.\n - `data.latest_record` (or `dlr`, `dataLatestRecord`): The timestamp of the latest chronologically input document.\n - `data.missing_fields` (or `dmf`, `dataMissingFields`): The number of input documents that are missing a field that the anomaly\ndetection job is configured to analyze. Input documents with missing fields\nare still processed because it is possible that not all fields are missing.\n - `data.out_of_order_timestamps` (or `doot`, `dataOutOfOrderTimestamps`): The number of input documents that have a timestamp chronologically\npreceding the start of the current anomaly detection bucket offset by the\nlatency window. This information is applicable only when you provide data\nto the anomaly detection job by using the post data API. 
These out of order\ndocuments are discarded, since jobs require time series data to be in\nascending chronological order.\n - `data.processed_fields` (or `dpf`, `dataProcessedFields`): The total number of fields in all the documents that have been processed by\nthe anomaly detection job. Only fields that are specified in the detector\nconfiguration object contribute to this count. The timestamp is not\nincluded in this count.\n - `data.processed_records` (or `dpr`, `dataProcessedRecords`): The number of input documents that have been processed by the anomaly\ndetection job. This value includes documents with missing fields, since\nthey are nonetheless analyzed. If you use datafeeds and have aggregations\nin your search query, the processed record count is the number of\naggregation results processed, not the number of Elasticsearch documents.\n - `data.sparse_buckets` (or `dsb`, `dataSparseBuckets`): The number of buckets that contained few data points compared to the\nexpected number of data points.\n - `forecasts.memory.avg` (or `fmavg`, `forecastsMemoryAvg`): The average memory usage in bytes for forecasts related to the anomaly\ndetection job.\n - `forecasts.memory.max` (or `fmmax`, `forecastsMemoryMax`): The maximum memory usage in bytes for forecasts related to the anomaly\ndetection job.\n - `forecasts.memory.min` (or `fmmin`, `forecastsMemoryMin`): The minimum memory usage in bytes for forecasts related to the anomaly\ndetection job.\n - `forecasts.memory.total` (or `fmt`, `forecastsMemoryTotal`): The total memory usage in bytes for forecasts related to the anomaly\ndetection job.\n - `forecasts.records.avg` (or `fravg`, `forecastsRecordsAvg`): The average number of `m`odel_forecast` documents written for forecasts\nrelated to the anomaly detection job.\n - `forecasts.records.max` (or `frmax`, `forecastsRecordsMax`): The maximum number of `model_forecast` documents written for forecasts\nrelated to the anomaly detection job.\n - `forecasts.records.min` (or 
`frmin`, `forecastsRecordsMin`): The minimum number of `model_forecast` documents written for forecasts\nrelated to the anomaly detection job.\n - `forecasts.records.total` (or `frt`, `forecastsRecordsTotal`): The total number of `model_forecast` documents written for forecasts\nrelated to the anomaly detection job.\n - `forecasts.time.avg` (or `ftavg`, `forecastsTimeAvg`): The average runtime in milliseconds for forecasts related to the anomaly\ndetection job.\n - `forecasts.time.max` (or `ftmax`, `forecastsTimeMax`): The maximum runtime in milliseconds for forecasts related to the anomaly\ndetection job.\n - `forecasts.time.min` (or `ftmin`, `forecastsTimeMin`): The minimum runtime in milliseconds for forecasts related to the anomaly\ndetection job.\n - `forecasts.time.total` (or `ftt`, `forecastsTimeTotal`): The total runtime in milliseconds for forecasts related to the anomaly\ndetection job.\n - `forecasts.total` (or `ft`, `forecastsTotal`): The number of individual forecasts currently available for the job.\n - `id`: Identifier for the anomaly detection job.\n - `model.bucket_allocation_failures` (or `mbaf`, `modelBucketAllocationFailures`): The number of buckets for which new entities in incoming data were not\nprocessed due to insufficient model memory.\n - `model.by_fields` (or `mbf`, `modelByFields`): The number of by field values that were analyzed by the models. This value\nis cumulative for all detectors in the job.\n - `model.bytes` (or `mb`, `modelBytes`): The number of bytes of memory used by the models. This is the maximum value\nsince the last time the model was persisted. If the job is closed, this\nvalue indicates the latest size.\n - `model.bytes_exceeded` (or `mbe`, `modelBytesExceeded`): The number of bytes over the high limit for memory usage at the last\nallocation failure.\n - `model.categorization_status` (or `mcs`, `modelCategorizationStatus`): The status of categorization for the job: `ok` or `warn`. 
If `ok`,\ncategorization is performing acceptably well (or not being used at all). If\n`warn`, categorization is detecting a distribution of categories that\nsuggests the input data is inappropriate for categorization. Problems could\nbe that there is only one category, more than 90% of categories are rare,\nthe number of categories is greater than 50% of the number of categorized\ndocuments, there are no frequently matched categories, or more than 50% of\ncategories are dead.\n - `model.categorized_doc_count` (or `mcdc`, `modelCategorizedDocCount`): The number of documents that have had a field categorized.\n - `model.dead_category_count` (or `mdcc`, `modelDeadCategoryCount`): The number of categories created by categorization that will never be\nassigned again because another category’s definition makes it a superset of\nthe dead category. Dead categories are a side effect of the way\ncategorization has no prior training.\n - `model.failed_category_count` (or `mdcc`, `modelFailedCategoryCount`): The number of times that categorization wanted to create a new category but\ncouldn’t because the job had hit its model memory limit. This count does\nnot track which specific categories failed to be created. Therefore, you\ncannot use this value to determine the number of unique categories that\nwere missed.\n - `model.frequent_category_count` (or `mfcc`, `modelFrequentCategoryCount`): The number of categories that match more than 1% of categorized documents.\n - `model.log_time` (or `mlt`, `modelLogTime`): The timestamp when the model stats were gathered, according to server time.\n - `model.memory_limit` (or `mml`, `modelMemoryLimit`): The timestamp when the model stats were gathered, according to server time.\n - `model.memory_status` (or `mms`, `modelMemoryStatus`): The status of the mathematical models: `ok`, `soft_limit`, or `hard_limit`.\nIf `ok`, the models stayed below the configured value. 
If `soft_limit`, the\nmodels used more than 60% of the configured memory limit and older unused\nmodels will be pruned to free up space. Additionally, in categorization jobs\nno further category examples will be stored. If `hard_limit`, the models\nused more space than the configured memory limit. As a result, not all\nincoming data was processed.\n - `model.over_fields` (or `mof`, `modelOverFields`): The number of over field values that were analyzed by the models. This\nvalue is cumulative for all detectors in the job.\n - `model.partition_fields` (or `mpf`, `modelPartitionFields`): The number of partition field values that were analyzed by the models. This\nvalue is cumulative for all detectors in the job.\n - `model.rare_category_count` (or `mrcc`, `modelRareCategoryCount`): The number of categories that match just one categorized document.\n - `model.timestamp` (or `mt`, `modelTimestamp`): The timestamp of the last record when the model stats were gathered.\n - `model.total_category_count` (or `mtcc`, `modelTotalCategoryCount`): The number of categories created by categorization.\n - `node.address` (or `na`, `nodeAddress`): The network address of the node that runs the job. This information is\navailable only for open jobs.\n - `node.ephemeral_id` (or `ne`, `nodeEphemeralId`): The ephemeral ID of the node that runs the job. This information is\navailable only for open jobs.\n - `node.id` (or `ni`, `nodeId`): The unique identifier of the node that runs the job. This information is\navailable only for open jobs.\n - `node.name` (or `nn`, `nodeName`): The name of the node that runs the job. This information is available only\nfor open jobs.\n - `opened_time` (or `ot`): For open jobs only, the elapsed time for which the job has been open.\n - `state` (or `s`): The status of the anomaly detection job: `closed`, `closing`, `failed`,\n`opened`, or `opening`. If `closed`, the job finished successfully with its\nmodel state persisted. 
The job must be opened before it can accept further\ndata. If `closing`, the job close action is in progress and has not yet\ncompleted. A closing job cannot accept further data. If `failed`, the job\ndid not finish successfully due to an error. This situation can occur due\nto invalid input data, a fatal error occurring during the analysis, or an\nexternal interaction such as the process being killed by the Linux out of\nmemory (OOM) killer. If the job had irrevocably failed, it must be force\nclosed and then deleted. If the datafeed can be corrected, the job can be\nclosed and then re-opened. If `opened`, the job is available to receive and\nprocess data. If `opening`, the job open action is in progress and has not\nyet completed.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatAnonalyDetectorColumns" @@ -105402,7 +105952,7 @@ "cat.ml_jobs-s": { "in": "query", "name": "s", - "description": "Comma-separated list of column names or column aliases used to sort the response.", + "description": "Comma-separated list of column names or column aliases used to sort the response.\n\nSupported values include:\n - `assignment_explanation` (or `ae`): For open anomaly detection jobs only, contains messages relating to the\nselection of a node to run the job.\n - `buckets.count` (or `bc`, `bucketsCount`): The number of bucket results produced by the job.\n - `buckets.time.exp_avg` (or `btea`, `bucketsTimeExpAvg`): Exponential moving average of all bucket processing times, in milliseconds.\n - `buckets.time.exp_avg_hour` (or `bteah`, `bucketsTimeExpAvgHour`): Exponentially-weighted moving average of bucket processing times calculated\nin a 1 hour time window, in milliseconds.\n - `buckets.time.max` (or `btmax`, `bucketsTimeMax`): Maximum among all bucket processing times, in milliseconds.\n - `buckets.time.min` (or `btmin`, `bucketsTimeMin`): Minimum among all bucket processing times, in milliseconds.\n - `buckets.time.total` (or `btt`, 
`bucketsTimeTotal`): Sum of all bucket processing times, in milliseconds.\n - `data.buckets` (or `db`, `dataBuckets`): The number of buckets processed.\n - `data.earliest_record` (or `der`, `dataEarliestRecord`): The timestamp of the earliest chronologically input document.\n - `data.empty_buckets` (or `deb`, `dataEmptyBuckets`): The number of buckets which did not contain any data.\n - `data.input_bytes` (or `dib`, `dataInputBytes`): The number of bytes of input data posted to the anomaly detection job.\n - `data.input_fields` (or `dif`, `dataInputFields`): The total number of fields in input documents posted to the anomaly\ndetection job. This count includes fields that are not used in the analysis.\nHowever, be aware that if you are using a datafeed, it extracts only the\nrequired fields from the documents it retrieves before posting them to the job.\n - `data.input_records` (or `dir`, `dataInputRecords`): The number of input documents posted to the anomaly detection job.\n - `data.invalid_dates` (or `did`, `dataInvalidDates`): The number of input documents with either a missing date field or a date\nthat could not be parsed.\n - `data.last` (or `dl`, `dataLast`): The timestamp at which data was last analyzed, according to server time.\n - `data.last_empty_bucket` (or `dleb`, `dataLastEmptyBucket`): The timestamp of the last bucket that did not contain any data.\n - `data.last_sparse_bucket` (or `dlsb`, `dataLastSparseBucket`): The timestamp of the last bucket that was considered sparse.\n - `data.latest_record` (or `dlr`, `dataLatestRecord`): The timestamp of the latest chronologically input document.\n - `data.missing_fields` (or `dmf`, `dataMissingFields`): The number of input documents that are missing a field that the anomaly\ndetection job is configured to analyze. 
Input documents with missing fields\nare still processed because it is possible that not all fields are missing.\n - `data.out_of_order_timestamps` (or `doot`, `dataOutOfOrderTimestamps`): The number of input documents that have a timestamp chronologically\npreceding the start of the current anomaly detection bucket offset by the\nlatency window. This information is applicable only when you provide data\nto the anomaly detection job by using the post data API. These out of order\ndocuments are discarded, since jobs require time series data to be in\nascending chronological order.\n - `data.processed_fields` (or `dpf`, `dataProcessedFields`): The total number of fields in all the documents that have been processed by\nthe anomaly detection job. Only fields that are specified in the detector\nconfiguration object contribute to this count. The timestamp is not\nincluded in this count.\n - `data.processed_records` (or `dpr`, `dataProcessedRecords`): The number of input documents that have been processed by the anomaly\ndetection job. This value includes documents with missing fields, since\nthey are nonetheless analyzed. 
If you use datafeeds and have aggregations\nin your search query, the processed record count is the number of\naggregation results processed, not the number of Elasticsearch documents.\n - `data.sparse_buckets` (or `dsb`, `dataSparseBuckets`): The number of buckets that contained few data points compared to the\nexpected number of data points.\n - `forecasts.memory.avg` (or `fmavg`, `forecastsMemoryAvg`): The average memory usage in bytes for forecasts related to the anomaly\ndetection job.\n - `forecasts.memory.max` (or `fmmax`, `forecastsMemoryMax`): The maximum memory usage in bytes for forecasts related to the anomaly\ndetection job.\n - `forecasts.memory.min` (or `fmmin`, `forecastsMemoryMin`): The minimum memory usage in bytes for forecasts related to the anomaly\ndetection job.\n - `forecasts.memory.total` (or `fmt`, `forecastsMemoryTotal`): The total memory usage in bytes for forecasts related to the anomaly\ndetection job.\n - `forecasts.records.avg` (or `fravg`, `forecastsRecordsAvg`): The average number of `m`odel_forecast` documents written for forecasts\nrelated to the anomaly detection job.\n - `forecasts.records.max` (or `frmax`, `forecastsRecordsMax`): The maximum number of `model_forecast` documents written for forecasts\nrelated to the anomaly detection job.\n - `forecasts.records.min` (or `frmin`, `forecastsRecordsMin`): The minimum number of `model_forecast` documents written for forecasts\nrelated to the anomaly detection job.\n - `forecasts.records.total` (or `frt`, `forecastsRecordsTotal`): The total number of `model_forecast` documents written for forecasts\nrelated to the anomaly detection job.\n - `forecasts.time.avg` (or `ftavg`, `forecastsTimeAvg`): The average runtime in milliseconds for forecasts related to the anomaly\ndetection job.\n - `forecasts.time.max` (or `ftmax`, `forecastsTimeMax`): The maximum runtime in milliseconds for forecasts related to the anomaly\ndetection job.\n - `forecasts.time.min` (or `ftmin`, 
`forecastsTimeMin`): The minimum runtime in milliseconds for forecasts related to the anomaly\ndetection job.\n - `forecasts.time.total` (or `ftt`, `forecastsTimeTotal`): The total runtime in milliseconds for forecasts related to the anomaly\ndetection job.\n - `forecasts.total` (or `ft`, `forecastsTotal`): The number of individual forecasts currently available for the job.\n - `id`: Identifier for the anomaly detection job.\n - `model.bucket_allocation_failures` (or `mbaf`, `modelBucketAllocationFailures`): The number of buckets for which new entities in incoming data were not\nprocessed due to insufficient model memory.\n - `model.by_fields` (or `mbf`, `modelByFields`): The number of by field values that were analyzed by the models. This value\nis cumulative for all detectors in the job.\n - `model.bytes` (or `mb`, `modelBytes`): The number of bytes of memory used by the models. This is the maximum value\nsince the last time the model was persisted. If the job is closed, this\nvalue indicates the latest size.\n - `model.bytes_exceeded` (or `mbe`, `modelBytesExceeded`): The number of bytes over the high limit for memory usage at the last\nallocation failure.\n - `model.categorization_status` (or `mcs`, `modelCategorizationStatus`): The status of categorization for the job: `ok` or `warn`. If `ok`,\ncategorization is performing acceptably well (or not being used at all). If\n`warn`, categorization is detecting a distribution of categories that\nsuggests the input data is inappropriate for categorization. 
Problems could\nbe that there is only one category, more than 90% of categories are rare,\nthe number of categories is greater than 50% of the number of categorized\ndocuments, there are no frequently matched categories, or more than 50% of\ncategories are dead.\n - `model.categorized_doc_count` (or `mcdc`, `modelCategorizedDocCount`): The number of documents that have had a field categorized.\n - `model.dead_category_count` (or `mdcc`, `modelDeadCategoryCount`): The number of categories created by categorization that will never be\nassigned again because another category’s definition makes it a superset of\nthe dead category. Dead categories are a side effect of the way\ncategorization has no prior training.\n - `model.failed_category_count` (or `mdcc`, `modelFailedCategoryCount`): The number of times that categorization wanted to create a new category but\ncouldn’t because the job had hit its model memory limit. This count does\nnot track which specific categories failed to be created. Therefore, you\ncannot use this value to determine the number of unique categories that\nwere missed.\n - `model.frequent_category_count` (or `mfcc`, `modelFrequentCategoryCount`): The number of categories that match more than 1% of categorized documents.\n - `model.log_time` (or `mlt`, `modelLogTime`): The timestamp when the model stats were gathered, according to server time.\n - `model.memory_limit` (or `mml`, `modelMemoryLimit`): The timestamp when the model stats were gathered, according to server time.\n - `model.memory_status` (or `mms`, `modelMemoryStatus`): The status of the mathematical models: `ok`, `soft_limit`, or `hard_limit`.\nIf `ok`, the models stayed below the configured value. If `soft_limit`, the\nmodels used more than 60% of the configured memory limit and older unused\nmodels will be pruned to free up space. Additionally, in categorization jobs\nno further category examples will be stored. 
If `hard_limit`, the models\nused more space than the configured memory limit. As a result, not all\nincoming data was processed.\n - `model.over_fields` (or `mof`, `modelOverFields`): The number of over field values that were analyzed by the models. This\nvalue is cumulative for all detectors in the job.\n - `model.partition_fields` (or `mpf`, `modelPartitionFields`): The number of partition field values that were analyzed by the models. This\nvalue is cumulative for all detectors in the job.\n - `model.rare_category_count` (or `mrcc`, `modelRareCategoryCount`): The number of categories that match just one categorized document.\n - `model.timestamp` (or `mt`, `modelTimestamp`): The timestamp of the last record when the model stats were gathered.\n - `model.total_category_count` (or `mtcc`, `modelTotalCategoryCount`): The number of categories created by categorization.\n - `node.address` (or `na`, `nodeAddress`): The network address of the node that runs the job. This information is\navailable only for open jobs.\n - `node.ephemeral_id` (or `ne`, `nodeEphemeralId`): The ephemeral ID of the node that runs the job. This information is\navailable only for open jobs.\n - `node.id` (or `ni`, `nodeId`): The unique identifier of the node that runs the job. This information is\navailable only for open jobs.\n - `node.name` (or `nn`, `nodeName`): The name of the node that runs the job. This information is available only\nfor open jobs.\n - `opened_time` (or `ot`): For open jobs only, the elapsed time for which the job has been open.\n - `state` (or `s`): The status of the anomaly detection job: `closed`, `closing`, `failed`,\n`opened`, or `opening`. If `closed`, the job finished successfully with its\nmodel state persisted. The job must be opened before it can accept further\ndata. If `closing`, the job close action is in progress and has not yet\ncompleted. A closing job cannot accept further data. If `failed`, the job\ndid not finish successfully due to an error. 
This situation can occur due\nto invalid input data, a fatal error occurring during the analysis, or an\nexternal interaction such as the process being killed by the Linux out of\nmemory (OOM) killer. If the job had irrevocably failed, it must be force\nclosed and then deleted. If the datafeed can be corrected, the job can be\nclosed and then re-opened. If `opened`, the job is available to receive and\nprocess data. If `opening`, the job open action is in progress and has not\nyet completed.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatAnonalyDetectorColumns" @@ -105453,7 +106003,7 @@ "cat.ml_trained_models-h": { "in": "query", "name": "h", - "description": "A comma-separated list of column names to display.", + "description": "A comma-separated list of column names to display.\n\nSupported values include:\n - `create_time` (or `ct`): The time when the trained model was created.\n - `created_by` (or `c`, `createdBy`): Information on the creator of the trained model.\n - `data_frame_analytics_id` (or `df`, `dataFrameAnalytics`, `dfid`): Identifier for the data frame analytics job that created the model. 
Only\ndisplayed if it is still available.\n - `description` (or `d`): The description of the trained model.\n - `heap_size` (or `hs`, `modelHeapSize`): The estimated heap size to keep the trained model in memory.\n - `id`: Identifier for the trained model.\n - `ingest.count` (or `ic`, `ingestCount`): The total number of documents that are processed by the model.\n - `ingest.current` (or `icurr`, `ingestCurrent`): The total number of document that are currently being handled by the\ntrained model.\n - `ingest.failed` (or `if`, `ingestFailed`): The total number of failed ingest attempts with the trained model.\n - `ingest.pipelines` (or `ip`, `ingestPipelines`): The total number of ingest pipelines that are referencing the trained\nmodel.\n - `ingest.time` (or `it`, `ingestTime`): The total time that is spent processing documents with the trained model.\n - `license` (or `l`): The license level of the trained model.\n - `operations` (or `o`, `modelOperations`): The estimated number of operations to use the trained model. This number\nhelps measuring the computational complexity of the model.\n - `version` (or `v`): The Elasticsearch version number in which the trained model was created.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatTrainedModelsColumns" @@ -105463,7 +106013,7 @@ "cat.ml_trained_models-s": { "in": "query", "name": "s", - "description": "A comma-separated list of column names or aliases used to sort the response.", + "description": "A comma-separated list of column names or aliases used to sort the response.\n\nSupported values include:\n - `create_time` (or `ct`): The time when the trained model was created.\n - `created_by` (or `c`, `createdBy`): Information on the creator of the trained model.\n - `data_frame_analytics_id` (or `df`, `dataFrameAnalytics`, `dfid`): Identifier for the data frame analytics job that created the model. 
Only\ndisplayed if it is still available.\n - `description` (or `d`): The description of the trained model.\n - `heap_size` (or `hs`, `modelHeapSize`): The estimated heap size to keep the trained model in memory.\n - `id`: Identifier for the trained model.\n - `ingest.count` (or `ic`, `ingestCount`): The total number of documents that are processed by the model.\n - `ingest.current` (or `icurr`, `ingestCurrent`): The total number of document that are currently being handled by the\ntrained model.\n - `ingest.failed` (or `if`, `ingestFailed`): The total number of failed ingest attempts with the trained model.\n - `ingest.pipelines` (or `ip`, `ingestPipelines`): The total number of ingest pipelines that are referencing the trained\nmodel.\n - `ingest.time` (or `it`, `ingestTime`): The total time that is spent processing documents with the trained model.\n - `license` (or `l`): The license level of the trained model.\n - `operations` (or `o`, `modelOperations`): The estimated number of operations to use the trained model. 
This number\nhelps measuring the computational complexity of the model.\n - `version` (or `v`): The Elasticsearch version number in which the trained model was created.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatTrainedModelsColumns" @@ -105910,7 +106460,7 @@ "cat.transforms-h": { "in": "query", "name": "h", - "description": "Comma-separated list of column names to display.", + "description": "Comma-separated list of column names to display.\n\nSupported values include:\n - `changes_last_detection_time` (or `cldt`): The timestamp when changes were last detected in the source indices.\n - `checkpoint` (or `cp`): The sequence number for the checkpoint.\n - `checkpoint_duration_time_exp_avg` (or `cdtea`, `checkpointTimeExpAvg`): Exponential moving average of the duration of the checkpoint, in\nmilliseconds.\n - `checkpoint_progress` (or `c`, `checkpointProgress`): The progress of the next checkpoint that is currently in progress.\n - `create_time` (or `ct`, `createTime`): The time the transform was created.\n - `delete_time` (or `dtime`): The amount of time spent deleting, in milliseconds.\n - `description` (or `d`): The description of the transform.\n - `dest_index` (or `di`, `destIndex`): The destination index for the transform. The mappings of the destination\nindex are deduced based on the source fields when possible. If alternate\nmappings are required, use the Create index API prior to starting the\ntransform.\n - `documents_deleted` (or `docd`): The number of documents that have been deleted from the destination index\ndue to the retention policy for this transform.\n - `documents_indexed` (or `doci`): The number of documents that have been indexed into the destination index\nfor the transform.\n - `docs_per_second` (or `dps`): Specifies a limit on the number of input documents per second. This setting\nthrottles the transform by adding a wait time between search requests. 
The\ndefault value is `null`, which disables throttling.\n - `documents_processed` (or `docp`): The number of documents that have been processed from the source index of\nthe transform.\n - `frequency` (or `f`): The interval between checks for changes in the source indices when the\ntransform is running continuously. Also determines the retry interval in\nthe event of transient failures while the transform is searching or\nindexing. The minimum value is `1s` and the maximum is `1h`. The default\nvalue is `1m`.\n - `id`: Identifier for the transform.\n - `index_failure` (or `if`): The number of indexing failures.\n - `index_time` (or `itime`): The amount of time spent indexing, in milliseconds.\n - `index_total` (or `it`): The number of index operations.\n - `indexed_documents_exp_avg` (or `idea`): Exponential moving average of the number of new documents that have been\nindexed.\n - `last_search_time` (or `lst`, `lastSearchTime`): The timestamp of the last search in the source indices. This field is only\nshown if the transform is running.\n - `max_page_search_size` (or `mpsz`): Defines the initial page size to use for the composite aggregation for each\ncheckpoint. If circuit breaker exceptions occur, the page size is\ndynamically adjusted to a lower value. The minimum value is `10` and the\nmaximum is `65,536`. The default value is `500`.\n - `pages_processed` (or `pp`): The number of search or bulk index operations processed. 
Documents are\nprocessed in batches instead of individually.\n - `pipeline` (or `p`): The unique identifier for an ingest pipeline.\n - `processed_documents_exp_avg` (or `pdea`): Exponential moving average of the number of documents that have been\nprocessed.\n - `processing_time` (or `pt`): The amount of time spent processing results, in milliseconds.\n - `reason` (or `r`): If a transform has a `failed` state, this property provides details about\nthe reason for the failure.\n - `search_failure` (or `sf`): The number of search failures.\n - `search_time` (or `stime`): The amount of time spent searching, in milliseconds.\n - `search_total` (or `st`): The number of search operations on the source index for the transform.\n - `source_index` (or `si`, `sourceIndex`): The source indices for the transform. It can be a single index, an index\npattern (for example, `\"my-index-*\"`), an array of indices (for example,\n`[\"my-index-000001\", \"my-index-000002\"]`), or an array of index patterns\n(for example, `[\"my-index-*\", \"my-other-index-*\"]`. For remote indices use\nthe syntax `\"remote_name:index_name\"`. If any indices are in remote\nclusters then the master node and at least one transform node must have the\n`remote_cluster_client` node role.\n - `state` (or `s`): The status of the transform, which can be one of the following values:\n\n* `aborting`: The transform is aborting.\n* `failed`: The transform failed. For more information about the failure,\ncheck the reason field.\n* `indexing`: The transform is actively processing data and creating new\ndocuments.\n* `started`: The transform is running but not actively indexing data.\n* `stopped`: The transform is stopped.\n* `stopping`: The transform is stopping.\n - `transform_type` (or `tt`): Indicates the type of transform: `batch` or `continuous`.\n - `trigger_count` (or `tc`): The number of times the transform has been triggered by the scheduler. 
For\nexample, the scheduler triggers the transform indexer to check for updates\nor ingest new data at an interval specified in the `frequency` property.\n - `version` (or `v`): The version of Elasticsearch that existed on the node when the transform\nwas created.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatTransformColumns" @@ -105920,7 +106470,7 @@ "cat.transforms-s": { "in": "query", "name": "s", - "description": "Comma-separated list of column names or column aliases used to sort the response.", + "description": "Comma-separated list of column names or column aliases used to sort the response.\n\nSupported values include:\n - `changes_last_detection_time` (or `cldt`): The timestamp when changes were last detected in the source indices.\n - `checkpoint` (or `cp`): The sequence number for the checkpoint.\n - `checkpoint_duration_time_exp_avg` (or `cdtea`, `checkpointTimeExpAvg`): Exponential moving average of the duration of the checkpoint, in\nmilliseconds.\n - `checkpoint_progress` (or `c`, `checkpointProgress`): The progress of the next checkpoint that is currently in progress.\n - `create_time` (or `ct`, `createTime`): The time the transform was created.\n - `delete_time` (or `dtime`): The amount of time spent deleting, in milliseconds.\n - `description` (or `d`): The description of the transform.\n - `dest_index` (or `di`, `destIndex`): The destination index for the transform. The mappings of the destination\nindex are deduced based on the source fields when possible. 
If alternate\nmappings are required, use the Create index API prior to starting the\ntransform.\n - `documents_deleted` (or `docd`): The number of documents that have been deleted from the destination index\ndue to the retention policy for this transform.\n - `documents_indexed` (or `doci`): The number of documents that have been indexed into the destination index\nfor the transform.\n - `docs_per_second` (or `dps`): Specifies a limit on the number of input documents per second. This setting\nthrottles the transform by adding a wait time between search requests. The\ndefault value is `null`, which disables throttling.\n - `documents_processed` (or `docp`): The number of documents that have been processed from the source index of\nthe transform.\n - `frequency` (or `f`): The interval between checks for changes in the source indices when the\ntransform is running continuously. Also determines the retry interval in\nthe event of transient failures while the transform is searching or\nindexing. The minimum value is `1s` and the maximum is `1h`. The default\nvalue is `1m`.\n - `id`: Identifier for the transform.\n - `index_failure` (or `if`): The number of indexing failures.\n - `index_time` (or `itime`): The amount of time spent indexing, in milliseconds.\n - `index_total` (or `it`): The number of index operations.\n - `indexed_documents_exp_avg` (or `idea`): Exponential moving average of the number of new documents that have been\nindexed.\n - `last_search_time` (or `lst`, `lastSearchTime`): The timestamp of the last search in the source indices. This field is only\nshown if the transform is running.\n - `max_page_search_size` (or `mpsz`): Defines the initial page size to use for the composite aggregation for each\ncheckpoint. If circuit breaker exceptions occur, the page size is\ndynamically adjusted to a lower value. The minimum value is `10` and the\nmaximum is `65,536`. 
The default value is `500`.\n - `pages_processed` (or `pp`): The number of search or bulk index operations processed. Documents are\nprocessed in batches instead of individually.\n - `pipeline` (or `p`): The unique identifier for an ingest pipeline.\n - `processed_documents_exp_avg` (or `pdea`): Exponential moving average of the number of documents that have been\nprocessed.\n - `processing_time` (or `pt`): The amount of time spent processing results, in milliseconds.\n - `reason` (or `r`): If a transform has a `failed` state, this property provides details about\nthe reason for the failure.\n - `search_failure` (or `sf`): The number of search failures.\n - `search_time` (or `stime`): The amount of time spent searching, in milliseconds.\n - `search_total` (or `st`): The number of search operations on the source index for the transform.\n - `source_index` (or `si`, `sourceIndex`): The source indices for the transform. It can be a single index, an index\npattern (for example, `\"my-index-*\"`), an array of indices (for example,\n`[\"my-index-000001\", \"my-index-000002\"]`), or an array of index patterns\n(for example, `[\"my-index-*\", \"my-other-index-*\"]`. For remote indices use\nthe syntax `\"remote_name:index_name\"`. If any indices are in remote\nclusters then the master node and at least one transform node must have the\n`remote_cluster_client` node role.\n - `state` (or `s`): The status of the transform, which can be one of the following values:\n\n* `aborting`: The transform is aborting.\n* `failed`: The transform failed. 
For more information about the failure,\ncheck the reason field.\n* `indexing`: The transform is actively processing data and creating new\ndocuments.\n* `started`: The transform is running but not actively indexing data.\n* `stopped`: The transform is stopped.\n* `stopping`: The transform is stopping.\n - `transform_type` (or `tt`): Indicates the type of transform: `batch` or `continuous`.\n - `trigger_count` (or `tc`): The number of times the transform has been triggered by the scheduler. For\nexample, the scheduler triggers the transform indexer to check for updates\nor ingest new data at an interval specified in the `frequency` property.\n - `version` (or `v`): The version of Elasticsearch that existed on the node when the transform\nwas created.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatTransformColumns" @@ -106074,7 +106624,7 @@ "cluster.health-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.", + "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -106174,7 +106724,7 @@ "cluster.health-wait_for_status": { "in": "query", "name": "wait_for_status", - "description": "One of green, yellow or red. 
Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status.", + "description": "One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status.\n\nSupported values include:\n - `green` (or `GREEN`): All shards are assigned.\n - `yellow` (or `YELLOW`): All primary shards are assigned, but one or more replica shards are unassigned. If a node in the cluster fails, some data could be unavailable until that node is repaired.\n - `red` (or `RED`): One or more primary shards are unassigned, so some data is unavailable. This can occur briefly during cluster startup as primary shards are assigned.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.HealthStatus" @@ -106247,7 +106797,7 @@ "cluster.state-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.", + "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -106420,7 +106970,7 @@ "count-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -106562,7 +107112,7 @@ "create-op_type": { "in": "query", "name": "op_type", - "description": "Set to `create` to only index the document if it does not already exist (put if absent).\nIf a document with the specified `_id` already exists, the indexing operation will fail.\nThe behavior is the same as using the `/_create` endpoint.\nIf a document ID is specified, this paramater defaults to `index`.\nOtherwise, it defaults to `create`.\nIf the request targets a data stream, an `op_type` of `create` is required.", + "description": "Set to `create` to only index the document if it does not already exist (put if absent).\nIf a document with the specified `_id` already exists, the indexing operation will fail.\nThe behavior is the same as using the `/_create` endpoint.\nIf a document ID is specified, this paramater defaults to `index`.\nOtherwise, it defaults to `create`.\nIf the request targets a data stream, an `op_type` of `create` is required.\n\nSupported values include:\n - `index`: Overwrite any documents that already exist.\n - `create`: Only index documents that do not already exist.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.OpType" @@ -106642,7 +107192,7 @@ "create-version_type": { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of the stored document or if there is no existing 
document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" @@ -106723,6 +107273,7 @@ "eql.search-expand_wildcards": { "in": "query", "name": "expand_wildcards", + "description": "\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -106935,7 +107486,7 @@ "field_caps-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`.", + "description": "The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -107046,7 +107597,7 @@ "fleet.msearch-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.", + "description": "Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -107106,7 +107657,7 @@ "fleet.msearch-search_type": { "in": "query", "name": "search_type", - "description": "Indicates whether global term and document frequencies should be used when scoring returned documents.", + "description": "Indicates whether global term and document frequencies should be used when scoring returned documents.\n\nSupported values include:\n - `query_then_fetch`: Documents are scored using local term and document frequencies for the shard. This is usually faster but less accurate.\n - `dfs_query_then_fetch`: Documents are scored using global term and document frequencies across all shards. 
This is usually slower but more accurate.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SearchType" @@ -107249,6 +107800,7 @@ "fleet.search-expand_wildcards": { "in": "query", "name": "expand_wildcards", + "description": "\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -107348,6 +107900,7 @@ "fleet.search-search_type": { "in": "query", "name": "search_type", + "description": "\n\nSupported values include:\n - `query_then_fetch`: Documents are scored using local term and document frequencies for the shard. This is usually faster but less accurate.\n - `dfs_query_then_fetch`: Documents are scored using global term and document frequencies across all shards. 
This is usually slower but more accurate.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SearchType" @@ -107388,6 +107941,7 @@ "fleet.search-suggest_mode": { "in": "query", "name": "suggest_mode", + "description": "\n\nSupported values include:\n - `missing`: Only generate suggestions for terms that are not in the shard.\n - `popular`: Only suggest terms that occur in more docs on the shard than the original term.\n - `always`: Suggest any matching suggestions based on terms in the suggest text.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SuggestMode" @@ -107749,7 +108303,7 @@ "index-op_type": { "in": "query", "name": "op_type", - "description": "Set to `create` to only index the document if it does not already exist (put if absent).\nIf a document with the specified `_id` already exists, the indexing operation will fail.\nThe behavior is the same as using the `/_create` endpoint.\nIf a document ID is specified, this paramater defaults to `index`.\nOtherwise, it defaults to `create`.\nIf the request targets a data stream, an `op_type` of `create` is required.", + "description": "Set to `create` to only index the document if it does not already exist (put if absent).\nIf a document with the specified `_id` already exists, the indexing operation will fail.\nThe behavior is the same as using the `/_create` endpoint.\nIf a document ID is specified, this paramater defaults to `index`.\nOtherwise, it defaults to `create`.\nIf the request targets a data stream, an `op_type` of `create` is required.\n\nSupported values include:\n - `index`: Overwrite any documents that already exist.\n - `create`: Only index documents that do not already exist.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.OpType" @@ -107809,7 +108363,7 @@ "index-version_type": { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values 
include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of the stored document or if there is no existing document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" @@ -107891,7 +108445,7 @@ "indices.clear_cache-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -108036,7 +108590,7 @@ "indices.data_streams_stats-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of data stream that wildcard patterns can match.\nSupports comma-separated values, such as `open,hidden`.", + "description": "Type of data stream that wildcard patterns can match.\nSupports comma-separated values, such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -108120,7 +108674,7 @@ "indices.exists_alias-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. 
Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -108171,7 +108725,7 @@ "indices.flush-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -108232,7 +108786,7 @@ "indices.forcemerge-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.", + "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -108324,7 +108878,7 @@ "indices.get_alias-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. 
Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -108365,7 +108919,7 @@ "indices.get_data_stream-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of data stream that wildcard patterns can match.\nSupports comma-separated values, such as `open,hidden`.", + "description": "Type of data stream that wildcard patterns can match.\nSupports comma-separated values, such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -108437,7 +108991,7 @@ "indices.get_field_mapping-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -108549,7 +109103,7 @@ "indices.get_mapping-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -108621,7 +109175,7 @@ "indices.get_settings-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -108826,7 +109380,7 @@ "indices.put_mapping-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -108897,7 +109451,7 @@ "indices.put_settings-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match. If the request can target\ndata streams, this argument determines whether wildcard expressions match\nhidden data streams. Supports comma-separated values, such as\n`open,hidden`.", + "description": "Type of index that wildcard patterns can match. 
If the request can target\ndata streams, this argument determines whether wildcard expressions match\nhidden data streams. Supports comma-separated values, such as\n`open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -109070,7 +109624,7 @@ "indices.refresh-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -109111,7 +109665,7 @@ "indices.reload_search_analyzers-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.", + "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -109162,7 +109716,7 @@ "indices.resolve_cluster-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\nNOTE: This option is only supported when specifying an index expression. 
You will get an error if you specify index\noptions to the `_resolve/cluster` API endpoint that takes no index expression.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\nNOTE: This option is only supported when specifying an index expression. You will get an error if you specify index\noptions to the `_resolve/cluster` API endpoint that takes no index expression.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -109295,7 +109849,7 @@ "indices.segments-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden 
ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -109336,7 +109890,7 @@ "indices.shard_stores-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match. If the request can target data streams,\nthis argument determines whether wildcard expressions match hidden data streams.", + "description": "Type of index that wildcard patterns can match. If the request can target data streams,\nthis argument determines whether wildcard expressions match hidden data streams.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -109356,7 +109910,7 @@ "indices.shard_stores-status": { "in": "query", "name": "status", - "description": "List of shard health statuses used to limit the request.", + "description": "List of shard health statuses used to limit the request.\n\nSupported values include:\n - `green`: The primary shard and all replica shards are assigned.\n - `yellow`: One or more replica shards are unassigned.\n - `red`: The primary shard is unassigned.\n - `all`: Return all shards, regardless of health status.\n\n", "deprecated": false, "schema": { "oneOf": [ @@ -109563,7 +110117,7 @@ "indices.stats-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument\ndetermines whether wildcard expressions match hidden data streams. Supports comma-separated values,\nsuch as `open,hidden`.", + "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument\ndetermines whether wildcard expressions match hidden data streams. Supports comma-separated values,\nsuch as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -109724,7 +110278,7 @@ "indices.validate_query-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -111101,7 +111655,7 @@ "ml.get_trained_models-include": { "in": "query", "name": "include", - "description": "A comma delimited string of optional fields to include in the response\nbody.", + "description": "A comma delimited string of optional fields to include in the response\nbody.\n\nSupported values include:\n - `definition`: Includes the model definition.\n - `feature_importance_baseline`: Includes the baseline for feature importance values.\n - `hyperparameters`: Includes the information about hyperparameters used to train the model.\nThis information consists of the value, the absolute and relative\nimportance of the hyperparameter as well as an indicator of whether it was\nspecified by the user or tuned during hyperparameter optimization.\n - `total_feature_importance`: Includes the total feature importance for the training data set. The\nbaseline and total feature importance values are returned in the metadata\nfield in the response body.\n - `definition_status`: Includes the model definition status.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/ml._types.Include" @@ -111255,7 +111809,7 @@ "msearch-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.", + "description": "Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. 
Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -111345,7 +111899,7 @@ "msearch-search_type": { "in": "query", "name": "search_type", - "description": "Indicates whether global term and document frequencies should be used when scoring returned documents.", + "description": "Indicates whether global term and document frequencies should be used when scoring returned documents.\n\nSupported values include:\n - `query_then_fetch`: Documents are scored using local term and document frequencies for the shard. This is usually faster but less accurate.\n - `dfs_query_then_fetch`: Documents are scored using global term and document frequencies across all shards. This is usually slower but more accurate.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SearchType" @@ -111396,7 +111950,7 @@ "msearch_template-search_type": { "in": "query", "name": "search_type", - "description": "The type of the search operation.", + "description": "The type of the search operation.\n\nSupported values include:\n - `query_then_fetch`: Documents are scored using local term and document frequencies for the shard. This is usually faster but less accurate.\n - `dfs_query_then_fetch`: Documents are scored using global term and document frequencies across all shards. 
This is usually slower but more accurate.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SearchType" @@ -111550,7 +112104,7 @@ "mtermvectors-version_type": { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of the stored document or if there is no existing document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" @@ -111935,7 +112489,7 @@ "rank_eval-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.", + "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -112171,7 +112725,7 @@ "search-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values such as `open,hidden`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -112291,7 +112845,7 @@ "search-search_type": { "in": "query", "name": "search_type", - "description": "Indicates how distributed term frequencies are calculated for relevance scoring.", + "description": "Indicates how distributed term frequencies are calculated for relevance scoring.\n\nSupported values include:\n - `query_then_fetch`: Documents are scored using local term and document frequencies for the shard. This is usually faster but less accurate.\n - `dfs_query_then_fetch`: Documents are scored using global term and document frequencies across all shards. 
This is usually slower but more accurate.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SearchType" @@ -112334,7 +112888,7 @@ "search-suggest_mode": { "in": "query", "name": "suggest_mode", - "description": "The suggest mode.\nThis parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.", + "description": "The suggest mode.\nThis parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.\n\nSupported values include:\n - `missing`: Only generate suggestions for terms that are not in the shard.\n - `popular`: Only suggest terms that occur in more docs on the shard than the original term.\n - `always`: Suggest any matching suggestions based on terms in the suggest text.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SuggestMode" @@ -112705,7 +113259,7 @@ "search_shards-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -112796,7 +113350,7 @@ "search_template-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -112876,7 +113430,7 @@ "search_template-search_type": { "in": "query", "name": "search_type", - "description": "The type of the search operation.", + "description": "The type of the search operation.\n\nSupported values include:\n - `query_then_fetch`: Documents are scored using local term and document frequencies for the shard. This is usually faster but less accurate.\n - `dfs_query_then_fetch`: Documents are scored using global term and document frequencies across all shards. 
This is usually slower but more accurate.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SearchType" @@ -112937,7 +113491,7 @@ "searchable_snapshots.clear_cache-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.", + "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -113864,7 +114418,7 @@ "termvectors-version_type": { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of the stored document or if there is no existing document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" @@ 
-114182,7 +114736,7 @@ "watcher.stats-metric": { "in": "path", "name": "metric", - "description": "Defines which additional metrics are included in the response.", + "description": "Defines which additional metrics are included in the response.\n\nSupported values include: `_all` (or `all`), `queued_watches`, `current_watches`, `pending_watches`\n\n", "required": true, "deprecated": false, "schema": { @@ -114213,7 +114767,7 @@ "watcher.stats-metric_": { "in": "query", "name": "metric", - "description": "Defines which additional metrics are included in the response.", + "description": "Defines which additional metrics are included in the response.\n\nSupported values include: `_all` (or `all`), `queued_watches`, `current_watches`, `pending_watches`\n\n", "deprecated": false, "schema": { "oneOf": [ diff --git a/output/openapi/elasticsearch-serverless-openapi.json b/output/openapi/elasticsearch-serverless-openapi.json index addec436f9..f04be6cab7 100644 --- a/output/openapi/elasticsearch-serverless-openapi.json +++ b/output/openapi/elasticsearch-serverless-openapi.json @@ -1824,7 +1824,7 @@ { "in": "path", "name": "target", - "description": "Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest.", + "description": "Limits the information returned to the specific target. 
Supports a comma-separated list, such as http,ingest.\n\nSupported values include: `_all`, `http`, `ingest`, `thread_pool`, `script`\n\n", "required": true, "deprecated": false, "schema": { @@ -2404,7 +2404,7 @@ { "in": "query", "name": "job_type", - "description": "A comma-separated list of job types to fetch the sync jobs for", + "description": "A comma-separated list of job types to fetch the sync jobs for\n\nSupported values include: `full`, `incremental`, `access_control`\n\n", "deprecated": false, "schema": { "oneOf": [ @@ -3938,7 +3938,7 @@ { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of the stored document or if there is no existing document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" @@ -4195,7 +4195,7 @@ { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of 
the stored document or if there is no existing document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" @@ -4355,7 +4355,7 @@ { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of the stored document or if there is no existing document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" @@ -4426,7 +4426,7 @@ { "in": "query", "name": "conflicts", - "description": "What to do if delete by query hits version conflicts: `abort` or `proceed`.", + "description": "What to do if delete by query hits version conflicts: `abort` or `proceed`.\n\nSupported values include:\n - `abort`: Stop reindexing if there are conflicts.\n - `proceed`: Continue reindexing even if there are conflicts.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.Conflicts" @@ -4456,7 +4456,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this 
argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -4596,7 +4596,7 @@ { "in": "query", "name": "search_type", - "description": "The type of the search operation.\nAvailable options include `query_then_fetch` and `dfs_query_then_fetch`.", + "description": "The type of the search operation.\nAvailable options include `query_then_fetch` and `dfs_query_then_fetch`.\n\nSupported values include:\n - `query_then_fetch`: Documents are scored using local term and document frequencies for the shard. This is usually faster but less accurate.\n - `dfs_query_then_fetch`: Documents are scored using global term and document frequencies across all shards. 
This is usually slower but more accurate.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SearchType" @@ -5717,7 +5717,7 @@ { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of the stored document or if there is no existing document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" @@ -5854,7 +5854,7 @@ { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of the stored document or if there is no existing document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" @@ 
-6324,7 +6324,7 @@ { "in": "path", "name": "block", - "description": "The block type to add to the index.", + "description": "The block type to add to the index.\n\nSupported values include:\n - `metadata`: Disable metadata changes, such as closing the index.\n - `read`: Disable read operations.\n - `read_only`: Disable write operations and metadata changes.\n - `write`: Disable write operations. However, metadata changes are still allowed.\n\n", "required": true, "deprecated": false, "schema": { @@ -6345,7 +6345,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -6562,7 +6562,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard expressions can match. If the request can target data streams, this argument\ndetermines whether wildcard expressions match hidden data streams. 
Supports comma-separated values,\nsuch as open,hidden.", + "description": "Type of index that wildcard expressions can match. If the request can target data streams, this argument\ndetermines whether wildcard expressions match hidden data streams. Supports comma-separated values,\nsuch as open,hidden.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -6622,7 +6622,7 @@ { "in": "query", "name": "features", - "description": "Return only information on specified index features", + "description": "Return only information on specified index features\n\nSupported values include: `aliases`, `mappings`, `settings`\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/indices.get.Features" @@ -6798,7 +6798,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - 
`open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -6881,7 +6881,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -7057,7 +7057,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of data stream that wildcard patterns can match. Supports comma-separated values,such as `open,hidden`.", + "description": "Type of data stream that wildcard patterns can match. 
Supports comma-separated values,such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -7757,7 +7757,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of data stream that wildcard patterns can match.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of data stream that wildcard patterns can match.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -7838,7 +7838,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of data stream that wildcard patterns can match.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `hidden`, `open`, `closed`, `none`.", + "description": "Type of data stream that wildcard patterns can match.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `hidden`, `open`, `closed`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -8617,7 +8617,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -12771,7 +12771,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument determines\nwhether wildcard expressions match hidden data streams. Supports comma-separated values.", + "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument determines\nwhether wildcard expressions match hidden data streams. 
Supports comma-separated values.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -13193,7 +13193,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument determines\nwhether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are:\n\n* `all`: Match any data stream or index, including hidden ones.\n* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both.\n* `none`: Wildcard patterns are not accepted.\n* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.", + "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument determines\nwhether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are:\n\n* `all`: Match any data stream or index, including hidden ones.\n* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both.\n* `none`: Wildcard patterns are not accepted.\n* `open`: Match open, non-hidden indices. 
Also matches any non-hidden data stream.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -15546,7 +15546,7 @@ { "in": "query", "name": "wait_for", - "description": "Specifies the allocation status to wait for before returning.", + "description": "Specifies the allocation status to wait for before returning.\n\nSupported values include:\n - `started`: The trained model is started on at least one node.\n - `starting`: Trained model deployment is starting but it is not yet deployed on any nodes.\n - `fully_allocated`: Trained model deployment has started on all valid nodes.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/ml._types.DeploymentAllocationState" @@ -15969,7 +15969,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument determines\nwhether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are:\n\n* `all`: Match any data stream or index, including hidden ones.\n* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both.\n* `none`: Wildcard patterns are not accepted.\n* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.", + "description": "Type of index that wildcard patterns can match. 
If the request can target data streams, this argument determines\nwhether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are:\n\n* `all`: Match any data stream or index, including hidden ones.\n* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both.\n* `none`: Wildcard patterns are not accepted.\n* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -17187,7 +17187,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. 
Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -22943,7 +22943,7 @@ { "in": "query", "name": "conflicts", - "description": "The preferred behavior when update by query hits version conflicts: `abort` or `proceed`.", + "description": "The preferred behavior when update by query hits version conflicts: `abort` or `proceed`.\n\nSupported values include:\n - `abort`: Stop reindexing if there are conflicts.\n - `proceed`: Continue reindexing even if there are conflicts.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.Conflicts" @@ -22973,7 +22973,7 @@ { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -23123,7 +23123,7 @@ { "in": "query", "name": "search_type", - "description": "The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`.", + "description": "The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`.\n\nSupported values include:\n - `query_then_fetch`: Documents are scored using local term and document frequencies for the shard. This is usually faster but less accurate.\n - `dfs_query_then_fetch`: Documents are scored using global term and document frequencies across all shards. This is usually slower but more accurate.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SearchType" @@ -32049,7 +32049,7 @@ "description": "Language value, such as _arabic_ or _thai_. Defaults to _english_.\nEach language value corresponds to a predefined list of stop words in Lucene. 
See Stop words by language for supported language values and their stop words.\nAlso accepts an array of stop words.", "oneOf": [ { - "type": "string" + "$ref": "#/components/schemas/_types.analysis.StopWordLanguage" }, { "type": "array", @@ -32059,6 +32059,48 @@ } ] }, + "_types.analysis.StopWordLanguage": { + "type": "string", + "enum": [ + "_arabic_", + "_armenian_", + "_basque_", + "_bengali_", + "_brazilian_", + "_bulgarian_", + "_catalan_", + "_cjk_", + "_czech_", + "_danish_", + "_dutch_", + "_english_", + "_estonian_", + "_finnish_", + "_french_", + "_galician_", + "_german_", + "_greek_", + "_hindi_", + "_hungarian_", + "_indonesian_", + "_irish_", + "_italian_", + "_latvian_", + "_lithuanian_", + "_norwegian_", + "_persian_", + "_portuguese_", + "_romanian_", + "_russian_", + "_serbian_", + "_sorani_", + "_spanish_", + "_swedish_", + "_thai_", + "_turkish_", + "_none_" + ] + }, "_types.query_dsl.MultiMatchQuery": { "allOf": [ { @@ -38992,6 +39034,7 @@ "$ref": "#/components/schemas/_types.Fields" }, "order": { + "description": "\n\nSupported values include: `asc` (or `ASC`), `desc` (or `DESC`)\n\n", "oneOf": [ { "$ref": "#/components/schemas/indices._types.SegmentSortOrder" @@ -39005,6 +39048,7 @@ ] }, "mode": { + "description": "\n\nSupported values include: `min` (or `MIN`), `max` (or `MAX`)\n\n", "oneOf": [ { "$ref": "#/components/schemas/indices._types.SegmentSortMode" @@ -39018,6 +39062,7 @@ ] }, "missing": { + "description": "\n\nSupported values include: `_last`, `_first`\n\n", "oneOf": [ { "$ref": "#/components/schemas/indices._types.SegmentSortMissing" @@ -39960,25 +40005,30 @@ "_types.analysis.SnowballLanguage": { "type": "string", "enum": [ + "Arabic", "Armenian", "Basque", "Catalan", "Danish", "Dutch", "English", + "Estonian", "Finnish", "French", "German", "German2", "Hungarian", "Italian", + "Irish", "Kp", + "Lithuanian", "Lovins", "Norwegian", "Porter", "Portuguese", "Romanian", "Russian", + "Serbian", "Spanish", "Swedish", "Turkish" @@ 
-41103,15 +41153,33 @@ "propertyName": "type" }, "oneOf": [ + { + "$ref": "#/components/schemas/_types.analysis.ApostropheTokenFilter" + }, + { + "$ref": "#/components/schemas/_types.analysis.ArabicNormalizationTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.AsciiFoldingTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.CjkBigramTokenFilter" + }, + { + "$ref": "#/components/schemas/_types.analysis.CjkWidthTokenFilter" + }, + { + "$ref": "#/components/schemas/_types.analysis.ClassicTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.CommonGramsTokenFilter" }, { "$ref": "#/components/schemas/_types.analysis.ConditionTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.DecimalDigitTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.DelimitedPayloadTokenFilter" }, @@ -41124,12 +41192,24 @@ { "$ref": "#/components/schemas/_types.analysis.FingerprintTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.FlattenGraphTokenFilter" + }, + { + "$ref": "#/components/schemas/_types.analysis.GermanNormalizationTokenFilter" + }, + { + "$ref": "#/components/schemas/_types.analysis.HindiNormalizationTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.HunspellTokenFilter" }, { "$ref": "#/components/schemas/_types.analysis.HyphenationDecompounderTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.IndicNormalizationTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.KeepTypesTokenFilter" }, @@ -41139,6 +41219,9 @@ { "$ref": "#/components/schemas/_types.analysis.KeywordMarkerTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.KeywordRepeatTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.KStemTokenFilter" }, @@ -41151,6 +41234,9 @@ { "$ref": "#/components/schemas/_types.analysis.LowercaseTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.MinHashTokenFilter" + }, { "$ref": 
"#/components/schemas/_types.analysis.MultiplexerTokenFilter" }, @@ -41166,6 +41252,9 @@ { "$ref": "#/components/schemas/_types.analysis.PatternReplaceTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.PersianNormalizationTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.PorterStemTokenFilter" }, @@ -41178,12 +41267,24 @@ { "$ref": "#/components/schemas/_types.analysis.ReverseTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.ScandinavianFoldingTokenFilter" + }, + { + "$ref": "#/components/schemas/_types.analysis.ScandinavianNormalizationTokenFilter" + }, + { + "$ref": "#/components/schemas/_types.analysis.SerbianNormalizationTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.ShingleTokenFilter" }, { "$ref": "#/components/schemas/_types.analysis.SnowballTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.SoraniNormalizationTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.StemmerOverrideTokenFilter" }, @@ -41217,6 +41318,9 @@ { "$ref": "#/components/schemas/_types.analysis.WordDelimiterTokenFilter" }, + { + "$ref": "#/components/schemas/_types.analysis.JaStopTokenFilter" + }, { "$ref": "#/components/schemas/_types.analysis.KuromojiStemmerTokenFilter" }, @@ -41246,7 +41350,7 @@ } ] }, - "_types.analysis.AsciiFoldingTokenFilter": { + "_types.analysis.ApostropheTokenFilter": { "allOf": [ { "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" @@ -41257,11 +41361,8 @@ "type": { "type": "string", "enum": [ - "asciifolding" + "apostrophe" ] - }, - "preserve_original": { - "$ref": "#/components/schemas/_spec_utils.Stringifiedboolean" } }, "required": [ @@ -41278,6 +41379,134 @@ } } }, + "_types.analysis.ArabicNormalizationTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "arabic_normalization" + ] + } + }, + "required": [ + 
"type" + ] + } + ] + }, + "_types.analysis.AsciiFoldingTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "asciifolding" + ] + }, + "preserve_original": { + "$ref": "#/components/schemas/_spec_utils.Stringifiedboolean" + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.CjkBigramTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "cjk_bigram" + ] + }, + "ignored_scripts": { + "description": "Array of character scripts for which to disable bigrams.", + "type": "array", + "items": { + "$ref": "#/components/schemas/_types.analysis.CjkBigramIgnoredScript" + } + }, + "output_unigrams": { + "description": "If `true`, emit tokens in both bigram and unigram form. If `false`, a CJK character is output in unigram form when it has no adjacent characters. Defaults to `false`.", + "type": "boolean" + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.CjkBigramIgnoredScript": { + "type": "string", + "enum": [ + "han", + "hangul", + "hiragana", + "katakana" + ] + }, + "_types.analysis.CjkWidthTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "cjk_width" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.ClassicTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "classic" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "_types.analysis.CommonGramsTokenFilter": { "allOf": [ { @@ -41293,18 +41522,22 @@ ] }, "common_words": { + "description": "A list of tokens. 
The filter generates bigrams for these tokens.\nEither this or the `common_words_path` parameter is required.", "type": "array", "items": { "type": "string" } }, "common_words_path": { + "description": "Path to a file containing a list of tokens. The filter generates bigrams for these tokens.\nThis path must be absolute or relative to the `config` location. The file must be UTF-8 encoded. Each token in the file must be separated by a line break.\nEither this or the `common_words` parameter is required.", "type": "string" }, "ignore_case": { + "description": "If `true`, matches for common words matching are case-insensitive. Defaults to `false`.", "type": "boolean" }, "query_mode": { + "description": "If `true`, the filter excludes the following tokens from the output:\n- Unigrams for common words\n- Unigrams for terms followed by common words\nDefaults to `false`. We recommend enabling this parameter for search analyzers.", "type": "boolean" } }, @@ -41329,6 +41562,7 @@ ] }, "filter": { + "description": "Array of token filters. If a token matches the predicate script in the `script` parameter, these filters are applied to the token in the order provided.", "type": "array", "items": { "type": "string" @@ -41346,6 +41580,27 @@ } ] }, + "_types.analysis.DecimalDigitTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "decimal_digit" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "_types.analysis.DelimitedPayloadTokenFilter": { "allOf": [ { @@ -41361,6 +41616,7 @@ ] }, "delimiter": { + "description": "Character used to separate tokens from payloads. Defaults to `|`.", "type": "string" }, "encoding": { @@ -41396,9 +41652,11 @@ ] }, "max_gram": { + "description": "Maximum character length of a gram. For custom token filters, defaults to `2`. 
For the built-in edge_ngram filter, defaults to `1`.", "type": "number" }, "min_gram": { + "description": "Minimum character length of a gram. Defaults to `1`.", "type": "number" }, "side": { @@ -41436,12 +41694,14 @@ ] }, "articles": { + "description": "List of elisions to remove.\nTo be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. Both the elision and apostrophe are removed.\nFor custom `elision` filters, either this parameter or `articles_path` must be specified.", "type": "array", "items": { "type": "string" } }, "articles_path": { + "description": "Path to a file that contains a list of elisions to remove.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each elision in the file must be separated by a line break.\nTo be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. Both the elision and apostrophe are removed.\nFor custom `elision` filters, either this parameter or `articles` must be specified.", "type": "string" }, "articles_case": { @@ -41469,9 +41729,11 @@ ] }, "max_output_size": { + "description": "Maximum character length, including whitespace, of the output token. Defaults to `255`. Concatenated tokens longer than this will result in no token output.", "type": "number" }, "separator": { + "description": "Character to use to concatenate the token stream input. 
Defaults to a space.", "type": "string" } }, @@ -41481,6 +41743,69 @@ } ] }, + "_types.analysis.FlattenGraphTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "flatten_graph" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.GermanNormalizationTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "german_normalization" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.HindiNormalizationTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "hindi_normalization" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "_types.analysis.HunspellTokenFilter": { "allOf": [ { @@ -41496,15 +41821,19 @@ ] }, "dedup": { + "description": "If `true`, duplicate tokens are removed from the filter’s output. Defaults to `true`.", "type": "boolean" }, "dictionary": { + "description": "One or more `.dic` files (e.g, `en_US.dic`, my_custom.dic) to use for the Hunspell dictionary.\nBy default, the `hunspell` filter uses all `.dic` files in the `<$ES_PATH_CONF>/hunspell/` directory specified using the `lang`, `language`, or `locale` parameter.", "type": "string" }, "locale": { + "description": "Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary.", "type": "string" }, "longest_only": { + "description": "If `true`, only the longest stemmed version of each token is included in the output. If `false`, all stemmed versions of the token are included. 
Defaults to `false`.", "type": "boolean" } }, @@ -41528,10 +41857,23 @@ "enum": [ "hyphenation_decompounder" ] + }, + "hyphenation_patterns_path": { + "description": "Path to an Apache FOP (Formatting Objects Processor) XML hyphenation pattern file.\nThis path must be absolute or relative to the `config` location. Only FOP v1.2 compatible files are supported.", + "type": "string" + }, + "no_sub_matches": { + "description": "If `true`, do not match sub tokens in tokens that are in the word list. Defaults to `false`.", + "type": "boolean" + }, + "no_overlapping_matches": { + "description": "If `true`, do not allow overlapping tokens. Defaults to `false`.", + "type": "boolean" } }, "required": [ - "type" + "type", + "hyphenation_patterns_path" ] } ] @@ -41544,34 +41886,58 @@ { "type": "object", "properties": { - "hyphenation_patterns_path": { - "type": "string" - }, "max_subword_size": { + "description": "Maximum subword character length. Longer subword tokens are excluded from the output. Defaults to `15`.", "type": "number" }, "min_subword_size": { + "description": "Minimum subword character length. Shorter subword tokens are excluded from the output. Defaults to `2`.", "type": "number" }, "min_word_size": { + "description": "Minimum word character length. Shorter word tokens are excluded from the output. Defaults to `5`.", "type": "number" }, "only_longest_match": { + "description": "If `true`, only include the longest matching subword. Defaults to `false`.", "type": "boolean" }, "word_list": { + "description": "A list of subwords to look for in the token stream. If found, the subword is included in the token output.\nEither this parameter or `word_list_path` must be specified.", "type": "array", "items": { "type": "string" } }, "word_list_path": { + "description": "Path to a file that contains a list of subwords to find in the token stream. 
If found, the subword is included in the token output.\nThis path must be absolute or relative to the config location, and the file must be UTF-8 encoded. Each token in the file must be separated by a line break.\nEither this parameter or `word_list` must be specified.", "type": "string" } } } ] }, + "_types.analysis.IndicNormalizationTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "indic_normalization" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "_types.analysis.KeepTypesTokenFilter": { "allOf": [ { @@ -41590,6 +41956,7 @@ "$ref": "#/components/schemas/_types.analysis.KeepTypesMode" }, "types": { + "description": "List of token types to keep or remove.", "type": "array", "items": { "type": "string" @@ -41597,7 +41964,8 @@ } }, "required": [ - "type" + "type", + "types" ] } ] @@ -41624,15 +41992,18 @@ ] }, "keep_words": { + "description": "List of words to keep. Only tokens that match words in this list are included in the output.\nEither this parameter or `keep_words_path` must be specified.", "type": "array", "items": { "type": "string" } }, "keep_words_case": { + "description": "If `true`, lowercase all keep words. Defaults to `false`.", "type": "boolean" }, "keep_words_path": { + "description": "Path to a file that contains a list of words to keep. Only tokens that match words in this list are included in the output.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each word in the file must be separated by a line break.\nEither this parameter or `keep_words` must be specified.", "type": "string" } }, @@ -41657,9 +42028,11 @@ ] }, "ignore_case": { + "description": "If `true`, matching for the `keywords` and `keywords_path` parameters ignores letter case. Defaults to `false`.", "type": "boolean" }, "keywords": { + "description": "Array of keywords. 
Tokens that match these keywords are not stemmed.\nThis parameter, `keywords_path`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`.", "oneOf": [ { "type": "string" @@ -41673,9 +42046,11 @@ ] }, "keywords_path": { + "description": "Path to a file that contains a list of keywords. Tokens that match these keywords are not stemmed.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each word in the file must be separated by a line break.\nThis parameter, `keywords`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`.", "type": "string" }, "keywords_pattern": { + "description": "Java regular expression used to match tokens. Tokens that match this expression are marked as keywords and not stemmed.\nThis parameter, `keywords`, or `keywords_path` must be specified. You cannot specify this parameter and `keywords` or `keywords_pattern`.", "type": "string" } }, @@ -41685,6 +42060,27 @@ } ] }, + "_types.analysis.KeywordRepeatTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "keyword_repeat" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "_types.analysis.KStemTokenFilter": { "allOf": [ { @@ -41721,9 +42117,11 @@ ] }, "max": { + "description": "Maximum character length of a token. Longer tokens are excluded from the output. Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`.", "type": "number" }, "min": { + "description": "Minimum character length of a token. Shorter tokens are excluded from the output. Defaults to `0`.", "type": "number" } }, @@ -41748,6 +42146,7 @@ ] }, "consume_all_tokens": { + "description": "If `true`, the limit filter exhausts the token stream, even if the `max_token_count` has already been reached. 
Defaults to `false`.", "type": "boolean" }, "max_token_count": { @@ -41775,7 +42174,52 @@ ] }, "language": { - "type": "string" + "$ref": "#/components/schemas/_types.analysis.LowercaseTokenFilterLanguages" + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.LowercaseTokenFilterLanguages": { + "type": "string", + "enum": [ + "greek", + "irish", + "turkish" + ] + }, + "_types.analysis.MinHashTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "min_hash" + ] + }, + "bucket_count": { + "description": "Number of buckets to which hashes are assigned. Defaults to `512`.", + "type": "number" + }, + "hash_count": { + "description": "Number of ways to hash each token in the stream. Defaults to `1`.", + "type": "number" + }, + "hash_set_size": { + "description": "Number of hashes to keep from each bucket. Defaults to `1`.\nHashes are retained by ascending size, starting with the bucket’s smallest hash first.", + "type": "number" + }, + "with_rotation": { + "description": "If `true`, the filter fills empty buckets with the value of the first non-empty bucket to its circular right if the `hash_set_size` is `1`. If the `bucket_count` argument is greater than 1, this parameter defaults to `true`. Otherwise, this parameter defaults to `false`.", + "type": "boolean" } }, "required": [ @@ -41799,6 +42243,7 @@ ] }, "filters": { + "description": "A list of token filters to apply to incoming tokens.", "type": "array", "items": { "type": "string" @@ -41830,9 +42275,11 @@ ] }, "max_gram": { + "description": "Maximum length of characters in a gram. Defaults to `2`.", "type": "number" }, "min_gram": { + "description": "Minimum length of characters in a gram. 
Defaults to `1`.", "type": "number" }, "preserve_original": { @@ -41860,6 +42307,7 @@ ] }, "stoptags": { + "description": "An array of part-of-speech tags that should be removed.", "type": "array", "items": { "type": "string" @@ -41887,6 +42335,7 @@ ] }, "patterns": { + "description": "A list of regular expressions to match.", "type": "array", "items": { "type": "string" @@ -41918,15 +42367,15 @@ ] }, "all": { + "description": "If `true`, all substrings matching the pattern parameter’s regular expression are replaced. If `false`, the filter replaces only the first matching substring in each token. Defaults to `true`.", "type": "boolean" }, - "flags": { - "type": "string" - }, "pattern": { + "description": "Regular expression, written in Java’s regular expression syntax. The filter replaces token substrings matching this pattern with the substring in the `replacement` parameter.", "type": "string" }, "replacement": { + "description": "Replacement substring. Defaults to an empty substring (`\"\"`).", "type": "string" } }, @@ -41937,6 +42386,27 @@ } ] }, + "_types.analysis.PersianNormalizationTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "persian_normalization" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "_types.analysis.PorterStemTokenFilter": { "allOf": [ { @@ -42025,6 +42495,69 @@ } ] }, + "_types.analysis.ScandinavianFoldingTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "scandinavian_folding" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.ScandinavianNormalizationTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + 
"enum": [ + "scandinavian_normalization" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.SerbianNormalizationTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "serbian_normalization" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "_types.analysis.ShingleTokenFilter": { "allOf": [ { @@ -42040,35 +42573,25 @@ ] }, "filler_token": { + "description": "String used in shingles as a replacement for empty positions that do not contain a token. This filler token is only used in shingles, not original unigrams. Defaults to an underscore (`_`).", "type": "string" }, "max_shingle_size": { - "oneOf": [ - { - "type": "number" - }, - { - "type": "string" - } - ] + "$ref": "#/components/schemas/_spec_utils.Stringifiedinteger" }, "min_shingle_size": { - "oneOf": [ - { - "type": "number" - }, - { - "type": "string" - } - ] + "$ref": "#/components/schemas/_spec_utils.Stringifiedinteger" }, "output_unigrams": { + "description": "If `true`, the output includes the original input tokens. If `false`, the output only includes shingles; the original input tokens are removed. Defaults to `true`.", "type": "boolean" }, "output_unigrams_if_no_shingles": { + "description": "If `true`, the output includes the original input tokens only if no shingles are produced; if shingles are produced, the output only includes shingles. Defaults to `false`.", "type": "boolean" }, "token_separator": { + "description": "Separator used to concatenate adjacent tokens to form a shingle. 
Defaults to a space (`\" \"`).", "type": "string" } }, @@ -42102,6 +42625,27 @@ } ] }, + "_types.analysis.SoraniNormalizationTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "sorani_normalization" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "_types.analysis.StemmerOverrideTokenFilter": { "allOf": [ { @@ -42117,12 +42661,14 @@ ] }, "rules": { + "description": "A list of mapping rules to use.", "type": "array", "items": { "type": "string" } }, "rules_path": { + "description": "A path (either relative to `config` location, or absolute) to a list of mappings.", "type": "string" } }, @@ -42171,15 +42717,18 @@ ] }, "ignore_case": { + "description": "If `true`, stop word matching is case insensitive. For example, if `true`, a stop word of the matches and removes `The`, `THE`, or `the`. Defaults to `false`.", "type": "boolean" }, "remove_trailing": { + "description": "If `true`, the last token of a stream is removed if it’s a stop word. Defaults to `true`.", "type": "boolean" }, "stopwords": { "$ref": "#/components/schemas/_types.analysis.StopWords" }, "stopwords_path": { + "description": "Path to a file that contains a list of stop words to remove.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. 
Each stop word in the file must be separated by a line break.", "type": "string" } }, @@ -42192,7 +42741,7 @@ "_types.analysis.SynonymGraphTokenFilter": { "allOf": [ { - "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + "$ref": "#/components/schemas/_types.analysis.SynonymTokenFilterBase" }, { "type": "object", @@ -42202,38 +42751,58 @@ "enum": [ "synonym_graph" ] - }, + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.SynonymTokenFilterBase": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { "expand": { + "description": "Expands definitions for equivalent synonym rules. Defaults to `true`.", "type": "boolean" }, "format": { "$ref": "#/components/schemas/_types.analysis.SynonymFormat" }, "lenient": { + "description": "If `true` ignores errors while parsing the synonym rules. It is important to note that only those synonym rules which cannot get parsed are ignored. Defaults to the value of the `updateable` setting.", "type": "boolean" }, "synonyms": { + "description": "Used to define inline synonyms.", "type": "array", "items": { "type": "string" } }, "synonyms_path": { + "description": "Used to provide a synonym file. This path must be absolute or relative to the `config` location.", "type": "string" }, "synonyms_set": { + "description": "Provide a synonym set created via Synonyms Management APIs.", "type": "string" }, "tokenizer": { + "deprecated": true, + "description": "Controls the tokenizers that will be used to tokenize the synonym, this parameter is for backwards compatibility for indices that created before 6.0.", "type": "string" }, "updateable": { + "description": "If `true` allows reloading search analyzers to pick up changes to synonym files. Only to be used for search analyzers. 
Defaults to `false`.", "type": "boolean" } - }, - "required": [ - "type" - ] + } } ] }, @@ -42247,7 +42816,7 @@ "_types.analysis.SynonymTokenFilter": { "allOf": [ { - "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + "$ref": "#/components/schemas/_types.analysis.SynonymTokenFilterBase" }, { "type": "object", @@ -42257,33 +42826,6 @@ "enum": [ "synonym" ] - }, - "expand": { - "type": "boolean" - }, - "format": { - "$ref": "#/components/schemas/_types.analysis.SynonymFormat" - }, - "lenient": { - "type": "boolean" - }, - "synonyms": { - "type": "array", - "items": { - "type": "string" - } - }, - "synonyms_path": { - "type": "string" - }, - "synonyms_set": { - "type": "string" - }, - "tokenizer": { - "type": "string" - }, - "updateable": { - "type": "boolean" } }, "required": [ @@ -42328,6 +42870,7 @@ ] }, "length": { + "description": "Character limit for each token. Tokens exceeding this limit are truncated. Defaults to `10`.", "type": "number" } }, @@ -42352,6 +42895,7 @@ ] }, "only_on_same_position": { + "description": "If `true`, only remove duplicate tokens in the same position. Defaults to `false`.", "type": "boolean" } }, @@ -42385,7 +42929,7 @@ "_types.analysis.WordDelimiterGraphTokenFilter": { "allOf": [ { - "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + "$ref": "#/components/schemas/_types.analysis.WordDelimiterTokenFilterBase" }, { "type": "object", @@ -42397,56 +42941,103 @@ ] }, "adjust_offsets": { + "description": "If `true`, the filter adjusts the offsets of split or catenated tokens to better reflect their actual position in the token stream. Defaults to `true`.", "type": "boolean" }, + "ignore_keywords": { + "description": "If `true`, the filter skips tokens with a keyword attribute of true. 
Defaults to `false`.", + "type": "boolean" + } + }, + "required": [ + "type" + ] + } + ] + }, + "_types.analysis.WordDelimiterTokenFilterBase": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" + }, + { + "type": "object", + "properties": { "catenate_all": { + "description": "If `true`, the filter produces catenated tokens for chains of alphanumeric characters separated by non-alphabetic delimiters. Defaults to `false`.", "type": "boolean" }, "catenate_numbers": { + "description": "If `true`, the filter produces catenated tokens for chains of numeric characters separated by non-alphabetic delimiters. Defaults to `false`.", "type": "boolean" }, "catenate_words": { + "description": "If `true`, the filter produces catenated tokens for chains of alphabetical characters separated by non-alphabetic delimiters. Defaults to `false`.", "type": "boolean" }, "generate_number_parts": { + "description": "If `true`, the filter includes tokens consisting of only numeric characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`.", "type": "boolean" }, "generate_word_parts": { - "type": "boolean" - }, - "ignore_keywords": { + "description": "If `true`, the filter includes tokens consisting of only alphabetical characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`.", "type": "boolean" }, "preserve_original": { "$ref": "#/components/schemas/_spec_utils.Stringifiedboolean" }, "protected_words": { + "description": "Array of tokens the filter won’t split.", "type": "array", "items": { "type": "string" } }, "protected_words_path": { + "description": "Path to a file that contains a list of tokens the filter won’t split.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. 
Each token in the file must be separated by a line break.", "type": "string" }, "split_on_case_change": { + "description": "If `true`, the filter splits tokens at letter case transitions. For example: camelCase -> [ camel, Case ]. Defaults to `true`.", "type": "boolean" }, "split_on_numerics": { + "description": "If `true`, the filter splits tokens at letter-number transitions. For example: j2se -> [ j, 2, se ]. Defaults to `true`.", "type": "boolean" }, "stem_english_possessive": { + "description": "If `true`, the filter removes the English possessive (`'s`) from the end of each token. For example: O'Neil's -> [ O, Neil ]. Defaults to `true`.", "type": "boolean" }, "type_table": { + "description": "Array of custom type mappings for characters. This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters.", "type": "array", "items": { "type": "string" } }, "type_table_path": { + "description": "Path to a file that contains custom type mappings for characters. 
This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters.", "type": "string" } + } + } + ] + }, + "_types.analysis.WordDelimiterTokenFilter": { + "allOf": [ + { + "$ref": "#/components/schemas/_types.analysis.WordDelimiterTokenFilterBase" + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "word_delimiter" + ] + } }, "required": [ "type" @@ -42454,7 +43045,7 @@ } ] }, - "_types.analysis.WordDelimiterTokenFilter": { + "_types.analysis.JaStopTokenFilter": { "allOf": [ { "$ref": "#/components/schemas/_types.analysis.TokenFilterBase" @@ -42465,53 +43056,11 @@ "type": { "type": "string", "enum": [ - "word_delimiter" + "ja_stop" ] }, - "catenate_all": { - "type": "boolean" - }, - "catenate_numbers": { - "type": "boolean" - }, - "catenate_words": { - "type": "boolean" - }, - "generate_number_parts": { - "type": "boolean" - }, - "generate_word_parts": { - "type": "boolean" - }, - "preserve_original": { - "$ref": "#/components/schemas/_spec_utils.Stringifiedboolean" - }, - "protected_words": { - "type": "array", - "items": { - "type": "string" - } - }, - "protected_words_path": { - "type": "string" - }, - "split_on_case_change": { - "type": "boolean" - }, - "split_on_numerics": { - "type": "boolean" - }, - "stem_english_possessive": { - "type": "boolean" - }, - "type_table": { - "type": "array", - "items": { - "type": "string" - } - }, - "type_table_path": { - "type": "string" + "stopwords": { + "$ref": "#/components/schemas/_types.analysis.StopWords" } }, "required": [ @@ -42789,6 +43338,7 @@ "$ref": "#/components/schemas/_types.analysis.PhoneticEncoder" }, "languageset": { + "description": "\n\nSupported values include: `any`, `common`, `cyrillic`, `english`, `french`, `german`, `hebrew`, `hungarian`, `polish`, `romanian`, `russian`, `spanish`\n\n", "oneOf": [ { "$ref": "#/components/schemas/_types.analysis.PhoneticLanguage" @@ -53612,7 +54162,7 @@ "type": "object", 
"properties": { "actions": { - "description": "The set of actions to be triggered when the rule applies. If more than one action is specified the effects of all actions are combined.", + "description": "The set of actions to be triggered when the rule applies. If more than one action is specified the effects of all actions are combined.\n\nSupported values include:\n - `skip_result`: The result will not be created. Unless you also specify `skip_model_update`, the model will be updated as usual with the corresponding series value.\n - `skip_model_update`: The value for that series will not be used to update the model. Unless you also specify `skip_result`, the results will be created as usual. This action is suitable when certain values are expected to be consistently anomalous and they affect the model in a way that negatively impacts the rest of the results.\n\n", "type": "array", "items": { "$ref": "#/components/schemas/ml._types.RuleAction" @@ -62824,7 +63374,7 @@ "async_search.submit-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.", + "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -62914,7 +63464,7 @@ "async_search.submit-search_type": { "in": "query", "name": "search_type", - "description": "Search operation type", + "description": "Search operation type\n\nSupported values include:\n - `query_then_fetch`: Documents are scored using local term and document frequencies for the shard. This is usually faster but less accurate.\n - `dfs_query_then_fetch`: Documents are scored using global term and document frequencies across all shards. This is usually slower but more accurate.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SearchType" @@ -62957,7 +63507,7 @@ "async_search.submit-suggest_mode": { "in": "query", "name": "suggest_mode", - "description": "Specify suggest mode", + "description": "Specify suggest mode\n\nSupported values include:\n - `missing`: Only generate suggestions for terms that are not in the shard.\n - `popular`: Only suggest terms that occur in more docs on the shard than the original term.\n - `always`: Suggest any matching suggestions based on terms in the suggest text.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SuggestMode" @@ -63309,7 +63859,7 @@ "cat.aliases-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.\n\nSupported values include:\n - `all`: Match 
any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -63432,7 +63982,7 @@ "cat.indices-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.", + "description": "The type of index that wildcard patterns can match.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -63442,7 +63992,7 @@ "cat.indices-health": { "in": "query", "name": "health", - "description": "The health status used to limit returned indices. By default, the response includes indices of any health status.", + "description": "The health status used to limit returned indices. By default, the response includes indices of any health status.\n\nSupported values include:\n - `green` (or `GREEN`): All shards are assigned.\n - `yellow` (or `YELLOW`): All primary shards are assigned, but one or more replica shards are unassigned. 
If a node in the cluster fails, some data could be unavailable until that node is repaired.\n - `red` (or `RED`): One or more primary shards are unassigned, so some data is unavailable. This can occur briefly during cluster startup as primary shards are assigned.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.HealthStatus" @@ -63543,7 +64093,7 @@ "cat.ml_data_frame_analytics-h": { "in": "query", "name": "h", - "description": "Comma-separated list of column names to display.", + "description": "Comma-separated list of column names to display.\n\nSupported values include:\n - `assignment_explanation` (or `ae`): Contains messages relating to the selection of a node.\n - `create_time` (or `ct`, `createTime`): The time when the data frame analytics job was created.\n - `description` (or `d`): A description of a job.\n - `dest_index` (or `di`, `destIndex`): Name of the destination index.\n - `failure_reason` (or `fr`, `failureReason`): Contains messages about the reason why a data frame analytics job failed.\n - `id`: Identifier for the data frame analytics job.\n - `model_memory_limit` (or `mml`, `modelMemoryLimit`): The approximate maximum amount of memory resources that are permitted for\nthe data frame analytics job.\n - `node.address` (or `na`, `nodeAddress`): The network address of the node that the data frame analytics job is\nassigned to.\n - `node.ephemeral_id` (or `ne`, `nodeEphemeralId`): The ephemeral ID of the node that the data frame analytics job is assigned\nto.\n - `node.id` (or `ni`, `nodeId`): The unique identifier of the node that the data frame analytics job is\nassigned to.\n - `node.name` (or `nn`, `nodeName`): The name of the node that the data frame analytics job is assigned to.\n - `progress` (or `p`): The progress report of the data frame analytics job by phase.\n - `source_index` (or `si`, `sourceIndex`): Name of the source index.\n - `state` (or `s`): Current state of the data frame analytics job.\n - `type` (or 
`t`): The type of analysis that the data frame analytics job performs.\n - `version` (or `v`): The Elasticsearch version number in which the data frame analytics job was\ncreated.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatDfaColumns" @@ -63553,7 +64103,7 @@ "cat.ml_data_frame_analytics-s": { "in": "query", "name": "s", - "description": "Comma-separated list of column names or column aliases used to sort the\nresponse.", + "description": "Comma-separated list of column names or column aliases used to sort the\nresponse.\n\nSupported values include:\n - `assignment_explanation` (or `ae`): Contains messages relating to the selection of a node.\n - `create_time` (or `ct`, `createTime`): The time when the data frame analytics job was created.\n - `description` (or `d`): A description of a job.\n - `dest_index` (or `di`, `destIndex`): Name of the destination index.\n - `failure_reason` (or `fr`, `failureReason`): Contains messages about the reason why a data frame analytics job failed.\n - `id`: Identifier for the data frame analytics job.\n - `model_memory_limit` (or `mml`, `modelMemoryLimit`): The approximate maximum amount of memory resources that are permitted for\nthe data frame analytics job.\n - `node.address` (or `na`, `nodeAddress`): The network address of the node that the data frame analytics job is\nassigned to.\n - `node.ephemeral_id` (or `ne`, `nodeEphemeralId`): The ephemeral ID of the node that the data frame analytics job is assigned\nto.\n - `node.id` (or `ni`, `nodeId`): The unique identifier of the node that the data frame analytics job is\nassigned to.\n - `node.name` (or `nn`, `nodeName`): The name of the node that the data frame analytics job is assigned to.\n - `progress` (or `p`): The progress report of the data frame analytics job by phase.\n - `source_index` (or `si`, `sourceIndex`): Name of the source index.\n - `state` (or `s`): Current state of the data frame analytics job.\n - `type` (or `t`): The 
type of analysis that the data frame analytics job performs.\n - `version` (or `v`): The Elasticsearch version number in which the data frame analytics job was\ncreated.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatDfaColumns" @@ -63594,7 +64144,7 @@ "cat.ml_datafeeds-h": { "in": "query", "name": "h", - "description": "Comma-separated list of column names to display.", + "description": "Comma-separated list of column names to display.\n\nSupported values include:\n - `ae` (or `assignment_explanation`): For started datafeeds only, contains messages relating to the selection of\na node.\n - `bc` (or `buckets.count`, `bucketsCount`): The number of buckets processed.\n - `id`: A numerical character string that uniquely identifies the datafeed.\n - `na` (or `node.address`, `nodeAddress`): For started datafeeds only, the network address of the node where the\ndatafeed is started.\n - `ne` (or `node.ephemeral_id`, `nodeEphemeralId`): For started datafeeds only, the ephemeral ID of the node where the\ndatafeed is started.\n - `ni` (or `node.id`, `nodeId`): For started datafeeds only, the unique identifier of the node where the\ndatafeed is started.\n - `nn` (or `node.name`, `nodeName`): For started datafeeds only, the name of the node where the datafeed is\nstarted.\n - `sba` (or `search.bucket_avg`, `searchBucketAvg`): The average search time per bucket, in milliseconds.\n - `sc` (or `search.count`, `searchCount`): The number of searches run by the datafeed.\n - `seah` (or `search.exp_avg_hour`, `searchExpAvgHour`): The exponential average search time per hour, in milliseconds.\n - `st` (or `search.time`, `searchTime`): The total time the datafeed spent searching, in milliseconds.\n - `s` (or `state`): The status of the datafeed: `starting`, `started`, `stopping`, or `stopped`.\nIf `starting`, the datafeed has been requested to start but has not yet\nstarted. If `started`, the datafeed is actively receiving data. 
If\n`stopping`, the datafeed has been requested to stop gracefully and is\ncompleting its final action. If `stopped`, the datafeed is stopped and will\nnot receive data until it is re-started.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatDatafeedColumns" @@ -63604,7 +64154,7 @@ "cat.ml_datafeeds-s": { "in": "query", "name": "s", - "description": "Comma-separated list of column names or column aliases used to sort the response.", + "description": "Comma-separated list of column names or column aliases used to sort the response.\n\nSupported values include:\n - `ae` (or `assignment_explanation`): For started datafeeds only, contains messages relating to the selection of\na node.\n - `bc` (or `buckets.count`, `bucketsCount`): The number of buckets processed.\n - `id`: A numerical character string that uniquely identifies the datafeed.\n - `na` (or `node.address`, `nodeAddress`): For started datafeeds only, the network address of the node where the\ndatafeed is started.\n - `ne` (or `node.ephemeral_id`, `nodeEphemeralId`): For started datafeeds only, the ephemeral ID of the node where the\ndatafeed is started.\n - `ni` (or `node.id`, `nodeId`): For started datafeeds only, the unique identifier of the node where the\ndatafeed is started.\n - `nn` (or `node.name`, `nodeName`): For started datafeeds only, the name of the node where the datafeed is\nstarted.\n - `sba` (or `search.bucket_avg`, `searchBucketAvg`): The average search time per bucket, in milliseconds.\n - `sc` (or `search.count`, `searchCount`): The number of searches run by the datafeed.\n - `seah` (or `search.exp_avg_hour`, `searchExpAvgHour`): The exponential average search time per hour, in milliseconds.\n - `st` (or `search.time`, `searchTime`): The total time the datafeed spent searching, in milliseconds.\n - `s` (or `state`): The status of the datafeed: `starting`, `started`, `stopping`, or `stopped`.\nIf `starting`, the datafeed has been requested to start but has 
not yet\nstarted. If `started`, the datafeed is actively receiving data. If\n`stopping`, the datafeed has been requested to stop gracefully and is\ncompleting its final action. If `stopped`, the datafeed is stopped and will\nnot receive data until it is re-started.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatDatafeedColumns" @@ -63655,7 +64205,7 @@ "cat.ml_jobs-h": { "in": "query", "name": "h", - "description": "Comma-separated list of column names to display.", + "description": "Comma-separated list of column names to display.\n\nSupported values include:\n - `assignment_explanation` (or `ae`): For open anomaly detection jobs only, contains messages relating to the\nselection of a node to run the job.\n - `buckets.count` (or `bc`, `bucketsCount`): The number of bucket results produced by the job.\n - `buckets.time.exp_avg` (or `btea`, `bucketsTimeExpAvg`): Exponential moving average of all bucket processing times, in milliseconds.\n - `buckets.time.exp_avg_hour` (or `bteah`, `bucketsTimeExpAvgHour`): Exponentially-weighted moving average of bucket processing times calculated\nin a 1 hour time window, in milliseconds.\n - `buckets.time.max` (or `btmax`, `bucketsTimeMax`): Maximum among all bucket processing times, in milliseconds.\n - `buckets.time.min` (or `btmin`, `bucketsTimeMin`): Minimum among all bucket processing times, in milliseconds.\n - `buckets.time.total` (or `btt`, `bucketsTimeTotal`): Sum of all bucket processing times, in milliseconds.\n - `data.buckets` (or `db`, `dataBuckets`): The number of buckets processed.\n - `data.earliest_record` (or `der`, `dataEarliestRecord`): The timestamp of the earliest chronologically input document.\n - `data.empty_buckets` (or `deb`, `dataEmptyBuckets`): The number of buckets which did not contain any data.\n - `data.input_bytes` (or `dib`, `dataInputBytes`): The number of bytes of input data posted to the anomaly detection job.\n - `data.input_fields` (or `dif`, 
`dataInputFields`): The total number of fields in input documents posted to the anomaly\ndetection job. This count includes fields that are not used in the analysis.\nHowever, be aware that if you are using a datafeed, it extracts only the\nrequired fields from the documents it retrieves before posting them to the job.\n - `data.input_records` (or `dir`, `dataInputRecords`): The number of input documents posted to the anomaly detection job.\n - `data.invalid_dates` (or `did`, `dataInvalidDates`): The number of input documents with either a missing date field or a date\nthat could not be parsed.\n - `data.last` (or `dl`, `dataLast`): The timestamp at which data was last analyzed, according to server time.\n - `data.last_empty_bucket` (or `dleb`, `dataLastEmptyBucket`): The timestamp of the last bucket that did not contain any data.\n - `data.last_sparse_bucket` (or `dlsb`, `dataLastSparseBucket`): The timestamp of the last bucket that was considered sparse.\n - `data.latest_record` (or `dlr`, `dataLatestRecord`): The timestamp of the latest chronologically input document.\n - `data.missing_fields` (or `dmf`, `dataMissingFields`): The number of input documents that are missing a field that the anomaly\ndetection job is configured to analyze. Input documents with missing fields\nare still processed because it is possible that not all fields are missing.\n - `data.out_of_order_timestamps` (or `doot`, `dataOutOfOrderTimestamps`): The number of input documents that have a timestamp chronologically\npreceding the start of the current anomaly detection bucket offset by the\nlatency window. This information is applicable only when you provide data\nto the anomaly detection job by using the post data API. 
These out of order\ndocuments are discarded, since jobs require time series data to be in\nascending chronological order.\n - `data.processed_fields` (or `dpf`, `dataProcessedFields`): The total number of fields in all the documents that have been processed by\nthe anomaly detection job. Only fields that are specified in the detector\nconfiguration object contribute to this count. The timestamp is not\nincluded in this count.\n - `data.processed_records` (or `dpr`, `dataProcessedRecords`): The number of input documents that have been processed by the anomaly\ndetection job. This value includes documents with missing fields, since\nthey are nonetheless analyzed. If you use datafeeds and have aggregations\nin your search query, the processed record count is the number of\naggregation results processed, not the number of Elasticsearch documents.\n - `data.sparse_buckets` (or `dsb`, `dataSparseBuckets`): The number of buckets that contained few data points compared to the\nexpected number of data points.\n - `forecasts.memory.avg` (or `fmavg`, `forecastsMemoryAvg`): The average memory usage in bytes for forecasts related to the anomaly\ndetection job.\n - `forecasts.memory.max` (or `fmmax`, `forecastsMemoryMax`): The maximum memory usage in bytes for forecasts related to the anomaly\ndetection job.\n - `forecasts.memory.min` (or `fmmin`, `forecastsMemoryMin`): The minimum memory usage in bytes for forecasts related to the anomaly\ndetection job.\n - `forecasts.memory.total` (or `fmt`, `forecastsMemoryTotal`): The total memory usage in bytes for forecasts related to the anomaly\ndetection job.\n - `forecasts.records.avg` (or `fravg`, `forecastsRecordsAvg`): The average number of `model_forecast` documents written for forecasts\nrelated to the anomaly detection job.\n - `forecasts.records.max` (or `frmax`, `forecastsRecordsMax`): The maximum number of `model_forecast` documents written for forecasts\nrelated to the anomaly detection job.\n - `forecasts.records.min` (or 
`frmin`, `forecastsRecordsMin`): The minimum number of `model_forecast` documents written for forecasts\nrelated to the anomaly detection job.\n - `forecasts.records.total` (or `frt`, `forecastsRecordsTotal`): The total number of `model_forecast` documents written for forecasts\nrelated to the anomaly detection job.\n - `forecasts.time.avg` (or `ftavg`, `forecastsTimeAvg`): The average runtime in milliseconds for forecasts related to the anomaly\ndetection job.\n - `forecasts.time.max` (or `ftmax`, `forecastsTimeMax`): The maximum runtime in milliseconds for forecasts related to the anomaly\ndetection job.\n - `forecasts.time.min` (or `ftmin`, `forecastsTimeMin`): The minimum runtime in milliseconds for forecasts related to the anomaly\ndetection job.\n - `forecasts.time.total` (or `ftt`, `forecastsTimeTotal`): The total runtime in milliseconds for forecasts related to the anomaly\ndetection job.\n - `forecasts.total` (or `ft`, `forecastsTotal`): The number of individual forecasts currently available for the job.\n - `id`: Identifier for the anomaly detection job.\n - `model.bucket_allocation_failures` (or `mbaf`, `modelBucketAllocationFailures`): The number of buckets for which new entities in incoming data were not\nprocessed due to insufficient model memory.\n - `model.by_fields` (or `mbf`, `modelByFields`): The number of by field values that were analyzed by the models. This value\nis cumulative for all detectors in the job.\n - `model.bytes` (or `mb`, `modelBytes`): The number of bytes of memory used by the models. This is the maximum value\nsince the last time the model was persisted. If the job is closed, this\nvalue indicates the latest size.\n - `model.bytes_exceeded` (or `mbe`, `modelBytesExceeded`): The number of bytes over the high limit for memory usage at the last\nallocation failure.\n - `model.categorization_status` (or `mcs`, `modelCategorizationStatus`): The status of categorization for the job: `ok` or `warn`. 
If `ok`,\ncategorization is performing acceptably well (or not being used at all). If\n`warn`, categorization is detecting a distribution of categories that\nsuggests the input data is inappropriate for categorization. Problems could\nbe that there is only one category, more than 90% of categories are rare,\nthe number of categories is greater than 50% of the number of categorized\ndocuments, there are no frequently matched categories, or more than 50% of\ncategories are dead.\n - `model.categorized_doc_count` (or `mcdc`, `modelCategorizedDocCount`): The number of documents that have had a field categorized.\n - `model.dead_category_count` (or `mdcc`, `modelDeadCategoryCount`): The number of categories created by categorization that will never be\nassigned again because another category’s definition makes it a superset of\nthe dead category. Dead categories are a side effect of the way\ncategorization has no prior training.\n - `model.failed_category_count` (or `mdcc`, `modelFailedCategoryCount`): The number of times that categorization wanted to create a new category but\ncouldn’t because the job had hit its model memory limit. This count does\nnot track which specific categories failed to be created. Therefore, you\ncannot use this value to determine the number of unique categories that\nwere missed.\n - `model.frequent_category_count` (or `mfcc`, `modelFrequentCategoryCount`): The number of categories that match more than 1% of categorized documents.\n - `model.log_time` (or `mlt`, `modelLogTime`): The timestamp when the model stats were gathered, according to server time.\n - `model.memory_limit` (or `mml`, `modelMemoryLimit`): The upper limit for model memory usage, checked on increasing values.\n - `model.memory_status` (or `mms`, `modelMemoryStatus`): The status of the mathematical models: `ok`, `soft_limit`, or `hard_limit`.\nIf `ok`, the models stayed below the configured value. 
If `soft_limit`, the\nmodels used more than 60% of the configured memory limit and older unused\nmodels will be pruned to free up space. Additionally, in categorization jobs\nno further category examples will be stored. If `hard_limit`, the models\nused more space than the configured memory limit. As a result, not all\nincoming data was processed.\n - `model.over_fields` (or `mof`, `modelOverFields`): The number of over field values that were analyzed by the models. This\nvalue is cumulative for all detectors in the job.\n - `model.partition_fields` (or `mpf`, `modelPartitionFields`): The number of partition field values that were analyzed by the models. This\nvalue is cumulative for all detectors in the job.\n - `model.rare_category_count` (or `mrcc`, `modelRareCategoryCount`): The number of categories that match just one categorized document.\n - `model.timestamp` (or `mt`, `modelTimestamp`): The timestamp of the last record when the model stats were gathered.\n - `model.total_category_count` (or `mtcc`, `modelTotalCategoryCount`): The number of categories created by categorization.\n - `node.address` (or `na`, `nodeAddress`): The network address of the node that runs the job. This information is\navailable only for open jobs.\n - `node.ephemeral_id` (or `ne`, `nodeEphemeralId`): The ephemeral ID of the node that runs the job. This information is\navailable only for open jobs.\n - `node.id` (or `ni`, `nodeId`): The unique identifier of the node that runs the job. This information is\navailable only for open jobs.\n - `node.name` (or `nn`, `nodeName`): The name of the node that runs the job. This information is available only\nfor open jobs.\n - `opened_time` (or `ot`): For open jobs only, the elapsed time for which the job has been open.\n - `state` (or `s`): The status of the anomaly detection job: `closed`, `closing`, `failed`,\n`opened`, or `opening`. If `closed`, the job finished successfully with its\nmodel state persisted. 
The job must be opened before it can accept further\ndata. If `closing`, the job close action is in progress and has not yet\ncompleted. A closing job cannot accept further data. If `failed`, the job\ndid not finish successfully due to an error. This situation can occur due\nto invalid input data, a fatal error occurring during the analysis, or an\nexternal interaction such as the process being killed by the Linux out of\nmemory (OOM) killer. If the job had irrevocably failed, it must be force\nclosed and then deleted. If the datafeed can be corrected, the job can be\nclosed and then re-opened. If `opened`, the job is available to receive and\nprocess data. If `opening`, the job open action is in progress and has not\nyet completed.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatAnonalyDetectorColumns" @@ -63665,7 +64215,7 @@ "cat.ml_jobs-s": { "in": "query", "name": "s", - "description": "Comma-separated list of column names or column aliases used to sort the response.", + "description": "Comma-separated list of column names or column aliases used to sort the response.\n\nSupported values include:\n - `assignment_explanation` (or `ae`): For open anomaly detection jobs only, contains messages relating to the\nselection of a node to run the job.\n - `buckets.count` (or `bc`, `bucketsCount`): The number of bucket results produced by the job.\n - `buckets.time.exp_avg` (or `btea`, `bucketsTimeExpAvg`): Exponential moving average of all bucket processing times, in milliseconds.\n - `buckets.time.exp_avg_hour` (or `bteah`, `bucketsTimeExpAvgHour`): Exponentially-weighted moving average of bucket processing times calculated\nin a 1 hour time window, in milliseconds.\n - `buckets.time.max` (or `btmax`, `bucketsTimeMax`): Maximum among all bucket processing times, in milliseconds.\n - `buckets.time.min` (or `btmin`, `bucketsTimeMin`): Minimum among all bucket processing times, in milliseconds.\n - `buckets.time.total` (or `btt`, 
`bucketsTimeTotal`): Sum of all bucket processing times, in milliseconds.\n - `data.buckets` (or `db`, `dataBuckets`): The number of buckets processed.\n - `data.earliest_record` (or `der`, `dataEarliestRecord`): The timestamp of the earliest chronologically input document.\n - `data.empty_buckets` (or `deb`, `dataEmptyBuckets`): The number of buckets which did not contain any data.\n - `data.input_bytes` (or `dib`, `dataInputBytes`): The number of bytes of input data posted to the anomaly detection job.\n - `data.input_fields` (or `dif`, `dataInputFields`): The total number of fields in input documents posted to the anomaly\ndetection job. This count includes fields that are not used in the analysis.\nHowever, be aware that if you are using a datafeed, it extracts only the\nrequired fields from the documents it retrieves before posting them to the job.\n - `data.input_records` (or `dir`, `dataInputRecords`): The number of input documents posted to the anomaly detection job.\n - `data.invalid_dates` (or `did`, `dataInvalidDates`): The number of input documents with either a missing date field or a date\nthat could not be parsed.\n - `data.last` (or `dl`, `dataLast`): The timestamp at which data was last analyzed, according to server time.\n - `data.last_empty_bucket` (or `dleb`, `dataLastEmptyBucket`): The timestamp of the last bucket that did not contain any data.\n - `data.last_sparse_bucket` (or `dlsb`, `dataLastSparseBucket`): The timestamp of the last bucket that was considered sparse.\n - `data.latest_record` (or `dlr`, `dataLatestRecord`): The timestamp of the latest chronologically input document.\n - `data.missing_fields` (or `dmf`, `dataMissingFields`): The number of input documents that are missing a field that the anomaly\ndetection job is configured to analyze. 
Input documents with missing fields\nare still processed because it is possible that not all fields are missing.\n - `data.out_of_order_timestamps` (or `doot`, `dataOutOfOrderTimestamps`): The number of input documents that have a timestamp chronologically\npreceding the start of the current anomaly detection bucket offset by the\nlatency window. This information is applicable only when you provide data\nto the anomaly detection job by using the post data API. These out of order\ndocuments are discarded, since jobs require time series data to be in\nascending chronological order.\n - `data.processed_fields` (or `dpf`, `dataProcessedFields`): The total number of fields in all the documents that have been processed by\nthe anomaly detection job. Only fields that are specified in the detector\nconfiguration object contribute to this count. The timestamp is not\nincluded in this count.\n - `data.processed_records` (or `dpr`, `dataProcessedRecords`): The number of input documents that have been processed by the anomaly\ndetection job. This value includes documents with missing fields, since\nthey are nonetheless analyzed. 
If you use datafeeds and have aggregations\nin your search query, the processed record count is the number of\naggregation results processed, not the number of Elasticsearch documents.\n - `data.sparse_buckets` (or `dsb`, `dataSparseBuckets`): The number of buckets that contained few data points compared to the\nexpected number of data points.\n - `forecasts.memory.avg` (or `fmavg`, `forecastsMemoryAvg`): The average memory usage in bytes for forecasts related to the anomaly\ndetection job.\n - `forecasts.memory.max` (or `fmmax`, `forecastsMemoryMax`): The maximum memory usage in bytes for forecasts related to the anomaly\ndetection job.\n - `forecasts.memory.min` (or `fmmin`, `forecastsMemoryMin`): The minimum memory usage in bytes for forecasts related to the anomaly\ndetection job.\n - `forecasts.memory.total` (or `fmt`, `forecastsMemoryTotal`): The total memory usage in bytes for forecasts related to the anomaly\ndetection job.\n - `forecasts.records.avg` (or `fravg`, `forecastsRecordsAvg`): The average number of `model_forecast` documents written for forecasts\nrelated to the anomaly detection job.\n - `forecasts.records.max` (or `frmax`, `forecastsRecordsMax`): The maximum number of `model_forecast` documents written for forecasts\nrelated to the anomaly detection job.\n - `forecasts.records.min` (or `frmin`, `forecastsRecordsMin`): The minimum number of `model_forecast` documents written for forecasts\nrelated to the anomaly detection job.\n - `forecasts.records.total` (or `frt`, `forecastsRecordsTotal`): The total number of `model_forecast` documents written for forecasts\nrelated to the anomaly detection job.\n - `forecasts.time.avg` (or `ftavg`, `forecastsTimeAvg`): The average runtime in milliseconds for forecasts related to the anomaly\ndetection job.\n - `forecasts.time.max` (or `ftmax`, `forecastsTimeMax`): The maximum runtime in milliseconds for forecasts related to the anomaly\ndetection job.\n - `forecasts.time.min` (or `ftmin`, 
`forecastsTimeMin`): The minimum runtime in milliseconds for forecasts related to the anomaly\ndetection job.\n - `forecasts.time.total` (or `ftt`, `forecastsTimeTotal`): The total runtime in milliseconds for forecasts related to the anomaly\ndetection job.\n - `forecasts.total` (or `ft`, `forecastsTotal`): The number of individual forecasts currently available for the job.\n - `id`: Identifier for the anomaly detection job.\n - `model.bucket_allocation_failures` (or `mbaf`, `modelBucketAllocationFailures`): The number of buckets for which new entities in incoming data were not\nprocessed due to insufficient model memory.\n - `model.by_fields` (or `mbf`, `modelByFields`): The number of by field values that were analyzed by the models. This value\nis cumulative for all detectors in the job.\n - `model.bytes` (or `mb`, `modelBytes`): The number of bytes of memory used by the models. This is the maximum value\nsince the last time the model was persisted. If the job is closed, this\nvalue indicates the latest size.\n - `model.bytes_exceeded` (or `mbe`, `modelBytesExceeded`): The number of bytes over the high limit for memory usage at the last\nallocation failure.\n - `model.categorization_status` (or `mcs`, `modelCategorizationStatus`): The status of categorization for the job: `ok` or `warn`. If `ok`,\ncategorization is performing acceptably well (or not being used at all). If\n`warn`, categorization is detecting a distribution of categories that\nsuggests the input data is inappropriate for categorization. 
Problems could\nbe that there is only one category, more than 90% of categories are rare,\nthe number of categories is greater than 50% of the number of categorized\ndocuments, there are no frequently matched categories, or more than 50% of\ncategories are dead.\n - `model.categorized_doc_count` (or `mcdc`, `modelCategorizedDocCount`): The number of documents that have had a field categorized.\n - `model.dead_category_count` (or `mdcc`, `modelDeadCategoryCount`): The number of categories created by categorization that will never be\nassigned again because another category’s definition makes it a superset of\nthe dead category. Dead categories are a side effect of the way\ncategorization has no prior training.\n - `model.failed_category_count` (or `mdcc`, `modelFailedCategoryCount`): The number of times that categorization wanted to create a new category but\ncouldn’t because the job had hit its model memory limit. This count does\nnot track which specific categories failed to be created. Therefore, you\ncannot use this value to determine the number of unique categories that\nwere missed.\n - `model.frequent_category_count` (or `mfcc`, `modelFrequentCategoryCount`): The number of categories that match more than 1% of categorized documents.\n - `model.log_time` (or `mlt`, `modelLogTime`): The timestamp when the model stats were gathered, according to server time.\n - `model.memory_limit` (or `mml`, `modelMemoryLimit`): The upper limit for model memory usage, checked on increasing values.\n - `model.memory_status` (or `mms`, `modelMemoryStatus`): The status of the mathematical models: `ok`, `soft_limit`, or `hard_limit`.\nIf `ok`, the models stayed below the configured value. If `soft_limit`, the\nmodels used more than 60% of the configured memory limit and older unused\nmodels will be pruned to free up space. Additionally, in categorization jobs\nno further category examples will be stored. 
If `hard_limit`, the models\nused more space than the configured memory limit. As a result, not all\nincoming data was processed.\n - `model.over_fields` (or `mof`, `modelOverFields`): The number of over field values that were analyzed by the models. This\nvalue is cumulative for all detectors in the job.\n - `model.partition_fields` (or `mpf`, `modelPartitionFields`): The number of partition field values that were analyzed by the models. This\nvalue is cumulative for all detectors in the job.\n - `model.rare_category_count` (or `mrcc`, `modelRareCategoryCount`): The number of categories that match just one categorized document.\n - `model.timestamp` (or `mt`, `modelTimestamp`): The timestamp of the last record when the model stats were gathered.\n - `model.total_category_count` (or `mtcc`, `modelTotalCategoryCount`): The number of categories created by categorization.\n - `node.address` (or `na`, `nodeAddress`): The network address of the node that runs the job. This information is\navailable only for open jobs.\n - `node.ephemeral_id` (or `ne`, `nodeEphemeralId`): The ephemeral ID of the node that runs the job. This information is\navailable only for open jobs.\n - `node.id` (or `ni`, `nodeId`): The unique identifier of the node that runs the job. This information is\navailable only for open jobs.\n - `node.name` (or `nn`, `nodeName`): The name of the node that runs the job. This information is available only\nfor open jobs.\n - `opened_time` (or `ot`): For open jobs only, the elapsed time for which the job has been open.\n - `state` (or `s`): The status of the anomaly detection job: `closed`, `closing`, `failed`,\n`opened`, or `opening`. If `closed`, the job finished successfully with its\nmodel state persisted. The job must be opened before it can accept further\ndata. If `closing`, the job close action is in progress and has not yet\ncompleted. A closing job cannot accept further data. If `failed`, the job\ndid not finish successfully due to an error. 
This situation can occur due\nto invalid input data, a fatal error occurring during the analysis, or an\nexternal interaction such as the process being killed by the Linux out of\nmemory (OOM) killer. If the job had irrevocably failed, it must be force\nclosed and then deleted. If the datafeed can be corrected, the job can be\nclosed and then re-opened. If `opened`, the job is available to receive and\nprocess data. If `opening`, the job open action is in progress and has not\nyet completed.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatAnonalyDetectorColumns" @@ -63716,7 +64266,7 @@ "cat.ml_trained_models-h": { "in": "query", "name": "h", - "description": "A comma-separated list of column names to display.", + "description": "A comma-separated list of column names to display.\n\nSupported values include:\n - `create_time` (or `ct`): The time when the trained model was created.\n - `created_by` (or `c`, `createdBy`): Information on the creator of the trained model.\n - `data_frame_analytics_id` (or `df`, `dataFrameAnalytics`, `dfid`): Identifier for the data frame analytics job that created the model. 
Only\ndisplayed if it is still available.\n - `description` (or `d`): The description of the trained model.\n - `heap_size` (or `hs`, `modelHeapSize`): The estimated heap size to keep the trained model in memory.\n - `id`: Identifier for the trained model.\n - `ingest.count` (or `ic`, `ingestCount`): The total number of documents that are processed by the model.\n - `ingest.current` (or `icurr`, `ingestCurrent`): The total number of document that are currently being handled by the\ntrained model.\n - `ingest.failed` (or `if`, `ingestFailed`): The total number of failed ingest attempts with the trained model.\n - `ingest.pipelines` (or `ip`, `ingestPipelines`): The total number of ingest pipelines that are referencing the trained\nmodel.\n - `ingest.time` (or `it`, `ingestTime`): The total time that is spent processing documents with the trained model.\n - `license` (or `l`): The license level of the trained model.\n - `operations` (or `o`, `modelOperations`): The estimated number of operations to use the trained model. This number\nhelps measuring the computational complexity of the model.\n - `version` (or `v`): The Elasticsearch version number in which the trained model was created.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatTrainedModelsColumns" @@ -63726,7 +64276,7 @@ "cat.ml_trained_models-s": { "in": "query", "name": "s", - "description": "A comma-separated list of column names or aliases used to sort the response.", + "description": "A comma-separated list of column names or aliases used to sort the response.\n\nSupported values include:\n - `create_time` (or `ct`): The time when the trained model was created.\n - `created_by` (or `c`, `createdBy`): Information on the creator of the trained model.\n - `data_frame_analytics_id` (or `df`, `dataFrameAnalytics`, `dfid`): Identifier for the data frame analytics job that created the model. 
Only\ndisplayed if it is still available.\n - `description` (or `d`): The description of the trained model.\n - `heap_size` (or `hs`, `modelHeapSize`): The estimated heap size to keep the trained model in memory.\n - `id`: Identifier for the trained model.\n - `ingest.count` (or `ic`, `ingestCount`): The total number of documents that are processed by the model.\n - `ingest.current` (or `icurr`, `ingestCurrent`): The total number of document that are currently being handled by the\ntrained model.\n - `ingest.failed` (or `if`, `ingestFailed`): The total number of failed ingest attempts with the trained model.\n - `ingest.pipelines` (or `ip`, `ingestPipelines`): The total number of ingest pipelines that are referencing the trained\nmodel.\n - `ingest.time` (or `it`, `ingestTime`): The total time that is spent processing documents with the trained model.\n - `license` (or `l`): The license level of the trained model.\n - `operations` (or `o`, `modelOperations`): The estimated number of operations to use the trained model. 
This number\nhelps measuring the computational complexity of the model.\n - `version` (or `v`): The Elasticsearch version number in which the trained model was created.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatTrainedModelsColumns" @@ -63797,7 +64347,7 @@ "cat.transforms-h": { "in": "query", "name": "h", - "description": "Comma-separated list of column names to display.", + "description": "Comma-separated list of column names to display.\n\nSupported values include:\n - `changes_last_detection_time` (or `cldt`): The timestamp when changes were last detected in the source indices.\n - `checkpoint` (or `cp`): The sequence number for the checkpoint.\n - `checkpoint_duration_time_exp_avg` (or `cdtea`, `checkpointTimeExpAvg`): Exponential moving average of the duration of the checkpoint, in\nmilliseconds.\n - `checkpoint_progress` (or `c`, `checkpointProgress`): The progress of the next checkpoint that is currently in progress.\n - `create_time` (or `ct`, `createTime`): The time the transform was created.\n - `delete_time` (or `dtime`): The amount of time spent deleting, in milliseconds.\n - `description` (or `d`): The description of the transform.\n - `dest_index` (or `di`, `destIndex`): The destination index for the transform. The mappings of the destination\nindex are deduced based on the source fields when possible. If alternate\nmappings are required, use the Create index API prior to starting the\ntransform.\n - `documents_deleted` (or `docd`): The number of documents that have been deleted from the destination index\ndue to the retention policy for this transform.\n - `documents_indexed` (or `doci`): The number of documents that have been indexed into the destination index\nfor the transform.\n - `docs_per_second` (or `dps`): Specifies a limit on the number of input documents per second. This setting\nthrottles the transform by adding a wait time between search requests. 
The\ndefault value is `null`, which disables throttling.\n - `documents_processed` (or `docp`): The number of documents that have been processed from the source index of\nthe transform.\n - `frequency` (or `f`): The interval between checks for changes in the source indices when the\ntransform is running continuously. Also determines the retry interval in\nthe event of transient failures while the transform is searching or\nindexing. The minimum value is `1s` and the maximum is `1h`. The default\nvalue is `1m`.\n - `id`: Identifier for the transform.\n - `index_failure` (or `if`): The number of indexing failures.\n - `index_time` (or `itime`): The amount of time spent indexing, in milliseconds.\n - `index_total` (or `it`): The number of index operations.\n - `indexed_documents_exp_avg` (or `idea`): Exponential moving average of the number of new documents that have been\nindexed.\n - `last_search_time` (or `lst`, `lastSearchTime`): The timestamp of the last search in the source indices. This field is only\nshown if the transform is running.\n - `max_page_search_size` (or `mpsz`): Defines the initial page size to use for the composite aggregation for each\ncheckpoint. If circuit breaker exceptions occur, the page size is\ndynamically adjusted to a lower value. The minimum value is `10` and the\nmaximum is `65,536`. The default value is `500`.\n - `pages_processed` (or `pp`): The number of search or bulk index operations processed. 
Documents are\nprocessed in batches instead of individually.\n - `pipeline` (or `p`): The unique identifier for an ingest pipeline.\n - `processed_documents_exp_avg` (or `pdea`): Exponential moving average of the number of documents that have been\nprocessed.\n - `processing_time` (or `pt`): The amount of time spent processing results, in milliseconds.\n - `reason` (or `r`): If a transform has a `failed` state, this property provides details about\nthe reason for the failure.\n - `search_failure` (or `sf`): The number of search failures.\n - `search_time` (or `stime`): The amount of time spent searching, in milliseconds.\n - `search_total` (or `st`): The number of search operations on the source index for the transform.\n - `source_index` (or `si`, `sourceIndex`): The source indices for the transform. It can be a single index, an index\npattern (for example, `\"my-index-*\"`), an array of indices (for example,\n`[\"my-index-000001\", \"my-index-000002\"]`), or an array of index patterns\n(for example, `[\"my-index-*\", \"my-other-index-*\"]`. For remote indices use\nthe syntax `\"remote_name:index_name\"`. If any indices are in remote\nclusters then the master node and at least one transform node must have the\n`remote_cluster_client` node role.\n - `state` (or `s`): The status of the transform, which can be one of the following values:\n\n* `aborting`: The transform is aborting.\n* `failed`: The transform failed. For more information about the failure,\ncheck the reason field.\n* `indexing`: The transform is actively processing data and creating new\ndocuments.\n* `started`: The transform is running but not actively indexing data.\n* `stopped`: The transform is stopped.\n* `stopping`: The transform is stopping.\n - `transform_type` (or `tt`): Indicates the type of transform: `batch` or `continuous`.\n - `trigger_count` (or `tc`): The number of times the transform has been triggered by the scheduler. 
For\nexample, the scheduler triggers the transform indexer to check for updates\nor ingest new data at an interval specified in the `frequency` property.\n - `version` (or `v`): The version of Elasticsearch that existed on the node when the transform\nwas created.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatTransformColumns" @@ -63807,7 +64357,7 @@ "cat.transforms-s": { "in": "query", "name": "s", - "description": "Comma-separated list of column names or column aliases used to sort the response.", + "description": "Comma-separated list of column names or column aliases used to sort the response.\n\nSupported values include:\n - `changes_last_detection_time` (or `cldt`): The timestamp when changes were last detected in the source indices.\n - `checkpoint` (or `cp`): The sequence number for the checkpoint.\n - `checkpoint_duration_time_exp_avg` (or `cdtea`, `checkpointTimeExpAvg`): Exponential moving average of the duration of the checkpoint, in\nmilliseconds.\n - `checkpoint_progress` (or `c`, `checkpointProgress`): The progress of the next checkpoint that is currently in progress.\n - `create_time` (or `ct`, `createTime`): The time the transform was created.\n - `delete_time` (or `dtime`): The amount of time spent deleting, in milliseconds.\n - `description` (or `d`): The description of the transform.\n - `dest_index` (or `di`, `destIndex`): The destination index for the transform. The mappings of the destination\nindex are deduced based on the source fields when possible. 
If alternate\nmappings are required, use the Create index API prior to starting the\ntransform.\n - `documents_deleted` (or `docd`): The number of documents that have been deleted from the destination index\ndue to the retention policy for this transform.\n - `documents_indexed` (or `doci`): The number of documents that have been indexed into the destination index\nfor the transform.\n - `docs_per_second` (or `dps`): Specifies a limit on the number of input documents per second. This setting\nthrottles the transform by adding a wait time between search requests. The\ndefault value is `null`, which disables throttling.\n - `documents_processed` (or `docp`): The number of documents that have been processed from the source index of\nthe transform.\n - `frequency` (or `f`): The interval between checks for changes in the source indices when the\ntransform is running continuously. Also determines the retry interval in\nthe event of transient failures while the transform is searching or\nindexing. The minimum value is `1s` and the maximum is `1h`. The default\nvalue is `1m`.\n - `id`: Identifier for the transform.\n - `index_failure` (or `if`): The number of indexing failures.\n - `index_time` (or `itime`): The amount of time spent indexing, in milliseconds.\n - `index_total` (or `it`): The number of index operations.\n - `indexed_documents_exp_avg` (or `idea`): Exponential moving average of the number of new documents that have been\nindexed.\n - `last_search_time` (or `lst`, `lastSearchTime`): The timestamp of the last search in the source indices. This field is only\nshown if the transform is running.\n - `max_page_search_size` (or `mpsz`): Defines the initial page size to use for the composite aggregation for each\ncheckpoint. If circuit breaker exceptions occur, the page size is\ndynamically adjusted to a lower value. The minimum value is `10` and the\nmaximum is `65,536`. 
The default value is `500`.\n - `pages_processed` (or `pp`): The number of search or bulk index operations processed. Documents are\nprocessed in batches instead of individually.\n - `pipeline` (or `p`): The unique identifier for an ingest pipeline.\n - `processed_documents_exp_avg` (or `pdea`): Exponential moving average of the number of documents that have been\nprocessed.\n - `processing_time` (or `pt`): The amount of time spent processing results, in milliseconds.\n - `reason` (or `r`): If a transform has a `failed` state, this property provides details about\nthe reason for the failure.\n - `search_failure` (or `sf`): The number of search failures.\n - `search_time` (or `stime`): The amount of time spent searching, in milliseconds.\n - `search_total` (or `st`): The number of search operations on the source index for the transform.\n - `source_index` (or `si`, `sourceIndex`): The source indices for the transform. It can be a single index, an index\npattern (for example, `\"my-index-*\"`), an array of indices (for example,\n`[\"my-index-000001\", \"my-index-000002\"]`), or an array of index patterns\n(for example, `[\"my-index-*\", \"my-other-index-*\"]`. For remote indices use\nthe syntax `\"remote_name:index_name\"`. If any indices are in remote\nclusters then the master node and at least one transform node must have the\n`remote_cluster_client` node role.\n - `state` (or `s`): The status of the transform, which can be one of the following values:\n\n* `aborting`: The transform is aborting.\n* `failed`: The transform failed. 
For more information about the failure,\ncheck the reason field.\n* `indexing`: The transform is actively processing data and creating new\ndocuments.\n* `started`: The transform is running but not actively indexing data.\n* `stopped`: The transform is stopped.\n* `stopping`: The transform is stopping.\n - `transform_type` (or `tt`): Indicates the type of transform: `batch` or `continuous`.\n - `trigger_count` (or `tc`): The number of times the transform has been triggered by the scheduler. For\nexample, the scheduler triggers the transform indexer to check for updates\nor ingest new data at an interval specified in the `frequency` property.\n - `version` (or `v`): The version of Elasticsearch that existed on the node when the transform\nwas created.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/cat._types.CatTransformColumns" @@ -64002,7 +64552,7 @@ "count-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -64144,7 +64694,7 @@ "create-op_type": { "in": "query", "name": "op_type", - "description": "Set to `create` to only index the document if it does not already exist (put if absent).\nIf a document with the specified `_id` already exists, the indexing operation will fail.\nThe behavior is the same as using the `/_create` endpoint.\nIf a document ID is specified, this paramater defaults to `index`.\nOtherwise, it defaults to `create`.\nIf the request targets a data stream, an `op_type` of `create` is required.", + "description": "Set to `create` to only index the document if it does not already exist (put if absent).\nIf a document with the specified `_id` already exists, the indexing operation will fail.\nThe behavior is the same as using the `/_create` endpoint.\nIf a document ID is specified, this paramater defaults to `index`.\nOtherwise, it defaults to `create`.\nIf the request targets a data stream, an `op_type` of `create` is required.\n\nSupported values include:\n - `index`: Overwrite any documents that already exist.\n - `create`: Only index documents that do not already exist.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.OpType" @@ -64224,7 +64774,7 @@ "create-version_type": { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of the stored document or if there is no existing 
document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" @@ -64305,6 +64855,7 @@ "eql.search-expand_wildcards": { "in": "query", "name": "expand_wildcards", + "description": "\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -64517,7 +65068,7 @@ "field_caps-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`.", + "description": "The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -64673,7 +65224,7 @@ "index-op_type": { "in": "query", "name": "op_type", - "description": "Set to `create` to only index the document if it does not already exist (put if absent).\nIf a document with the specified `_id` already exists, the indexing operation will fail.\nThe behavior is the same as using the `/_create` endpoint.\nIf a document ID is specified, this paramater defaults to `index`.\nOtherwise, it defaults to `create`.\nIf the request targets a data stream, an `op_type` of `create` is required.", + "description": "Set to `create` to only index the document if it does not already exist (put if absent).\nIf a document with the specified `_id` already exists, the indexing operation will fail.\nThe behavior is the same as using the `/_create` endpoint.\nIf a document ID is specified, this paramater defaults to `index`.\nOtherwise, it defaults to `create`.\nIf the request targets a data stream, an `op_type` of `create` is required.\n\nSupported values include:\n - `index`: Overwrite any documents that already exist.\n - `create`: Only index documents that do not already exist.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.OpType" @@ -64733,7 +65284,7 @@ "index-version_type": { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of the stored document or if there is no existing 
document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" @@ -64858,7 +65409,7 @@ "indices.exists_alias-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -64920,7 +65471,7 @@ "indices.get_alias-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -64961,7 +65512,7 @@ "indices.get_data_stream-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of data stream that wildcard patterns can match.\nSupports comma-separated values, such as `open,hidden`.", + "description": "Type of data stream that wildcard patterns can match.\nSupports comma-separated values, such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. 
Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -65073,7 +65624,7 @@ "indices.get_mapping-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -65145,7 +65696,7 @@ "indices.get_settings-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -65309,7 +65860,7 @@ "indices.put_mapping-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -65380,7 +65931,7 @@ "indices.put_settings-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match. If the request can target\ndata streams, this argument determines whether wildcard expressions match\nhidden data streams. Supports comma-separated values, such as\n`open,hidden`.", + "description": "Type of index that wildcard patterns can match. 
If the request can target\ndata streams, this argument determines whether wildcard expressions match\nhidden data streams. Supports comma-separated values, such as\n`open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -65471,7 +66022,7 @@ "indices.refresh-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -65685,7 +66236,7 @@ "indices.validate_query-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -66424,7 +66975,7 @@ "ml.get_trained_models-include": { "in": "query", "name": "include", - "description": "A comma delimited string of optional fields to include in the response\nbody.", + "description": "A comma delimited string of optional fields to include in the response\nbody.\n\nSupported values include:\n - `definition`: Includes the model definition.\n - `feature_importance_baseline`: Includes the baseline for feature importance values.\n - `hyperparameters`: Includes the information about hyperparameters used to train the model.\nThis information consists of the value, the absolute and relative\nimportance of the hyperparameter as well as an indicator of whether it was\nspecified by the user or tuned during hyperparameter optimization.\n - `total_feature_importance`: Includes the total feature importance for the training data set. The\nbaseline and total feature importance values are returned in the metadata\nfield in the response body.\n - `definition_status`: Includes the model definition status.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/ml._types.Include" @@ -66578,7 +67129,7 @@ "msearch-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.", + "description": "Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. 
Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -66668,7 +67219,7 @@ "msearch-search_type": { "in": "query", "name": "search_type", - "description": "Indicates whether global term and document frequencies should be used when scoring returned documents.", + "description": "Indicates whether global term and document frequencies should be used when scoring returned documents.\n\nSupported values include:\n - `query_then_fetch`: Documents are scored using local term and document frequencies for the shard. This is usually faster but less accurate.\n - `dfs_query_then_fetch`: Documents are scored using global term and document frequencies across all shards. This is usually slower but more accurate.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SearchType" @@ -66719,7 +67270,7 @@ "msearch_template-search_type": { "in": "query", "name": "search_type", - "description": "The type of the search operation.", + "description": "The type of the search operation.\n\nSupported values include:\n - `query_then_fetch`: Documents are scored using local term and document frequencies for the shard. This is usually faster but less accurate.\n - `dfs_query_then_fetch`: Documents are scored using global term and document frequencies across all shards. 
This is usually slower but more accurate.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SearchType" @@ -66873,7 +67424,7 @@ "mtermvectors-version_type": { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of the stored document or if there is no existing document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" @@ -66956,7 +67507,7 @@ "rank_eval-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.", + "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -67139,7 +67690,7 @@ "search-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values such as `open,hidden`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values such as `open,hidden`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -67259,7 +67810,7 @@ "search-search_type": { "in": "query", "name": "search_type", - "description": "Indicates how distributed term frequencies are calculated for relevance scoring.", + "description": "Indicates how distributed term frequencies are calculated for relevance scoring.\n\nSupported values include:\n - `query_then_fetch`: Documents are scored using local term and document frequencies for the shard. This is usually faster but less accurate.\n - `dfs_query_then_fetch`: Documents are scored using global term and document frequencies across all shards. 
This is usually slower but more accurate.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SearchType" @@ -67302,7 +67853,7 @@ "search-suggest_mode": { "in": "query", "name": "suggest_mode", - "description": "The suggest mode.\nThis parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.", + "description": "The suggest mode.\nThis parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.\n\nSupported values include:\n - `missing`: Only generate suggestions for terms that are not in the shard.\n - `popular`: Only suggest terms that occur in more docs on the shard than the original term.\n - `always`: Suggest any matching suggestions based on terms in the suggest text.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SuggestMode" @@ -67683,7 +68234,7 @@ "search_template-expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.\n\nSupported values include:\n - `all`: Match any data stream or index, including hidden ones.\n - `open`: Match open, non-hidden indices. Also matches any non-hidden data stream.\n - `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed.\n - `hidden`: Match hidden data streams and hidden indices. 
Must be combined with `open`, `closed`, or `both`.\n - `none`: Wildcard expressions are not accepted.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.ExpandWildcards" @@ -67763,7 +68314,7 @@ "search_template-search_type": { "in": "query", "name": "search_type", - "description": "The type of the search operation.", + "description": "The type of the search operation.\n\nSupported values include:\n - `query_then_fetch`: Documents are scored using local term and document frequencies for the shard. This is usually faster but less accurate.\n - `dfs_query_then_fetch`: Documents are scored using global term and document frequencies across all shards. This is usually slower but more accurate.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.SearchType" @@ -68019,7 +68570,7 @@ "termvectors-version_type": { "in": "query", "name": "version_type", - "description": "The version type.", + "description": "The version type.\n\nSupported values include:\n - `internal`: Use internal versioning that starts at 1 and increments with each update or delete.\n - `external`: Only index the document if the specified version is strictly higher than the version of the stored document or if there is no existing document.\n - `external_gte`: Only index the document if the specified version is equal or higher than the version of the stored document or if there is no existing document.\nNOTE: The `external_gte` version type is meant for special use cases and should be used with care.\nIf used incorrectly, it can result in loss of data.\n - `force`: This option is deprecated because it can cause primary and replica shards to diverge.\n\n", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.VersionType" diff --git a/output/schema/schema-serverless.json b/output/schema/schema-serverless.json index f4d13b8984..5ece2376e9 100644 --- a/output/schema/schema-serverless.json +++ b/output/schema/schema-serverless.json @@ -22627,7 
+22627,7 @@ }, "path": [ { - "description": "Name of the index you wish to create.", + "description": "Name of the index you wish to create.\nIndex names must meet the following criteria:\n\n* Lowercase only\n* Cannot include `\\`, `/`, `*`, `?`, `\"`, `<`, `>`, `|`, ` ` (space character), `,`, or `#`\n* Indices prior to 7.0 could contain a colon (`:`), but that has been deprecated and will not be supported in later versions\n* Cannot start with `-`, `_`, or `+`\n* Cannot be `.` or `..`\n* Cannot be longer than 255 bytes (note that it is bytes, so multi-byte characters will reach the limit faster)\n* Names starting with `.` are deprecated, except for hidden indices and internal indices managed by plugins", "name": "index", "required": true, "type": { @@ -22680,7 +22680,7 @@ } } ], - "specLocation": "indices/create/IndicesCreateRequest.ts#L28-L108" + "specLocation": "indices/create/IndicesCreateRequest.ts#L28-L115" }, { "body": { @@ -42784,7 +42784,7 @@ } }, { - "description": "The nodes and shards used for the search.\nBy default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness.\nValid values are:\n\n* `_only_local` to run the search only on shards on the local node.\n* `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method.\n* `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method.\n* `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method.\n`_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. 
For example: `_shards:2,3|_local`.\n`` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order.", + "description": "The nodes and shards used for the search.\nBy default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness.\nValid values are:\n\n* `_only_local` to run the search only on shards on the local node.\n* `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method.\n* `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method.\n* `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method.\n* `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. 
For example: `_shards:2,3|_local`.\n* `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order.", "name": "preference", "required": false, "type": { @@ -58687,10 +58687,13 @@ "name": "fields", "required": false, "type": { - "kind": "instance_of", - "type": { - "name": "Fields", - "namespace": "_types" + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "Field", + "namespace": "_types" + } } } }, @@ -62423,14 +62426,14 @@ "name": "StopWords", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/StopWords.ts#L20-L26", + "specLocation": "_types/analysis/StopWords.ts#L60-L66", "type": { "items": [ { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "StopWordLanguage", + "namespace": "_types.analysis" } }, { @@ -62447,6 +62450,127 @@ "kind": "union_of" } }, + { + "kind": "enum", + "members": [ + { + "name": "_arabic_" + }, + { + "name": "_armenian_" + }, + { + "name": "_basque_" + }, + { + "name": "_bengali_" + }, + { + "name": "_brazilian_" + }, + { + "name": "_bulgarian_" + }, + { + "name": "_catalan_" + }, + { + "name": "_cjk_" + }, + { + "name": "_czech_" + }, + { + "name": "_danish_" + }, + { + "name": "_dutch_" + }, + { + "name": "_english_" + }, + { + "name": "_estonian_" + }, + { + "name": "_finnish_" + }, + { + "name": "_french_" + }, + { + "name": "_galician_" + }, + { + "name": "_german_" + }, + { + "name": "_greek_" + }, + { + "name": "_hindi_" + }, + { + "name": "_hungarian_" + }, + { + "name": "_indonesian_" + }, + { + "name": "_irish_" + }, + { + "name": "_italian_" + }, + { + "name": "_latvian_" + }, + { + "name": "_lithuanian_" + }, + { + "name": "_norwegian_" + }, + { + "name": "_persian_" + }, + { + "name": "_portuguese_" + }, + { + "name": "_romanian_" + }, + { + "name": "_russian_" + }, + { + "name": "_serbian_" + }, + { + "name": "_sorani_" + }, + { + "name": "_spanish_" + }, + { + "name": "_swedish_" + 
}, + { + "name": "_thai_" + }, + { + "name": "_turkish_" + }, + { + "name": "_none_" + } + ], + "name": { + "name": "StopWordLanguage", + "namespace": "_types.analysis" + }, + "specLocation": "_types/analysis/StopWords.ts#L20-L58" + }, { "extDocId": "query-dsl-multi-match-query", "extDocUrl": "https://www.elastic.co/docs/reference/query-languages/query-dsl/query-dsl-multi-match-query", @@ -86434,7 +86558,7 @@ "name": "NoriDecompoundMode", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/nori-plugin.ts#L22-L26" + "specLocation": "_types/analysis/nori-plugin.ts#L23-L27" }, { "kind": "interface", @@ -86825,7 +86949,7 @@ } } ], - "specLocation": "_types/analysis/kuromoji-plugin.ts#L25-L29" + "specLocation": "_types/analysis/kuromoji-plugin.ts#L26-L30" }, { "kind": "enum", @@ -86844,7 +86968,7 @@ "name": "KuromojiTokenizationMode", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/kuromoji-plugin.ts#L52-L56" + "specLocation": "_types/analysis/kuromoji-plugin.ts#L58-L62" }, { "kind": "interface", @@ -86904,6 +87028,9 @@ { "kind": "enum", "members": [ + { + "name": "Arabic" + }, { "name": "Armenian" }, @@ -86922,6 +87049,9 @@ { "name": "English" }, + { + "name": "Estonian" + }, { "name": "Finnish" }, @@ -86940,9 +87070,15 @@ { "name": "Italian" }, + { + "name": "Irish" + }, { "name": "Kp" }, + { + "name": "Lithuanian" + }, { "name": "Lovins" }, @@ -86961,6 +87097,9 @@ { "name": "Russian" }, + { + "name": "Serbian" + }, { "name": "Spanish" }, @@ -86975,7 +87114,7 @@ "name": "SnowballLanguage", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/languages.ts#L20-L43" + "specLocation": "_types/analysis/languages.ts#L20-L48" }, { "kind": "interface", @@ -89204,7 +89343,29 @@ } } ], - "specLocation": "_types/analysis/kuromoji-plugin.ts#L31-L35" + "specLocation": "_types/analysis/kuromoji-plugin.ts#L37-L41" + }, + { + "kind": "enum", + "members": [ + { + "name": "han" + }, + { + "name": "hangul" + }, + { + "name": 
"hiragana" + }, + { + "name": "katakana" + } + ], + "name": { + "name": "CjkBigramIgnoredScript", + "namespace": "_types.analysis" + }, + "specLocation": "_types/analysis/token_filters.ts#L458-L463" }, { "kind": "enum", @@ -89223,7 +89384,7 @@ "name": "DelimitedPayloadEncoding", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/token_filters.ts#L61-L65" + "specLocation": "_types/analysis/token_filters.ts#L78-L82" }, { "kind": "enum", @@ -89239,7 +89400,7 @@ "name": "EdgeNGramSide", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/token_filters.ts#L73-L76" + "specLocation": "_types/analysis/token_filters.ts#L92-L95" }, { "kind": "enum", @@ -89344,7 +89505,26 @@ "name": "KeepTypesMode", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/token_filters.ts#L214-L217" + "specLocation": "_types/analysis/token_filters.ts#L282-L285" + }, + { + "kind": "enum", + "members": [ + { + "name": "greek" + }, + { + "name": "irish" + }, + { + "name": "turkish" + } + ], + "name": { + "name": "LowercaseTokenFilterLanguages", + "namespace": "_types.analysis" + }, + "specLocation": "_types/analysis/token_filters.ts#L344-L348" }, { "docId": "analysis-normalizers", @@ -89585,7 +89765,7 @@ "name": "SynonymFormat", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/token_filters.ts#L104-L107" + "specLocation": "_types/analysis/token_filters.ts#L138-L141" }, { "kind": "enum", @@ -89627,7 +89807,7 @@ "name": "TokenFilter", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/token_filters.ts#L345-L350", + "specLocation": "_types/analysis/token_filters.ts#L538-L543", "type": { "items": [ { @@ -89654,9 +89834,23 @@ "name": "TokenFilterDefinition", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/token_filters.ts#L352-L404", + "specLocation": "_types/analysis/token_filters.ts#L545-L614", "type": { "items": [ + { + "kind": "instance_of", + "type": { + "name": "ApostropheTokenFilter", + 
"namespace": "_types.analysis" + } + }, + { + "kind": "instance_of", + "type": { + "name": "ArabicNormalizationTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -89664,6 +89858,27 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "CjkBigramTokenFilter", + "namespace": "_types.analysis" + } + }, + { + "kind": "instance_of", + "type": { + "name": "CjkWidthTokenFilter", + "namespace": "_types.analysis" + } + }, + { + "kind": "instance_of", + "type": { + "name": "ClassicTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -89678,6 +89893,13 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "DecimalDigitTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -89706,6 +89928,27 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "FlattenGraphTokenFilter", + "namespace": "_types.analysis" + } + }, + { + "kind": "instance_of", + "type": { + "name": "GermanNormalizationTokenFilter", + "namespace": "_types.analysis" + } + }, + { + "kind": "instance_of", + "type": { + "name": "HindiNormalizationTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -89720,6 +89963,13 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "IndicNormalizationTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -89741,6 +89991,13 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "KeywordRepeatTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -89769,6 +90026,13 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "MinHashTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -89804,6 
+90068,13 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "PersianNormalizationTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -89832,6 +90103,27 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "ScandinavianFoldingTokenFilter", + "namespace": "_types.analysis" + } + }, + { + "kind": "instance_of", + "type": { + "name": "ScandinavianNormalizationTokenFilter", + "namespace": "_types.analysis" + } + }, + { + "kind": "instance_of", + "type": { + "name": "SerbianNormalizationTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -89846,6 +90138,13 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "SoraniNormalizationTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -89923,6 +90222,13 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "JaStopTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -90004,7 +90310,7 @@ }, "kind": "interface", "name": { - "name": "AsciiFoldingTokenFilter", + "name": "ApostropheTokenFilter", "namespace": "_types.analysis" }, "properties": [ @@ -90013,31 +90319,11 @@ "required": true, "type": { "kind": "literal_value", - "value": "asciifolding" - } - }, - { - "name": "preserve_original", - "required": false, - "type": { - "generics": [ - { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - ], - "kind": "instance_of", - "type": { - "name": "Stringified", - "namespace": "_spec_utils" - } + "value": "apostrophe" } } ], - "specLocation": "_types/analysis/token_filters.ts#L169-L172" + "specLocation": "_types/analysis/token_filters.ts#L450-L452" }, { "kind": "interface", @@ -90058,7 +90344,175 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L39-L41" + "specLocation": 
"_types/analysis/token_filters.ts#L41-L43" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "ArabicNormalizationTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "arabic_normalization" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L454-L456" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "AsciiFoldingTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "asciifolding" + } + }, + { + "description": "If `true`, emit both original tokens and folded tokens. Defaults to `false`.", + "name": "preserve_original", + "required": false, + "type": { + "generics": [ + { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + ], + "kind": "instance_of", + "type": { + "name": "Stringified", + "namespace": "_spec_utils" + } + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L213-L217" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "CjkBigramTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "cjk_bigram" + } + }, + { + "description": "Array of character scripts for which to disable bigrams.", + "name": "ignored_scripts", + "required": false, + "type": { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "CjkBigramIgnoredScript", + "namespace": "_types.analysis" + } + } + } + }, + { + "description": "If `true`, emit tokens in both bigram and 
unigram form. If `false`, a CJK character is output in unigram form when it has no adjacent characters. Defaults to `false`.", + "name": "output_unigrams", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L465-L471" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "CjkWidthTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "cjk_width" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L473-L475" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "ClassicTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "classic" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L477-L479" }, { "inherits": { @@ -90082,6 +90536,7 @@ } }, { + "description": "A list of tokens. The filter generates bigrams for these tokens.\nEither this or the `common_words_path` parameter is required.", "name": "common_words", "required": false, "type": { @@ -90096,6 +90551,7 @@ } }, { + "description": "Path to a file containing a list of tokens. The filter generates bigrams for these tokens.\nThis path must be absolute or relative to the `config` location. The file must be UTF-8 encoded. Each token in the file must be separated by a line break.\nEither this or the `common_words` parameter is required.", "name": "common_words_path", "required": false, "type": { @@ -90107,6 +90563,7 @@ } }, { + "description": "If `true`, matches for common words matching are case-insensitive. 
Defaults to `false`.", "name": "ignore_case", "required": false, "type": { @@ -90118,6 +90575,7 @@ } }, { + "description": "If `true`, the filter excludes the following tokens from the output:\n- Unigrams for common words\n- Unigrams for terms followed by common words\nDefaults to `false`. We recommend enabling this parameter for search analyzers.", "name": "query_mode", "required": false, "type": { @@ -90129,7 +90587,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L174-L180" + "specLocation": "_types/analysis/token_filters.ts#L219-L235" }, { "inherits": { @@ -90153,6 +90611,7 @@ } }, { + "description": "Array of token filters. If a token matches the predicate script in the `script` parameter, these filters are applied to the token in the order provided.", "name": "filter", "required": true, "type": { @@ -90167,6 +90626,7 @@ } }, { + "description": "Predicate script used to apply token filters. If a token matches this script, the filters in the `filter` parameter are applied to the token.", "name": "script", "required": true, "type": { @@ -90178,7 +90638,31 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L182-L186" + "specLocation": "_types/analysis/token_filters.ts#L237-L243" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "DecimalDigitTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "decimal_digit" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L481-L483" }, { "inherits": { @@ -90202,6 +90686,7 @@ } }, { + "description": "Character used to separate tokens from payloads. 
Defaults to `|`.", "name": "delimiter", "required": false, "type": { @@ -90213,6 +90698,7 @@ } }, { + "description": "Data type for the stored payload.", "name": "encoding", "required": false, "type": { @@ -90224,7 +90710,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L67-L71" + "specLocation": "_types/analysis/token_filters.ts#L84-L90" }, { "inherits": { @@ -90248,6 +90734,7 @@ } }, { + "description": "Maximum character length of a gram. For custom token filters, defaults to `2`. For the built-in edge_ngram filter, defaults to `1`.", "name": "max_gram", "required": false, "type": { @@ -90259,6 +90746,7 @@ } }, { + "description": "Minimum character length of a gram. Defaults to `1`.", "name": "min_gram", "required": false, "type": { @@ -90270,6 +90758,7 @@ } }, { + "description": "Indicates whether to truncate tokens from the `front` or `back`. Defaults to `front`.", "name": "side", "required": false, "type": { @@ -90281,6 +90770,7 @@ } }, { + "description": "Emits original token when set to `true`. Defaults to `false`.", "name": "preserve_original", "required": false, "type": { @@ -90301,7 +90791,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L78-L84" + "specLocation": "_types/analysis/token_filters.ts#L97-L107" }, { "inherits": { @@ -90325,6 +90815,7 @@ } }, { + "description": "List of elisions to remove.\nTo be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. Both the elision and apostrophe are removed.\nFor custom `elision` filters, either this parameter or `articles_path` must be specified.", "name": "articles", "required": false, "type": { @@ -90339,6 +90830,7 @@ } }, { + "description": "Path to a file that contains a list of elisions to remove.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. 
Each elision in the file must be separated by a line break.\nTo be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. Both the elision and apostrophe are removed.\nFor custom `elision` filters, either this parameter or `articles` must be specified.", "name": "articles_path", "required": false, "type": { @@ -90350,6 +90842,7 @@ } }, { + "description": "If `true`, elision matching is case insensitive. If `false`, elision matching is case sensitive. Defaults to `false`.", "name": "articles_case", "required": false, "type": { @@ -90370,7 +90863,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L188-L193" + "specLocation": "_types/analysis/token_filters.ts#L245-L258" }, { "inherits": { @@ -90394,6 +90887,7 @@ } }, { + "description": "Maximum character length, including whitespace, of the output token. Defaults to `255`. Concatenated tokens longer than this will result in no token output.", "name": "max_output_size", "required": false, "type": { @@ -90405,6 +90899,7 @@ } }, { + "description": "Character to use to concatenate the token stream input. 
Defaults to a space.", "name": "separator", "required": false, "type": { @@ -90416,7 +90911,79 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L195-L199" + "specLocation": "_types/analysis/token_filters.ts#L260-L266" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "FlattenGraphTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "flatten_graph" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L485-L487" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "GermanNormalizationTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "german_normalization" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L489-L491" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "HindiNormalizationTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "hindi_normalization" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L493-L495" }, { "inherits": { @@ -90440,6 +91007,7 @@ } }, { + "description": "If `true`, duplicate tokens are removed from the filter’s output. 
Defaults to `true`.", "name": "dedup", "required": false, "type": { @@ -90451,6 +91019,7 @@ } }, { + "description": "One or more `.dic` files (e.g, `en_US.dic`, my_custom.dic) to use for the Hunspell dictionary.\nBy default, the `hunspell` filter uses all `.dic` files in the `<$ES_PATH_CONF>/hunspell/` directory specified using the `lang`, `language`, or `locale` parameter.", "name": "dictionary", "required": false, "type": { @@ -90462,6 +91031,11 @@ } }, { + "aliases": [ + "lang", + "language" + ], + "description": "Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary.", "name": "locale", "required": true, "type": { @@ -90473,6 +91047,7 @@ } }, { + "description": "If `true`, only the longest stemmed version of each token is included in the output. If `false`, all stemmed versions of the token are included. Defaults to `false`.", "name": "longest_only", "required": false, "type": { @@ -90484,7 +91059,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L201-L207" + "specLocation": "_types/analysis/token_filters.ts#L268-L280" }, { "inherits": { @@ -90506,9 +91081,45 @@ "kind": "literal_value", "value": "hyphenation_decompounder" } + }, + { + "description": "Path to an Apache FOP (Formatting Objects Processor) XML hyphenation pattern file.\nThis path must be absolute or relative to the `config` location. Only FOP v1.2 compatible files are supported.", + "name": "hyphenation_patterns_path", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "If `true`, do not match sub tokens in tokens that are in the word list. Defaults to `false`.", + "name": "no_sub_matches", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, + { + "description": "If `true`, do not allow overlapping tokens. 
Defaults to `false`.", + "name": "no_overlapping_matches", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } } ], - "specLocation": "_types/analysis/token_filters.ts#L57-L59" + "specLocation": "_types/analysis/token_filters.ts#L67-L76" }, { "inherits": { @@ -90524,17 +91135,7 @@ }, "properties": [ { - "name": "hyphenation_patterns_path", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { + "description": "Maximum subword character length. Longer subword tokens are excluded from the output. Defaults to `15`.", "name": "max_subword_size", "required": false, "type": { @@ -90546,6 +91147,7 @@ } }, { + "description": "Minimum subword character length. Shorter subword tokens are excluded from the output. Defaults to `2`.", "name": "min_subword_size", "required": false, "type": { @@ -90557,6 +91159,7 @@ } }, { + "description": "Minimum word character length. Shorter word tokens are excluded from the output. Defaults to `5`.", "name": "min_word_size", "required": false, "type": { @@ -90568,6 +91171,7 @@ } }, { + "description": "If `true`, only include the longest matching subword. Defaults to `false`.", "name": "only_longest_match", "required": false, "type": { @@ -90579,6 +91183,7 @@ } }, { + "description": "A list of subwords to look for in the token stream. If found, the subword is included in the token output.\nEither this parameter or `word_list_path` must be specified.", "name": "word_list", "required": false, "type": { @@ -90593,6 +91198,7 @@ } }, { + "description": "Path to a file that contains a list of subwords to find in the token stream. If found, the subword is included in the token output.\nThis path must be absolute or relative to the config location, and the file must be UTF-8 encoded. 
Each token in the file must be separated by a line break.\nEither this parameter or `word_list` must be specified.", "name": "word_list_path", "required": false, "type": { @@ -90604,7 +91210,31 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L43-L51" + "specLocation": "_types/analysis/token_filters.ts#L45-L61" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "IndicNormalizationTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "indic_normalization" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L497-L499" }, { "inherits": { @@ -90628,6 +91258,7 @@ } }, { + "description": "Indicates whether to keep or remove the specified token types.", "name": "mode", "required": false, "type": { @@ -90639,8 +91270,9 @@ } }, { + "description": "List of token types to keep or remove.", "name": "types", - "required": false, + "required": true, "type": { "kind": "array_of", "value": { @@ -90653,7 +91285,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L219-L223" + "specLocation": "_types/analysis/token_filters.ts#L287-L293" }, { "inherits": { @@ -90677,6 +91309,7 @@ } }, { + "description": "List of words to keep. Only tokens that match words in this list are included in the output.\nEither this parameter or `keep_words_path` must be specified.", "name": "keep_words", "required": false, "type": { @@ -90691,6 +91324,7 @@ } }, { + "description": "If `true`, lowercase all keep words. Defaults to `false`.", "name": "keep_words_case", "required": false, "type": { @@ -90702,6 +91336,7 @@ } }, { + "description": "Path to a file that contains a list of words to keep. Only tokens that match words in this list are included in the output.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. 
Each word in the file must be separated by a line break.\nEither this parameter or `keep_words` must be specified.", "name": "keep_words_path", "required": false, "type": { @@ -90713,7 +91348,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L225-L230" + "specLocation": "_types/analysis/token_filters.ts#L295-L306" }, { "inherits": { @@ -90737,6 +91372,7 @@ } }, { + "description": "If `true`, matching for the `keywords` and `keywords_path` parameters ignores letter case. Defaults to `false`.", "name": "ignore_case", "required": false, "type": { @@ -90748,6 +91384,7 @@ } }, { + "description": "Array of keywords. Tokens that match these keywords are not stemmed.\nThis parameter, `keywords_path`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`.", "name": "keywords", "required": false, "type": { @@ -90774,6 +91411,7 @@ } }, { + "description": "Path to a file that contains a list of keywords. Tokens that match these keywords are not stemmed.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each word in the file must be separated by a line break.\nThis parameter, `keywords`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`.", "name": "keywords_path", "required": false, "type": { @@ -90785,6 +91423,7 @@ } }, { + "description": "Java regular expression used to match tokens. Tokens that match this expression are marked as keywords and not stemmed.\nThis parameter, `keywords`, or `keywords_path` must be specified. 
You cannot specify this parameter and `keywords` or `keywords_path`.", "name": "keywords_pattern", "required": false, "type": { @@ -90796,7 +91435,31 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L232-L238" + "specLocation": "_types/analysis/token_filters.ts#L308-L322" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "KeywordRepeatTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "keyword_repeat" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L501-L503" }, { "inherits": { @@ -90820,7 +91483,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L240-L242" + "specLocation": "_types/analysis/token_filters.ts#L324-L326" }, { "inherits": { @@ -90844,6 +91507,7 @@ } }, { + "description": "Maximum character length of a token. Longer tokens are excluded from the output. Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`.", "name": "max", "required": false, "type": { @@ -90855,6 +91519,7 @@ } }, { + "description": "Minimum character length of a token. Shorter tokens are excluded from the output. Defaults to `0`.", "name": "min", "required": false, "type": { @@ -90866,7 +91531,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L244-L248" + "specLocation": "_types/analysis/token_filters.ts#L328-L334" }, { "inherits": { @@ -90890,6 +91555,7 @@ } }, { + "description": "If `true`, the limit filter exhausts the token stream, even if the `max_token_count` has already been reached. Defaults to `false`.", "name": "consume_all_tokens", "required": false, "type": { @@ -90901,6 +91567,7 @@ } }, { + "description": "Maximum number of tokens to keep. Once this limit is reached, any remaining tokens are excluded from the output. 
Defaults to `1`.", "name": "max_token_count", "required": false, "type": { @@ -90921,7 +91588,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L250-L254" + "specLocation": "_types/analysis/token_filters.ts#L336-L342" }, { "inherits": { @@ -90945,18 +91612,91 @@ } }, { + "description": "Language-specific lowercase token filter to use.", "name": "language", "required": false, "type": { "kind": "instance_of", "type": { - "name": "string", + "name": "LowercaseTokenFilterLanguages", + "namespace": "_types.analysis" + } + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L350-L354" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "MinHashTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "min_hash" + } + }, + { + "description": "Number of buckets to which hashes are assigned. Defaults to `512`.", + "name": "bucket_count", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } + }, + { + "description": "Number of ways to hash each token in the stream. Defaults to `1`.", + "name": "hash_count", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } + }, + { + "description": "Number of hashes to keep from each bucket. Defaults to `1`.\nHashes are retained by ascending size, starting with the bucket’s smallest hash first.", + "name": "hash_set_size", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } + }, + { + "description": "If `true`, the filter fills empty buckets with the value of the first non-empty bucket to its circular right if the `hash_set_size` is `1`. 
If the `bucket_count` argument is greater than 1, this parameter defaults to `true`. Otherwise, this parameter defaults to `false`.", + "name": "with_rotation", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", "namespace": "_builtins" } } } ], - "specLocation": "_types/analysis/token_filters.ts#L256-L259" + "specLocation": "_types/analysis/token_filters.ts#L505-L516" }, { "inherits": { @@ -90980,6 +91720,7 @@ } }, { + "description": "A list of token filters to apply to incoming tokens.", "name": "filters", "required": true, "type": { @@ -90994,6 +91735,7 @@ } }, { + "description": "If `true` (the default) then emit the original token in addition to the filtered tokens.", "name": "preserve_original", "required": false, "type": { @@ -91014,7 +91756,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L261-L265" + "specLocation": "_types/analysis/token_filters.ts#L356-L362" }, { "inherits": { @@ -91038,6 +91780,7 @@ } }, { + "description": "Maximum length of characters in a gram. Defaults to `2`.", "name": "max_gram", "required": false, "type": { @@ -91049,6 +91792,7 @@ } }, { + "description": "Minimum length of characters in a gram. Defaults to `1`.", "name": "min_gram", "required": false, "type": { @@ -91060,6 +91804,7 @@ } }, { + "description": "Emits original token when set to `true`. 
Defaults to `false`.", "name": "preserve_original", "required": false, "type": { @@ -91080,7 +91825,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L267-L272" + "specLocation": "_types/analysis/token_filters.ts#L364-L372" }, { "inherits": { @@ -91104,6 +91849,7 @@ } }, { + "description": "An array of part-of-speech tags that should be removed.", "name": "stoptags", "required": false, "type": { @@ -91118,7 +91864,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L274-L277" + "specLocation": "_types/analysis/nori-plugin.ts#L37-L41" }, { "inherits": { @@ -91142,6 +91888,7 @@ } }, { + "description": "A list of regular expressions to match.", "name": "patterns", "required": true, "type": { @@ -91156,6 +91903,7 @@ } }, { + "description": "If set to `true` (the default) it will emit the original token.", "name": "preserve_original", "required": false, "type": { @@ -91176,7 +91924,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L279-L283" + "specLocation": "_types/analysis/token_filters.ts#L374-L380" }, { "inherits": { @@ -91200,6 +91948,7 @@ } }, { + "description": "If `true`, all substrings matching the pattern parameter’s regular expression are replaced. If `false`, the filter replaces only the first matching substring in each token. Defaults to `true`.", "name": "all", "required": false, "type": { @@ -91211,17 +91960,7 @@ } }, { - "name": "flags", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { + "description": "Regular expression, written in Java’s regular expression syntax. The filter replaces token substrings matching this pattern with the substring in the `replacement` parameter.", "name": "pattern", "required": true, "type": { @@ -91233,6 +91972,7 @@ } }, { + "description": "Replacement substring. 
Defaults to an empty substring (`\"\"`).", "name": "replacement", "required": false, "type": { @@ -91244,7 +91984,31 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L285-L291" + "specLocation": "_types/analysis/token_filters.ts#L382-L390" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "PersianNormalizationTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "persian_normalization" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L518-L520" }, { "inherits": { @@ -91268,7 +92032,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L293-L295" + "specLocation": "_types/analysis/token_filters.ts#L392-L394" }, { "inherits": { @@ -91292,6 +92056,7 @@ } }, { + "description": "Script containing a condition used to filter incoming tokens. Only tokens that match this script are included in the output.", "name": "script", "required": true, "type": { @@ -91303,7 +92068,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L297-L300" + "specLocation": "_types/analysis/token_filters.ts#L396-L400" }, { "inherits": { @@ -91327,7 +92092,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L302-L304" + "specLocation": "_types/analysis/token_filters.ts#L402-L404" }, { "inherits": { @@ -91351,7 +92116,79 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L306-L308" + "specLocation": "_types/analysis/token_filters.ts#L406-L408" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "ScandinavianFoldingTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "scandinavian_folding" + } + } + ], + 
"specLocation": "_types/analysis/token_filters.ts#L522-L524" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "ScandinavianNormalizationTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "scandinavian_normalization" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L526-L528" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "SerbianNormalizationTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "serbian_normalization" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L530-L532" }, { "inherits": { @@ -91375,6 +92212,7 @@ } }, { + "description": "String used in shingles as a replacement for empty positions that do not contain a token. This filler token is only used in shingles, not original unigrams. Defaults to an underscore (`_`).", "name": "filler_token", "required": false, "type": { @@ -91386,52 +92224,49 @@ } }, { + "description": "Maximum number of tokens to concatenate when creating shingles. Defaults to `2`.", "name": "max_shingle_size", "required": false, "type": { - "items": [ + "generics": [ { "kind": "instance_of", "type": { "name": "integer", "namespace": "_types" } - }, - { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } } ], - "kind": "union_of" + "kind": "instance_of", + "type": { + "name": "Stringified", + "namespace": "_spec_utils" + } } }, { + "description": "Minimum number of tokens to concatenate when creating shingles. 
Defaults to `2`.", "name": "min_shingle_size", "required": false, "type": { - "items": [ + "generics": [ { "kind": "instance_of", "type": { "name": "integer", "namespace": "_types" } - }, - { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } } ], - "kind": "union_of" + "kind": "instance_of", + "type": { + "name": "Stringified", + "namespace": "_spec_utils" + } } }, { + "description": "If `true`, the output includes the original input tokens. If `false`, the output only includes shingles; the original input tokens are removed. Defaults to `true`.", "name": "output_unigrams", "required": false, "type": { @@ -91443,6 +92278,7 @@ } }, { + "description": "If `true`, the output includes the original input tokens only if no shingles are produced; if shingles are produced, the output only includes shingles. Defaults to `false`.", "name": "output_unigrams_if_no_shingles", "required": false, "type": { @@ -91454,6 +92290,7 @@ } }, { + "description": "Separator used to concatenate adjacent tokens to form a shingle. 
Defaults to a space (`\" \"`).", "name": "token_separator", "required": false, "type": { @@ -91465,7 +92302,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L86-L94" + "specLocation": "_types/analysis/token_filters.ts#L109-L123" }, { "inherits": { @@ -91489,6 +92326,7 @@ } }, { + "description": "Controls the language used by the stemmer.", "name": "language", "required": false, "type": { @@ -91500,7 +92338,31 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L310-L313" + "specLocation": "_types/analysis/token_filters.ts#L410-L414" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "SoraniNormalizationTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "sorani_normalization" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L534-L536" }, { "inherits": { @@ -91524,6 +92386,7 @@ } }, { + "description": "A list of mapping rules to use.", "name": "rules", "required": false, "type": { @@ -91538,6 +92401,7 @@ } }, { + "description": "A path (either relative to `config` location, or absolute) to a list of mappings.", "name": "rules_path", "required": false, "type": { @@ -91549,7 +92413,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L315-L319" + "specLocation": "_types/analysis/token_filters.ts#L416-L422" }, { "inherits": { @@ -91587,7 +92451,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L321-L325" + "specLocation": "_types/analysis/token_filters.ts#L424-L428" }, { "inherits": { @@ -91611,6 +92475,7 @@ } }, { + "description": "If `true`, stop word matching is case insensitive. For example, if `true`, a stop word of the matches and removes `The`, `THE`, or `the`. 
Defaults to `false`.", "name": "ignore_case", "required": false, "type": { @@ -91622,6 +92487,7 @@ } }, { + "description": "If `true`, the last token of a stream is removed if it’s a stop word. Defaults to `true`.", "name": "remove_trailing", "required": false, "type": { @@ -91633,6 +92499,7 @@ } }, { + "description": "Language value, such as `_arabic_` or `_thai_`. Defaults to `_english_`.", "name": "stopwords", "required": false, "type": { @@ -91644,6 +92511,7 @@ } }, { + "description": "Path to a file that contains a list of stop words to remove.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each stop word in the file must be separated by a line break.", "name": "stopwords_path", "required": false, "type": { @@ -91655,12 +92523,12 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L96-L102" + "specLocation": "_types/analysis/token_filters.ts#L125-L136" }, { "inherits": { "type": { - "name": "TokenFilterBase", + "name": "SynonymTokenFilterBase", "namespace": "_types.analysis" } }, @@ -91677,8 +92545,25 @@ "kind": "literal_value", "value": "synonym_graph" } - }, + } + ], + "specLocation": "_types/analysis/token_filters.ts#L163-L165" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "SynonymTokenFilterBase", + "namespace": "_types.analysis" + }, + "properties": [ { + "description": "Expands definitions for equivalent synonym rules. Defaults to `true`.", "name": "expand", "required": false, "type": { @@ -91690,6 +92575,7 @@ } }, { + "description": "Sets the synonym rules format.", "name": "format", "required": false, "type": { @@ -91701,6 +92587,7 @@ } }, { + "description": "If `true` ignores errors while parsing the synonym rules. It is important to note that only those synonym rules which cannot get parsed are ignored. 
Defaults to the value of the `updateable` setting.", "name": "lenient", "required": false, "type": { @@ -91712,6 +92599,7 @@ } }, { + "description": "Used to define inline synonyms.", "name": "synonyms", "required": false, "type": { @@ -91726,6 +92614,7 @@ } }, { + "description": "Used to provide a synonym file. This path must be absolute or relative to the `config` location.", "name": "synonyms_path", "required": false, "type": { @@ -91737,6 +92626,7 @@ } }, { + "description": "Provide a synonym set created via Synonyms Management APIs.", "name": "synonyms_set", "required": false, "type": { @@ -91748,6 +92638,11 @@ } }, { + "deprecation": { + "description": "", + "version": "6.0.0" + }, + "description": "Controls the tokenizers that will be used to tokenize the synonym, this parameter is for backwards compatibility for indices that created before 6.0.", "name": "tokenizer", "required": false, "type": { @@ -91759,6 +92654,7 @@ } }, { + "description": "If `true` allows reloading search analyzers to pick up changes to synonym files. Only to be used for search analyzers. 
Defaults to `false`.", "name": "updateable", "required": false, "type": { @@ -91770,12 +92666,12 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L109-L119" + "specLocation": "_types/analysis/token_filters.ts#L143-L161" }, { "inherits": { "type": { - "name": "TokenFilterBase", + "name": "SynonymTokenFilterBase", "namespace": "_types.analysis" } }, @@ -91792,100 +92688,9 @@ "kind": "literal_value", "value": "synonym" } - }, - { - "name": "expand", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "name": "format", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "SynonymFormat", - "namespace": "_types.analysis" - } - } - }, - { - "name": "lenient", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "name": "synonyms", - "required": false, - "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - } - }, - { - "name": "synonyms_path", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { - "name": "synonyms_set", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { - "name": "tokenizer", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { - "name": "updateable", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } } ], - "specLocation": "_types/analysis/token_filters.ts#L121-L131" + "specLocation": "_types/analysis/token_filters.ts#L167-L169" }, { "inherits": { @@ -91909,7 +92714,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L327-L329" + 
"specLocation": "_types/analysis/token_filters.ts#L430-L432" }, { "inherits": { @@ -91933,6 +92738,7 @@ } }, { + "description": "Character limit for each token. Tokens exceeding this limit are truncated. Defaults to `10`.", "name": "length", "required": false, "type": { @@ -91944,7 +92750,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L331-L334" + "specLocation": "_types/analysis/token_filters.ts#L434-L438" }, { "inherits": { @@ -91968,6 +92774,7 @@ } }, { + "description": "If `true`, only remove duplicate tokens in the same position. Defaults to `false`.", "name": "only_on_same_position", "required": false, "type": { @@ -91979,7 +92786,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L336-L339" + "specLocation": "_types/analysis/token_filters.ts#L440-L444" }, { "inherits": { @@ -92003,12 +92810,12 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L341-L343" + "specLocation": "_types/analysis/token_filters.ts#L446-L448" }, { "inherits": { "type": { - "name": "TokenFilterBase", + "name": "WordDelimiterTokenFilterBase", "namespace": "_types.analysis" } }, @@ -92027,6 +92834,7 @@ } }, { + "description": "If `true`, the filter adjusts the offsets of split or catenated tokens to better reflect their actual position in the token stream. Defaults to `true`.", "name": "adjust_offsets", "required": false, "type": { @@ -92038,7 +92846,8 @@ } }, { - "name": "catenate_all", + "description": "If `true`, the filter skips tokens with a keyword attribute of true. 
Defaults to `false`.", + "name": "ignore_keywords", "required": false, "type": { "kind": "instance_of", @@ -92047,9 +92856,26 @@ "namespace": "_builtins" } } - }, + } + ], + "specLocation": "_types/analysis/token_filters.ts#L205-L211" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "WordDelimiterTokenFilterBase", + "namespace": "_types.analysis" + }, + "properties": [ { - "name": "catenate_numbers", + "description": "If `true`, the filter produces catenated tokens for chains of alphanumeric characters separated by non-alphabetic delimiters. Defaults to `false`.", + "name": "catenate_all", "required": false, "type": { "kind": "instance_of", @@ -92060,7 +92886,8 @@ } }, { - "name": "catenate_words", + "description": "If `true`, the filter produces catenated tokens for chains of numeric characters separated by non-alphabetic delimiters. Defaults to `false`.", + "name": "catenate_numbers", "required": false, "type": { "kind": "instance_of", @@ -92071,7 +92898,8 @@ } }, { - "name": "generate_number_parts", + "description": "If `true`, the filter produces catenated tokens for chains of alphabetical characters separated by non-alphabetic delimiters. Defaults to `false`.", + "name": "catenate_words", "required": false, "type": { "kind": "instance_of", @@ -92082,7 +92910,8 @@ } }, { - "name": "generate_word_parts", + "description": "If `true`, the filter includes tokens consisting of only numeric characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`.", + "name": "generate_number_parts", "required": false, "type": { "kind": "instance_of", @@ -92093,7 +92922,8 @@ } }, { - "name": "ignore_keywords", + "description": "If `true`, the filter includes tokens consisting of only alphabetical characters in the output. If `false`, the filter excludes these tokens from the output. 
Defaults to `true`.", + "name": "generate_word_parts", "required": false, "type": { "kind": "instance_of", @@ -92104,6 +92934,7 @@ } }, { + "description": "If `true`, the filter includes the original version of any split tokens in the output. This original version includes non-alphanumeric delimiters. Defaults to `false`.", "name": "preserve_original", "required": false, "type": { @@ -92124,6 +92955,7 @@ } }, { + "description": "Array of tokens the filter won’t split.", "name": "protected_words", "required": false, "type": { @@ -92138,6 +92970,7 @@ } }, { + "description": "Path to a file that contains a list of tokens the filter won’t split.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each token in the file must be separated by a line break.", "name": "protected_words_path", "required": false, "type": { @@ -92149,6 +92982,7 @@ } }, { + "description": "If `true`, the filter splits tokens at letter case transitions. For example: camelCase -> [ camel, Case ]. Defaults to `true`.", "name": "split_on_case_change", "required": false, "type": { @@ -92160,6 +92994,7 @@ } }, { + "description": "If `true`, the filter splits tokens at letter-number transitions. For example: j2se -> [ j, 2, se ]. Defaults to `true`.", "name": "split_on_numerics", "required": false, "type": { @@ -92171,6 +93006,7 @@ } }, { + "description": "If `true`, the filter removes the English possessive (`'s`) from the end of each token. For example: O'Neil's -> [ O, Neil ]. Defaults to `true`.", "name": "stem_english_possessive", "required": false, "type": { @@ -92182,6 +93018,7 @@ } }, { + "description": "Array of custom type mappings for characters. This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters.", "name": "type_table", "required": false, "type": { @@ -92196,6 +93033,7 @@ } }, { + "description": "Path to a file that contains custom type mappings for characters. 
This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters.", "name": "type_table_path", "required": false, "type": { @@ -92207,12 +93045,12 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L150-L167" + "specLocation": "_types/analysis/token_filters.ts#L171-L199" }, { "inherits": { "type": { - "name": "TokenFilterBase", + "name": "WordDelimiterTokenFilterBase", "namespace": "_types.analysis" } }, @@ -92229,167 +93067,44 @@ "kind": "literal_value", "value": "word_delimiter" } - }, - { - "name": "catenate_all", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "name": "catenate_numbers", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "name": "catenate_words", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "name": "generate_number_parts", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "name": "generate_word_parts", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "name": "preserve_original", - "required": false, - "type": { - "generics": [ - { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - ], - "kind": "instance_of", - "type": { - "name": "Stringified", - "namespace": "_spec_utils" - } - } - }, - { - "name": "protected_words", - "required": false, - "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - } - }, - { - "name": "protected_words_path", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": 
"string", - "namespace": "_builtins" - } - } - }, - { - "name": "split_on_case_change", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "name": "split_on_numerics", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "name": "stem_english_possessive", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, + } + ], + "specLocation": "_types/analysis/token_filters.ts#L201-L203" + }, + { + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "kind": "interface", + "name": { + "name": "JaStopTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ { - "name": "type_table", - "required": false, + "name": "type", + "required": true, "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } + "kind": "literal_value", + "value": "ja_stop" } }, { - "name": "type_table_path", + "name": "stopwords", "required": false, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "StopWords", + "namespace": "_types.analysis" } } } ], - "specLocation": "_types/analysis/token_filters.ts#L133-L148" + "specLocation": "_types/analysis/kuromoji-plugin.ts#L32-L35" }, { "inherits": { @@ -92424,7 +93139,7 @@ } } ], - "specLocation": "_types/analysis/kuromoji-plugin.ts#L47-L50" + "specLocation": "_types/analysis/kuromoji-plugin.ts#L53-L56" }, { "inherits": { @@ -92459,7 +93174,7 @@ } } ], - "specLocation": "_types/analysis/kuromoji-plugin.ts#L42-L45" + "specLocation": "_types/analysis/kuromoji-plugin.ts#L48-L51" }, { "inherits": { @@ -92497,7 +93212,7 @@ } } ], - "specLocation": "_types/analysis/kuromoji-plugin.ts#L37-L40" + "specLocation": 
"_types/analysis/kuromoji-plugin.ts#L43-L46" }, { "inherits": { @@ -92898,7 +93613,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L53-L55" + "specLocation": "_types/analysis/token_filters.ts#L63-L65" }, { "codegenNames": [ @@ -93909,7 +94624,7 @@ } } ], - "specLocation": "_types/analysis/kuromoji-plugin.ts#L58-L67" + "specLocation": "_types/analysis/kuromoji-plugin.ts#L64-L73" }, { "inherits": { @@ -93980,7 +94695,7 @@ } } ], - "specLocation": "_types/analysis/nori-plugin.ts#L28-L34" + "specLocation": "_types/analysis/nori-plugin.ts#L29-L35" }, { "kind": "enum", @@ -101725,7 +102440,7 @@ }, { "name": "phase", - "required": true, + "required": false, "type": { "kind": "instance_of", "type": { @@ -104409,6 +105124,31 @@ }, "specLocation": "ingest/_types/Processors.ts#L1170-L1175" }, + { + "kind": "enum", + "members": [ + { + "name": "success" + }, + { + "name": "error" + }, + { + "name": "error_ignored" + }, + { + "name": "skipped" + }, + { + "name": "dropped" + } + ], + "name": { + "name": "PipelineSimulationStatusOptions", + "namespace": "ingest._types" + }, + "specLocation": "ingest/_types/Simulation.ts#L51-L57" + }, { "kind": "enum", "members": [ @@ -131672,7 +132412,7 @@ } } ], - "specLocation": "ingest/_types/Simulation.ts#L62-L76" + "specLocation": "ingest/_types/Simulation.ts#L69-L83" }, { "kind": "interface", @@ -131711,14 +132451,14 @@ "value": { "kind": "instance_of", "type": { - "name": "PipelineSimulation", + "name": "PipelineProcessorResult", "namespace": "ingest._types" } } } } ], - "specLocation": "ingest/_types/Simulation.ts#L46-L50" + "specLocation": "ingest/_types/Simulation.ts#L45-L49" }, { "attachedBehaviors": [ @@ -131858,7 +132598,7 @@ } } ], - "specLocation": "ingest/_types/Simulation.ts#L78-L108" + "specLocation": "ingest/_types/Simulation.ts#L85-L115" }, { "kind": "interface", @@ -131907,7 +132647,7 @@ } } ], - "specLocation": "ingest/_types/Simulation.ts#L29-L37" + "specLocation": "ingest/_types/Simulation.ts#L28-L36" }, { 
"kind": "interface", @@ -131929,12 +132669,12 @@ } } ], - "specLocation": "ingest/_types/Simulation.ts#L39-L44" + "specLocation": "ingest/_types/Simulation.ts#L38-L43" }, { "kind": "interface", "name": { - "name": "PipelineSimulation", + "name": "PipelineProcessorResult", "namespace": "ingest._types" }, "properties": [ @@ -131977,8 +132717,8 @@ "type": { "kind": "instance_of", "type": { - "name": "ActionStatusOptions", - "namespace": "watcher._types" + "name": "PipelineSimulationStatusOptions", + "namespace": "ingest._types" } } }, @@ -132016,7 +132756,7 @@ } } ], - "specLocation": "ingest/_types/Simulation.ts#L52-L60" + "specLocation": "ingest/_types/Simulation.ts#L59-L67" }, { "kind": "interface", diff --git a/output/schema/schema.json b/output/schema/schema.json index fa35d35aca..4f06536f72 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ -68646,6 +68646,30 @@ "tag": "type" } }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "ApostropheTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "apostrophe" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L450-L452" + }, { "kind": "interface", "name": { @@ -68700,6 +68724,30 @@ ], "specLocation": "_types/analysis/analyzers.ts#L72-L77" }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "ArabicNormalizationTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "arabic_normalization" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L454-L456" + }, { "kind": "interface", "name": { @@ -68776,6 +68824,7 @@ } }, { + "description": "If `true`, emit both original 
tokens and folded tokens. Defaults to `false`.", "name": "preserve_original", "required": false, "type": { @@ -68796,7 +68845,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L169-L172" + "specLocation": "_types/analysis/token_filters.ts#L213-L217" }, { "kind": "interface", @@ -69290,6 +69339,127 @@ ], "specLocation": "_types/analysis/analyzers.ts#L126-L130" }, + { + "kind": "enum", + "members": [ + { + "name": "han" + }, + { + "name": "hangul" + }, + { + "name": "hiragana" + }, + { + "name": "katakana" + } + ], + "name": { + "name": "CjkBigramIgnoredScript", + "namespace": "_types.analysis" + }, + "specLocation": "_types/analysis/token_filters.ts#L458-L463" + }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "CjkBigramTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "cjk_bigram" + } + }, + { + "description": "Array of character scripts for which to disable bigrams.", + "name": "ignored_scripts", + "required": false, + "type": { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "CjkBigramIgnoredScript", + "namespace": "_types.analysis" + } + } + } + }, + { + "description": "If `true`, emit tokens in both bigram and unigram form. If `false`, a CJK character is output in unigram form when it has no adjacent characters. 
Defaults to `false`.", + "name": "output_unigrams", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L465-L471" + }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "CjkWidthTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "cjk_width" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L473-L475" + }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "ClassicTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "classic" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L477-L479" + }, { "kind": "interface", "inherits": { @@ -69347,6 +69517,7 @@ } }, { + "description": "A list of tokens. The filter generates bigrams for these tokens.\nEither this or the `common_words_path` parameter is required.", "name": "common_words", "required": false, "type": { @@ -69361,6 +69532,7 @@ } }, { + "description": "Path to a file containing a list of tokens. The filter generates bigrams for these tokens.\nThis path must be absolute or relative to the `config` location. The file must be UTF-8 encoded. Each token in the file must be separated by a line break.\nEither this or the `common_words` parameter is required.", "name": "common_words_path", "required": false, "type": { @@ -69372,6 +69544,7 @@ } }, { + "description": "If `true`, matches for common words matching are case-insensitive. 
Defaults to `false`.", "name": "ignore_case", "required": false, "type": { @@ -69383,6 +69556,7 @@ } }, { + "description": "If `true`, the filter excludes the following tokens from the output:\n- Unigrams for common words\n- Unigrams for terms followed by common words\nDefaults to `false`. We recommend enabling this parameter for search analyzers.", "name": "query_mode", "required": false, "type": { @@ -69394,7 +69568,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L174-L180" + "specLocation": "_types/analysis/token_filters.ts#L219-L235" }, { "kind": "interface", @@ -69410,17 +69584,7 @@ }, "properties": [ { - "name": "hyphenation_patterns_path", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { + "description": "Maximum subword character length. Longer subword tokens are excluded from the output. Defaults to `15`.", "name": "max_subword_size", "required": false, "type": { @@ -69432,6 +69596,7 @@ } }, { + "description": "Minimum subword character length. Shorter subword tokens are excluded from the output. Defaults to `2`.", "name": "min_subword_size", "required": false, "type": { @@ -69443,6 +69608,7 @@ } }, { + "description": "Minimum word character length. Shorter word tokens are excluded from the output. Defaults to `5`.", "name": "min_word_size", "required": false, "type": { @@ -69454,6 +69620,7 @@ } }, { + "description": "If `true`, only include the longest matching subword. Defaults to `false`.", "name": "only_longest_match", "required": false, "type": { @@ -69465,6 +69632,7 @@ } }, { + "description": "A list of subwords to look for in the token stream. If found, the subword is included in the token output.\nEither this parameter or `word_list_path` must be specified.", "name": "word_list", "required": false, "type": { @@ -69479,6 +69647,7 @@ } }, { + "description": "Path to a file that contains a list of subwords to find in the token stream. 
If found, the subword is included in the token output.\nThis path must be absolute or relative to the config location, and the file must be UTF-8 encoded. Each token in the file must be separated by a line break.\nEither this parameter or `word_list` must be specified.", "name": "word_list_path", "required": false, "type": { @@ -69490,7 +69659,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L43-L51" + "specLocation": "_types/analysis/token_filters.ts#L45-L61" }, { "kind": "interface", @@ -69514,6 +69683,7 @@ } }, { + "description": "Array of token filters. If a token matches the predicate script in the `script` parameter, these filters are applied to the token in the order provided.", "name": "filter", "required": true, "type": { @@ -69528,6 +69698,7 @@ } }, { + "description": "Predicate script used to apply token filters. If a token matches this script, the filters in the `filter` parameter are applied to the token.", "name": "script", "required": true, "type": { @@ -69539,7 +69710,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L182-L186" + "specLocation": "_types/analysis/token_filters.ts#L237-L243" }, { "kind": "interface", @@ -69784,6 +69955,30 @@ ], "specLocation": "_types/analysis/analyzers.ts#L139-L143" }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "DecimalDigitTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "decimal_digit" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L481-L483" + }, { "kind": "enum", "members": [ @@ -69801,7 +69996,7 @@ "name": "DelimitedPayloadEncoding", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/token_filters.ts#L61-L65" + "specLocation": "_types/analysis/token_filters.ts#L78-L82" }, { "kind": "interface", @@ -69825,6 +70020,7 @@ } }, { + 
"description": "Character used to separate tokens from payloads. Defaults to `|`.", "name": "delimiter", "required": false, "type": { @@ -69836,6 +70032,7 @@ } }, { + "description": "Data type for the stored payload.", "name": "encoding", "required": false, "type": { @@ -69847,7 +70044,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L67-L71" + "specLocation": "_types/analysis/token_filters.ts#L84-L90" }, { "kind": "interface", @@ -69871,7 +70068,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L53-L55" + "specLocation": "_types/analysis/token_filters.ts#L63-L65" }, { "kind": "interface", @@ -69941,7 +70138,7 @@ "name": "EdgeNGramSide", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/token_filters.ts#L73-L76" + "specLocation": "_types/analysis/token_filters.ts#L92-L95" }, { "kind": "interface", @@ -69965,6 +70162,7 @@ } }, { + "description": "Maximum character length of a gram. For custom token filters, defaults to `2`. For the built-in edge_ngram filter, defaults to `1`.", "name": "max_gram", "required": false, "type": { @@ -69976,6 +70174,7 @@ } }, { + "description": "Minimum character length of a gram. Defaults to `1`.", "name": "min_gram", "required": false, "type": { @@ -69987,6 +70186,7 @@ } }, { + "description": "Indicates whether to truncate tokens from the `front` or `back`. Defaults to `front`.", "name": "side", "required": false, "type": { @@ -69998,6 +70198,7 @@ } }, { + "description": "Emits original token when set to `true`. Defaults to `false`.", "name": "preserve_original", "required": false, "type": { @@ -70018,7 +70219,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L78-L84" + "specLocation": "_types/analysis/token_filters.ts#L97-L107" }, { "kind": "interface", @@ -70114,6 +70315,7 @@ } }, { + "description": "List of elisions to remove.\nTo be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. 
Both the elision and apostrophe are removed.\nFor custom `elision` filters, either this parameter or `articles_path` must be specified.", "name": "articles", "required": false, "type": { @@ -70128,6 +70330,7 @@ } }, { + "description": "Path to a file that contains a list of elisions to remove.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each elision in the file must be separated by a line break.\nTo be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. Both the elision and apostrophe are removed.\nFor custom `elision` filters, either this parameter or `articles` must be specified.", "name": "articles_path", "required": false, "type": { @@ -70139,6 +70342,7 @@ } }, { + "description": "If `true`, elision matching is case insensitive. If `false`, elision matching is case sensitive. Defaults to `false`.", "name": "articles_case", "required": false, "type": { @@ -70159,7 +70363,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L188-L193" + "specLocation": "_types/analysis/token_filters.ts#L245-L258" }, { "kind": "interface", @@ -70360,6 +70564,7 @@ } }, { + "description": "Maximum character length, including whitespace, of the output token. Defaults to `255`. Concatenated tokens longer than this will result in no token output.", "name": "max_output_size", "required": false, "type": { @@ -70371,6 +70576,7 @@ } }, { + "description": "Character to use to concatenate the token stream input. 
Defaults to a space.", "name": "separator", "required": false, "type": { @@ -70382,7 +70588,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L195-L199" + "specLocation": "_types/analysis/token_filters.ts#L260-L266" }, { "kind": "interface", @@ -70438,6 +70644,30 @@ ], "specLocation": "_types/analysis/analyzers.ts#L165-L170" }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "FlattenGraphTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "flatten_graph" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L485-L487" + }, { "kind": "interface", "name": { @@ -70600,6 +70830,30 @@ ], "specLocation": "_types/analysis/analyzers.ts#L186-L191" }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "GermanNormalizationTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "german_normalization" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L489-L491" + }, { "kind": "interface", "name": { @@ -70694,6 +70948,30 @@ ], "specLocation": "_types/analysis/analyzers.ts#L199-L204" }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "HindiNormalizationTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "hindi_normalization" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L493-L495" + }, { "kind": "interface", "inherits": { @@ -70808,6 +71086,7 @@ } }, { + "description": "If `true`, duplicate tokens are removed 
from the filter’s output. Defaults to `true`.", "name": "dedup", "required": false, "type": { @@ -70819,6 +71098,7 @@ } }, { + "description": "One or more `.dic` files (e.g, `en_US.dic`, my_custom.dic) to use for the Hunspell dictionary.\nBy default, the `hunspell` filter uses all `.dic` files in the `<$ES_PATH_CONF>/hunspell/` directory specified using the `lang`, `language`, or `locale` parameter.", "name": "dictionary", "required": false, "type": { @@ -70830,6 +71110,11 @@ } }, { + "aliases": [ + "lang", + "language" + ], + "description": "Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary.", "name": "locale", "required": true, "type": { @@ -70841,6 +71126,7 @@ } }, { + "description": "If `true`, only the longest stemmed version of each token is included in the output. If `false`, all stemmed versions of the token are included. Defaults to `false`.", "name": "longest_only", "required": false, "type": { @@ -70852,7 +71138,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L201-L207" + "specLocation": "_types/analysis/token_filters.ts#L268-L280" }, { "kind": "interface", @@ -70874,22 +71160,58 @@ "kind": "literal_value", "value": "hyphenation_decompounder" } - } - ], - "specLocation": "_types/analysis/token_filters.ts#L57-L59" - }, - { - "kind": "interface", - "name": { - "name": "IcuAnalyzer", - "namespace": "_types.analysis" - }, - "properties": [ + }, { - "name": "type", + "description": "Path to an Apache FOP (Formatting Objects Processor) XML hyphenation pattern file.\nThis path must be absolute or relative to the `config` location. Only FOP v1.2 compatible files are supported.", + "name": "hyphenation_patterns_path", "required": true, "type": { - "kind": "literal_value", + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "If `true`, do not match sub tokens in tokens that are in the word list. 
Defaults to `false`.", + "name": "no_sub_matches", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + }, + { + "description": "If `true`, do not allow overlapping tokens. Defaults to `false`.", + "name": "no_overlapping_matches", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L67-L76" + }, + { + "kind": "interface", + "name": { + "name": "IcuAnalyzer", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", "value": "icu_analyzer" } }, @@ -71395,6 +71717,30 @@ ], "specLocation": "_types/analysis/icu-plugin.ts#L24-L28" }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "IndicNormalizationTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "indic_normalization" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L497-L499" + }, { "kind": "interface", "name": { @@ -71557,6 +71903,41 @@ ], "specLocation": "_types/analysis/analyzers.ts#L227-L232" }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "JaStopTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "ja_stop" + } + }, + { + "name": "stopwords", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "StopWords", + "namespace": "_types.analysis" + } + } + } + ], + "specLocation": "_types/analysis/kuromoji-plugin.ts#L32-L35" + }, { "kind": "interface", "inherits": { @@ 
-71579,7 +71960,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L240-L242" + "specLocation": "_types/analysis/token_filters.ts#L324-L326" }, { "kind": "enum", @@ -71595,7 +71976,7 @@ "name": "KeepTypesMode", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/token_filters.ts#L214-L217" + "specLocation": "_types/analysis/token_filters.ts#L282-L285" }, { "kind": "interface", @@ -71619,6 +72000,7 @@ } }, { + "description": "Indicates whether to keep or remove the specified token types.", "name": "mode", "required": false, "type": { @@ -71630,8 +72012,9 @@ } }, { + "description": "List of token types to keep or remove.", "name": "types", - "required": false, + "required": true, "type": { "kind": "array_of", "value": { @@ -71644,7 +72027,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L219-L223" + "specLocation": "_types/analysis/token_filters.ts#L287-L293" }, { "kind": "interface", @@ -71668,6 +72051,7 @@ } }, { + "description": "List of words to keep. Only tokens that match words in this list are included in the output.\nEither this parameter or `keep_words_path` must be specified.", "name": "keep_words", "required": false, "type": { @@ -71682,6 +72066,7 @@ } }, { + "description": "If `true`, lowercase all keep words. Defaults to `false`.", "name": "keep_words_case", "required": false, "type": { @@ -71693,6 +72078,7 @@ } }, { + "description": "Path to a file that contains a list of words to keep. Only tokens that match words in this list are included in the output.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. 
Each word in the file must be separated by a line break.\nEither this parameter or `keep_words` must be specified.", "name": "keep_words_path", "required": false, "type": { @@ -71704,7 +72090,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L225-L230" + "specLocation": "_types/analysis/token_filters.ts#L295-L306" }, { "kind": "interface", @@ -71761,6 +72147,7 @@ } }, { + "description": "If `true`, matching for the `keywords` and `keywords_path` parameters ignores letter case. Defaults to `false`.", "name": "ignore_case", "required": false, "type": { @@ -71772,6 +72159,7 @@ } }, { + "description": "Array of keywords. Tokens that match these keywords are not stemmed.\nThis parameter, `keywords_path`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`.", "name": "keywords", "required": false, "type": { @@ -71798,6 +72186,7 @@ } }, { + "description": "Path to a file that contains a list of keywords. Tokens that match these keywords are not stemmed.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each word in the file must be separated by a line break.\nThis parameter, `keywords`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`.", "name": "keywords_path", "required": false, "type": { @@ -71809,6 +72198,7 @@ } }, { + "description": "Java regular expression used to match tokens. Tokens that match this expression are marked as keywords and not stemmed.\nThis parameter, `keywords`, or `keywords_path` must be specified. 
You cannot specify this parameter and `keywords` or `keywords_pattern`.", "name": "keywords_pattern", "required": false, "type": { @@ -71820,7 +72210,31 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L232-L238" + "specLocation": "_types/analysis/token_filters.ts#L308-L322" + }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "KeywordRepeatTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "keyword_repeat" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L501-L503" }, { "kind": "interface", @@ -71896,7 +72310,7 @@ } } ], - "specLocation": "_types/analysis/kuromoji-plugin.ts#L25-L29" + "specLocation": "_types/analysis/kuromoji-plugin.ts#L26-L30" }, { "kind": "interface", @@ -71942,7 +72356,7 @@ } } ], - "specLocation": "_types/analysis/kuromoji-plugin.ts#L31-L35" + "specLocation": "_types/analysis/kuromoji-plugin.ts#L37-L41" }, { "kind": "interface", @@ -71980,7 +72394,7 @@ } } ], - "specLocation": "_types/analysis/kuromoji-plugin.ts#L37-L40" + "specLocation": "_types/analysis/kuromoji-plugin.ts#L43-L46" }, { "kind": "interface", @@ -72015,7 +72429,7 @@ } } ], - "specLocation": "_types/analysis/kuromoji-plugin.ts#L42-L45" + "specLocation": "_types/analysis/kuromoji-plugin.ts#L48-L51" }, { "kind": "interface", @@ -72050,7 +72464,7 @@ } } ], - "specLocation": "_types/analysis/kuromoji-plugin.ts#L47-L50" + "specLocation": "_types/analysis/kuromoji-plugin.ts#L53-L56" }, { "kind": "enum", @@ -72069,7 +72483,7 @@ "name": "KuromojiTokenizationMode", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/kuromoji-plugin.ts#L52-L56" + "specLocation": "_types/analysis/kuromoji-plugin.ts#L58-L62" }, { "kind": "interface", @@ -72173,7 +72587,7 @@ } } ], - "specLocation": "_types/analysis/kuromoji-plugin.ts#L58-L67" + 
"specLocation": "_types/analysis/kuromoji-plugin.ts#L64-L73" }, { "kind": "interface", @@ -72251,6 +72665,7 @@ } }, { + "description": "Maximum character length of a token. Longer tokens are excluded from the output. Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`.", "name": "max", "required": false, "type": { @@ -72262,6 +72677,7 @@ } }, { + "description": "Minimum character length of a token. Shorter tokens are excluded from the output. Defaults to `0`.", "name": "min", "required": false, "type": { @@ -72273,7 +72689,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L244-L248" + "specLocation": "_types/analysis/token_filters.ts#L328-L334" }, { "kind": "interface", @@ -72321,6 +72737,7 @@ } }, { + "description": "If `true`, the limit filter exhausts the token stream, even if the `max_token_count` has already been reached. Defaults to `false`.", "name": "consume_all_tokens", "required": false, "type": { @@ -72332,6 +72749,7 @@ } }, { + "description": "Maximum number of tokens to keep. Once this limit is reached, any remaining tokens are excluded from the output. 
Defaults to `1`.", "name": "max_token_count", "required": false, "type": { @@ -72352,7 +72770,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L250-L254" + "specLocation": "_types/analysis/token_filters.ts#L336-L342" }, { "kind": "interface", @@ -72448,18 +72866,38 @@ } }, { + "description": "Language-specific lowercase token filter to use.", "name": "language", "required": false, "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "LowercaseTokenFilterLanguages", + "namespace": "_types.analysis" } } } ], - "specLocation": "_types/analysis/token_filters.ts#L256-L259" + "specLocation": "_types/analysis/token_filters.ts#L350-L354" + }, + { + "kind": "enum", + "members": [ + { + "name": "greek" + }, + { + "name": "irish" + }, + { + "name": "turkish" + } + ], + "name": { + "name": "LowercaseTokenFilterLanguages", + "namespace": "_types.analysis" + }, + "specLocation": "_types/analysis/token_filters.ts#L344-L348" }, { "kind": "interface", @@ -72534,6 +72972,78 @@ ], "specLocation": "_types/analysis/char_filters.ts#L51-L55" }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "MinHashTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "min_hash" + } + }, + { + "description": "Number of buckets to which hashes are assigned. Defaults to `512`.", + "name": "bucket_count", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } + }, + { + "description": "Number of ways to hash each token in the stream. Defaults to `1`.", + "name": "hash_count", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } + }, + { + "description": "Number of hashes to keep from each bucket. 
Defaults to `1`.\nHashes are retained by ascending size, starting with the bucket’s smallest hash first.", + "name": "hash_set_size", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "integer", + "namespace": "_types" + } + } + }, + { + "description": "If `true`, the filter fills empty buckets with the value of the first non-empty bucket to its circular right if the `hash_set_size` is `1`. If the `bucket_count` argument is greater than 1, this parameter defaults to `true`. Otherwise, this parameter defaults to `false`.", + "name": "with_rotation", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "boolean", + "namespace": "_builtins" + } + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L505-L516" + }, { "kind": "interface", "inherits": { @@ -72556,6 +73066,7 @@ } }, { + "description": "A list of token filters to apply to incoming tokens.", "name": "filters", "required": true, "type": { @@ -72570,6 +73081,7 @@ } }, { + "description": "If `true` (the default) then emit the original token in addition to the filtered tokens.", "name": "preserve_original", "required": false, "type": { @@ -72590,7 +73102,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L261-L265" + "specLocation": "_types/analysis/token_filters.ts#L356-L362" }, { "kind": "interface", @@ -72614,6 +73126,7 @@ } }, { + "description": "Maximum length of characters in a gram. Defaults to `2`.", "name": "max_gram", "required": false, "type": { @@ -72625,6 +73138,7 @@ } }, { + "description": "Minimum length of characters in a gram. Defaults to `1`.", "name": "min_gram", "required": false, "type": { @@ -72636,6 +73150,7 @@ } }, { + "description": "Emits original token when set to `true`. 
Defaults to `false`.", "name": "preserve_original", "required": false, "type": { @@ -72656,7 +73171,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L267-L272" + "specLocation": "_types/analysis/token_filters.ts#L364-L372" }, { "kind": "interface", @@ -72816,7 +73331,7 @@ "name": "NoriDecompoundMode", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/nori-plugin.ts#L22-L26" + "specLocation": "_types/analysis/nori-plugin.ts#L23-L27" }, { "kind": "interface", @@ -72840,6 +73355,7 @@ } }, { + "description": "An array of part-of-speech tags that should be removed.", "name": "stoptags", "required": false, "type": { @@ -72854,7 +73370,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L274-L277" + "specLocation": "_types/analysis/nori-plugin.ts#L37-L41" }, { "kind": "interface", @@ -72925,7 +73441,7 @@ } } ], - "specLocation": "_types/analysis/nori-plugin.ts#L28-L34" + "specLocation": "_types/analysis/nori-plugin.ts#L29-L35" }, { "kind": "type_alias", @@ -73239,6 +73755,7 @@ } }, { + "description": "A list of regular expressions to match.", "name": "patterns", "required": true, "type": { @@ -73253,6 +73770,7 @@ } }, { + "description": "If set to `true` (the default) it will emit the original token.", "name": "preserve_original", "required": false, "type": { @@ -73273,7 +73791,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L279-L283" + "specLocation": "_types/analysis/token_filters.ts#L374-L380" }, { "kind": "interface", @@ -73354,6 +73872,7 @@ } }, { + "description": "If `true`, all substrings matching the pattern parameter’s regular expression are replaced. If `false`, the filter replaces only the first matching substring in each token. 
Defaults to `true`.", "name": "all", "required": false, "type": { @@ -73365,17 +73884,7 @@ } }, { - "name": "flags", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { + "description": "Regular expression, written in Java’s regular expression syntax. The filter replaces token substrings matching this pattern with the substring in the `replacement` parameter.", "name": "pattern", "required": true, "type": { @@ -73387,6 +73896,7 @@ } }, { + "description": "Replacement substring. Defaults to an empty substring (`\"\"`).", "name": "replacement", "required": false, "type": { @@ -73398,7 +73908,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L285-L291" + "specLocation": "_types/analysis/token_filters.ts#L382-L390" }, { "kind": "interface", @@ -73497,6 +74007,30 @@ ], "specLocation": "_types/analysis/analyzers.ts#L255-L259" }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "PersianNormalizationTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "persian_normalization" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L518-L520" + }, { "kind": "enum", "members": [ @@ -73751,7 +74285,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L293-L295" + "specLocation": "_types/analysis/token_filters.ts#L392-L394" }, { "kind": "interface", @@ -73829,6 +74363,7 @@ } }, { + "description": "Script containing a condition used to filter incoming tokens. 
Only tokens that match this script are included in the output.", "name": "script", "required": true, "type": { @@ -73840,7 +74375,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L297-L300" + "specLocation": "_types/analysis/token_filters.ts#L396-L400" }, { "kind": "interface", @@ -73864,7 +74399,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L302-L304" + "specLocation": "_types/analysis/token_filters.ts#L402-L404" }, { "kind": "interface", @@ -73888,7 +74423,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L306-L308" + "specLocation": "_types/analysis/token_filters.ts#L406-L408" }, { "kind": "interface", @@ -74000,8 +74535,14 @@ }, { "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, "name": { - "name": "SerbianAnalyzer", + "name": "ScandinavianFoldingTokenFilter", "namespace": "_types.analysis" }, "properties": [ @@ -74010,12 +74551,54 @@ "required": true, "type": { "kind": "literal_value", - "value": "serbian" + "value": "scandinavian_folding" } - }, + } + ], + "specLocation": "_types/analysis/token_filters.ts#L522-L524" + }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "ScandinavianNormalizationTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ { - "name": "stopwords", - "required": false, + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "scandinavian_normalization" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L526-L528" + }, + { + "kind": "interface", + "name": { + "name": "SerbianAnalyzer", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "serbian" + } + }, + { + "name": "stopwords", + "required": false, "type": { "kind": "instance_of", "type": { @@ -74052,6 +74635,30 @@ 
], "specLocation": "_types/analysis/analyzers.ts#L282-L287" }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "SerbianNormalizationTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "serbian_normalization" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L530-L532" + }, { "kind": "interface", "inherits": { @@ -74074,6 +74681,7 @@ } }, { + "description": "String used in shingles as a replacement for empty positions that do not contain a token. This filler token is only used in shingles, not original unigrams. Defaults to an underscore (`_`).", "name": "filler_token", "required": false, "type": { @@ -74085,52 +74693,49 @@ } }, { + "description": "Maximum number of tokens to concatenate when creating shingles. Defaults to `2`.", "name": "max_shingle_size", "required": false, "type": { - "kind": "union_of", - "items": [ + "kind": "instance_of", + "generics": [ { "kind": "instance_of", "type": { "name": "integer", "namespace": "_types" } - }, - { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } } - ] + ], + "type": { + "name": "Stringified", + "namespace": "_spec_utils" + } } }, { + "description": "Minimum number of tokens to concatenate when creating shingles. Defaults to `2`.", "name": "min_shingle_size", "required": false, "type": { - "kind": "union_of", - "items": [ + "kind": "instance_of", + "generics": [ { "kind": "instance_of", "type": { "name": "integer", "namespace": "_types" } - }, - { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } } - ] + ], + "type": { + "name": "Stringified", + "namespace": "_spec_utils" + } } }, { + "description": "If `true`, the output includes the original input tokens. 
If `false`, the output only includes shingles; the original input tokens are removed. Defaults to `true`.", "name": "output_unigrams", "required": false, "type": { @@ -74142,6 +74747,7 @@ } }, { + "description": "If `true`, the output includes the original input tokens only if no shingles are produced; if shingles are produced, the output only includes shingles. Defaults to `false`.", "name": "output_unigrams_if_no_shingles", "required": false, "type": { @@ -74153,6 +74759,7 @@ } }, { + "description": "Separator used to concatenate adjacent tokens to form a shingle. Defaults to a space (`\" \"`).", "name": "token_separator", "required": false, "type": { @@ -74164,7 +74771,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L86-L94" + "specLocation": "_types/analysis/token_filters.ts#L109-L123" }, { "kind": "interface", @@ -74327,6 +74934,9 @@ { "kind": "enum", "members": [ + { + "name": "Arabic" + }, { "name": "Armenian" }, @@ -74345,6 +74955,9 @@ { "name": "English" }, + { + "name": "Estonian" + }, { "name": "Finnish" }, @@ -74363,9 +74976,15 @@ { "name": "Italian" }, + { + "name": "Irish" + }, { "name": "Kp" }, + { + "name": "Lithuanian" + }, { "name": "Lovins" }, @@ -74384,6 +75003,9 @@ { "name": "Russian" }, + { + "name": "Serbian" + }, { "name": "Spanish" }, @@ -74398,7 +75020,7 @@ "name": "SnowballLanguage", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/languages.ts#L20-L43" + "specLocation": "_types/analysis/languages.ts#L20-L48" }, { "kind": "interface", @@ -74422,6 +75044,7 @@ } }, { + "description": "Controls the language used by the stemmer.", "name": "language", "required": false, "type": { @@ -74433,7 +75056,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L310-L313" + "specLocation": "_types/analysis/token_filters.ts#L410-L414" }, { "kind": "interface", @@ -74489,6 +75112,30 @@ ], "specLocation": "_types/analysis/analyzers.ts#L289-L294" }, + { + "kind": "interface", + "inherits": { + "type": { + 
"name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "SoraniNormalizationTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "sorani_normalization" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L534-L536" + }, { "kind": "interface", "name": { @@ -74656,6 +75303,7 @@ } }, { + "description": "A list of mapping rules to use.", "name": "rules", "required": false, "type": { @@ -74670,6 +75318,7 @@ } }, { + "description": "A path (either relative to `config` location, or absolute) to a list of mappings.", "name": "rules_path", "required": false, "type": { @@ -74681,7 +75330,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L315-L319" + "specLocation": "_types/analysis/token_filters.ts#L416-L422" }, { "kind": "interface", @@ -74719,7 +75368,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L321-L325" + "specLocation": "_types/analysis/token_filters.ts#L424-L428" }, { "kind": "interface", @@ -74801,6 +75450,7 @@ } }, { + "description": "If `true`, stop word matching is case insensitive. For example, if `true`, a stop word of the matches and removes `The`, `THE`, or `the`. Defaults to `false`.", "name": "ignore_case", "required": false, "type": { @@ -74812,6 +75462,7 @@ } }, { + "description": "If `true`, the last token of a stream is removed if it’s a stop word. Defaults to `true`.", "name": "remove_trailing", "required": false, "type": { @@ -74823,6 +75474,7 @@ } }, { + "description": "Language value, such as `_arabic_` or `_thai_`. Defaults to `_english_`.", "name": "stopwords", "required": false, "type": { @@ -74834,6 +75486,7 @@ } }, { + "description": "Path to a file that contains a list of stop words to remove.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. 
Each stop word in the file must be separated by a line break.", "name": "stopwords_path", "required": false, "type": { @@ -74845,7 +75498,128 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L96-L102" + "specLocation": "_types/analysis/token_filters.ts#L125-L136" + }, + { + "kind": "enum", + "members": [ + { + "name": "_arabic_" + }, + { + "name": "_armenian_" + }, + { + "name": "_basque_" + }, + { + "name": "_bengali_" + }, + { + "name": "_brazilian_" + }, + { + "name": "_bulgarian_" + }, + { + "name": "_catalan_" + }, + { + "name": "_cjk_" + }, + { + "name": "_czech_" + }, + { + "name": "_danish_" + }, + { + "name": "_dutch_" + }, + { + "name": "_english_" + }, + { + "name": "_estonian_" + }, + { + "name": "_finnish_" + }, + { + "name": "_french_" + }, + { + "name": "_galician_" + }, + { + "name": "_german_" + }, + { + "name": "_greek_" + }, + { + "name": "_hindi_" + }, + { + "name": "_hungarian_" + }, + { + "name": "_indonesian_" + }, + { + "name": "_irish_" + }, + { + "name": "_italian_" + }, + { + "name": "_latvian_" + }, + { + "name": "_lithuanian_" + }, + { + "name": "_norwegian_" + }, + { + "name": "_persian_" + }, + { + "name": "_portuguese_" + }, + { + "name": "_romanian_" + }, + { + "name": "_russian_" + }, + { + "name": "_serbian_" + }, + { + "name": "_sorani_" + }, + { + "name": "_spanish_" + }, + { + "name": "_swedish_" + }, + { + "name": "_thai_" + }, + { + "name": "_turkish_" + }, + { + "name": "_none_" + } + ], + "name": { + "name": "StopWordLanguage", + "namespace": "_types.analysis" + }, + "specLocation": "_types/analysis/StopWords.ts#L20-L58" }, { "kind": "type_alias", @@ -74854,15 +75628,15 @@ "name": "StopWords", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/StopWords.ts#L20-L26", + "specLocation": "_types/analysis/StopWords.ts#L60-L66", "type": { "kind": "union_of", "items": [ { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "StopWordLanguage", + "namespace": 
"_types.analysis" } }, { @@ -74946,13 +75720,13 @@ "name": "SynonymFormat", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/token_filters.ts#L104-L107" + "specLocation": "_types/analysis/token_filters.ts#L138-L141" }, { "kind": "interface", "inherits": { "type": { - "name": "TokenFilterBase", + "name": "SynonymTokenFilterBase", "namespace": "_types.analysis" } }, @@ -74968,8 +75742,49 @@ "kind": "literal_value", "value": "synonym_graph" } - }, + } + ], + "specLocation": "_types/analysis/token_filters.ts#L163-L165" + }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "SynonymTokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "SynonymTokenFilter", + "namespace": "_types.analysis" + }, + "properties": [ + { + "name": "type", + "required": true, + "type": { + "kind": "literal_value", + "value": "synonym" + } + } + ], + "specLocation": "_types/analysis/token_filters.ts#L167-L169" + }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "SynonymTokenFilterBase", + "namespace": "_types.analysis" + }, + "properties": [ { + "description": "Expands definitions for equivalent synonym rules. Defaults to `true`.", "name": "expand", "required": false, "type": { @@ -74981,6 +75796,7 @@ } }, { + "description": "Sets the synonym rules format.", "name": "format", "required": false, "type": { @@ -74992,6 +75808,7 @@ } }, { + "description": "If `true` ignores errors while parsing the synonym rules. It is important to note that only those synonym rules which cannot get parsed are ignored. Defaults to the value of the `updateable` setting.", "name": "lenient", "required": false, "type": { @@ -75003,6 +75820,7 @@ } }, { + "description": "Used to define inline synonyms.", "name": "synonyms", "required": false, "type": { @@ -75017,6 +75835,7 @@ } }, { + "description": "Used to provide a synonym file. 
This path must be absolute or relative to the `config` location.", "name": "synonyms_path", "required": false, "type": { @@ -75028,6 +75847,7 @@ } }, { + "description": "Provide a synonym set created via Synonyms Management APIs.", "name": "synonyms_set", "required": false, "type": { @@ -75039,6 +75859,11 @@ } }, { + "deprecation": { + "description": "", + "version": "6.0.0" + }, + "description": "Controls the tokenizers that will be used to tokenize the synonym, this parameter is for backwards compatibility for indices that created before 6.0.", "name": "tokenizer", "required": false, "type": { @@ -75050,6 +75875,7 @@ } }, { + "description": "If `true` allows reloading search analyzers to pick up changes to synonym files. Only to be used for search analyzers. Defaults to `false`.", "name": "updateable", "required": false, "type": { @@ -75061,18 +75887,12 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L109-L119" + "specLocation": "_types/analysis/token_filters.ts#L143-L161" }, { "kind": "interface", - "inherits": { - "type": { - "name": "TokenFilterBase", - "namespace": "_types.analysis" - } - }, "name": { - "name": "SynonymTokenFilter", + "name": "ThaiAnalyzer", "namespace": "_types.analysis" }, "properties": [ @@ -75081,131 +75901,22 @@ "required": true, "type": { "kind": "literal_value", - "value": "synonym" + "value": "thai" } }, { - "name": "expand", + "name": "stopwords", "required": false, "type": { "kind": "instance_of", "type": { - "name": "boolean", - "namespace": "_builtins" + "name": "StopWords", + "namespace": "_types.analysis" } } }, { - "name": "format", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "SynonymFormat", - "namespace": "_types.analysis" - } - } - }, - { - "name": "lenient", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "name": "synonyms", - "required": false, - "type": { - "kind": "array_of", - 
"value": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - } - }, - { - "name": "synonyms_path", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { - "name": "synonyms_set", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { - "name": "tokenizer", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { - "name": "updateable", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - } - ], - "specLocation": "_types/analysis/token_filters.ts#L121-L131" - }, - { - "kind": "interface", - "name": { - "name": "ThaiAnalyzer", - "namespace": "_types.analysis" - }, - "properties": [ - { - "name": "type", - "required": true, - "type": { - "kind": "literal_value", - "value": "thai" - } - }, - { - "name": "stopwords", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "StopWords", - "namespace": "_types.analysis" - } - } - }, - { - "name": "stopwords_path", + "name": "stopwords_path", "required": false, "type": { "kind": "instance_of", @@ -75282,7 +75993,7 @@ "name": "TokenFilter", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/token_filters.ts#L345-L350", + "specLocation": "_types/analysis/token_filters.ts#L538-L543", "type": { "kind": "union_of", "items": [ @@ -75322,7 +76033,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L39-L41" + "specLocation": "_types/analysis/token_filters.ts#L41-L43" }, { "kind": "type_alias", @@ -75330,10 +76041,24 @@ "name": "TokenFilterDefinition", "namespace": "_types.analysis" }, - "specLocation": "_types/analysis/token_filters.ts#L352-L404", + "specLocation": "_types/analysis/token_filters.ts#L545-L614", "type": { "kind": 
"union_of", "items": [ + { + "kind": "instance_of", + "type": { + "name": "ApostropheTokenFilter", + "namespace": "_types.analysis" + } + }, + { + "kind": "instance_of", + "type": { + "name": "ArabicNormalizationTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -75341,6 +76066,27 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "CjkBigramTokenFilter", + "namespace": "_types.analysis" + } + }, + { + "kind": "instance_of", + "type": { + "name": "CjkWidthTokenFilter", + "namespace": "_types.analysis" + } + }, + { + "kind": "instance_of", + "type": { + "name": "ClassicTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -75355,6 +76101,13 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "DecimalDigitTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -75383,6 +76136,27 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "FlattenGraphTokenFilter", + "namespace": "_types.analysis" + } + }, + { + "kind": "instance_of", + "type": { + "name": "GermanNormalizationTokenFilter", + "namespace": "_types.analysis" + } + }, + { + "kind": "instance_of", + "type": { + "name": "HindiNormalizationTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -75397,6 +76171,13 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "IndicNormalizationTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -75418,6 +76199,13 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "KeywordRepeatTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -75446,6 +76234,13 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": 
"MinHashTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -75481,6 +76276,13 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "PersianNormalizationTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -75509,6 +76311,27 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "ScandinavianFoldingTokenFilter", + "namespace": "_types.analysis" + } + }, + { + "kind": "instance_of", + "type": { + "name": "ScandinavianNormalizationTokenFilter", + "namespace": "_types.analysis" + } + }, + { + "kind": "instance_of", + "type": { + "name": "SerbianNormalizationTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -75523,6 +76346,13 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "SoraniNormalizationTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -75600,6 +76430,13 @@ "namespace": "_types.analysis" } }, + { + "kind": "instance_of", + "type": { + "name": "JaStopTokenFilter", + "namespace": "_types.analysis" + } + }, { "kind": "instance_of", "type": { @@ -75891,7 +76728,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L327-L329" + "specLocation": "_types/analysis/token_filters.ts#L430-L432" }, { "kind": "interface", @@ -75915,6 +76752,7 @@ } }, { + "description": "Character limit for each token. Tokens exceeding this limit are truncated. Defaults to `10`.", "name": "length", "required": false, "type": { @@ -75926,7 +76764,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L331-L334" + "specLocation": "_types/analysis/token_filters.ts#L434-L438" }, { "kind": "interface", @@ -76039,6 +76877,7 @@ } }, { + "description": "If `true`, only remove duplicate tokens in the same position. 
Defaults to `false`.", "name": "only_on_same_position", "required": false, "type": { @@ -76050,7 +76889,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L336-L339" + "specLocation": "_types/analysis/token_filters.ts#L440-L444" }, { "kind": "interface", @@ -76074,7 +76913,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L341-L343" + "specLocation": "_types/analysis/token_filters.ts#L446-L448" }, { "kind": "interface", @@ -76148,7 +76987,7 @@ "kind": "interface", "inherits": { "type": { - "name": "TokenFilterBase", + "name": "WordDelimiterTokenFilterBase", "namespace": "_types.analysis" } }, @@ -76166,6 +77005,7 @@ } }, { + "description": "If `true`, the filter adjusts the offsets of split or catenated tokens to better reflect their actual position in the token stream. Defaults to `true`.", "name": "adjust_offsets", "required": false, "type": { @@ -76177,61 +77017,7 @@ } }, { - "name": "catenate_all", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "name": "catenate_numbers", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "name": "catenate_words", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "name": "generate_number_parts", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "name": "generate_word_parts", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { + "description": "If `true`, the filter skips tokens with a keyword attribute of true. 
Defaults to `false`.", "name": "ignore_keywords", "required": false, "type": { @@ -76241,118 +77027,15 @@ "namespace": "_builtins" } } - }, - { - "name": "preserve_original", - "required": false, - "type": { - "kind": "instance_of", - "generics": [ - { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - ], - "type": { - "name": "Stringified", - "namespace": "_spec_utils" - } - } - }, - { - "name": "protected_words", - "required": false, - "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - } - }, - { - "name": "protected_words_path", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { - "name": "split_on_case_change", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "name": "split_on_numerics", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "name": "stem_english_possessive", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - }, - { - "name": "type_table", - "required": false, - "type": { - "kind": "array_of", - "value": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - } - }, - { - "name": "type_table_path", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } } ], - "specLocation": "_types/analysis/token_filters.ts#L150-L167" + "specLocation": "_types/analysis/token_filters.ts#L205-L211" }, { "kind": "interface", "inherits": { "type": { - "name": "TokenFilterBase", + "name": "WordDelimiterTokenFilterBase", "namespace": "_types.analysis" } }, @@ -76368,8 +77051,25 @@ "kind": "literal_value", 
"value": "word_delimiter" } - }, + } + ], + "specLocation": "_types/analysis/token_filters.ts#L201-L203" + }, + { + "kind": "interface", + "inherits": { + "type": { + "name": "TokenFilterBase", + "namespace": "_types.analysis" + } + }, + "name": { + "name": "WordDelimiterTokenFilterBase", + "namespace": "_types.analysis" + }, + "properties": [ { + "description": "If `true`, the filter produces catenated tokens for chains of alphanumeric characters separated by non-alphabetic delimiters. Defaults to `false`.", "name": "catenate_all", "required": false, "type": { @@ -76381,6 +77081,7 @@ } }, { + "description": "If `true`, the filter produces catenated tokens for chains of numeric characters separated by non-alphabetic delimiters. Defaults to `false`.", "name": "catenate_numbers", "required": false, "type": { @@ -76392,6 +77093,7 @@ } }, { + "description": "If `true`, the filter produces catenated tokens for chains of alphabetical characters separated by non-alphabetic delimiters. Defaults to `false`.", "name": "catenate_words", "required": false, "type": { @@ -76403,6 +77105,7 @@ } }, { + "description": "If `true`, the filter includes tokens consisting of only numeric characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`.", "name": "generate_number_parts", "required": false, "type": { @@ -76414,6 +77117,7 @@ } }, { + "description": "If `true`, the filter includes tokens consisting of only alphabetical characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`.", "name": "generate_word_parts", "required": false, "type": { @@ -76425,6 +77129,7 @@ } }, { + "description": "If `true`, the filter includes the original version of any split tokens in the output. This original version includes non-alphanumeric delimiters. 
Defaults to `false`.", "name": "preserve_original", "required": false, "type": { @@ -76445,6 +77150,7 @@ } }, { + "description": "Array of tokens the filter won’t split.", "name": "protected_words", "required": false, "type": { @@ -76459,6 +77165,7 @@ } }, { + "description": "Path to a file that contains a list of tokens the filter won’t split.\nThis path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each token in the file must be separated by a line break.", "name": "protected_words_path", "required": false, "type": { @@ -76470,6 +77177,7 @@ } }, { + "description": "If `true`, the filter splits tokens at letter case transitions. For example: camelCase -> [ camel, Case ]. Defaults to `true`.", "name": "split_on_case_change", "required": false, "type": { @@ -76481,6 +77189,7 @@ } }, { + "description": "If `true`, the filter splits tokens at letter-number transitions. For example: j2se -> [ j, 2, se ]. Defaults to `true`.", "name": "split_on_numerics", "required": false, "type": { @@ -76492,6 +77201,7 @@ } }, { + "description": "If `true`, the filter removes the English possessive (`'s`) from the end of each token. For example: O'Neil's -> [ O, Neil ]. Defaults to `true`.", "name": "stem_english_possessive", "required": false, "type": { @@ -76503,6 +77213,7 @@ } }, { + "description": "Array of custom type mappings for characters. This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters.", "name": "type_table", "required": false, "type": { @@ -76517,6 +77228,7 @@ } }, { + "description": "Path to a file that contains custom type mappings for characters. 
This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters.", "name": "type_table_path", "required": false, "type": { @@ -76528,7 +77240,7 @@ } } ], - "specLocation": "_types/analysis/token_filters.ts#L133-L148" + "specLocation": "_types/analysis/token_filters.ts#L171-L199" }, { "kind": "interface", diff --git a/output/typescript/types.ts b/output/typescript/types.ts index 6aa9b43e2a..16c0cfc99d 100644 --- a/output/typescript/types.ts +++ b/output/typescript/types.ts @@ -4459,6 +4459,10 @@ export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetr export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer +export interface AnalysisApostropheTokenFilter extends 
AnalysisTokenFilterBase { + type: 'apostrophe' +} + export interface AnalysisArabicAnalyzer { type: 'arabic' stopwords?: AnalysisStopWords @@ -4466,6 +4470,10 @@ export interface AnalysisArabicAnalyzer { stem_exclusion?: string[] } +export interface AnalysisArabicNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'arabic_normalization' +} + export interface AnalysisArmenianAnalyzer { type: 'armenian' stopwords?: AnalysisStopWords @@ -4538,6 +4546,22 @@ export interface AnalysisCjkAnalyzer { stopwords_path?: string } +export type AnalysisCjkBigramIgnoredScript = 'han' | 'hangul' | 'hiragana' | 'katakana' + +export interface AnalysisCjkBigramTokenFilter extends AnalysisTokenFilterBase { + type: 'cjk_bigram' + ignored_scripts?: AnalysisCjkBigramIgnoredScript[] + output_unigrams?: boolean +} + +export interface AnalysisCjkWidthTokenFilter extends AnalysisTokenFilterBase { + type: 'cjk_width' +} + +export interface AnalysisClassicTokenFilter extends AnalysisTokenFilterBase { + type: 'classic' +} + export interface AnalysisClassicTokenizer extends AnalysisTokenizerBase { type: 'classic' max_token_length?: integer @@ -4552,7 +4576,6 @@ export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase } export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilterBase { - hyphenation_patterns_path?: string max_subword_size?: integer min_subword_size?: integer min_word_size?: integer @@ -4595,6 +4618,10 @@ export interface AnalysisDanishAnalyzer { stopwords_path?: string } +export interface AnalysisDecimalDigitTokenFilter extends AnalysisTokenFilterBase { + type: 'decimal_digit' +} + export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity' export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase { @@ -4674,6 +4701,10 @@ export interface AnalysisFinnishAnalyzer { stem_exclusion?: string[] } +export interface AnalysisFlattenGraphTokenFilter extends AnalysisTokenFilterBase { + type: 
'flatten_graph' +} + export interface AnalysisFrenchAnalyzer { type: 'french' stopwords?: AnalysisStopWords @@ -4695,6 +4726,10 @@ export interface AnalysisGermanAnalyzer { stem_exclusion?: string[] } +export interface AnalysisGermanNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'german_normalization' +} + export interface AnalysisGreekAnalyzer { type: 'greek' stopwords?: AnalysisStopWords @@ -4708,6 +4743,10 @@ export interface AnalysisHindiAnalyzer { stem_exclusion?: string[] } +export interface AnalysisHindiNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'hindi_normalization' +} + export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase { type: 'html_strip' escaped_tags?: string[] @@ -4725,11 +4764,16 @@ export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase { dedup?: boolean dictionary?: string locale: string + lang: string + language: string longest_only?: boolean } export interface AnalysisHyphenationDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase { type: 'hyphenation_decompounder' + hyphenation_patterns_path: string + no_sub_matches?: boolean + no_overlapping_matches?: boolean } export interface AnalysisIcuAnalyzer { @@ -4795,6 +4839,10 @@ export interface AnalysisIcuTransformTokenFilter extends AnalysisTokenFilterBase id: string } +export interface AnalysisIndicNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'indic_normalization' +} + export interface AnalysisIndonesianAnalyzer { type: 'indonesian' stopwords?: AnalysisStopWords @@ -4816,6 +4864,11 @@ export interface AnalysisItalianAnalyzer { stem_exclusion?: string[] } +export interface AnalysisJaStopTokenFilter extends AnalysisTokenFilterBase { + type: 'ja_stop' + stopwords?: AnalysisStopWords +} + export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase { type: 'kstem' } @@ -4825,7 +4878,7 @@ export type AnalysisKeepTypesMode = 'include' | 'exclude' export interface 
AnalysisKeepTypesTokenFilter extends AnalysisTokenFilterBase { type: 'keep_types' mode?: AnalysisKeepTypesMode - types?: string[] + types: string[] } export interface AnalysisKeepWordsTokenFilter extends AnalysisTokenFilterBase { @@ -4848,6 +4901,10 @@ export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBas keywords_pattern?: string } +export interface AnalysisKeywordRepeatTokenFilter extends AnalysisTokenFilterBase { + type: 'keyword_repeat' +} + export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase { type: 'keyword' buffer_size?: integer @@ -4929,9 +4986,11 @@ export interface AnalysisLowercaseNormalizer { export interface AnalysisLowercaseTokenFilter extends AnalysisTokenFilterBase { type: 'lowercase' - language?: string + language?: AnalysisLowercaseTokenFilterLanguages } +export type AnalysisLowercaseTokenFilterLanguages = 'greek' | 'irish' | 'turkish' + export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase { type: 'lowercase' } @@ -4942,6 +5001,14 @@ export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase { mappings_path?: string } +export interface AnalysisMinHashTokenFilter extends AnalysisTokenFilterBase { + type: 'min_hash' + bucket_count?: integer + hash_count?: integer + hash_set_size?: integer + with_rotation?: boolean +} + export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase { type: 'multiplexer' filters: string[] @@ -5030,7 +5097,6 @@ export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase { type: 'pattern_replace' all?: boolean - flags?: string pattern: string replacement?: string } @@ -5048,6 +5114,10 @@ export interface AnalysisPersianAnalyzer { stopwords_path?: string } +export interface AnalysisPersianNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'persian_normalization' +} + export type AnalysisPhoneticEncoder = 
'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff' export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish' @@ -5104,6 +5174,14 @@ export interface AnalysisRussianAnalyzer { stem_exclusion?: string[] } +export interface AnalysisScandinavianFoldingTokenFilter extends AnalysisTokenFilterBase { + type: 'scandinavian_folding' +} + +export interface AnalysisScandinavianNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'scandinavian_normalization' +} + export interface AnalysisSerbianAnalyzer { type: 'serbian' stopwords?: AnalysisStopWords @@ -5111,11 +5189,15 @@ export interface AnalysisSerbianAnalyzer { stem_exclusion?: string[] } +export interface AnalysisSerbianNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'serbian_normalization' +} + export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase { type: 'shingle' filler_token?: string - max_shingle_size?: integer | string - min_shingle_size?: integer | string + max_shingle_size?: SpecUtilsStringified + min_shingle_size?: SpecUtilsStringified output_unigrams?: boolean output_unigrams_if_no_shingles?: boolean token_separator?: string @@ -5143,7 +5225,7 @@ export interface AnalysisSnowballAnalyzer { stopwords?: AnalysisStopWords } -export type AnalysisSnowballLanguage = 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Kp' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Spanish' | 'Swedish' | 'Turkish' +export type AnalysisSnowballLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' 
| 'Italian' | 'Irish' | 'Kp' | 'Lithuanian' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Serbian' | 'Spanish' | 'Swedish' | 'Turkish' export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase { type: 'snowball' @@ -5157,6 +5239,10 @@ export interface AnalysisSoraniAnalyzer { stem_exclusion?: string[] } +export interface AnalysisSoraniNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'sorani_normalization' +} + export interface AnalysisSpanishAnalyzer { type: 'spanish' stopwords?: AnalysisStopWords @@ -5203,7 +5289,9 @@ export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase { stopwords_path?: string } -export type AnalysisStopWords = string | string[] +export type AnalysisStopWordLanguage = '_arabic_' | '_armenian_' | '_basque_' | '_bengali_' | '_brazilian_' | '_bulgarian_' | '_catalan_' | '_cjk_' | '_czech_' | '_danish_' | '_dutch_' | '_english_' | '_estonian_' | '_finnish_' | '_french_' | '_galician_' | '_german_' | '_greek_' | '_hindi_' | '_hungarian_' | '_indonesian_' | '_irish_' | '_italian_' | '_latvian_' | '_lithuanian_' | '_norwegian_' | '_persian_' | '_portuguese_' | '_romanian_' | '_russian_' | '_serbian_' | '_sorani_' | '_spanish_' | '_swedish_' | '_thai_' | '_turkish_' | '_none_' + +export type AnalysisStopWords = AnalysisStopWordLanguage | string[] export interface AnalysisSwedishAnalyzer { type: 'swedish' @@ -5214,20 +5302,15 @@ export interface AnalysisSwedishAnalyzer { export type AnalysisSynonymFormat = 'solr' | 'wordnet' -export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase { +export interface AnalysisSynonymGraphTokenFilter extends AnalysisSynonymTokenFilterBase { type: 'synonym_graph' - expand?: boolean - format?: AnalysisSynonymFormat - lenient?: boolean - synonyms?: string[] - synonyms_path?: string - synonyms_set?: string - tokenizer?: string - updateable?: boolean } -export interface AnalysisSynonymTokenFilter extends 
AnalysisTokenFilterBase { +export interface AnalysisSynonymTokenFilter extends AnalysisSynonymTokenFilterBase { type: 'synonym' +} + +export interface AnalysisSynonymTokenFilterBase extends AnalysisTokenFilterBase { expand?: boolean format?: AnalysisSynonymFormat lenient?: boolean @@ -5256,7 +5339,7 @@ export interface AnalysisTokenFilterBase { version?: VersionString } -export type AnalysisTokenFilterDefinition = AnalysisAsciiFoldingTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter +export type 
AnalysisTokenFilterDefinition = AnalysisApostropheTokenFilter | AnalysisArabicNormalizationTokenFilter | AnalysisAsciiFoldingTokenFilter | AnalysisCjkBigramTokenFilter | AnalysisCjkWidthTokenFilter | AnalysisClassicTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisDecimalDigitTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisFlattenGraphTokenFilter | AnalysisGermanNormalizationTokenFilter | AnalysisHindiNormalizationTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisIndicNormalizationTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKeywordRepeatTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMinHashTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPersianNormalizationTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisScandinavianFoldingTokenFilter | AnalysisScandinavianNormalizationTokenFilter | AnalysisSerbianNormalizationTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisSoraniNormalizationTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisJaStopTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | 
AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter export type AnalysisTokenizer = string | AnalysisTokenizerDefinition @@ -5306,27 +5389,17 @@ export interface AnalysisWhitespaceTokenizer extends AnalysisTokenizerBase { max_token_length?: integer } -export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisTokenFilterBase { +export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisWordDelimiterTokenFilterBase { type: 'word_delimiter_graph' adjust_offsets?: boolean - catenate_all?: boolean - catenate_numbers?: boolean - catenate_words?: boolean - generate_number_parts?: boolean - generate_word_parts?: boolean ignore_keywords?: boolean - preserve_original?: SpecUtilsStringified - protected_words?: string[] - protected_words_path?: string - split_on_case_change?: boolean - split_on_numerics?: boolean - stem_english_possessive?: boolean - type_table?: string[] - type_table_path?: string } -export interface AnalysisWordDelimiterTokenFilter extends AnalysisTokenFilterBase { +export interface AnalysisWordDelimiterTokenFilter extends AnalysisWordDelimiterTokenFilterBase { type: 'word_delimiter' +} + +export interface AnalysisWordDelimiterTokenFilterBase extends AnalysisTokenFilterBase { catenate_all?: boolean catenate_numbers?: boolean catenate_words?: boolean