diff --git a/libs/cli/src/main/java/org/elasticsearch/cli/Command.java b/libs/cli/src/main/java/org/elasticsearch/cli/Command.java
index 1690515532e7b..3cee280535ccc 100644
--- a/libs/cli/src/main/java/org/elasticsearch/cli/Command.java
+++ b/libs/cli/src/main/java/org/elasticsearch/cli/Command.java
@@ -40,15 +40,44 @@ public abstract class Command implements Closeable {
.availableUnless(silentOption);
/**
- * Construct the command with the specified command description and runnable to execute before main is invoked.
- * @param description the command description
+ * Constructs the command with the specified command description.
+ *
+ * @param description the command description to be displayed in help output
+ *
+ * <p>Usage Example:
+ * <pre>{@code
+ * public class MyCommand extends Command {
+ *     public MyCommand() {
+ *         super("Performs custom processing");
+ *     }
+ * }
+ * }</pre>
*/
public Command(final String description) {
this.description = description;
}
- /** Parses options for this command from args and executes it. */
+ /**
+ * Parses command-line options and executes this command.
+ *
+ * <p>This is the main entry point for command execution. It parses the
+ * command-line arguments, catches any exception thrown during execution,
+ * and converts it into the appropriate exit code.
+ *
+ * @param args the command-line arguments to parse
+ * @param terminal the terminal for input/output operations
+ * @param processInfo information about the current process (system properties, environment variables, etc.)
+ * @return the exit code (0 for success, non-zero for errors as defined in {@link ExitCodes})
+ * @throws IOException if an I/O error occurs during command execution
+ *
+ * <p>Usage Example:
+ * <pre>{@code
+ * Command cmd = new MyCommand();
+ * Terminal terminal = Terminal.DEFAULT;
+ * ProcessInfo processInfo = ProcessInfo.fromSystem();
+ * int exitCode = cmd.main(new String[]{"--verbose"}, terminal, processInfo);
+ * }</pre>
+ */
public final int main(String[] args, Terminal terminal, ProcessInfo processInfo) throws IOException {
try {
mainWithoutErrorHandling(args, terminal, processInfo);
@@ -76,7 +105,16 @@ public final int main(String[] args, Terminal terminal, ProcessInfo processInfo)
}
/**
- * Executes the command, but all errors are thrown.
+ * Executes the command without error handling, allowing all exceptions to propagate.
+ *
+ * <p>This method parses options, handles help and verbosity flags, and delegates
+ * to {@link #execute(Terminal, OptionSet, ProcessInfo)}. Unlike {@link #main(String[], Terminal, ProcessInfo)},
+ * this method does not catch exceptions, allowing callers to handle them.
+ *
+ * @param args the command-line arguments to parse
+ * @param terminal the terminal for input/output operations
+ * @param processInfo information about the current process
+ * @throws Exception if any error occurs during command execution
*/
protected void mainWithoutErrorHandling(String[] args, Terminal terminal, ProcessInfo processInfo) throws Exception {
final OptionSet options = parseOptions(args);
@@ -102,9 +140,16 @@ protected void mainWithoutErrorHandling(String[] args, Terminal terminal, Proces
}
/**
- * Parse command line arguments for this command.
- * @param args The string arguments passed to the command
- * @return A set of parsed options
+ * Parses command-line arguments for this command using the configured option parser.
+ *
+ * @param args the string arguments passed to the command
+ * @return a set of parsed options
+ * @throws joptsimple.OptionException if the arguments cannot be parsed
+ *
+ * <p>Usage Example:
+ * <pre>{@code
+ * OptionSet options = parseOptions(new String[]{"--verbose", "input.txt"});
+ * }</pre>
*/
public OptionSet parseOptions(String[] args) {
return parser.parse(args);
@@ -126,9 +171,24 @@ private void printHelp(Terminal terminal, boolean toStdError) throws IOException
}
}
- /** Prints additional help information, specific to the command */
+ /**
+ * Prints additional help information specific to this command.
+ *
+ * <p>Subclasses can override this method to provide command-specific help text
+ * that will be displayed when the user requests help via the -h or --help option.
+ *
+ * @param terminal the terminal to write help output to
+ */
protected void printAdditionalHelp(Terminal terminal) {}
+ /**
+ * Prints a user exception message to the terminal's error stream.
+ *
+ * <p>Subclasses can override this method to customize how user exceptions are displayed.
+ *
+ * @param terminal the terminal to write error output to
+ * @param e the user exception to print
+ */
protected void printUserException(Terminal terminal, UserException e) {
if (e.getMessage() != null) {
terminal.errorPrintln("");
@@ -136,17 +196,52 @@ protected void printUserException(Terminal terminal, UserException e) {
}
}
+ /**
+ * Exits the JVM with the specified status code.
+ *
+ * <p>This method calls {@link System#exit(int)} and should be used sparingly,
+ * typically only after {@link #main(String[], Terminal, ProcessInfo)} has completed.
+ *
+ * @param status the exit status code (0 for success, non-zero for errors)
+ */
@SuppressForbidden(reason = "Allowed to exit explicitly from #main()")
protected static void exit(int status) {
System.exit(status);
}
/**
- * Executes this command.
- *
- * Any runtime user errors (like an input file that does not exist), should throw a {@link UserException}.
+ * Executes the core logic of this command.
+ *
+ * <p>Subclasses must implement this method to provide command-specific functionality.
+ * This method is called by {@link #mainWithoutErrorHandling(String[], Terminal, ProcessInfo)}
+ * after options have been parsed and help/verbosity flags processed.
+ *
+ * @param terminal the terminal for input/output operations
+ * @param options the parsed command-line options
+ * @param processInfo information about the current process
+ * @throws Exception if any error occurs during execution
+ * @throws UserException for user-correctable errors (e.g., invalid input file)
+ *
+ * <p>Usage Example:
+ * <pre>{@code
+ * @Override
+ * protected void execute(Terminal terminal, OptionSet options, ProcessInfo processInfo) throws Exception {
+ *     String input = options.valueOf(inputOption);
+ *     terminal.println("Processing: " + input);
+ *     // ... perform command logic ...
+ * }
+ * }</pre>
+ */
protected abstract void execute(Terminal terminal, OptionSet options, ProcessInfo processInfo) throws Exception;
+ /**
+ * Closes this command and releases any resources.
+ *
+ * <p>The default implementation does nothing. Subclasses should override this method
+ * to release any resources they have acquired.
+ *
+ * @throws IOException if an I/O error occurs while closing resources
+ */
@Override
public void close() throws IOException {
diff --git a/libs/cli/src/main/java/org/elasticsearch/cli/ExitCodes.java b/libs/cli/src/main/java/org/elasticsearch/cli/ExitCodes.java
index 6c686e2bf9da8..73ce91dafd371 100644
--- a/libs/cli/src/main/java/org/elasticsearch/cli/ExitCodes.java
+++ b/libs/cli/src/main/java/org/elasticsearch/cli/ExitCodes.java
@@ -10,26 +10,60 @@
package org.elasticsearch.cli;
/**
- * POSIX exit codes.
+ * Standard POSIX exit codes for command-line tools.
+ *
+ * <p>These exit codes follow POSIX conventions and are used by CLI commands
+ * to indicate the result of their execution.
+ *
+ * <p><b>Warning:</b> These values are part of the public API: do not change them, as they
+ * may be used in external scripts whose usages are not tracked by the IDE.
+ */
public class ExitCodes {
- // please be extra careful when changing these as the values might be used in scripts,
- // usages of which are not tracked by the IDE
+ /** Successful completion (exit code 0). */
public static final int OK = 0;
- public static final int USAGE = 64; // command line usage error
- public static final int DATA_ERROR = 65; // data format error
- public static final int NO_INPUT = 66; // cannot open input
- public static final int NO_USER = 67; // addressee unknown
- public static final int NO_HOST = 68; // host name unknown
- public static final int UNAVAILABLE = 69; // service unavailable
- public static final int CODE_ERROR = 70; // internal software error
- public static final int CANT_CREATE = 73; // can't create (user) output file
- public static final int IO_ERROR = 74; // input/output error
- public static final int TEMP_FAILURE = 75; // temp failure; user is invited to retry
- public static final int PROTOCOL = 76; // remote error in protocol
- public static final int NOPERM = 77; // permission denied
- public static final int CONFIG = 78; // configuration error
- public static final int NOOP = 80; // nothing to do
+
+ /** Command line usage error (exit code 64). */
+ public static final int USAGE = 64;
+
+ /** Data format error (exit code 65). */
+ public static final int DATA_ERROR = 65;
+
+ /** Cannot open input (exit code 66). */
+ public static final int NO_INPUT = 66;
+
+ /** Addressee unknown (exit code 67). */
+ public static final int NO_USER = 67;
+
+ /** Host name unknown (exit code 68). */
+ public static final int NO_HOST = 68;
+
+ /** Service unavailable (exit code 69). */
+ public static final int UNAVAILABLE = 69;
+
+ /** Internal software error (exit code 70). */
+ public static final int CODE_ERROR = 70;
+
+ /** Can't create (user) output file (exit code 73). */
+ public static final int CANT_CREATE = 73;
+
+ /** Input/output error (exit code 74). */
+ public static final int IO_ERROR = 74;
+
+ /** Temporary failure; user is invited to retry (exit code 75). */
+ public static final int TEMP_FAILURE = 75;
+
+ /** Remote error in protocol (exit code 76). */
+ public static final int PROTOCOL = 76;
+
+ /** Permission denied (exit code 77). */
+ public static final int NOPERM = 77;
+
+ /** Configuration error (exit code 78). */
+ public static final int CONFIG = 78;
+
+ /** Nothing to do (exit code 80). */
+ public static final int NOOP = 80;
private ExitCodes() { /* no instance, just constants */ }
}
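To illustrate how these codes surface in practice, a minimal sketch (command and option names hypothetical): a command signals a user-correctable failure by throwing UserException with one of these codes, and Command#main returns it as the process exit status.

    import java.util.List;
    import joptsimple.OptionSet;
    import org.elasticsearch.cli.Command;
    import org.elasticsearch.cli.ExitCodes;
    import org.elasticsearch.cli.ProcessInfo;
    import org.elasticsearch.cli.Terminal;
    import org.elasticsearch.cli.UserException;

    // Sketch: map a missing input file to ExitCodes.NO_INPUT (illustrative names).
    public class ConvertCommand extends Command {
        public ConvertCommand() {
            super("Converts an input file");
        }

        @Override
        protected void execute(Terminal terminal, OptionSet options, ProcessInfo processInfo) throws Exception {
            List<?> files = options.nonOptionArguments();
            if (files.isEmpty()) {
                // The exit code travels with the exception; main() returns it.
                throw new UserException(ExitCodes.NO_INPUT, "no input file given");
            }
            terminal.println("converting " + files.get(0));
        }
    }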
diff --git a/libs/cli/src/main/java/org/elasticsearch/cli/MultiCommand.java b/libs/cli/src/main/java/org/elasticsearch/cli/MultiCommand.java
index 47e12d8f2ac94..1c02fcc8bc0f1 100644
--- a/libs/cli/src/main/java/org/elasticsearch/cli/MultiCommand.java
+++ b/libs/cli/src/main/java/org/elasticsearch/cli/MultiCommand.java
@@ -34,9 +34,20 @@ public class MultiCommand extends Command {
private final OptionSpec
+ /**
+ * Constructs the multi-command with the specified description.
+ *
+ * <p>A MultiCommand is a CLI tool that contains multiple sub-commands, each
+ * represented by a separate {@link Command} instance. The user specifies which
+ * sub-command to run as the first argument.
+ *
+ * @param description the multi-command description to be displayed in help output
+ */
+ /**
+ * Executes the selected sub-command.
+ *
+ * <p>This method parses the first non-option argument to determine which sub-command
+ * to execute, then delegates to that sub-command's {@link Command#mainWithoutErrorHandling(String[], Terminal, ProcessInfo)}
+ * method.
+ *
+ * @param terminal the terminal for input/output operations
+ * @param options the parsed command-line options
+ * @param processInfo information about the current process
+ * @throws Exception if an error occurs during sub-command execution
+ * @throws MissingCommandException if no sub-command name is provided
+ * @throws UserException if the specified sub-command does not exist
+ * @throws IllegalStateException if no sub-commands have been configured
+ */
@Override
protected void execute(Terminal terminal, OptionSet options, ProcessInfo processInfo) throws Exception {
if (subcommands.isEmpty()) {
@@ -95,6 +121,14 @@ protected void execute(Terminal terminal, OptionSet options, ProcessInfo process
subcommand.mainWithoutErrorHandling(args.toArray(new String[0]), terminal, processInfo);
}
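For orientation, a sketch of a typical subclass (the sub-command classes are hypothetical); sub-commands are registered in the protected subcommands map, keyed by the name the user passes as the first argument:

    import org.elasticsearch.cli.MultiCommand;

    public class KeystoreCli extends MultiCommand {
        public KeystoreCli() {
            super("A tool for managing the keystore");
            // ListCommand and AddCommand are hypothetical Command subclasses
            subcommands.put("list", new ListCommand());
            subcommands.put("add", new AddCommand());
        }
    }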
+ /**
+ * Closes this multi-command and all of its sub-commands.
+ *
+ * This method iterates through all registered sub-commands and closes each one,
+ * ensuring proper resource cleanup.
+ *
+ * @throws IOException if an I/O error occurs while closing any sub-command
+ */
@Override
public void close() throws IOException {
IOUtils.close(subcommands.values());
diff --git a/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java b/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java
index dcf16514f4a41..ad7bbfc26af8d 100644
--- a/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java
+++ b/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java
@@ -106,24 +106,75 @@ private char[] read(String prompt) {
return line;
}
- /** Reads clear text from the terminal input. See {@link Console#readLine()}. */
+ /**
+ * Reads clear text from the terminal input with the specified prompt.
+ *
+ * The prompt is displayed to the user, and the method waits for input
+ * until a newline is encountered. The input is echoed to the terminal.
+ *
+ * @param prompt the prompt message to display before reading input
+ * @return the text entered by the user (without the trailing newline)
+ * @throws IllegalStateException if unable to read from standard input
+ * @see Console#readLine()
+ *
+ */

+ /**
+ * Reads secret text from the terminal input with the specified prompt.
+ *
+ * <p>The prompt is displayed to the user, and the method waits for input
+ * until a newline is encountered. The input is NOT echoed to the terminal
+ * for security purposes.
+ *
+ * @param prompt the prompt message to display before reading input
+ * @return a character array containing the secret text (without the trailing newline)
+ * @throws IllegalStateException if unable to read from standard input
+ * @see Console#readPassword()
+ *
+ */

+ /**
+ * Returns a line-based OutputStream that writes to this terminal.
+ *
+ * <p><b>Note:</b> This OutputStream is NOT thread-safe.
+ *
+ * @param charset the character set to use for encoding bytes to characters
+ * @return a line-based OutputStream that writes to this terminal
+ *
+ */

+ /**
+ * Checks whether a message at the given verbosity level should be printed.
+ *
+ * <p>A message is printable if the terminal's current verbosity level is
+ * greater than or equal to the specified verbosity level.
+ *
+ * @param verbosity the verbosity level to check
+ * @return true if messages at this verbosity level should be printed, false otherwise
+ *
+ */

+ /**
+ * Prompts the user for a yes-or-no answer.
+ *
+ * <p>This method displays a prompt and waits for the user to enter 'y' or 'n'
+ * (case-insensitive). If the user presses Enter without typing anything,
+ * the default value is returned. The method loops until a valid answer is provided.
+ *
+ * @param prompt the prompt message to display (the method appends [Y/n] or [y/N] automatically)
+ * @param defaultYes if true, the default answer is yes; if false, the default is no
+ * @return true if the user answered yes, false if the user answered no
+ *
+ */

+ /**
+ * Reads a single line from the given reader into a character array.
+ *
+ * <p>This method reads characters until a newline character ('\n') is encountered.
+ * If the newline is preceded by a carriage return ('\r'), both characters are
+ * discarded (Windows-style line ending). The returned array does not include
+ * the line terminator characters.
+ *
+ * This method automatically expands the internal buffer as needed to accommodate
+ * lines of any length, and securely clears old buffers when resizing.
+ *
+ * @param reader the reader to read from
+ * @return a character array containing the line (without line terminators),
+ * or null if end-of-stream is reached before any characters are read
+ * @throws RuntimeException if an IOException occurs while reading
+ *
+ */

+ /**
+ * Flushes this terminal's output streams.
+ *
+ * <p>This ensures that any buffered output is immediately written to the underlying streams.
*/
public final void flush() {
outWriter.flush();
@@ -291,10 +395,13 @@ public final void flush() {
}
/**
- * Indicates whether this terminal is for a headless system i.e. is not interactive. If an instances answers
- * {@code false}, interactive operations can be attempted, but it is not guaranteed that they will succeed.
+ * Indicates whether this terminal is operating in headless mode (non-interactive).
+ *
+ * A headless terminal is one where interactive operations (such as reading user input)
+ * may not be possible or reliable. If this method returns false, interactive operations
+ * can be attempted, but success is not guaranteed.
*
- * @return if this terminal is headless.
+ * @return true if this terminal is headless (non-interactive), false otherwise
*/
public boolean isHeadless() {
return false;
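Pulling the Terminal methods above together, a small interactive sketch (prompt text illustrative):

    import org.elasticsearch.cli.Terminal;

    Terminal terminal = Terminal.DEFAULT;
    if (terminal.isHeadless() == false) {
        char[] password = terminal.readSecret("Enter password: ");                 // not echoed
        boolean proceed = terminal.promptYesNo("Overwrite existing file?", false); // [y/N]
        if (proceed) {
            terminal.println("Overwriting...");
        }
        terminal.flush();
    }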
diff --git a/libs/core/src/main/java/org/elasticsearch/core/Booleans.java b/libs/core/src/main/java/org/elasticsearch/core/Booleans.java
index 7984c45fc4e3c..868cb3ef39c06 100644
--- a/libs/core/src/main/java/org/elasticsearch/core/Booleans.java
+++ b/libs/core/src/main/java/org/elasticsearch/core/Booleans.java
@@ -9,6 +9,22 @@
package org.elasticsearch.core;
+/**
+ * Utility methods for parsing and working with boolean values.
+ *
+ * This class provides strict boolean parsing methods that only accept "true" or "false"
+ * (unlike {@link Boolean#parseBoolean(String)} which accepts any non-"true" value as false).
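+ *
+ * <p>A short illustration of the strict behavior:
+ * <pre>{@code
+ * Booleans.parseBoolean("true");   // true
+ * Booleans.parseBoolean("false");  // false
+ * Booleans.parseBoolean("yes");    // throws IllegalArgumentException
+ * }</pre>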
+ */

+/**
+ * Utility methods for working with predicates.
+ *
+ * <p>This class provides optimized predicate implementations and factory methods
+ * for common predicate patterns.
+ */

+/**
+ * Utility methods for safely releasing resources.
+ *
+ * <p>This class provides methods for safely releasing resources, wrapping multiple
+ * releasables, and handling exceptions during release operations. It is designed to
+ * work similarly to try-with-resources but with more flexibility.
+ */

+/**
+ * Utility methods for string manipulation.
+ *
+ * <p>This class provides convenient methods for common string manipulation tasks,
+ * ensuring consistent behavior across the Elasticsearch codebase.
*/
public class Strings {
/**
- * Returns a formatted string using the specified format string and
- * arguments.
- *
- * This method calls {@link String#format(Locale, String, Object...)}
- * with Locale.ROOT
- * If format is incorrect the function will return format without populating
- * its variable placeholders.
+ * Returns a formatted string using the specified format string and arguments.
+ *
+ * This method calls {@link String#format(Locale, String, Object...)} with
+ * {@link Locale#ROOT} to ensure consistent locale-independent formatting.
+ * If the format string is incorrect, this method returns the format string
+ * unchanged without populating its variable placeholders, and triggers an
+ * assertion error in development environments.
+ *
+ * @param format the format string
+ * @param args the arguments referenced by the format specifiers in the format string
+ * @return the formatted string, or the original format string if formatting fails
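+ *
+ * <p>Usage Example (illustrative):
+ * <pre>{@code
+ * String s = Strings.format("node [%s] has [%d] shards", "node-1", 3);
+ * }</pre>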
+ */

+/**
+ * A value type representing a duration of time.
+ *
+ * <p>This class provides a convenient way to represent and manipulate time durations
+ * in various units (nanoseconds, milliseconds, seconds, minutes, hours, days).
+ * It supports conversion between units and parsing from string representations.
+ */

+ /**
+ * Creates a TimeValue representing the specified number of milliseconds.
+ *
+ * <p>This method returns singleton instances for common values (0 and -1).
+ *
+ * @param millis the duration in milliseconds
+ * @return a TimeValue representing the specified duration
+ */
public static TimeValue timeValueMillis(long millis) {
if (millis == 0) {
return ZERO;
@@ -62,6 +114,14 @@ public static TimeValue timeValueMillis(long millis) {
return new TimeValue(millis, TimeUnit.MILLISECONDS);
}
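A quick sketch of these factory methods and their singleton behavior (values illustrative):

    TimeValue thirty = TimeValue.timeValueSeconds(30);
    TimeValue alsoThirty = TimeValue.timeValueSeconds(30);
    assert thirty == alsoThirty;               // common values share a singleton

    TimeValue ninety = TimeValue.timeValueMinutes(90);
    long millis = ninety.millis();             // 5_400_000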
+ /**
+ * Creates a TimeValue representing the specified number of seconds.
+ *
+ * This method returns a singleton instance for 30 seconds.
+ *
+ * @param seconds the duration in seconds
+ * @return a TimeValue representing the specified duration
+ */
public static TimeValue timeValueSeconds(long seconds) {
if (seconds == 30) {
// common value, no need to allocate each time
@@ -70,6 +130,14 @@ public static TimeValue timeValueSeconds(long seconds) {
return new TimeValue(seconds, TimeUnit.SECONDS);
}
+ /**
+ * Creates a TimeValue representing the specified number of minutes.
+ *
+ * This method returns a singleton instance for 1 minute.
+ *
+ * @param minutes the duration in minutes
+ * @return a TimeValue representing the specified duration
+ */
public static TimeValue timeValueMinutes(long minutes) {
if (minutes == 1) {
// common value, no need to allocate each time
@@ -78,10 +146,23 @@ public static TimeValue timeValueMinutes(long minutes) {
return new TimeValue(minutes, TimeUnit.MINUTES);
}
+ /**
+ * Creates a TimeValue representing the specified number of hours.
+ *
+ * @param hours the duration in hours
+ * @return a TimeValue representing the specified duration
+ */
public static TimeValue timeValueHours(long hours) {
return new TimeValue(hours, TimeUnit.HOURS);
}
+ /**
+ * Creates a TimeValue representing the specified number of days.
+ *
+ * @param days the duration in days (must not exceed 106751 days)
+ * @return a TimeValue representing the specified duration
+ * @throws IllegalArgumentException if days exceeds 106751 (due to internal nanosecond representation limits)
+ */
public static TimeValue timeValueDays(long days) {
// 106751.9 days is Long.MAX_VALUE nanoseconds, so we cannot store 106752 days
if (days > 106751) {
diff --git a/libs/core/src/main/java/org/elasticsearch/core/Tuple.java b/libs/core/src/main/java/org/elasticsearch/core/Tuple.java
index 5a741d26ce2c5..b9c0007003768 100644
--- a/libs/core/src/main/java/org/elasticsearch/core/Tuple.java
+++ b/libs/core/src/main/java/org/elasticsearch/core/Tuple.java
@@ -9,8 +9,49 @@
package org.elasticsearch.core;
+/**
+ * A generic tuple containing two values.
+ *
+ * This record provides a simple container for holding two related values of
+ * potentially different types. It is immutable and provides standard equals,
+ * hashCode, and toString implementations.
+ *
+ * @param <V1> the type of the first value
+ * @param <V2> the type of the second value
+ */

+ /**
+ * Creates a new tuple from the given values.
+ *
+ * <p>This is a convenience factory method that can be statically imported
+ * for more concise tuple creation.
+ */

+/**
+ * A point in geographic or cartesian coordinates.
+ *
+ * <p>A Point is defined by x (longitude), y (latitude), and optionally z (altitude in meters).
+ * Points can be empty, representing a null geometry.
+ */

+ /**
+ * Returns the latitude of this point.
+ *
+ * <p>This is an alias for {@link #getY()}.
+ *
+ * @return the latitude in decimal degrees
+ */
public double getLat() {
return y;
}
+ /**
+ * Returns the longitude of this point.
+ *
+ * This is an alias for {@link #getX()}.
+ *
+ * @return the longitude in decimal degrees
+ */
public double getLon() {
return x;
}
+ /**
+ * Returns the altitude of this point.
+ *
+ * This is an alias for {@link #getZ()}.
+ *
+ * @return the altitude in meters, or Double.NaN if not set
+ */
public double getAlt() {
return z;
}
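A compact sketch of both types documented above (values illustrative):

    Tuple<String, Integer> t = Tuple.tuple("shards", 3);
    String name = t.v1();                 // "shards"
    int count = t.v2();                   // 3

    Point p = new Point(-71.34, 41.12);   // x = longitude, y = latitude
    double lat = p.getLat();              // 41.12, alias for getY()
    double lon = p.getLon();              // -71.34, alias for getX()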
diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java
index 284ede5d0f4f2..d4d6e8693fe15 100644
--- a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java
+++ b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java
@@ -26,6 +26,38 @@
import java.util.function.Consumer;
import java.util.function.Function;
+/**
+ * Parses unstructured text data using Grok patterns into structured data.
+ *
+ * Grok is a pattern matching library that allows you to define patterns using
+ * named regular expressions. It is particularly useful for parsing log files and
+ * other unstructured text data.
+ *
+ * Pattern Syntax: Grok patterns use the syntax {@code %{PATTERN_NAME:field_name}} where:
+ * Usage Examples:
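+ * <ul>
+ *   <li>{@code PATTERN_NAME} is the name of a built-in or custom pattern</li>
+ *   <li>{@code field_name} is the field the matched text is captured into</li>
+ * </ul>
+ *
+ * <p>A sketch of typical usage (pattern and input illustrative; constructor details vary by version):
+ * <pre>{@code
+ * Grok grok = new Grok(GrokBuiltinPatterns.legacyPatterns(),
+ *     "%{IPORHOST:client} %{WORD:method} %{URIPATHPARAM:request}", logger::warn);
+ * Map<String, Object> fields = grok.captures("10.0.0.1 GET /index.html");
+ * // fields: {client=10.0.0.1, method=GET, request=/index.html}
+ * }</pre>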
+ * This plugin registers a health indicator service that monitors the availability of shards
+ * across the cluster and reports their health status through the Health API.
+ * Usage Examples:
+ * This constructor is called by the Elasticsearch plugin system during plugin initialization.
+ *
+ * This method instantiates the {@link ShardsAvailabilityHealthIndicatorService} using the
+ * cluster service, allocation service, system indices, and project resolver provided by
+ * the plugin framework.
+ * Usage Examples:
+ * This method provides the shards availability health indicator service to the Health API,
+ * enabling monitoring of shard availability across the Elasticsearch cluster.
+ * Usage Examples:
+ * This processor parses documents in various formats (PDF, Microsoft Office, HTML, etc.) and extracts
+ * information including content, title, author, keywords, dates, and other metadata. The extracted
+ * data is added to a specified target field in the ingest document.
+ * Usage Examples:
+ * This method extracts the binary content from the specified source field, parses it using
+ * Apache Tika, and populates the target field with extracted content and metadata. The method
+ * handles various document formats and can be configured to extract specific properties,
+ * limit indexed characters, and optionally remove the binary field after processing.
+ * Usage Examples:
+ * This factory reads processor configuration from pipeline definitions and creates
+ * configured attachment processor instances with appropriate settings for field extraction,
+ * property selection, character limits, and binary removal options.
+ *
+ * This method parses the processor configuration including source field, target field,
+ * properties to extract, character limits, and other options. It validates the configuration
+ * and returns a configured processor instance.
+ * Usage Examples:
+ * These properties represent metadata fields that Apache Tika can extract from various
+ * document formats. Each property corresponds to a specific metadata field such as content,
+ * title, author, dates, geolocation, and more.
+ *
+ * The parsing is case-insensitive, converting the input to uppercase before matching.
+ * Usage Examples:
+ * This method is used when adding extracted metadata to the document, ensuring
+ * consistent lowercase field names in the output.
+ * Usage Examples:
+ * This plugin integrates Apache Tika to extract text and metadata from binary documents including
+ * PDFs, Microsoft Office documents, HTML, plain text, and various other formats. The extracted
+ * content and metadata are added to the ingest document for indexing.
+ * Usage Examples:
+ * This method registers the attachment processor factory which creates processors
+ * for parsing and extracting content from binary documents using Apache Tika.
+ * Usage Examples:
+ * This plugin registers a processor that transforms non-OpenTelemetry-compliant documents into
+ * a namespaced flavor of Elastic Common Schema (ECS) that is compatible with OpenTelemetry.
+ * It renames specific ECS fields, namespaces attributes, and restructures resource attributes
+ * to align with OpenTelemetry semantic conventions.
+ * Usage Examples:
+ * This method registers the normalize_for_stream processor factory which creates
+ * processors for transforming documents into OpenTelemetry-compatible format.
+ * Usage Examples:
+ * This method transforms non-OpenTelemetry-compliant documents by renaming specific ECS
+ * fields, namespacing attributes, and restructuring resource attributes to align with
+ * OpenTelemetry semantic conventions.
+ * Usage Examples:
+ * This processor requires no configuration and is created with only tag and description.
+ * Usage Examples:
+ * This plugin registers and manages system index descriptors for Kibana configuration,
+ * reporting, Onechat, workflows, and APM functionality. These system indices are protected
+ * and can only be modified by Kibana products.
+ * Usage Examples:
+ * This method provides descriptors for all Kibana-related system indices including
+ * saved objects, reporting data, Onechat, workflows, and APM configuration. These
+ * indices are protected as external unmanaged system indices that can only be
+ * accessed by Kibana products.
+ * Usage Examples:
+ * The feature name identifies this plugin in Elasticsearch's feature registry
+ * and is used for licensing and feature tracking purposes.
+ * Usage Examples:
+ * This description is used in Elasticsearch's feature registry to provide
+ * information about the functionality provided by this plugin.
+ * Usage Examples:
+ * Returns a map containing all named capture groups if the string matches the pattern,
+ * or {@code null} if it doesn't match.
+ * Usage Examples:
+ * Dissect parsing extracts structured fields from text using a pattern-based approach
+ * that is simpler and faster than regular expressions or grok.
+ * Usage Examples:
+ * Dissect parsing extracts structured fields from text. The append separator is used when
+ * multiple values are captured for the same key and need to be concatenated.
+ * Usage Examples:
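+ * <pre>{@code
+ * // A sketch (pattern and input illustrative):
+ * DissectParser parser = new DissectParser("%{clientip} [%{ts}] %{status}", null);
+ * Map<String, String> fields = parser.parse("10.0.0.1 [2024-01-01] 200");
+ * // fields: {clientip=10.0.0.1, ts=2024-01-01, status=200}
+ * }</pre>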
+ * This class provides factory methods for creating grok-based extractors with watchdog
+ * protection against long-running or infinite loop pattern matching operations.
+ *
+ * The watchdog monitors grok pattern matching operations and interrupts them if they
+ * exceed the configured maximum execution time. The interval determines how frequently
+ * the watchdog checks for timeouts.
+ * Usage Examples:
+ * This method is separate from the constructor because an instance of GrokHelper
+ * needs to be available to Painless before the {@link ThreadPool} is ready during
+ * plugin initialization.
+ * Usage Examples:
+ * This method compiles the grok pattern and returns an extractor that can match strings
+ * and extract named capture groups. The compilation validates the pattern upfront and
+ * will throw an exception if the pattern emits any warnings or errors.
+ *
+ * Grok patterns support built-in patterns from {@link GrokBuiltinPatterns} and custom
+ * named capture groups. The watchdog protects against runaway pattern matching.
+ * Usage Examples:
+ * This constructor initializes the grok helper with configured watchdog interval and
+ * maximum execution time settings to protect against long-running or infinite loops
+ * in grok pattern matching.
+ * Usage Examples:
+ * This method exposes the grok watchdog configuration settings that control
+ * pattern matching timeouts and intervals.
+ * Usage Examples:
+ * This method completes the initialization of the grok helper by providing it with
+ * the thread pool needed for watchdog scheduling. The watchdog monitors grok pattern
+ * matching operations to prevent runaway executions.
+ * Usage Examples:
+ * The grok helper provides functionality for compiling grok patterns and creating
+ * extractors that can parse text and extract named capture groups.
+ * Usage Examples:
+ * This constructor exists to satisfy module-info requirements but should not be called directly.
+ * Use {@link #RuntimeFieldsPainlessExtension(RuntimeFieldsCommonPlugin)} instead.
+ *
+ * This constructor creates Painless whitelists that expose grok and dissect functionality
+ * to runtime field scripts. The grok helper from the plugin is bound as an instance binding,
+ * making it available for pattern compilation in Painless scripts.
+ * Usage Examples:
+ * This method provides whitelists containing grok, dissect, and related functionality
+ * for all runtime field script contexts. The whitelists enable runtime field scripts
+ * to use pattern matching and text extraction features.
+ * Usage Examples:
+ * This plugin enables Elasticsearch to communicate with systemd on Linux systems when running
+ * as a systemd service. It uses sd_notify to report service status including startup progress,
+ * ready state, and shutdown notifications. The plugin is only active in package distributions
+ * (DEB/RPM) when the ES_SD_NOTIFY environment variable is set to "true".
+ * Usage Examples:
+ * This constructor checks the current build type and ES_SD_NOTIFY environment variable
+ * to determine whether systemd integration should be enabled. It is automatically called
+ * by the Elasticsearch plugin system.
+ * Usage Examples:
+ * If systemd integration is enabled, this method schedules a recurring task that sends
+ * timeout extension notifications to systemd every 15 seconds during startup. This prevents
+ * systemd from timing out during long startup operations (e.g., metadata upgrades). The
+ * scheduled task is cancelled once the node startup completes successfully.
+ *
+ * Since systemd expects a READY=1 notification within 60 seconds by default, this method
+ * ensures that systemd receives EXTEND_TIMEOUT_USEC notifications to extend the timeout
+ * by 30 seconds every 15 seconds until startup completes.
+ * Usage Examples:
+ * This method sends a READY=1 notification to systemd via sd_notify, indicating that the
+ * service has successfully started. It also cancels the recurring timeout extension task
+ * that was scheduled during initialization, as it is no longer needed once the node is ready.
+ * Usage Examples:
+ * This method sends a STOPPING=1 notification to systemd via sd_notify, indicating that
+ * the service is shutting down gracefully. This allows systemd to track the service
+ * lifecycle properly.
+ /**
+ * Returns a human-readable string for the given minimum compatible index version.
+ *
+ * For index versions before {@link IndexVersions#FIRST_DETACHED_INDEX_VERSION}, this returns
+ * the corresponding {@link Version} string. For newer index versions, returns the IndexVersion
+ * string representation directly.
+ *
+ * @param minimumCompatible the minimum compatible index version
+ * @return the string representation of the minimum compatible version
+ */
public static String minimumCompatString(IndexVersion minimumCompatible) {
if (minimumCompatible.before(IndexVersions.FIRST_DETACHED_INDEX_VERSION)) {
// use Version for compatibility
@@ -146,6 +156,21 @@ public static String minimumCompatString(IndexVersion minimumCompatible) {
}
}
+ /**
+ * Returns the build information for the current running Elasticsearch instance.
+ *
+ * This method provides access to the singleton Build instance representing the current
+ * Elasticsearch build, including version, type, hash, and build date information.
+ *
+ */

+ /**
+ * Reads build information from a stream input.
+ *
+ * This method deserializes build information from a stream, handling version-specific
+ * serialization formats based on the transport version of the stream.
+ *
+ * @param in the stream input to read from
+ * @return the deserialized build information
+ * @throws IOException if an I/O error occurs while reading from the stream
+ */
public static Build readBuild(StreamInput in) throws IOException {
final String flavor;
if (in.getTransportVersion().before(TransportVersions.V_8_3_0) || in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) {
@@ -250,6 +285,16 @@ public static Build readBuild(StreamInput in) throws IOException {
return new Build(flavor, type, hash, date, version, qualifier, snapshot, minWireVersion, minIndexVersion, displayString);
}
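A round-trip sketch of readBuild and its counterpart writeBuild (documented below), using stream helpers from the same codebase:

    BytesStreamOutput out = new BytesStreamOutput();
    Build.writeBuild(Build.current(), out);

    StreamInput in = out.bytes().streamInput();
    Build copy = Build.readBuild(in);
    assert copy.equals(Build.current());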
+ /**
+ * Writes build information to a stream output.
+ *
+ * This method serializes build information to a stream, adapting the format based on
+ * the transport version of the stream to maintain compatibility with different node versions.
+ *
+ * @param build the build information to write
+ * @param out the stream output to write to
+ * @throws IOException if an I/O error occurs while writing to the stream
+ */
public static void writeBuild(Build build, StreamOutput out) throws IOException {
if (out.getTransportVersion().before(TransportVersions.V_8_3_0)
|| out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) {
diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchTimeoutException.java b/server/src/main/java/org/elasticsearch/ElasticsearchTimeoutException.java
index 7e4735bf7b3b0..8abbc1bc61d0b 100644
--- a/server/src/main/java/org/elasticsearch/ElasticsearchTimeoutException.java
+++ b/server/src/main/java/org/elasticsearch/ElasticsearchTimeoutException.java
@@ -15,33 +15,83 @@
import java.io.IOException;
/**
- * The same as {@link java.util.concurrent.TimeoutException} simply a runtime one.
- *
+ * Indicates an operation timeout in Elasticsearch.
+ *
+ * This is the runtime equivalent of {@link java.util.concurrent.TimeoutException}, used throughout
+ * Elasticsearch to signal that an operation exceeded its allotted time limit.
+ *
+ * This exception returns {@link RestStatus#TOO_MANY_REQUESTS} as its HTTP status code, which is
+ * the closest semantic match for "your request took longer than you asked for".
*
+ * Usage Examples:
+ * If the provided exception is already a {@link RuntimeException}, it is returned as-is.
+ * Otherwise, the exception is wrapped in an {@link ElasticsearchException}.
+ *
+ * Usage Examples:
+ * If the provided exception is already an {@link ElasticsearchException}, it is returned as-is.
+ * Otherwise, the exception is wrapped in an {@link ElasticsearchException}.
+ *
+ * Usage Examples:
+ * This method examines the exception type and returns the appropriate HTTP status code.
+ * For {@link ElasticsearchException}, the status is retrieved from the exception itself.
+ * For {@link IllegalArgumentException} and {@link XContentParseException}, returns {@link RestStatus#BAD_REQUEST}.
+ * For {@link EsRejectedExecutionException}, returns {@link RestStatus#TOO_MANY_REQUESTS}.
+ * For all other exceptions, returns {@link RestStatus#INTERNAL_SERVER_ERROR}.
+ *
+ * Usage Examples:
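+ * <pre>{@code
+ * // Illustrative:
+ * RestStatus status = ExceptionsHelper.status(new IllegalArgumentException("bad parameter"));
+ * // status == RestStatus.BAD_REQUEST
+ * }</pre>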
+ * This method recursively unwraps the cause chain of wrapper exceptions to find the actual
+ * underlying exception. It protects against circular references and excessive nesting depth.
+ *
+ * Usage Examples:
+ * This method captures the full stack trace of the provided throwable,
+ * including all causes and suppressed exceptions, as a formatted string.
+ *
+ * Usage Examples:
+ * This method converts an array of stack trace elements into a formatted string,
+ * skipping the first element and prefixing each line with "\tat ".
+ *
+ * Usage Examples:
+ * If the given list is empty, no exception is thrown. This is useful for collecting
+ * multiple exceptions during a cleanup operation and then throwing them together.
+ *
+ * Usage Examples:
+ * If the given list is empty, no exception is thrown. The first exception becomes the main
+ * exception wrapped in {@link ElasticsearchException}, and all remaining exceptions are
+ * added as suppressed exceptions.
+ *
+ * Usage Examples:
+ * If first is null, returns second. Otherwise, adds second as a suppressed exception
+ * to first and returns first. This is useful for accumulating exceptions during
+ * multi-step operations.
+ *
+ * Usage Examples:
+ * If the provided exception is null, this method returns true. Otherwise, it throws
+ * the exception (wrapping in {@link RuntimeException} if it's a checked exception).
+ *
+ * Usage Examples:
+ * This generic exception corresponds to the {@link RestStatus#NOT_FOUND} HTTP status code
+ * and is used when an operation references a resource (index, document, snapshot, etc.) that
+ * does not exist in the cluster.
+ *
+ * Usage Examples: The {@code ActionFuture} methods catch {@link InterruptedException} and wrap it in an
+ * {@link IllegalStateException}, and unwrap {@link java.util.concurrent.ExecutionException} to throw
+ * the actual cause. This behavior simplifies exception handling in the common case where interruption
+ * is unexpected and the underlying cause of execution failure is more relevant.
*
+ * Usage Examples: All concrete action requests must extend this class and implement the {@link #validate()}
+ * method to ensure that the request parameters are valid before execution.
+ *
+ * This class extends {@link AbstractTransportRequest} to support serialization and
+ * deserialization for transmission across the network in a distributed Elasticsearch cluster.
+ *
+ * Usage Examples: Implementations should check all request parameters and accumulate validation errors
+ * using {@link org.elasticsearch.action.ValidateActions#addValidationError(String, ActionRequestValidationException)}.
+ *
+ * @return an {@link ActionRequestValidationException} containing all validation errors,
+ * or {@code null} if the request is valid
+ */
public abstract ActionRequestValidationException validate();
/**
- * Should this task store its result after it has finished?
+ * Determines whether this task should store its result after it has finished execution.
+ * Task results can be retrieved later via the Task Management API.
+ *
+ * By default, this returns {@code false}. Subclasses can override this method to
+ * enable result storage for specific request types.
+ *
+ * @return {@code true} if the task result should be stored, {@code false} otherwise
*/
public boolean getShouldStoreResult() {
return false;
}
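A sketch of a typical validate() implementation using the accumulator helper (the index and id fields are hypothetical):

    import static org.elasticsearch.action.ValidateActions.addValidationError;

    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;
        if (index == null) {
            validationException = addValidationError("index is missing", validationException);
        }
        if (id == null) {
            validationException = addValidationError("id is missing", validationException);
        }
        return validationException;  // null means the request is valid
    }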
+ /**
+ * Writes this action request to the provided stream output for serialization.
+ * This method is used to transmit the request across the network.
+ *
+ * @param out the stream output to write the request state to
+ * @throws IOException if an I/O error occurs while writing to the stream
+ */
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
diff --git a/server/src/main/java/org/elasticsearch/action/ActionRequestValidationException.java b/server/src/main/java/org/elasticsearch/action/ActionRequestValidationException.java
index 3ccf1447c19ac..6997d78b895a1 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionRequestValidationException.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionRequestValidationException.java
@@ -11,4 +11,41 @@
import org.elasticsearch.common.ValidationException;
+/**
+ * Exception thrown when an {@link ActionRequest} fails validation. This exception accumulates
+ * multiple validation errors that occurred during request validation, allowing all issues to be
+ * reported at once rather than failing on the first error.
+ *
+ * Validation errors are typically added using
+ * {@link org.elasticsearch.action.ValidateActions#addValidationError(String, ActionRequestValidationException)},
+ * which builds up a chain of error messages.
+ *
+ * Usage Examples: Concrete action responses should extend this class and implement the necessary methods to
+ * serialize/deserialize their state for network transmission. Many responses also implement
+ * {@link ToXContent} to provide JSON/XML representations of the response data.
+ *
+ * Usage Examples: Use {@link EmptyResponseListener} to convert this to a valid (plain-text) REST response instead.
+ * This singleton instance is used when an action completes successfully but has no meaningful data to return.
+ *
+ * Usage Examples: This class extends {@link AbstractRunnable} and automatically handles exceptions by forwarding them
+ * to the listener's {@link ActionListener#onFailure} method. This is particularly useful for executing
+ * asynchronous operations on thread pools where exceptions need to be properly propagated.
+ *
+ * Usage Examples: This is useful for creating runnables that need to perform complex operations and then complete
+ * the listener with a result or error.
+ *
+ * @param This is particularly useful for submitting actions holding resources to a threadpool which
+ * might have a bounded queue. The resource will be properly released whether the task executes
+ * successfully, fails, or is rejected.
+ *
+ * @param
- * Typically, every {@link ActionType} instance is a global constant (i.e. a public static final field) called {@code INSTANCE} or {@code
- * TYPE}. Some legacy implementations create custom subclasses of {@link ActionType} but this is unnecessary and somewhat wasteful. Prefer
- * to create instances of this class directly whenever possible.
+ * Represents a type of action that can be invoked by {@link Client#execute}. An {@code ActionType} serves
+ * as a unique identifier and type descriptor for actions in the Elasticsearch action framework.
+ *
+ * The implementation must be registered with the node using {@link ActionModule#setupActions}
+ * (for actions in the {@code :server} package) or {@link ActionPlugin#getActions} (for actions in plugins).
+ *
+ * Typically, every {@link ActionType} instance is a global constant (i.e. a public static final field)
+ * called {@code INSTANCE} or {@code TYPE}. Some legacy implementations create custom subclasses of
+ * {@link ActionType} but this is unnecessary and somewhat wasteful. Prefer to create instances of this
+ * class directly whenever possible.
+ *
+ * Usage Examples:
- * There is no facility for directly executing an action on a different node in the local cluster. To achieve this, implement an action
- * which runs on the local node and knows how to use the {@link TransportService} to forward the request to a different node. There are
- * several utilities that help implement such an action, including {@link TransportNodesAction} or {@link TransportMasterNodeAction}.
+ *
+ * There is no facility for directly executing an action on a different node in the local cluster.
+ * To achieve this, implement an action which runs on the local node and knows how to use the
+ * {@link TransportService} to forward the request to a different node. There are several utilities
+ * that help implement such an action, including {@link TransportNodesAction} or
+ * {@link TransportMasterNodeAction}.
*
* @param name The name of the action, which must be unique across actions.
+ * @param <Response> the type of the response that this action returns
- * There is no facility for directly executing an action on a different node in the local cluster. To achieve this, implement an action
- * which runs on the local node and knows how to use the {@link TransportService} to forward the request to a different node. There are
- * several utilities that help implement such an action, including {@link TransportNodesAction} or {@link TransportMasterNodeAction}.
+ * Constructs an {@link ActionType} with the given name.
*
- * @param name The name of the action, which must be unique across actions.
+ * There is no facility for directly executing an action on a different node in the local cluster.
+ * To achieve this, implement an action which runs on the local node and knows how to use the
+ * {@link TransportService} to forward the request to a different node. There are several utilities
+ * that help implement such an action, including {@link TransportNodesAction} or
+ * {@link TransportMasterNodeAction}.
+ *
+ * Action names typically follow a hierarchical pattern such as
+ * {@code cluster:monitor/health} or {@code indices:data/read/search}.
+ *
- * This is a useful base class for creating ActionListener wrappers that override the {@link #onResponse} handling, with access to
- * {@code L}, while retaining all of {@code L}'s other handling. It can also be useful to override other methods to do new work with access
- * to {@code L}.
+ * A base class for creating wrappers around {@link ActionListener}s that delegate certain operations
+ * to the wrapped listener. By default, this class delegates failure handling to the delegate listener's
+ * {@link ActionListener#onFailure} method.
+ *
+ * This is a useful base class for creating ActionListener wrappers that need to override
+ * {@link #onResponse} handling with custom logic, while retaining the delegate's failure handling.
+ * It can also be useful to override other methods to perform additional work with access to the
+ * delegate listener.
+ *
+ * Usage Examples: When an action is executed across multiple nodes (e.g., gathering node statistics,
+ * performing cluster-wide operations), individual nodes may fail. This exception captures
+ * both the failure details and the identity of the failed node.
+ *
+ * Usage Examples: In case of internal requests originated during the distributed execution of an external request,
+ * they will still return the indices that the original request related to, maintaining the context
+ * of the original operation.
+ *
+ * Usage Examples: This may include concrete index names, index patterns with wildcards (e.g., {@code logs-*}),
+ * or aliases. The actual indices targeted will be determined based on these names combined with
+ * the {@link #indicesOptions()} settings.
+ *
+ * @return the array of index names, patterns, or aliases that this action operates on
*/
String[] indices();
/**
- * Returns the indices options used to resolve indices. They tell for instance whether a single index is
- * accepted, whether an empty array will be converted to _all, and how wildcards will be expanded if needed.
+ * Returns the indices options used to resolve indices. These options control various aspects
+ * of index resolution: for instance whether a single index is accepted, whether an empty
+ * array is converted to {@code _all}, and how wildcard expressions are expanded.
+ */

+ /**
+ * Determines whether the request should include data streams in its index resolution.
+ *
+ * <p>All layers involved in the request's fulfillment including security, name resolution, etc.,
+ * should respect this flag to ensure consistent behavior across the system.
+ *
+ * @return {@code true} if data streams should be included in index resolution, {@code false} otherwise
*/
default boolean includeDataStreams() {
return false;
}
+ /**
+ * Extension of {@link IndicesRequest} for requests that support replacing their target indices
+ * after the request has been constructed. This is used during index resolution to update the
+ * request with the concrete indices after wildcard expansion and alias resolution.
+ *
+ * Usage Examples: Note: This method does not replace {@link #indices(String...)}. The
+ * {@link #indices(String...)} method must still be called to update the actual list
+ * of indices the request relates to. This method only stores metadata about how the
+ * indices were resolved.
+ *
+ * Note: The recorded information is transient and not serialized.
+ *
+ * @param expressions the resolved index expressions to record
*/
default void setResolvedIndexExpressions(ResolvedIndexExpressions expressions) {}
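A sketch of the Replaceable contract in use (the request class is hypothetical): index resolution expands wildcards and aliases, then hands the concrete names back via indices(String...):

    public class MyRequest extends ActionRequest implements IndicesRequest.Replaceable {
        private String[] indices;

        @Override
        public MyRequest indices(String... indices) {  // called after wildcard expansion
            this.indices = indices;
            return this;
        }

        @Override
        public String[] indices() {
            return indices;
        }

        @Override
        public IndicesOptions indicesOptions() {
            return IndicesOptions.strictExpandOpen();
        }

        @Override
        public ActionRequestValidationException validate() {
            return null;
        }
    }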
/**
- * Returns the results of index resolution, if recorded via
- * {@link #setResolvedIndexExpressions(ResolvedIndexExpressions)}. Null if not recorded.
+ * Returns the results of index resolution, if previously recorded via
+ * {@link #setResolvedIndexExpressions(ResolvedIndexExpressions)}.
+ *
+ * @return the resolved index expressions, or {@code null} if not recorded
*/
@Nullable
default ResolvedIndexExpressions getResolvedIndexExpressions() {
@@ -68,28 +156,43 @@ default ResolvedIndexExpressions getResolvedIndexExpressions() {
/**
* Determines whether the request can contain indices on a remote cluster.
- *
- * NOTE in theory this method can belong to the {@link IndicesRequest} interface because whether a request
- * allowing remote indices has no inherent relationship to whether it is {@link Replaceable} or not.
- * However, we don't have an existing request that is non-replaceable but allows remote indices.
- * In addition, authorization code currently relies on the fact that non-replaceable requests do not allow
- * remote indices.
- * That said, it is possible to remove this constraint should the needs arise in the future. We just need
+ *
+ * Note: In theory this method can belong to the {@link IndicesRequest} interface
+ * because whether a request allowing remote indices has no inherent relationship to whether
+ * it is {@link Replaceable} or not. However, we don't have an existing request that is
+ * non-replaceable but allows remote indices. In addition, authorization code currently relies
+ * on the fact that non-replaceable requests do not allow remote indices. That said, it is
+ * possible to remove this constraint should the needs arise in the future. We just need to
* proceed with extra caution.
+ *
+ * @return {@code true} if this request type allows targeting indices on remote clusters,
+ * {@code false} otherwise
*/
default boolean allowsRemoteIndices() {
return false;
}
/**
- * Determines whether the request type allows cross-project processing. Cross-project processing entails cross-project search
- * index resolution and error handling. Note: this method only determines in the request _supports_ cross-project.
- * Whether cross-project processing is actually performed is determined by {@link IndicesOptions}.
+ * Determines whether the request type allows cross-project processing. Cross-project
+ * processing entails cross-project search, index resolution, and error handling.
+ *
+ * Note: This method only determines if the request supports cross-project
+ * processing. Whether cross-project processing is actually performed is determined by
+ * {@link IndicesOptions}.
+ *
+ * @return {@code true} if this request type supports cross-project processing,
+ * {@code false} otherwise
*/
default boolean allowsCrossProject() {
return false;
}
+ /**
+ * Returns the project routing hint for this request, if any. Project routing is used to
+ * direct requests to specific projects in multi-project deployments.
+ *
+ * @return the project routing string, or {@code null} if no routing is specified
+ */
@Nullable // if no routing is specified
default String getProjectRouting() {
return null;
diff --git a/server/src/main/java/org/elasticsearch/action/NoSuchNodeException.java b/server/src/main/java/org/elasticsearch/action/NoSuchNodeException.java
index a3f21c86c27cb..3c2b983eed20a 100644
--- a/server/src/main/java/org/elasticsearch/action/NoSuchNodeException.java
+++ b/server/src/main/java/org/elasticsearch/action/NoSuchNodeException.java
@@ -13,12 +13,48 @@
import java.io.IOException;
+/**
+ * Exception thrown when an operation attempts to target a node that does not exist in the cluster.
+ * This typically occurs when a node ID is specified that is either invalid or refers to a node that
+ * has been removed from the cluster.
+ *
+ * This exception extends {@link FailedNodeException} and represents a specific case where the
+ * failure is due to the node not existing rather than the node failing during an operation.
+ *
+ * Usage Examples: Usage Examples: The returned request reflects all the configuration that has been applied to the
+ * builder up to this point.
+ *
+ * @return the request object built by this builder
*/
Request request();
+ /**
+ * Executes the request asynchronously and returns a future that can be used to retrieve
+ * the response. The future allows the caller to wait for the result or check if it's ready.
+ *
+ * @return an {@link ActionFuture} that will be completed with the response or an exception
+ */
ActionFuture
+ /**
+ * Exception thrown when a required routing value is missing from a document operation.
+ *
+ * <p>Routing is a mechanism in Elasticsearch to control which shard a document is stored on.
+ * When an index requires routing (typically for performance or co-location reasons), all
+ * operations on documents in that index must include a routing parameter.
+ *
+ * Usage Examples: Shard operation failures are common in distributed systems where operations may fail on individual
+ * shards while succeeding on others. This exception captures the context of the failure including the
+ * index, shard ID, reason, status, and underlying cause.
+ *
+ * This is an abstract class that should be subclassed for specific types of shard failures.
+ * It implements both {@link Writeable} for serialization across nodes and {@link ToXContentObject}
+ * for JSON/XML representation.
+ *
+ * Usage Examples:
+ * Bootstrap checks are validation tests run when Elasticsearch starts to ensure the node
+ * is configured properly for production use. These checks verify system settings, resource
+ * limits, and other prerequisites required for reliable operation.
+ *
+ * Usage Examples:
+ * A result can either be a success (with no message) or a failure (with a descriptive
+ * error message explaining why the check failed).
*/
record BootstrapCheckResult(String message) {
private static final BootstrapCheckResult SUCCESS = new BootstrapCheckResult(null);
+ /**
+ * Creates a successful bootstrap check result.
+ *
+ * @return a success result
+ */
public static BootstrapCheckResult success() {
return SUCCESS;
}
+ /**
+ * Creates a failed bootstrap check result with an error message.
+ *
+ * @param message the failure message explaining why the check failed
+ * @return a failure result with the provided message
+ * @throws NullPointerException if message is null
+ */
public static BootstrapCheckResult failure(final String message) {
Objects.requireNonNull(message);
return new BootstrapCheckResult(message);
}
+ /**
+ * Checks if this result represents a successful bootstrap check.
+ *
+ * @return true if the check succeeded, false otherwise
+ */
public boolean isSuccess() {
return this == SUCCESS;
}
+ /**
+ * Checks if this result represents a failed bootstrap check.
+ *
+ * @return true if the check failed, false otherwise
+ */
public boolean isFailure() {
return isSuccess() == false;
}
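A sketch of a custom check (the threshold, helper, and docs link are illustrative):

    public class FileDescriptorCheck implements BootstrapCheck {
        @Override
        public BootstrapCheckResult check(BootstrapContext context) {
            long limit = getMaxFileDescriptors();  // hypothetical helper
            if (limit < 65535) {
                return BootstrapCheckResult.failure(
                    "max file descriptors [" + limit + "] is too low, increase to at least [65535]");
            }
            return BootstrapCheckResult.success();
        }

        @Override
        public ReferenceDocs referenceDocs() {
            return ReferenceDocs.BOOTSTRAP_CHECKS;
        }
    }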
+ /**
+ * Returns the failure message for this result.
+ *
+ * This method should only be called on failure results.
+ *
+ * @return the failure message
+ */
public String getMessage() {
assert isFailure();
assert message != null;
@@ -51,17 +105,37 @@ public String getMessage() {
}
/**
- * Test if the node fails the check.
+ * Tests if the node passes this bootstrap check.
+ *
+ * This method performs the actual validation logic and returns a result indicating
+ * whether the check passed or failed. If failed, the result should include a
+ * descriptive message explaining the problem.
*
- * @param context the bootstrap context
+ * @param context the bootstrap context containing environment and metadata
* @return the result of the bootstrap check
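+ *
+ * An illustrative implementation sketch (the setting name is made up):
+ * {@code
+ * public BootstrapCheckResult check(BootstrapContext context) {
+ *     if (context.settings().getAsBoolean("example.safe_mode", true)) {
+ *         return BootstrapCheckResult.success();
+ *     }
+ *     return BootstrapCheckResult.failure("example.safe_mode must be enabled");
+ * }
+ * }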
*/
BootstrapCheckResult check(BootstrapContext context);
+ /**
+ * Indicates whether this check should always be enforced, even in development mode.
+ *
+ * By default, most bootstrap checks are only enforced in production mode. Checks that
+ * return true from this method will be enforced regardless of the node's mode.
+ *
+ * @return true if this check should always be enforced, false for production-only enforcement
+ */
default boolean alwaysEnforce() {
return false;
}
+ /**
+ * Returns the reference documentation for this bootstrap check.
+ *
+ * This provides users with a link to documentation explaining why the check failed
+ * and how to resolve the issue.
+ *
+ * @return the reference documentation for this check
+ */
ReferenceDocs referenceDocs();
}
diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapContext.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapContext.java
index 9256225734118..8a09a7be8c6cc 100644
--- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapContext.java
+++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapContext.java
@@ -13,10 +13,34 @@
import org.elasticsearch.env.Environment;
/**
- * Context that is passed to every bootstrap check to make decisions on.
+ * Context passed to bootstrap checks containing environment and metadata information.
+ *
+ * This record encapsulates the information needed by bootstrap checks to validate the
+ * node's configuration. It provides access to the node's environment settings and
+ * cluster metadata.
+ *
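+ * A minimal usage sketch (the setting name is illustrative):
+ * {@code
+ * Settings settings = context.settings();
+ * boolean memoryLock = settings.getAsBoolean("bootstrap.memory_lock", false);
+ * }
+ *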
+ */

+ /**
+ * Returns the node's settings. This is a convenience method equivalent to
+ * {@code environment.settings()}.
+ *
+ * @return the node's settings
+ */
public Settings settings() {
return environment.settings();
}
diff --git a/server/src/main/java/org/elasticsearch/bootstrap/StartupException.java b/server/src/main/java/org/elasticsearch/bootstrap/StartupException.java
index bb13c6959749d..689cbdf8e9210 100644
--- a/server/src/main/java/org/elasticsearch/bootstrap/StartupException.java
+++ b/server/src/main/java/org/elasticsearch/bootstrap/StartupException.java
@@ -16,18 +16,35 @@
import java.util.Objects;
/**
- * A wrapper for exceptions occurring during startup.
+ * A wrapper for exceptions occurring during Elasticsearch node startup.
+ *
+ * This exception provides a cleaner presentation of startup failures by truncating
+ * and compressing stack traces, particularly for Guice-related errors which can
+ * have hundreds of stack frames. The stack trace is limited to {@link #STACKTRACE_LIMIT}
+ * lines and consecutive Guice frames are compressed into a single line.
*
- * The stacktrack of a startup exception may be truncated if it is from Guice,
- * which can have a large number of stack frames.
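+ *
+ * A minimal usage sketch (the initialization call is illustrative):
+ * {@code
+ * try {
+ *     initNode();
+ * } catch (Exception e) {
+ *     throw new StartupException(e);
+ * }
+ * }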
+ */

+ /**
+ * This applier is called before the state becomes visible. If you need to react to state changes
+ * after they are visible, use {@link ClusterStateListener} instead.
+ */

/**
- * Cluster states are applied one-by-one which means they can be a performance bottleneck. Implementations of this method should
- * therefore be fast, so please consider forking work into the background rather than doing everything inline.
+ * Applies the new cluster state to internal data structures. This method is called before
+ * the state becomes visible via {@link ClusterService#state()}.
+ */

+ /**
+ * This listener is called after the state has been applied and is visible.
+ * If you need to apply changes before the state becomes visible, use {@link ClusterStateApplier} instead.
+ *
+ * Cluster states are applied sequentially, which can create a performance bottleneck.
+ * Implementations should be fast and consider offloading long-running work to background threads.
+ */

/**
- * Cluster states are applied one-by-one which means they can be a performance bottleneck. Implementations of this method should
- * therefore be fast, so please consider forking work into the background rather than doing everything inline.
+ * Invoked when the cluster state changes. This method is called after the new cluster state
+ * has been applied and is visible via {@link ClusterService#state()}.
+ */

+ /**
+ * Tasks are executed on the master node with configurable priority and optional timeout.
+ * If the timeout expires before execution, the task fails with a timeout exception.
+ *
+ * Important optimization: return the same instance if no changes are needed. This
+ * short-circuits the entire publication process, saving significant time and effort.
+ */

+ /**
+ * This interface is the counterpart to {@link Diffable}, where {@code Diffable}
+ * generates diffs and {@code Diff} applies them. Implementations must ensure that the
+ * generated diff can be applied to the previous state to reconstruct the current state.
+ */

+ /**
+ * Blocks can be global (affecting the entire cluster) or index-specific, and can
+ * restrict operations at different levels such as read, write, or metadata operations.
+ */

/**
* When merging conflicting configuration settings such as
* field mapping settings it is preferable to preserve an explicit
- * choice rather than a choice made only made implicitly by defaults.
+ * choice rather than a choice made implicitly by defaults.
+ */

+ /**
+ * The exponentially weighted moving average is calculated as
+ * {@code ewma = alpha * value + (1 - alpha) * ewma}.
+ */

+ /**
+ * The priority levels in order from highest to lowest are:
+ * {@link #IMMEDIATE}, {@link #URGENT}, {@link #HIGH}, {@link #NORMAL}, {@link #LOW}, {@link #LANGUID}.
+ */

+ /**
+ * Time-based UUIDs are preferred for use as Lucene document IDs because they have better
+ * index locality characteristics. Random UUIDs should be used when uniqueness without
+ * ordering is required.
+ */

+ /**
+ * BytesReference instances are immutable after creation and thread-safe for read operations.
+ */

+ /**
+ * The listener is called synchronously during the removal operation, so implementations
+ * should be quick and avoid blocking operations.
+ */

+ /**
+ * This class manages all filesystem paths and settings for an Elasticsearch node.
+ *
+ * All paths are resolved to absolute, normalized paths during construction to ensure
+ * consistent path handling across the application.
+ */

+ /**
+ * A shard lock guarantees that only one process can access a shard's data directory
+ * at a time, preventing concurrent modifications that could corrupt the shard data.
+ * Internal processes must acquire a lock on a shard before executing any write
+ * operations on the shard's data directory.
+ *
+ * This lock is {@link Closeable} and should be used with try-with-resources to
+ * ensure proper release.
+ *
+ */

+ /**
+ * Releases this shard lock.
+ *
+ * This method is idempotent; calling it multiple times has no additional effect
+ * beyond the first call. The actual lock release logic is delegated to
+ * {@link #closeInternal()}.
+ */
@Override
public final void close() {
if (this.closed.compareAndSet(false, true)) {
@@ -44,11 +73,23 @@ public final void close() {
}
}
+ /**
+ * Internal method to release the lock.
+ *
+ * Subclasses must implement this method to provide the actual lock release logic.
+ */
protected abstract void closeInternal();
/**
- * Update the details of the holder of this lock. These details are displayed alongside a {@link ShardLockObtainFailedException}. Must
- * only be called by the holder of this lock.
+ * Updates the details of the current holder of this lock.
+ *
+ * These details are displayed in {@link ShardLockObtainFailedException} when another
+ * process attempts to acquire the lock. This helps diagnose which operation is
+ * holding the lock.
+ *
+ * This method must only be called by the current holder of this lock.
+ *
+ * @param details a description of the operation holding the lock
*/
public void setDetails(String details) {}
diff --git a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java
index 2ef630f2d4cd3..35695f41ce390 100644
--- a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java
+++ b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java
@@ -18,17 +18,46 @@ public abstract class AbstractIndexComponent {
protected final IndexSettings indexSettings;
/**
- * Constructs a new index component, with the index name and its settings.
+ * Constructs a new index component with the specified index settings.
+ * Initializes the logger with the component's class and index information.
+ *
+ * @param indexSettings the index settings containing configuration and metadata for this component
*/
protected AbstractIndexComponent(IndexSettings indexSettings) {
this.logger = Loggers.getLogger(getClass(), indexSettings.getIndex());
this.indexSettings = indexSettings;
}
+ /**
+ * Retrieves the index associated with this component.
+ *
+ * @return the {@link Index} object containing the index name and UUID
+ */

+ /**
+ * Returns the source of the document as a map.
+ *
+ * Important: This method is expensive and should be called at most once
+ * during the lifetime of the object, as the generated map is not cached.
+ *
+ * @return the source as a Map, or null if source is not available
+ */

+ /**
+ * This plugin manages the index templates and mappings required for APM data ingestion.
+ * It creates and maintains an {@link APMIndexTemplateRegistry} that handles the lifecycle
+ * of APM-related index templates.
+ *
+ * This setting is ignored if the APM data plugin itself is disabled via
+ * {@link XPackSettings#APM_DATA_ENABLED}.
+ *
+ * This method creates the {@link APMIndexTemplateRegistry} which manages APM index templates.
+ * If the plugin is enabled, the registry is initialized and configured according to the
+ * {@link #APM_DATA_REGISTRY_ENABLED} setting. If disabled, the registry is created but not initialized.
+ *
+ * This method ensures the APM index template registry is properly closed and
+ * any associated resources are released.
+ *
+ * This plugin enables long-running search requests to execute asynchronously,
+ * allowing clients to submit a search, disconnect, and retrieve results later.
+ * This is particularly useful for searches that may take a long time to complete.
+ * Usage Example:
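+ * (an illustrative sketch of the REST flow; request bodies and ids elided)
+ * {@code
+ * POST /my-index/_async_search?wait_for_completion_timeout=1s
+ * GET /_async_search/<id>
+ * DELETE /_async_search/<id>
+ * }
+ *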
+ * Registers transport actions for submitting async searches, retrieving results,
+ * and checking search status.
+ *
+ * Registers REST endpoints for async search operations including submit, get,
+ * status, and delete operations.
+ *
+ * This plugin provides functionality for storing and managing results from asynchronous search operations.
+ * It maintains a system index for async task results and provides maintenance services for cleanup operations.
+ *
+ * This method provides the descriptor for the system index used to store asynchronous task results.
+ *
+ * On data nodes, this creates an {@link AsyncTaskMaintenanceService} responsible for
+ * cleaning up expired async search results. Non-data nodes do not run the maintenance service.
+ *
+ * This is the type name as it should be displayed in API responses.
+ *
+ * This field is populated when:
+ *
+ * This method should be called when query planning is complete and execution in
+ * {@code ComputeService} is about to start. It captures the planning duration for
+ * performance monitoring and diagnostic purposes.
+ *
+ * Important: This method should only be called once per query execution.
+ * The current implementation is designed for a single-phase planning/execution model.
+ * If INLINE STATS or other multi-phase features move towards GA, this model may need
+ * to be revised.
+ *
+ * This method should be called when the query execution is complete to capture
+ * the total elapsed time. The duration is only recorded for the main plan
+ * (not for subplans).
+ *
+ * In cross-cluster search scenarios, remote clusters can be configured as skippable,
+ * allowing the query to continue with partial results if those clusters fail. The local
+ * cluster is never skippable.
+ *
+ * Metadata is included when either:
+ *
+ * Failures are typically used during query verification to track semantic errors,
+ * type mismatches, or other validation issues. Each failure is attached to the
+ * specific node in the query tree where the problem was detected.
+ *
+ * This returns only the message part, without location information.
+ * Use {@link #failMessage(Collection)} to get formatted messages with locations.
+ *
+ * This is a convenience factory method that formats the message using the provided
+ * arguments before creating the Failure instance. Message placeholders are denoted by {}.
+ *
+ * This method creates a comprehensive error message that includes each failure's
+ * source location and message.
+ */

+ /**
+ * Constructs a multi-command with the specified description.
+ *
+ * Usage Example:
+ * {@code
+ * MultiCommand tool = new MultiCommand("Elasticsearch administration tool");
+ * tool.subcommands.put("index", new IndexCommand());
+ * tool.subcommands.put("cluster", new ClusterCommand());
+ * }
*/
public MultiCommand(final String description) {
super(description);
@@ -70,6 +81,21 @@ private void printSubCommandList(Consumer<String> output)

+ /**
+ * Reads plain text from the terminal input with the specified prompt.
+ *
+ * @param prompt the prompt to display to the user
+ * @return the text entered by the user
+ *
+ * Usage Example:
+ * {@code
+ * Terminal terminal = Terminal.DEFAULT;
+ * String name = terminal.readText("Enter your name: ");
+ * }
+ */
public String readText(String prompt) {
return new String(read(prompt));
}
- /** Reads password text from the terminal input. See {@link Console#readPassword()}}. */
+ /**
+ * Reads secret text (e.g., password) from the terminal input with the specified prompt.
+ *
+ * @param prompt the prompt to display to the user
+ * @return the secret text as a character array so it can be cleared after use
+ *
+ * Usage Example:
+ * {@code
+ * Terminal terminal = Terminal.DEFAULT;
+ * char[] password = terminal.readSecret("Enter password: ");
+ * // ... use password ...
+ * Arrays.fill(password, '\0'); // Clear password from memory
+ * }
+ */
public char[] readSecret(String prompt) {
return read(prompt);
}
- /** Returns a Reader which can be used to read directly from the terminal using standard input. */
+ /**
+ * Returns a Reader for reading directly from the terminal using standard input.
+ *
+ * @return the reader for this terminal's input stream
+ */
public final Reader getReader() {
return reader;
}
/**
- * Returns a line based OutputStream wrapping this Terminal's println.
- * Note, this OutputStream is not thread-safe!
+ * Returns a line-based OutputStream that wraps this terminal's println method.
+ *
+ * Note that the returned OutputStream is not thread-safe.
+ *
+ * @param charset the charset used to decode bytes written to the stream
+ * @return an OutputStream that prints each completed line via println
+ *
+ * Usage Example:
+ * {@code
+ * Terminal terminal = Terminal.DEFAULT;
+ * try (OutputStream out = terminal.asLineOutputStream(StandardCharsets.UTF_8)) {
+ * out.write("Hello\n".getBytes(StandardCharsets.UTF_8));
+ * }
+ * }
*/
public final OutputStream asLineOutputStream(Charset charset) {
return new LineOutputStream(charset);
@@ -216,14 +267,45 @@ public void errorPrintln(Throwable throwable) {
errorPrintln(Verbosity.SILENT, throwable);
}
- /** Checks if is enough {@code verbosity} level to be printed */
+ /**
+ * Checks if a message at the specified verbosity level should be printed.
+ *
+ * @param verbosity the verbosity level to test
+ * @return true if the terminal's current verbosity is at least the specified level
+ *
+ * Usage Example:
+ * {@code
+ * if (terminal.isPrintable(Verbosity.VERBOSE)) {
+ * terminal.println(Verbosity.VERBOSE, "Detailed debug information");
+ * }
+ * }
+ */
public final boolean isPrintable(Verbosity verbosity) {
return this.currentVerbosity.ordinal() >= verbosity.ordinal();
}
/**
- * Prompt for a yes or no answer from the user. This method will loop until 'y' or 'n'
- * (or the default empty value) is entered.
+ * Prompts the user for a yes or no answer with a default value.
+ *
+ * This method loops until 'y', 'n', or the empty string (accepting the default) is entered.
+ *
+ * @param prompt the question to display
+ * @param defaultYes true if the default answer should be yes, false for no
+ * @return true if the user answered yes
+ *
+ * Usage Example:
+ * {@code
+ * Terminal terminal = Terminal.DEFAULT;
+ * boolean proceed = terminal.promptYesNo("Continue with operation?", true);
+ * if (proceed) {
+ * // ... perform operation ...
+ * }
+ * }
*/
public final boolean promptYesNo(String prompt, boolean defaultYes) {
String answerPrompt = defaultYes ? " [Y/n]" : " [y/N]";
@@ -243,10 +325,30 @@ public final boolean promptYesNo(String prompt, boolean defaultYes) {
}
/**
- * Read from the reader until we find a newline. If that newline
- * character is immediately preceded by a carriage return, we have
- * a Windows-style newline, so we discard the carriage return as well
- * as the newline.
+ * Reads a line of text from the reader and returns it as a character array.
+ *
+ * Reads until a newline character; if the newline is immediately preceded by a carriage
+ * return (a Windows-style line ending), the carriage return is discarded as well.
+ *
+ * @param reader the reader to consume
+ * @return the line as a character array, or null if the end of the stream was reached
+ *
+ * Usage Example:
+ * {@code
+ * Reader reader = new InputStreamReader(System.in);
+ * char[] line = Terminal.readLineToCharArray(reader);
+ * if (line != null) {
+ * String lineStr = new String(line);
+ * Arrays.fill(line, '\0'); // Clear sensitive data
+ * }
+ * }
*/
public static char[] readLineToCharArray(Reader reader) {
char[] buf = new char[128];
@@ -283,7 +385,9 @@ public static char[] readLineToCharArray(Reader reader) {
}
/**
- * Flush the outputs of this terminal.
+ * Flushes both the standard output and error output streams of this terminal.
+ *
+ */

+ /**
+ * Utility methods for parsing boolean values.
+ *
+ * Usage Example:
+ * {@code
+ * boolean result = Booleans.parseBoolean("true"); // Returns true
+ * boolean result2 = Booleans.parseBoolean("false"); // Returns false
+ * boolean result3 = Booleans.parseBoolean("invalid"); // Throws IllegalArgumentException
+ *
+ * boolean withDefault = Booleans.parseBoolean(null, false); // Returns false (default)
+ * boolean isValid = Booleans.isBoolean("true"); // Returns true
+ * }
+ */
public final class Booleans {
private Booleans() {
throw new AssertionError("No instances intended");
diff --git a/libs/core/src/main/java/org/elasticsearch/core/Predicates.java b/libs/core/src/main/java/org/elasticsearch/core/Predicates.java
index 88c4f13896722..a2d1ae4cbc05c 100644
--- a/libs/core/src/main/java/org/elasticsearch/core/Predicates.java
+++ b/libs/core/src/main/java/org/elasticsearch/core/Predicates.java
@@ -14,7 +14,24 @@
import java.util.function.Predicate;
/**
- * Utilities around predicates.
+ * Utility methods for working with {@link Predicate} and {@link BooleanSupplier}.
+ *
+ * {@code
+ * // Get a predicate that always returns true
+ * Predicate<String> always = Predicates.always();
+ *
+ * // Get a predicate that always returns false
+ * Predicate<String> never = Predicates.never();
+ * }
*/
public enum Predicates {
;
diff --git a/libs/core/src/main/java/org/elasticsearch/core/Releasables.java b/libs/core/src/main/java/org/elasticsearch/core/Releasables.java
index 8eee84050ca39..e3ef92c101958 100644
--- a/libs/core/src/main/java/org/elasticsearch/core/Releasables.java
+++ b/libs/core/src/main/java/org/elasticsearch/core/Releasables.java
@@ -13,7 +13,31 @@
import java.util.Iterator;
import java.util.concurrent.atomic.AtomicReference;
-/** Utility methods to work with {@link Releasable}s. */
+/**
+ * Utility methods for working with {@link Releasable} resources.
+ *
+ * {@code
+ * // Close multiple releasables, propagating the first exception
+ * Releasables.close(resource1, resource2, resource3);
+ *
+ * // Close and suppress all exceptions (useful in exception handlers)
+ * Releasables.closeWhileHandlingException(resource1, resource2);
+ *
+ * // Wrap multiple releasables for use with try-with-resources
+ * List<Releasable> resources = List.of(resource1, resource2);
+ * try (Releasable combined = Releasables.wrap(resources)) {
+ *     // ... use the wrapped resources ...
+ * }
+ * }
+ */
public enum Releasables {
;
diff --git a/libs/core/src/main/java/org/elasticsearch/core/Strings.java b/libs/core/src/main/java/org/elasticsearch/core/Strings.java
index ed8dbc9cdbdb6..5db3bf77d6413 100644
--- a/libs/core/src/main/java/org/elasticsearch/core/Strings.java
+++ b/libs/core/src/main/java/org/elasticsearch/core/Strings.java
@@ -12,18 +12,31 @@
import java.util.Locale;
/**
- * Utilities related to String class
+ * Utility methods for String operations.
+ *
+ * {@code
+ * String result = Strings.format("Hello %s, you have %d messages", "John", 5);
+ * // Returns: "Hello John, you have 5 messages"
+ * }
*/
public static String format(String format, Object... args) {
try {
diff --git a/libs/core/src/main/java/org/elasticsearch/core/TimeValue.java b/libs/core/src/main/java/org/elasticsearch/core/TimeValue.java
index 6ac84479dc6e8..8c8d6a99406bb 100644
--- a/libs/core/src/main/java/org/elasticsearch/core/TimeValue.java
+++ b/libs/core/src/main/java/org/elasticsearch/core/TimeValue.java
@@ -13,6 +13,25 @@
import java.util.Objects;
import java.util.concurrent.TimeUnit;
+/**
+ * Represents a duration or time value with a specific time unit.
+ *
+ * {@code
+ * TimeValue fiveSeconds = TimeValue.timeValueSeconds(5);
+ * TimeValue twoMinutes = TimeValue.timeValueMinutes(2);
+ * TimeValue thirtySeconds = TimeValue.THIRTY_SECONDS;
+ *
+ * long millis = fiveSeconds.millis(); // Returns 5000
+ * long seconds = twoMinutes.seconds(); // Returns 120
+ *
+ * TimeValue parsed = TimeValue.parseTimeValue("10m", "timeout");
+ * }
+ */
public class TimeValue implements Comparable<TimeValue> {

+ /**
+ * Constructs a TimeValue with the specified duration and time unit.
+ *
+ * @param duration the length of time; -1 is permitted as a sentinel value
+ * @param timeUnit the unit of the duration
+ *
+ * Usage Example:
+ * {@code
+ * TimeValue fiveMinutes = new TimeValue(5, TimeUnit.MINUTES);
+ * TimeValue tenSeconds = new TimeValue(10, TimeUnit.SECONDS);
+ * }
+ */
public TimeValue(long duration, TimeUnit timeUnit) {
if (duration < -1) {
throw new IllegalArgumentException("duration cannot be negative, was given [" + duration + "]");
@@ -48,10 +86,24 @@ public TimeValue(long duration, TimeUnit timeUnit) {
this.timeUnit = timeUnit;
}
+ /**
+ * Creates a TimeValue representing the specified number of nanoseconds.
+ *
+ * @param nanos the duration in nanoseconds
+ * @return a TimeValue representing the specified duration
+ */
public static TimeValue timeValueNanos(long nanos) {
return new TimeValue(nanos, TimeUnit.NANOSECONDS);
}
+ /**
+ * Creates a TimeValue representing the specified number of milliseconds.
+ *
+ * @param millis the duration in milliseconds
+ * @return a TimeValue representing the specified duration
+ */

+ /**
+ * A tuple of two values.
+ *
+ * Usage Example:
+ * {@code
+ * Tuple<String, Integer> tuple = new Tuple<>("count", 5);
+ * String v1 = tuple.v1();
+ * Integer v2 = tuple.v2();
+ * }
+ */
public record Tuple<V1, V2>(V1 v1, V2 v2) {

+ /**
+ * Convenience factory for creating a tuple, designed for static import.
+ *
+ * Usage Example:
+ * {@code
+ * import static org.elasticsearch.core.Tuple.tuple;
+ *
+ * Tuple<String, Integer> t = tuple("count", 5);
+ * }
+ *
+ * @return a new tuple holding the two provided values
+ */
public static <V1, V2> Tuple<V1, V2> tuple(V1 v1, V2 v2) {

+ /**
+ * Constructs a dissect parser for the given pattern and append separator.
+ *
+ * @param pattern the dissect pattern
+ * @param appendSeparator the separator used when appending values with the {@code +} modifier;
+ *        null defaults to an empty string
+ *
+ * Usage Example:
+ * {@code
+ * DissectParser parser = new DissectParser("%{timestamp} %{+timestamp} %{level} %{message}", " ");
+ * Map<String, String> results = parser.parse("2023-01-01 10:00 INFO starting");
+ * // results: {timestamp=2023-01-01 10:00, level=INFO, message=starting}
+ * }
+ */
public DissectParser(String pattern, String appendSeparator) {
this.pattern = pattern;
this.appendSeparator = appendSeparator == null ? "" : appendSeparator;
diff --git a/libs/geo/src/main/java/org/elasticsearch/geometry/Point.java b/libs/geo/src/main/java/org/elasticsearch/geometry/Point.java
index dc6a29db87bc0..06a4d8fc62a41 100644
--- a/libs/geo/src/main/java/org/elasticsearch/geometry/Point.java
+++ b/libs/geo/src/main/java/org/elasticsearch/geometry/Point.java
@@ -12,9 +12,37 @@
import org.elasticsearch.geometry.utils.WellKnownText;
/**
- * Represents a Point on the earth's surface in decimal degrees and optional altitude in meters.
+ * Represents a geographic point on the earth's surface in decimal degrees with optional altitude.
+ *
+ * Usage Example:
+ * {@code
+ * // Create a 2D point (longitude, latitude)
+ * Point london = new Point(-0.1278, 51.5074);
+ *
+ * // Create a 3D point with altitude
+ * Point mountEverest = new Point(86.9250, 27.9881, 8848.86);
+ *
+ * // Access coordinates
+ * double lat = london.getLat();
+ * double lon = london.getLon();
+ * double alt = mountEverest.getAlt();
+ *
+ * // Use empty point
+ * Point empty = Point.EMPTY;
+ * }
*/
public class Point implements Geometry {
+ /** A singleton empty Point instance. */
public static final Point EMPTY = new Point();
private final double y;
@@ -22,6 +50,9 @@ public class Point implements Geometry {
private final double z;
private final boolean empty;
+ /**
+ * Constructs an empty Point.
+ */
private Point() {
y = 0;
x = 0;
@@ -29,10 +60,23 @@ private Point() {
empty = true;
}
+ /**
+ * Constructs a 2D Point with the specified longitude and latitude.
+ *
+ * @param x the longitude in decimal degrees
+ * @param y the latitude in decimal degrees
+ */
public Point(double x, double y) {
this(x, y, Double.NaN);
}
+ /**
+ * Constructs a 3D Point with the specified longitude, latitude, and altitude.
+ *
+ * @param x the longitude in decimal degrees
+ * @param y the latitude in decimal degrees
+ * @param z the altitude in meters (use Double.NaN for 2D points)
+ */
public Point(double x, double y, double z) {
this.y = y;
this.x = x;
@@ -45,26 +89,62 @@ public ShapeType type() {
return ShapeType.POINT;
}
+ /**
+ * Returns the y-coordinate (latitude) of this point.
+ *
+ * @return the latitude in decimal degrees
+ */
public double getY() {
return y;
}
+ /**
+ * Returns the x-coordinate (longitude) of this point.
+ *
+ * @return the longitude in decimal degrees
+ */
public double getX() {
return x;
}
+ /**
+ * Returns the z-coordinate (altitude) of this point.
+ *
+ * @return the altitude in meters, or Double.NaN if not set
+ */
public double getZ() {
return z;
}
+ /**
+ * Returns the latitude of this point.
+ *
+ * @return the latitude in decimal degrees
+ */

+ /**
+ * Parses unstructured text using grok patterns and extracts named capture groups.
+ *
+ * Usage Example:
+ * {@code
+ * // Create a pattern bank with predefined patterns
+ * PatternBank patternBank = new PatternBank();
+ * patternBank.addPattern("IP", "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}");
+ *
+ * // Create a Grok instance with a pattern
+ * Grok grok = new Grok(patternBank, "%{IP:client_ip} %{WORD:method} %{URIPATHPARAM:request}", msg -> {});
+ *
+ * // Match and extract data
+ * String logLine = "192.168.1.1 GET /index.html";
+ * Map<String, Object> captures = grok.captures(logLine);
+ * }
+ */
public final class Grok {
private static final String NAME_GROUP = "name";
diff --git a/modules/health-shards-availability/src/main/java/org/elasticsearch/health/plugin/ShardsAvailabilityPlugin.java b/modules/health-shards-availability/src/main/java/org/elasticsearch/health/plugin/ShardsAvailabilityPlugin.java
index 66afd6465c178..b3f2d21a0e389 100644
--- a/modules/health-shards-availability/src/main/java/org/elasticsearch/health/plugin/ShardsAvailabilityPlugin.java
+++ b/modules/health-shards-availability/src/main/java/org/elasticsearch/health/plugin/ShardsAvailabilityPlugin.java
@@ -18,12 +18,49 @@
import java.util.Collection;
import java.util.Set;
+/**
+ * Plugin that provides shards availability health indicator functionality to Elasticsearch.
+ * {@code
+ * // Plugin is automatically loaded by Elasticsearch plugin system
+ * // Access health status via Health API:
+ * GET /_health_report/shards_availability
+ * }
+ */
public class ShardsAvailabilityPlugin extends Plugin implements HealthPlugin {
private final SetOnce<ShardsAvailabilityHealthIndicatorService> shardHealthService = new SetOnce<>();

+ /**
+ * Creates and registers the shards availability health indicator service.
+ *
+ * Usage Example:
+ * {@code
+ * // Called automatically by Elasticsearch during plugin initialization
+ * Collection<?> components = plugin.createComponents(pluginServices);
+ * }
+ */
@Override
public Collection<?> createComponents(PluginServices services) {
this.shardHealthService.set(
@@ -37,6 +74,24 @@ public Collection<?> createComponents(PluginServices services) {
return Set.of(this.shardHealthService.get());
}
+ /**
+ * Returns the collection of health indicator services provided by this plugin.
+ * {@code
+ * // Called by Elasticsearch Health API framework
+ * Collection<HealthIndicatorService> services = plugin.getHealthIndicatorServices();
+ * }
+ */
@Override
public Collection<HealthIndicatorService> getHealthIndicatorServices() {

+ /**
+ * Ingest processor that extracts content and metadata from binary documents using Apache Tika.
+ *
+ * Usage Example:
+ * {@code
+ * // Basic usage in an ingest pipeline:
+ * {
+ * "attachment": {
+ * "field": "data",
+ * "target_field": "attachment",
+ * "indexed_chars": 100000,
+ * "properties": ["content", "title", "author"],
+ * "ignore_missing": false,
+ * "remove_binary": true
+ * }
+ * }
+ * }
+ */
public final class AttachmentProcessor extends AbstractProcessor {
private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(AttachmentProcessor.class);
@@ -88,6 +111,31 @@ boolean isRemoveBinary() {
return removeBinary;
}
+ /**
+ * Executes the attachment processor on an ingest document.
+ * {@code
+ * // Executed automatically as part of an ingest pipeline
+ * IngestDocument doc = new IngestDocument(...);
+ * doc.setFieldValue("data", base64EncodedPdfBytes);
+ * IngestDocument result = processor.execute(doc);
+ * // result now contains extracted data in the target field
+ * Map<String, Object> attachment = result.getFieldValue("attachment", Map.class);
+ * }
+ */
@Override
public IngestDocument execute(IngestDocument ingestDocument) {
Map

+ /**
+ * Returns the processor type identifier.
+ *
+ * Usage Example:
+ * {@code
+ * String type = processor.getType();
+ * // Returns: "attachment"
+ * }
+ */
@Override
public String getType() {
return TYPE;
@@ -224,10 +283,48 @@ int getIndexedChars() {
return indexedChars;
}
+ /**
+ * Factory for creating AttachmentProcessor instances.
+ * {@code
+ * // Configuration in pipeline definition:
+ * Map<String, Object> config = new HashMap<>();
+ * config.put("field", "data");
+ * config.put("target_field", "attachment");
+ * }
+ */
@Override
public AttachmentProcessor create(
Map

+ /**
+ * Parses a property name into its enum value, case-insensitively.
+ *
+ * Usage Example:
+ * {@code
+ * Property prop1 = Property.parse("content"); // Returns CONTENT
+ * Property prop2 = Property.parse("TITLE"); // Returns TITLE
+ * Property prop3 = Property.parse("Author"); // Returns AUTHOR
+ * }
+ */
public static Property parse(String value) {
return valueOf(value.toUpperCase(Locale.ROOT));
}
+ /**
+ * Returns the lowercase string representation of this property.
+ * {@code
+ * String fieldName = Property.CONTENT.toLowerCase(); // Returns "content"
+ * String titleField = Property.TITLE.toLowerCase(); // Returns "title"
+ * }
+ */
public String toLowerCase() {
return this.toString().toLowerCase(Locale.ROOT);
}
diff --git a/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/IngestAttachmentPlugin.java b/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/IngestAttachmentPlugin.java
index 0229e13f97b53..bde2713f10115 100644
--- a/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/IngestAttachmentPlugin.java
+++ b/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/IngestAttachmentPlugin.java
@@ -15,8 +15,51 @@
import java.util.Map;
+/**
+ * Plugin that provides the attachment ingest processor for parsing and extracting document content.
+ * {@code
+ * // Plugin registers the "attachment" processor type
+ * // Use in an ingest pipeline:
+ * PUT _ingest/pipeline/attachment
+ * {
+ * "description": "Extract attachment information",
+ * "processors": [
+ * {
+ * "attachment": {
+ * "field": "data",
+ * "target_field": "attachment"
+ * }
+ * }
+ * ]
+ * }
+ * }
+ */
public class IngestAttachmentPlugin extends Plugin implements IngestPlugin {
+ /**
+ * Returns a map of ingest processors provided by this plugin.
+ * {@code
+ * // Called automatically by Elasticsearch during plugin initialization
+ * Map<String, Processor.Factory> processors = plugin.getProcessors(parameters);
+ * }
+ */
@Override
public Map<String, Processor.Factory> getProcessors(Processor.Parameters parameters) {

+ /**
+ * Plugin that provides the normalize_for_stream ingest processor for normalizing
+ * documents to an OpenTelemetry-compatible shape.
+ *
+ * Usage Example:
+ * {@code
+ * // Plugin registers the "normalize_for_stream" processor type
+ * // Use in an ingest pipeline:
+ * PUT _ingest/pipeline/normalize_otel
+ * {
+ * "description": "Normalize documents for OpenTelemetry compatibility",
+ * "processors": [
+ * {
+ * "normalize_for_stream": {}
+ * }
+ * ]
+ * }
+ * }
+ */
public class NormalizeForStreamPlugin extends Plugin implements IngestPlugin {
+ /**
+ * Returns a map of ingest processors provided by this plugin.
+ * {@code
+ * // Called automatically by Elasticsearch during plugin initialization
+ * Map<String, Processor.Factory> processors = plugin.getProcessors(parameters);
+ * }
+ */
@Override
public Map<String, Processor.Factory> getProcessors(Processor.Parameters parameters) {

+ /**
+ * Returns the processor type identifier.
+ *
+ * Usage Example:
+ * {@code
+ * String type = processor.getType();
+ * // Returns: "normalize_for_stream"
+ * }
+ */
@Override
public String getType() {
return TYPE;
}
+ /**
+ * Executes the normalization processor on an ingest document.
+ *
+ * Usage Example:
+ * {@code
+ * // Executed automatically as part of an ingest pipeline
+ * IngestDocument doc = new IngestDocument(...);
+ * doc.setFieldValue("message", "Log message");
+ * doc.setFieldValue("span.id", "abc123");
+ * IngestDocument result = processor.execute(doc);
+ * // result now has "body.text" instead of "message"
+ * // and "span_id" instead of "span.id"
+ * }
+ */
@Override
public IngestDocument execute(IngestDocument document) {
Map

+ /**
+ * This factory creates processors with default configuration, as the normalize_for_stream
+ * processor does not require any configuration parameters. It operates based on predefined
+ * rules for OpenTelemetry normalization.
+ * {@code
+ * // Configuration in pipeline definition:
+ * Map<String, Object> config = Map.of(); // no configuration options required
+ * }
+ */
@Override
public Processor create(
Map

+ /**
+ * Plugin that registers Kibana's system indices and reporting resources with Elasticsearch.
+ *
+ * Usage Example:
+ * {@code
+ * // Plugin is automatically loaded by Elasticsearch
+ * // System indices are automatically registered:
+ * // - .kibana_* (Kibana saved objects)
+ * // - .reporting-* (Reporting data)
+ * // - .chat-* (Onechat data)
+ * // - .workflows-* (Workflows data)
+ * // - .apm-agent-configuration* (APM agent config)
+ * // - .apm-custom-link* (APM custom links)
+ * }
+ */
public class KibanaPlugin extends Plugin implements SystemIndexPlugin {
private static final List

+ /**
+ * Returns descriptors for the system indices managed on behalf of Kibana.
+ *
+ * Usage Example:
+ * {@code
+ * // Called by Elasticsearch during plugin initialization
+ * Collection<SystemIndexDescriptor> descriptors = plugin.getSystemIndexDescriptors(settings);
+ * }
+ */
@Override
public Collection<SystemIndexDescriptor> getSystemIndexDescriptors(Settings settings) {

+ /**
+ * Returns the name of this plugin's feature for the system index framework.
+ *
+ * Usage Example:
+ * {@code
+ * // Called by Elasticsearch feature registry
+ * String featureName = plugin.getFeatureName();
+ * // Returns: "kibana"
+ * }
+ */
@Override
public String getFeatureName() {
return "kibana";
}
+ /**
+ * Returns a human-readable description of this plugin's feature.
+ * {@code
+ * // Called by Elasticsearch feature registry
+ * String description = plugin.getFeatureDescription();
+ * // Returns: "Manages Kibana configuration and reports"
+ * }
+ */
@Override
public String getFeatureDescription() {
return "Manages Kibana configuration and reports";
diff --git a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/NamedGroupExtractor.java b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/NamedGroupExtractor.java
index eb70d3c688c5c..fabd5d3f27576 100644
--- a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/NamedGroupExtractor.java
+++ b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/NamedGroupExtractor.java
@@ -30,21 +30,65 @@
*/
public interface NamedGroupExtractor {
/**
- * Returns a {@link Map} containing all named capture groups if the
- * string matches or {@code null} if it doesn't.
+ * Extracts named groups from the input string using the configured pattern.
+ * {@code
+ * NamedGroupExtractor extractor = NamedGroupExtractor.dissect("%{name} is %{age}");
+ * Map<String, ?> groups = extractor.extract("Alice is 30");
+ * // groups: {name=Alice, age=30}
+ * }
*/
Map<String, ?> extract(String str);

+ /**
+ * Creates a {@link NamedGroupExtractor} that runs {@link DissectParser} with the default
+ * append separator.
+ *
+ * Usage Example:
+ * {@code
+ * NamedGroupExtractor extractor = NamedGroupExtractor.dissect("%{name} is %{age} years old");
+ * Map<String, ?> groups = extractor.extract("Alice is 30 years old");
+ * }
*/
static NamedGroupExtractor dissect(String pattern) {
return dissect(pattern, null);
}
/**
- * Create a {@link NamedGroupExtractor} that runs {@link DissectParser}.
+ * Creates a {@link NamedGroupExtractor} that runs {@link DissectParser} with a custom append separator.
+ * {@code
+ * NamedGroupExtractor extractor = NamedGroupExtractor.dissect("%{+name} %{+name}", " ");
+ * Map<String, ?> groups = extractor.extract("John Smith");
+ * // groups: {name=John Smith}
+ * }
*/
static NamedGroupExtractor dissect(String pattern, String appendSeparator) {
DissectParser dissect = new DissectParser(pattern, appendSeparator);
@@ -57,12 +101,37 @@ static NamedGroupExtractor dissect(String pattern, String appendSeparator) {
}
/**
- * Builds {@link NamedGroupExtractor}s from grok patterns.
+ * Helper class for building {@link NamedGroupExtractor}s from grok patterns.
+ * {@code
+ * GrokHelper helper = new GrokHelper(
+ * TimeValue.timeValueSeconds(1),
+ * TimeValue.timeValueSeconds(1)
+ * );
+ * helper.finishInitializing(threadPool);
+ * NamedGroupExtractor extractor = helper.grok("%{WORD:name}");
+ * }
+ */
public GrokHelper(TimeValue interval, TimeValue maxExecutionTime) {
this.watchdogSupplier = new LazyInitializable

+ /**
+ * Completes initialization once the node's {@link ThreadPool} is available.
+ *
+ * Usage Example:
+ * {@code
+ * GrokHelper helper = new GrokHelper(interval, maxTime);
+ * // ... pass helper to painless extension ...
+ * helper.finishInitializing(threadPool); // Complete initialization later
+ * }
*/
public void finishInitializing(ThreadPool threadPool) {
threadPoolContainer.set(threadPool);
}
+ /**
+ * Creates a {@link NamedGroupExtractor} from a grok pattern.
+ * {@code
+ * GrokHelper helper = plugin.grokHelper();
+ * NamedGroupExtractor extractor = helper.grok("%{WORD:name} %{INT:age}");
+ * Map<String, ?> groups = extractor.extract("Alice 30");
+ * }
+ */
public NamedGroupExtractor grok(String pattern) {
MatcherWatchdog watchdog = watchdogSupplier.get();
/*
diff --git a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsCommonPlugin.java b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsCommonPlugin.java
index 71188c2793d6c..af60e6e735c24 100644
--- a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsCommonPlugin.java
+++ b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsCommonPlugin.java
@@ -37,6 +37,26 @@ public final class RuntimeFieldsCommonPlugin extends Plugin {
private final NamedGroupExtractor.GrokHelper grokHelper;
+ /**
+ * Constructs a new RuntimeFieldsCommonPlugin with grok watchdog settings.
+ * {@code
+ * // Constructor is called automatically by Elasticsearch plugin system
+ * Settings settings = Settings.builder()
+ * .put("runtime_fields.grok.watchdog.interval", "1s")
+ * .put("runtime_fields.grok.watchdog.max_execution_time", "1s")
+ * .build();
+ * RuntimeFieldsCommonPlugin plugin = new RuntimeFieldsCommonPlugin(settings);
+ * }
+ */
public RuntimeFieldsCommonPlugin(Settings settings) {
grokHelper = new NamedGroupExtractor.GrokHelper(
GROK_WATCHDOG_INTERVAL.get(settings),
@@ -44,17 +64,66 @@ public RuntimeFieldsCommonPlugin(Settings settings) {
);
}
+ /**
+ * Returns the list of settings provided by this plugin.
+ * {@code
+ * List<Setting<?>> settings = plugin.getSettings();
+ * }
+ */
@Override
public List<Setting<?>> getSettings() {

+ /**
+ * Creates plugin components and finishes initializing the grok helper with the thread pool.
+ *
+ * Usage Example:
+ * {@code
+ * // Called automatically by Elasticsearch during plugin initialization
+ * Collection<?> components = plugin.createComponents(pluginServices);
+ * }
+ */
@Override
public Collection<?> createComponents(PluginServices services) {
grokHelper.finishInitializing(services.threadPool());
return List.of();
}
+ /**
+ * Returns the grok helper instance for creating grok-based named group extractors.
+ * {@code
+ * NamedGroupExtractor.GrokHelper helper = plugin.grokHelper();
+ * NamedGroupExtractor extractor = helper.grok("%{WORD:name} %{INT:age}");
+ * Map<String, ?> groups = extractor.extract("Alice 30");
+ * }
+ */
public NamedGroupExtractor.GrokHelper grokHelper() {
return grokHelper;
}
diff --git a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsPainlessExtension.java b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsPainlessExtension.java
index 7317bab1193ee..be52d656f1f74 100644
--- a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsPainlessExtension.java
+++ b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsPainlessExtension.java
@@ -29,11 +29,37 @@
public class RuntimeFieldsPainlessExtension implements PainlessExtension {
private final List<Whitelist> whitelists;

+ /**
+ * Constructs the extension, loading the Painless whitelists used by runtime fields.
+ *
+ * Usage Example:
+ * {@code
+ * // Constructor is called automatically during plugin initialization
+ * RuntimeFieldsCommonPlugin plugin = new RuntimeFieldsCommonPlugin(settings);
+ * RuntimeFieldsPainlessExtension extension = new RuntimeFieldsPainlessExtension(plugin);
+ * }
+ */
public RuntimeFieldsPainlessExtension(RuntimeFieldsCommonPlugin plugin) {
Whitelist commonWhitelist = WhitelistLoader.loadFromResourceFiles(RuntimeFieldsPainlessExtension.class, "common_whitelist.txt");
Whitelist grokWhitelist = new Whitelist(
@@ -55,6 +81,25 @@ public RuntimeFieldsPainlessExtension(RuntimeFieldsCommonPlugin plugin) {
this.whitelists = List.of(commonWhitelist, grokWhitelist);
}
+ /**
+ * Returns the Painless whitelists for runtime field script contexts.
+ * {@code
+ * // Called automatically by Painless during script compilation
+ * Map<ScriptContext<?>, List<Whitelist>> whitelists = extension.getContextWhitelists();
+ * }
+ */
@Override
public Map<ScriptContext<?>, List<Whitelist>> getContextWhitelists() {

+ /**
+ * Plugin that integrates Elasticsearch with systemd on package distributions,
+ * sending readiness and shutdown notifications.
+ *
+ * Usage Example:
+ * {@code
+ * // Plugin is automatically loaded by Elasticsearch in package distributions
+ * // Enable systemd notifications by setting environment variable:
+ * // ES_SD_NOTIFY=true
+ *
+ * // Plugin will automatically:
+ * // - Extend systemd timeout during startup every 15 seconds
+ * // - Notify systemd when Elasticsearch is ready
+ * // - Notify systemd when Elasticsearch is stopping
+ * }
+ */
public class SystemdPlugin extends Plugin implements ClusterPlugin {
private static final Logger logger = LogManager.getLogger(SystemdPlugin.class);
@@ -35,6 +56,21 @@ final boolean isEnabled() {
return enabled;
}
+ /**
+ * Constructs a new SystemdPlugin with default configuration.
+ * {@code
+ * // Constructor is called automatically by Elasticsearch
+ * // To enable systemd notifications, set:
+ * // export ES_SD_NOTIFY=true
+ * }
+ */
@SuppressWarnings("unused")
public SystemdPlugin() {
this(true, Build.current().type(), System.getenv("ES_SD_NOTIFY"));
@@ -71,6 +107,30 @@ Scheduler.Cancellable extender() {
return extender.get();
}
+ /**
+ * Creates and initializes plugin components for systemd integration.
+ * {@code
+ * // Called automatically by Elasticsearch during plugin initialization
+ * Collection<?> components = plugin.createComponents(pluginServices);
+ * // If enabled, schedules periodic timeout extension notifications to systemd
+ * }
+ */
@Override
public Collection<?> createComponents(PluginServices services) {
if (enabled == false) {
@@ -105,6 +165,21 @@ void notifyStopping() {
systemd.notify_stopping();
}
+ /**
+ * Called when the Elasticsearch node has completed startup and is ready to serve requests.
+ * {@code
+ * // Called automatically by Elasticsearch after node startup completes
+ * plugin.onNodeStarted();
+ * // Sends READY=1 to systemd and cancels timeout extension task
+ * }
+ */
@Override
public void onNodeStarted() {
if (enabled == false) {
@@ -117,6 +192,21 @@ public void onNodeStarted() {
assert cancelled;
}
+ /**
+ * Called when the plugin is being closed during Elasticsearch shutdown.
+ * {@code
+ * // Called automatically by Elasticsearch during shutdown
+ * plugin.close();
+ * // Sends STOPPING=1 to systemd
+ * }
+ */
@Override
public void close() {
if (enabled == false) {
diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/AnalysisICUPlugin.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/AnalysisICUPlugin.java
index 240aa27b51bc2..fc22edb893079 100644
--- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/AnalysisICUPlugin.java
+++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/AnalysisICUPlugin.java
@@ -29,12 +29,57 @@
import static java.util.Collections.singletonMap;
+/**
+ * Elasticsearch plugin that provides ICU-based analysis components.
+ * This plugin integrates International Components for Unicode (ICU) functionality for text analysis,
+ * including normalization, folding, tokenization, and collation support.
+ */
public class AnalysisICUPlugin extends Plugin implements AnalysisPlugin, MapperPlugin {
+
+ /**
+ * Provides ICU-based character filters for text normalization.
+ *
+ * @return a map containing the "icu_normalizer" character filter factory
+ *
+ * {@code
+ * // Configure ICU normalizer character filter in index settings
+ * "analysis": {
+ * "char_filter": {
+ * "my_icu_normalizer": {
+ * "type": "icu_normalizer",
+ * "name": "nfc"
+ * }
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<CharFilterFactory>> getCharFilters() {

+ /**
+ * Provides ICU-based token filters for folding, normalization, collation, and transforms.
+ *
+ * @return a map of ICU token filter factories
+ *
+ * {@code
+ * // Configure ICU token filters in index settings
+ * "analysis": {
+ * "filter": {
+ * "my_icu_folding": {
+ * "type": "icu_folding"
+ * },
+ * "my_icu_normalizer": {
+ * "type": "icu_normalizer"
+ * }
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {

+ /**
+ * Provides the ICU analyzer, which combines ICU tokenization, normalization, and folding.
+ *
+ * @return a map containing the "icu_analyzer" provider
+ *
+ * {@code
+ * // Configure ICU analyzer in index settings
+ * "analysis": {
+ * "analyzer": {
+ * "my_icu_analyzer": {
+ * "type": "icu_analyzer"
+ * }
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<AnalyzerProvider<? extends Analyzer>>> getAnalyzers() {

+ /**
+ * Provides the ICU tokenizer for language-aware text segmentation.
+ *
+ * @return a map containing the "icu_tokenizer" factory
+ *
+ * {@code
+ * // Configure ICU tokenizer in index settings
+ * "analysis": {
+ * "tokenizer": {
+ * "my_icu_tokenizer": {
+ * "type": "icu_tokenizer"
+ * }
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() {

+ /**
+ * Provides the "icu_collation_keyword" field type for collation-aware sorting.
+ *
+ * @return a map of mapper type parsers keyed by field type name
+ *
+ * {@code
+ * // Define ICU collation keyword field in mappings
+ * "mappings": {
+ * "properties": {
+ * "name": {
+ * "type": "icu_collation_keyword",
+ * "language": "en"
+ * }
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, Mapper.TypeParser> getMappers() {

+ /**
+ * Constructs a filter that converts each token into its collation key for the given collator.
+ *
+ * {@code
+ * Collator collator = Collator.getInstance(ULocale.US);
+ * collator.setStrength(Collator.PRIMARY);
+ * TokenStream tokenStream = new StandardTokenizer();
+ * TokenStream filtered = new ICUCollationKeyFilter(tokenStream, collator);
+ * }
*/
public ICUCollationKeyFilter(TokenStream input, Collator collator) {
super(input);
@@ -89,6 +100,14 @@ public ICUCollationKeyFilter(TokenStream input, Collator collator) {
}
}
+ /**
+ * Advances the token stream and converts each token to its collation key representation.
+ * The resulting collation keys are encoded using {@link IndexableBinaryStringTools} to
+ * allow storage as index terms.
+ *
+ * @return true if a token is available, false if the stream has ended
+ * @throws IOException if an I/O error occurs while reading from the input stream
+ */
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuAnalyzerProvider.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuAnalyzerProvider.java
index 9fb611345dbea..a49dbc74a2303 100644
--- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuAnalyzerProvider.java
+++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuAnalyzerProvider.java
@@ -23,10 +23,39 @@
import java.io.Reader;
+/**
+ * Provides an ICU-based analyzer that combines tokenization, normalization, and folding.
+ * This analyzer uses the ICU tokenizer for language-aware segmentation and applies
+ * normalization and folding for case-insensitive matching.
+ */
public class IcuAnalyzerProvider extends AbstractIndexAnalyzerProvider<Analyzer> {

+ /**
+ * Constructs the ICU analyzer provider from the index settings, reading the "method"
+ * (nfc, nfkc, or nfkc_cf) and "mode" (compose or decompose) parameters.
+ *
+ * @throws IllegalArgumentException if mode is not "compose" or "decompose"
+ *
+ * {@code
+ * "analyzer": {
+ * "my_icu_analyzer": {
+ * "type": "icu_analyzer",
+ * "method": "nfkc_cf",
+ * "mode": "compose"
+ * }
+ * }
+ * }
+ */
public IcuAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(name);
String method = settings.get("method", "nfkc_cf");
@@ -44,6 +73,13 @@ public IcuAnalyzerProvider(IndexSettings indexSettings, Environment environment,
this.normalizer = IcuNormalizerTokenFilterFactory.wrapWithUnicodeSetFilter(normalizerInstance, settings);
}
+ /**
+ * Creates and returns the configured ICU analyzer instance.
+ * The analyzer performs ICU normalization on the input, then tokenizes using the ICU tokenizer,
+ * and finally applies ICU folding for case-insensitive matching.
+ *
+ * @return a new {@link Analyzer} instance configured with ICU components
+ */
@Override
public Analyzer get() {
return new Analyzer() {
diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuCollationTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuCollationTokenFilterFactory.java
index 5a46217c6d467..f4aa5d610f85c 100644
--- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuCollationTokenFilterFactory.java
+++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuCollationTokenFilterFactory.java
@@ -42,6 +42,49 @@ public class IcuCollationTokenFilterFactory extends AbstractTokenFilterFactory {
private final Collator collator;
+ /**
+ * Constructs an ICU collation token filter factory that creates filters for language-specific sorting.
+ * This factory supports two configuration methods:
+ * locale-based collation (via "language", "country", and "variant" settings) and
+ * rule-based collation (via a "rules" setting containing or referencing custom collation rules).
+ *
+ * @param indexSettings the index settings
+ * @param environment the environment for resolving configuration files
+ * @param name the filter name
+ * @param settings the filter settings containing collation configuration such as:
+ * "language", "country", "variant", "rules", "strength", and "decomposition"
+ *
+ * @throws IllegalArgumentException if rules cannot be resolved or parsed, or if invalid configuration values are provided
+ *
+ * {@code
+ * // Locale-based collation
+ * "filter": {
+ * "french_collation": {
+ * "type": "icu_collation",
+ * "language": "fr",
+ * "country": "FR",
+ * "strength": "primary"
+ * }
+ * }
+ *
+ * // Rule-based collation
+ * "filter": {
+ * "custom_collation": {
+ * "type": "icu_collation",
+ * "rules": "& a < b < c"
+ * }
+ * }
+ * }
+ */
@SuppressWarnings("HiddenField")
public IcuCollationTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(name);
@@ -162,6 +205,13 @@ public IcuCollationTokenFilterFactory(IndexSettings indexSettings, Environment e
this.collator = collator;
}
+ /**
+ * Creates an ICU collation key filter that transforms tokens into collation keys
+ * using the configured collator.
+ *
+ * @param tokenStream the input token stream to be filtered
+ * @return a new {@link ICUCollationKeyFilter} that converts tokens to collation keys
+ */
@Override
public TokenStream create(TokenStream tokenStream) {
return new ICUCollationKeyFilter(tokenStream, collator);
diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuFoldingTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuFoldingTokenFilterFactory.java
index 8932518dc5436..360424c7548f0 100644
--- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuFoldingTokenFilterFactory.java
+++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuFoldingTokenFilterFactory.java
@@ -38,11 +38,48 @@ public class IcuFoldingTokenFilterFactory extends AbstractTokenFilterFactory imp
private final Normalizer2 normalizer;
+ /**
+ * Constructs an ICU folding token filter factory that applies case and diacritic folding.
+ * Folding converts text to a normalized form for case-insensitive and accent-insensitive matching,
+ * following the rules defined in Unicode Technical Report #30.
+ *
+ * @param indexSettings the index settings
+ * @param environment the environment
+ * @param name the filter name
+ * @param settings the filter settings containing:
+ * an optional "unicodeSetFilter" parameter restricting which characters are folded
+ *
+ * {@code
+ * "filter": {
+ * "my_icu_folding": {
+ * "type": "icu_folding"
+ * }
+ * }
+ *
+ * // With character filtering
+ * "filter": {
+ * "swedish_folding": {
+ * "type": "icu_folding",
+ * "unicodeSetFilter": "[^åäöÅÄÖ]"
+ * }
+ * }
+ * }
+ */
public IcuFoldingTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(name);
this.normalizer = IcuNormalizerTokenFilterFactory.wrapWithUnicodeSetFilter(ICU_FOLDING_NORMALIZER, settings);
}
+ /**
+ * Creates an ICU folding token filter that applies Unicode folding to tokens.
+ *
+ * @param tokenStream the input token stream to be filtered
+ * @return a new {@link org.apache.lucene.analysis.icu.ICUNormalizer2Filter} that applies folding
+ */
@Override
public TokenStream create(TokenStream tokenStream) {
return new org.apache.lucene.analysis.icu.ICUNormalizer2Filter(tokenStream, normalizer);
diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuNormalizerCharFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuNormalizerCharFilterFactory.java
index 102f27cd855b2..b0ad88aa91a0e 100644
--- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuNormalizerCharFilterFactory.java
+++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuNormalizerCharFilterFactory.java
@@ -30,6 +30,32 @@ public class IcuNormalizerCharFilterFactory extends AbstractCharFilterFactory im
private final Normalizer2 normalizer;
+ /**
+ * Constructs an ICU normalizer character filter factory for Unicode normalization.
+ * Normalization ensures text is in a consistent form for comparison and indexing.
+ *
+ * @param indexSettings the index settings
+ * @param environment the environment
+ * @param name the filter name
+ * @param settings the filter settings containing:
+ * a "name" parameter selecting the normalization form (nfc, nfkc, or nfkc_cf) and a
+ * "mode" parameter selecting compose or decompose
+ *
+ * @throws IllegalArgumentException if an invalid normalization method or mode is specified
+ *
+ * {@code
+ * "char_filter": {
+ * "my_icu_normalizer": {
+ * "type": "icu_normalizer",
+ * "name": "nfc",
+ * "mode": "compose"
+ * }
+ * }
+ * }
+ */
public IcuNormalizerCharFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(name);
String method = settings.get("name", "nfkc_cf");
@@ -45,6 +71,12 @@ public IcuNormalizerCharFilterFactory(IndexSettings indexSettings, Environment e
this.normalizer = IcuNormalizerTokenFilterFactory.wrapWithUnicodeSetFilter(normalizerInstance, settings);
}
+ /**
+ * Creates an ICU normalizer character filter that normalizes the input text stream.
+ *
+ * @param reader the input character stream to be normalized
+ * @return a new {@link ICUNormalizer2CharFilter} that applies Unicode normalization
+ */
@Override
public Reader create(Reader reader) {
return new ICUNormalizer2CharFilter(reader, normalizer);
diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuNormalizerTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuNormalizerTokenFilterFactory.java
index c9eceef30f62e..60fa58cd4055c 100644
--- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuNormalizerTokenFilterFactory.java
+++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuNormalizerTokenFilterFactory.java
@@ -29,6 +29,28 @@ public class IcuNormalizerTokenFilterFactory extends AbstractTokenFilterFactory
private final Normalizer2 normalizer;
+ /**
+ * Constructs an ICU normalizer token filter factory for Unicode normalization of tokens.
+ *
+ * @param indexSettings the index settings
+ * @param environment the environment
+ * @param name the filter name
+ * @param settings the filter settings containing:
+ * a "name" parameter selecting the normalization form (nfc, nfkc, or nfkc_cf) and an
+ * optional "unicode_set_filter" parameter
+ *
+ * {@code
+ * "filter": {
+ * "my_icu_normalizer": {
+ * "type": "icu_normalizer",
+ * "name": "nfc"
+ * }
+ * }
+ * }
+ */
public IcuNormalizerTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(name);
String method = settings.get("name", "nfkc_cf");
@@ -36,11 +58,25 @@ public IcuNormalizerTokenFilterFactory(IndexSettings indexSettings, Environment
this.normalizer = wrapWithUnicodeSetFilter(normalizerInstance, settings);
}
+ /**
+ * Creates an ICU normalizer filter that applies Unicode normalization to tokens.
+ *
+ * @param tokenStream the input token stream to be normalized
+ * @return a new {@link org.apache.lucene.analysis.icu.ICUNormalizer2Filter} that applies normalization
+ */
@Override
public TokenStream create(TokenStream tokenStream) {
return new org.apache.lucene.analysis.icu.ICUNormalizer2Filter(tokenStream, normalizer);
}
+ /**
+ * Wraps a normalizer with a Unicode set filter if specified in settings.
+ * This allows selective normalization of only certain characters.
+ *
+ * @param normalizer the base normalizer to wrap
+ * @param settings the settings containing an optional unicode_set_filter parameter
+ * @return the original normalizer if no filter is specified, or a {@link FilteredNormalizer2} if a filter is provided
+ */
static Normalizer2 wrapWithUnicodeSetFilter(final Normalizer2 normalizer, final Settings settings) {
String unicodeSetFilter = settings.get("unicode_set_filter");
if (unicodeSetFilter != null) {
diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuTokenizerFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuTokenizerFactory.java
index 4a0ead6a893e8..e21876c073e47 100644
--- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuTokenizerFactory.java
+++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuTokenizerFactory.java
@@ -33,16 +33,55 @@
import java.util.Map;
import java.util.stream.Collectors;
+/**
+ * Factory for creating ICU tokenizers that perform language-aware text segmentation.
+ * Supports custom rule-based break iteration for specific scripts.
+ */
public class IcuTokenizerFactory extends AbstractTokenizerFactory {
private final ICUTokenizerConfig config;
private static final String RULE_FILES = "rule_files";
+ /**
+ * Constructs an ICU tokenizer factory with optional custom segmentation rules.
+ *
+ * @param indexSettings the index settings
+ * @param environment the environment for resolving rule files
+ * @param name the tokenizer name
+ * @param settings the tokenizer settings containing:
+ * - rule_files: an optional list of "script:rulefile" pairs supplying custom RBBI break rules per script
+ *
+ * @throws IllegalArgumentException if rule file format is invalid
+ * @throws ElasticsearchException if rule files cannot be loaded or parsed
+ *
+ * {@code
+ * "tokenizer": {
+ * "my_icu_tokenizer": {
+ * "type": "icu_tokenizer"
+ * }
+ * }
+ *
+ * // With custom rules
+ * "tokenizer": {
+ * "custom_icu": {
+ * "type": "icu_tokenizer",
+ * "rule_files": ["Latin:latin-rules.rbbi", "Hira:hiragana-rules.rbbi"]
+ * }
+ * }
+ * }
+ */
public IcuTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(name);
config = getIcuConfig(environment, settings);
}
+ /**
+ * Creates a new ICU tokenizer instance with the configured segmentation rules.
+ *
+ * @return a new {@link ICUTokenizer} with default or custom configuration
+ */
@Override
public Tokenizer create() {
if (config == null) {
@@ -52,6 +91,15 @@ public Tokenizer create() {
}
}
+ /**
+ * Builds an ICU tokenizer configuration from settings by loading and parsing rule files.
+ *
+ * @param env the environment for resolving file paths
+ * @param settings the tokenizer settings
+ * @return an {@link ICUTokenizerConfig} with custom rules, or null if no custom rules are defined
+ * @throws IllegalArgumentException if rule file format is invalid
+ * @throws ElasticsearchException if rule files cannot be loaded
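+ *
+ * Each rule_files entry pairs a script code with an RBBI rule file, for example
+ * (file name illustrative):
+ * {@code
+ * "rule_files": ["Latin:latin-rules.rbbi"]
+ * }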
+ */
private static ICUTokenizerConfig getIcuConfig(Environment env, Settings settings) {
Map<Integer, String> tailored = new HashMap<>();
+ *
+ * @throws IllegalArgumentException if the transliterator ID is invalid
+ *
+ * {@code
+ * "filter": {
+ * "my_icu_transform": {
+ * "type": "icu_transform",
+ * "id": "Latin-ASCII"
+ * }
+ * }
+ *
+ * // Katakana to Hiragana conversion
+ * "filter": {
+ * "katakana_to_hiragana": {
+ * "type": "icu_transform",
+ * "id": "Katakana-Hiragana"
+ * }
+ * }
+ * }
+ */
public IcuTransformTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(name);
this.id = settings.get("id", "Null");
@@ -33,6 +69,12 @@ public IcuTransformTokenFilterFactory(IndexSettings indexSettings, Environment e
this.transliterator = Transliterator.getInstance(id, dir);
}
+ /**
+ * Creates an ICU transform filter that applies the configured transliteration to tokens.
+ *
+ * @param tokenStream the input token stream to be transformed
+ * @return a new {@link ICUTransformFilter} that applies the transliteration
+ */
@Override
public TokenStream create(TokenStream tokenStream) {
return new ICUTransformFilter(tokenStream, transliterator);
diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/AnalysisKuromojiPlugin.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/AnalysisKuromojiPlugin.java
index 4942e895e0785..0c4d5e11625a2 100644
--- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/AnalysisKuromojiPlugin.java
+++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/AnalysisKuromojiPlugin.java
@@ -23,12 +23,52 @@
import static java.util.Collections.singletonMap;
+/**
+ * Elasticsearch plugin that provides Kuromoji-based analysis components for Japanese text.
+ * Kuromoji is a Japanese morphological analyzer that performs tokenization and various
+ * linguistic transformations specific to the Japanese language.
+ */
public class AnalysisKuromojiPlugin extends Plugin implements AnalysisPlugin {
+
+ /**
+ * Provides Kuromoji character filters for Japanese text preprocessing.
+ *
+ * @return a map containing the "kuromoji_iteration_mark" character filter factory
+ *
+ * {@code
+ * "char_filter": {
+ * "my_iteration_mark_filter": {
+ * "type": "kuromoji_iteration_mark"
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<CharFilterFactory>> getCharFilters() {
+ * {@code
+ * "filter": {
+ * "my_baseform": {
+ * "type": "kuromoji_baseform"
+ * },
+ * "my_pos_filter": {
+ * "type": "kuromoji_part_of_speech",
+ * "stoptags": ["助詞-格助詞-一般"]
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
+ * {@code
+ * "tokenizer": {
+ * "my_kuromoji_tokenizer": {
+ * "type": "kuromoji_tokenizer",
+ * "mode": "search"
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() {
+ * {@code
+ * "analyzer": {
+ * "my_kuromoji": {
+ * "type": "kuromoji",
+ * "mode": "search"
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<AnalyzerProvider<? extends Analyzer>>> getAnalyzers() {
+ *
+ * @throws IllegalArgumentException if configuration is invalid
+ * @throws ElasticsearchException if user dictionary cannot be loaded
+ *
+ * {@code
+ * "analyzer": {
+ * "my_japanese": {
+ * "type": "kuromoji",
+ * "mode": "search",
+ * "stopwords": ["_japanese_", "カスタム"]
+ * }
+ * }
+ * }
+ */
public KuromojiAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(name);
final Set<?> stopWords = Analysis.parseStopWords(env, settings, JapaneseAnalyzer.getDefaultStopSet());
@@ -33,6 +65,11 @@ public KuromojiAnalyzerProvider(IndexSettings indexSettings, Environment env, St
analyzer = new JapaneseAnalyzer(userDictionary, mode, CharArraySet.copy(stopWords), JapaneseAnalyzer.getDefaultStopTags());
}
+ /**
+ * Returns the configured Japanese analyzer instance.
+ *
+ * @return the {@link JapaneseAnalyzer} instance
+ */
@Override
public JapaneseAnalyzer get() {
return this.analyzer;
diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java
index aa978e3e73872..bb4ec3dde17e3 100644
--- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java
+++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java
@@ -26,6 +26,10 @@
import java.util.List;
import java.util.Locale;
+/**
+ * Factory for creating Kuromoji tokenizers that perform Japanese morphological analysis.
+ * Supports multiple segmentation modes, custom user dictionaries, and n-best tokenization.
+ */
public class KuromojiTokenizerFactory extends AbstractTokenizerFactory {
private static final String USER_DICT_PATH_OPTION = "user_dictionary";
@@ -43,6 +47,37 @@ public class KuromojiTokenizerFactory extends AbstractTokenizerFactory {
private boolean discardPunctuation;
private boolean discardCompoundToken;
+ /**
+ * Constructs a Kuromoji tokenizer factory with configurable tokenization behavior.
+ *
+ * @param indexSettings the index settings
+ * @param env the environment for resolving user dictionary files
+ * @param name the tokenizer name
+ * @param settings the tokenizer settings containing:
+ * - mode: the segmentation mode ("normal", "search", or "extended")
+ * - user_dictionary or user_dictionary_rules: a custom dictionary file path or inline rules (mutually exclusive)
+ * - discard_punctuation and discard_compound_token: boolean flags controlling which tokens are emitted
+ * - nbest_cost and nbest_examples: n-best tokenization tuning
+ *
+ * @throws IllegalArgumentException if both user_dictionary and user_dictionary_rules are specified
+ * @throws ElasticsearchException if the user dictionary cannot be loaded
+ *
+ * {@code
+ * "tokenizer": {
+ * "my_kuromoji": {
+ * "type": "kuromoji_tokenizer",
+ * "mode": "search",
+ * "discard_punctuation": true,
+ * "user_dictionary_rules": ["東京スカイツリー,東京 スカイツリー,トウキョウ スカイツリー,カスタム名詞"]
+ * }
+ * }
+ * }
+ */
public KuromojiTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(name);
mode = getMode(settings);
@@ -53,6 +88,16 @@ public KuromojiTokenizerFactory(IndexSettings indexSettings, Environment env, St
discardCompoundToken = settings.getAsBoolean(DISCARD_COMPOUND_TOKEN, false);
}
+ /**
+ * Loads a user dictionary from settings, either from a file path or inline rules.
+ * User dictionaries allow customization of tokenization by defining custom entries.
+ *
+ * @param env the environment for resolving dictionary file paths
+ * @param settings the settings containing user dictionary configuration
+ * @return a {@link UserDictionary} if dictionary is configured, null otherwise
+ * @throws IllegalArgumentException if both file path and inline rules are specified
+ * @throws ElasticsearchException if the dictionary file cannot be loaded or parsed
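+ *
+ * A minimal sketch of the two mutually exclusive options; values are illustrative:
+ * {@code
+ * "user_dictionary": "userdict_ja.txt"
+ * // or, inline:
+ * "user_dictionary_rules": ["東京スカイツリー,東京 スカイツリー,トウキョウ スカイツリー,カスタム名詞"]
+ * }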
+ */
public static UserDictionary getUserDictionary(Environment env, Settings settings) {
if (settings.get(USER_DICT_PATH_OPTION) != null && settings.get(USER_DICT_RULES_OPTION) != null) {
throw new IllegalArgumentException(
@@ -83,11 +128,30 @@ public static UserDictionary getUserDictionary(Environment env, Settings setting
}
}
+ /**
+ * Extracts the tokenization mode from settings.
+ * Modes control how text is segmented:
+ * - NORMAL: regular segmentation with no decomposition of compound words
+ * - SEARCH: additionally decomposes compound words to improve search recall
+ * - EXTENDED: like search, but also emits unigrams for unknown words
+ *
+ * @param settings the settings containing the mode parameter
+ * @return the {@link JapaneseTokenizer.Mode} specified in settings, or the default mode
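+ *
+ * A minimal usage sketch:
+ * {@code
+ * Settings settings = Settings.builder().put("mode", "search").build();
+ * JapaneseTokenizer.Mode mode = KuromojiTokenizerFactory.getMode(settings);
+ * }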
+ */
public static JapaneseTokenizer.Mode getMode(Settings settings) {
String modeSetting = settings.get("mode", JapaneseTokenizer.DEFAULT_MODE.name());
return JapaneseTokenizer.Mode.valueOf(modeSetting.toUpperCase(Locale.ENGLISH));
}
+ /**
+ * Creates a new Kuromoji tokenizer with the configured settings.
+ * The tokenizer applies user dictionary if configured and sets n-best cost based on
+ * examples or explicit configuration.
+ *
+ * @return a new {@link JapaneseTokenizer} instance with the configured parameters
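+ *
+ * A minimal n-best settings sketch; the cost value is illustrative:
+ * {@code
+ * "nbest_cost": 1000
+ * }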
+ */
@Override
public Tokenizer create() {
JapaneseTokenizer t = new JapaneseTokenizer(userDictionary, discardPunctuation, discardCompoundToken, mode);
diff --git a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/AnalysisNoriPlugin.java b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/AnalysisNoriPlugin.java
index 3e418852f9ecd..f640fc7a1a578 100644
--- a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/AnalysisNoriPlugin.java
+++ b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/AnalysisNoriPlugin.java
@@ -22,7 +22,29 @@
import static java.util.Collections.singletonMap;
+/**
+ * Elasticsearch plugin that provides Nori-based analysis components for Korean text.
+ * Nori is a Korean morphological analyzer that performs tokenization and linguistic
+ * transformations specific to the Korean language.
+ */
public class AnalysisNoriPlugin extends Plugin implements AnalysisPlugin {
+
+ /**
+ * Provides Nori token filters for Korean text analysis.
+ * Includes filters for part-of-speech filtering, reading form extraction, and number handling.
+ *
+ * @return a map of token filter names to their corresponding factory providers
+ *
+ * {@code
+ * "filter": {
+ * "my_pos_filter": {
+ * "type": "nori_part_of_speech",
+ * "stoptags": ["E", "IC", "J"]
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
+ * {@code
+ * "tokenizer": {
+ * "my_nori_tokenizer": {
+ * "type": "nori_tokenizer",
+ * "decompound_mode": "mixed"
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() {
+ * {@code
+ * "analyzer": {
+ * "my_nori": {
+ * "type": "nori"
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<AnalyzerProvider<? extends Analyzer>>> getAnalyzers() {
+ * {@code
+ * "filter": {
+ * "my_phonetic": {
+ * "type": "phonetic",
+ * "encoder": "metaphone",
+ * "replace": false
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
+ * {@code
+ * "filter": {
+ * "my_smartcn_stop": {
+ * "type": "smartcn_stop"
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
+ * {@code
+ * "tokenizer": {
+ * "my_smartcn": {
+ * "type": "smartcn_tokenizer"
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() {
+ * {@code
+ * "analyzer": {
+ * "my_smartcn": {
+ * "type": "smartcn"
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<AnalyzerProvider<? extends Analyzer>>> getAnalyzers() {
+ * {@code
+ * "filter": {
+ * "my_polish_stem": {
+ * "type": "polish_stem"
+ * },
+ * "my_polish_stop": {
+ * "type": "polish_stop"
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
+ * {@code
+ * "analyzer": {
+ * "my_polish": {
+ * "type": "polish"
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<AnalyzerProvider<? extends Analyzer>>> getAnalyzers() {
+ * {@code
+ * "analyzer": {
+ * "my_ukrainian": {
+ * "type": "ukrainian"
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, AnalysisProvider<AnalyzerProvider<? extends Analyzer>>> getAnalyzers() {
+ * {@code
+ * discovery.seed_providers: ec2
+ * discovery.ec2.tag.elasticsearch: production
+ * discovery.ec2.availability_zones: us-east-1a,us-east-1b
+ * }
+ */
@Override
public Map<String, Supplier<SeedHostsProvider>> getSeedHostProviders(TransportService transportService, NetworkService networkService) {
+ * {@code
+ * PUT my-index
+ * {
+ * "mappings": {
+ * "_size": {
+ * "enabled": true
+ * }
+ * }
+ * }
+ *
+ * // Query documents by size
+ * GET my-index/_search
+ * {
+ * "query": {
+ * "range": {
+ * "_size": {
+ * "gte": 1000
+ * }
+ * }
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, MetadataFieldMapper.TypeParser> getMetadataMappers() {
+ * - smb_mmap_fs: a memory-mapped SMB directory implementation
+ * - smb_nio_fs: a NIO-based SMB directory implementation
+ *
+ * @return a map of store type names to their corresponding directory factories
+ *
+ * {@code
+ * PUT /my-index
+ * {
+ * "settings": {
+ * "index.store.type": "smb_mmap_fs"
+ * }
+ * }
+ * }
+ */
@Override
public Map<String, IndexStorePlugin.DirectoryFactory> getDirectoryFactories() {
+ * {@code
+ * Build build = Build.current();
+ * logger.info("Running Elasticsearch version: {}", build.version());
+ * logger.info("Build type: {}", build.type().displayName());
+ * }
+ *
+ * @return the current build information
+ */
public static Build current() {
return CurrentHolder.CURRENT;
}
@@ -203,6 +228,16 @@ static URL getElasticsearchCodeSourceLocation() {
return codeSource == null ? null : codeSource.getLocation();
}
+ /**
+ * Reads build information from a stream input.
+ * {@code
+ * long startTime = System.currentTimeMillis();
+ * long timeout = 5000; // 5 seconds
+ * while (!operationComplete()) {
+ * if (System.currentTimeMillis() - startTime > timeout) {
+ * throw new ElasticsearchTimeoutException("Operation timed out after {} ms", timeout);
+ * }
+ * // continue operation
+ * }
+ * }
*/
public class ElasticsearchTimeoutException extends ElasticsearchException {
+ /**
+ * Constructs a timeout exception from a stream input.
+ *
+ * @param in the stream input to read from
+ * @throws IOException if an I/O error occurs while reading from the stream
+ */
public ElasticsearchTimeoutException(StreamInput in) throws IOException {
super(in);
}
+ /**
+ * Constructs a timeout exception with a cause.
+ *
+ * @param cause the underlying cause of this exception
+ */
public ElasticsearchTimeoutException(Throwable cause) {
super(cause);
}
+ /**
+ * Constructs a timeout exception with a formatted message.
+ *
+ * @param message the detail message, can include {} placeholders
+ * @param args the arguments to format into the message
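+ *
+ * A minimal usage sketch; the elapsed value is illustrative:
+ * {@code
+ * throw new ElasticsearchTimeoutException("operation timed out after [{}ms]", elapsedMillis);
+ * }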
+ */
public ElasticsearchTimeoutException(String message, Object... args) {
super(message, args);
}
+ /**
+ * Constructs a timeout exception with a formatted message and cause.
+ *
+ * @param message the detail message, can include {} placeholders
+ * @param cause the underlying cause of this exception
+ * @param args the arguments to format into the message
+ */
public ElasticsearchTimeoutException(String message, Throwable cause, Object... args) {
super(message, cause, args);
}
+ /**
+ * Returns the REST status code for this exception.
+ *
+ * @return {@link RestStatus#TOO_MANY_REQUESTS} indicating the operation timed out
+ */
@Override
public RestStatus status() {
// closest thing to "your request took longer than you asked for"
return RestStatus.TOO_MANY_REQUESTS;
}
+ /**
+ * Indicates whether this exception represents a timeout.
+ *
+ * @return always true for timeout exceptions
+ */
@Override
public boolean isTimeout() {
return true;
diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java
index e2e61d78024f2..abf7805ad85a1 100644
--- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java
+++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java
@@ -46,6 +46,24 @@ public final class ExceptionsHelper {
private static final Logger logger = LogManager.getLogger(ExceptionsHelper.class);
+ /**
+ * Converts a checked exception to a runtime exception.
+ * {@code
+ * try {
+ * // some operation that throws IOException
+ * } catch (IOException e) {
+ * throw ExceptionsHelper.convertToRuntime(e);
+ * }
+ * }
+ *
+ * @param e the exception to convert
+ * @return a runtime exception, either the original if already runtime, or wrapped in ElasticsearchException
+ */
public static RuntimeException convertToRuntime(Exception e) {
if (e instanceof RuntimeException) {
return (RuntimeException) e;
@@ -53,6 +71,24 @@ public static RuntimeException convertToRuntime(Exception e) {
return new ElasticsearchException(e);
}
+ /**
+ * Converts a checked exception to an {@link ElasticsearchException}.
+ * {@code
+ * try {
+ * // some operation that throws Exception
+ * } catch (Exception e) {
+ * throw ExceptionsHelper.convertToElastic(e);
+ * }
+ * }
+ *
+ * @param e the exception to convert
+ * @return an ElasticsearchException, either the original if already ElasticsearchException, or wrapped
+ */
public static ElasticsearchException convertToElastic(Exception e) {
if (e instanceof ElasticsearchException) {
return (ElasticsearchException) e;
@@ -60,6 +96,28 @@ public static ElasticsearchException convertToElastic(Exception e) {
return new ElasticsearchException(e);
}
+ /**
+ * Determines the appropriate REST status for a throwable.
+ * {@code
+ * try {
+ * // some operation
+ * } catch (Exception e) {
+ * RestStatus status = ExceptionsHelper.status(e);
+ * // use status for response
+ * }
+ * }
+ *
+ * @param t the throwable to examine, may be null
+ * @return the appropriate REST status code
+ */
public static RestStatus status(Throwable t) {
if (t != null) {
if (t instanceof ElasticsearchException) {
@@ -75,6 +133,21 @@ public static RestStatus status(Throwable t) {
return RestStatus.INTERNAL_SERVER_ERROR;
}
+ /**
+ * Unwraps the cause of a throwable until a non-{@link ElasticsearchWrapperException} is found.
+ * {@code
+ * Throwable actual = ExceptionsHelper.unwrapCause(wrappedException);
+ * logger.error("Actual exception: {}", actual.getMessage());
+ * }
+ *
+ * @param t the throwable to unwrap
+ * @return the first non-wrapper exception found, or the original throwable if not a wrapper
+ */
public static Throwable unwrapCause(Throwable t) {
int counter = 0;
Throwable result = t;
@@ -95,6 +168,25 @@ public static Throwable unwrapCause(Throwable t) {
return result;
}
+ /**
+ * Converts a throwable's stack trace to a string representation.
+ * {@code
+ * try {
+ * // some operation
+ * } catch (Exception e) {
+ * String trace = ExceptionsHelper.stackTrace(e);
+ * logger.error("Full stack trace: {}", trace);
+ * }
+ * }
+ *
+ * @param e the throwable whose stack trace to capture
+ * @return the complete stack trace as a string
+ */
public static String stackTrace(Throwable e) {
StringWriter stackTraceStringWriter = new StringWriter();
PrintWriter printWriter = new PrintWriter(stackTraceStringWriter);
@@ -289,14 +381,48 @@ static void compressPackages(StringBuilder s, String className) {
s.append(className.substring(finalDot + 1));
}
+ /**
+ * Formats a stack trace array into a human-readable string.
+ * {@code
+ * StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
+ * String formatted = ExceptionsHelper.formatStackTrace(stackTrace);
+ * logger.debug("Stack trace: {}", formatted);
+ * }
+ *
+ * @param stackTrace the stack trace elements to format
+ * @return a formatted string representation of the stack trace
+ */
public static String formatStackTrace(final StackTraceElement[] stackTrace) {
return Arrays.stream(stackTrace).skip(1).map(e -> "\tat " + e).collect(Collectors.joining("\n"));
}
/**
* Rethrows the first exception in the list and adds all remaining to the suppressed list.
- * If the given list is empty no exception is thrown
+ * {@code
+ * List
*
+ * @param <T> the type of the exceptions
+ * {@code
+ * List
+ *
+ * @param <T> the type of the exceptions
+ * {@code
+ * IOException main = null;
+ * for (Closeable resource : resources) {
+ * try {
+ * resource.close();
+ * } catch (IOException e) {
+ * main = ExceptionsHelper.useOrSuppress(main, e);
+ * }
+ * }
+ * if (main != null) throw main;
+ * }
+ *
+ * @param <T> the type of the throwables
+ *
+ * Throws the specified exception if not null, otherwise returns true.
+ * {@code
+ * Throwable error = null;
+ * // ... some operations that might set error
+ * if (ExceptionsHelper.reThrowIfNotNull(error)) {
+ * // continue processing, no error occurred
+ * }
+ * }
+ *
+ * @param e the exception to throw, may be null
+ * @return true if the exception is null
+ * @throws RuntimeException if e is not null
*/
public static boolean reThrowIfNotNull(@Nullable Throwable e) {
if (e != null) {
diff --git a/server/src/main/java/org/elasticsearch/ResourceNotFoundException.java b/server/src/main/java/org/elasticsearch/ResourceNotFoundException.java
index c461d84aa6740..5448626ddebd3 100644
--- a/server/src/main/java/org/elasticsearch/ResourceNotFoundException.java
+++ b/server/src/main/java/org/elasticsearch/ResourceNotFoundException.java
@@ -14,22 +14,61 @@
import java.io.IOException;
/**
- * Generic ResourceNotFoundException corresponding to the {@link RestStatus#NOT_FOUND} status code
+ * Indicates a requested resource was not found in Elasticsearch.
+ * {@code
+ * public Document getDocument(String id) {
+ * Document doc = repository.findById(id);
+ * if (doc == null) {
+ * throw new ResourceNotFoundException("Document with id [{}] not found", id);
+ * }
+ * return doc;
+ * }
+ * }
*/
public class ResourceNotFoundException extends ElasticsearchException {
+ /**
+ * Constructs a resource not found exception with a formatted message.
+ *
+ * @param msg the detail message, can include {} placeholders
+ * @param args the arguments to format into the message
+ */
public ResourceNotFoundException(String msg, Object... args) {
super(msg, args);
}
+ /**
+ * Constructs a resource not found exception with a formatted message and cause.
+ *
+ * @param msg the detail message, can include {} placeholders
+ * @param cause the underlying cause of this exception
+ * @param args the arguments to format into the message
+ */
public ResourceNotFoundException(String msg, Throwable cause, Object... args) {
super(msg, cause, args);
}
+ /**
+ * Constructs a resource not found exception from a stream input.
+ *
+ * @param in the stream input to read from
+ * @throws IOException if an I/O error occurs while reading from the stream
+ */
public ResourceNotFoundException(StreamInput in) throws IOException {
super(in);
}
+ /**
+ * Returns the REST status code for this exception.
+ *
+ * @return {@link RestStatus#NOT_FOUND} indicating the resource was not found
+ */
@Override
public final RestStatus status() {
return RestStatus.NOT_FOUND;
diff --git a/server/src/main/java/org/elasticsearch/action/ActionFuture.java b/server/src/main/java/org/elasticsearch/action/ActionFuture.java
index 806c97351f6b9..0fa5be54b6ed1 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionFuture.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionFuture.java
@@ -15,30 +15,77 @@
import java.util.concurrent.TimeUnit;
/**
- * An extension to {@link Future} allowing for simplified "get" operations.
+ * An extension to {@link Future} that provides simplified "get" operations for action execution results.
+ * This interface offers alternatives to the standard {@link Future#get()} methods by handling interruption
+ * and execution exceptions differently, making them more suitable for Elasticsearch's action framework.
*
+ * {@code
+ * // Simple non-blocking get
+ * ActionFuture<SearchResponse> future = client.search(searchRequest);
+ * SearchResponse response = future.actionGet();
+ * }
+ *
+ * @param <T> the type of the action result
+ *
+ * Gets the result of the action, blocking if necessary until it completes, with simplified exception handling:
+ * - {@link InterruptedException} is caught and an {@link IllegalStateException} is thrown instead
+ * - {@link java.util.concurrent.ExecutionException} is caught and its actual cause is thrown instead
+ * @return the computed result
+ * @throws IllegalStateException if the thread is interrupted while waiting
+ * @throws RuntimeException if the computation threw an exception (the actual cause is thrown)
*/
T actionGet();
/**
- * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing
- * an {@link IllegalStateException} instead. Also catches
- * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead.
+ * Gets the result of the action, waiting up to the specified time if necessary for the computation to complete.
+ * This method is similar to {@link Future#get(long, TimeUnit)} but with simplified exception handling:
+ * - {@link InterruptedException} is caught and an {@link IllegalStateException} is thrown instead
+ * - {@link java.util.concurrent.ExecutionException} is caught and its actual cause is thrown instead
+ *
+ * @param timeout the maximum time to wait
+ * @param unit the time unit of the timeout argument
+ * @return the computed result
+ * @throws IllegalStateException if the thread is interrupted while waiting
+ * @throws java.util.concurrent.TimeoutException if the wait timed out
+ * @throws RuntimeException if the computation threw an exception (the actual cause is thrown)
*/
T actionGet(long timeout, TimeUnit unit);
/**
- * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing
- * an {@link IllegalStateException} instead. Also catches
- * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead.
+ * Gets the result of the action, waiting up to the specified time if necessary for the computation to complete.
+ * This method is similar to {@link Future#get(long, TimeUnit)} but with simplified exception handling
+ * and accepts a {@link TimeValue} for timeout specification:
+ * - {@link InterruptedException} is caught and an {@link IllegalStateException} is thrown instead
+ * - {@link java.util.concurrent.ExecutionException} is caught and its actual cause is thrown instead
+ *
+ * @param timeout the maximum time to wait as a {@link TimeValue}
+ * @return the computed result
+ * @throws IllegalStateException if the thread is interrupted while waiting
+ * @throws java.util.concurrent.TimeoutException if the wait timed out
+ * @throws RuntimeException if the computation threw an exception (the actual cause is thrown)
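+ *
+ * A minimal usage sketch; the future is assumed to come from an earlier action call:
+ * {@code
+ * SearchResponse response = future.actionGet(TimeValue.timeValueSeconds(30));
+ * }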
*/
T actionGet(TimeValue timeout);
}
diff --git a/server/src/main/java/org/elasticsearch/action/ActionRequest.java b/server/src/main/java/org/elasticsearch/action/ActionRequest.java
index 1fb09062c39d2..5601a85656641 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionRequest.java
@@ -15,25 +15,92 @@
import java.io.IOException;
+/**
+ * Base class for all action requests in Elasticsearch. An action request represents a request
+ * to perform a specific operation, such as indexing a document, searching, or managing cluster state.
+ *
+ * {@code
+ * // Example of a concrete action request
+ * public class MyCustomRequest extends ActionRequest {
+ * private String param;
+ *
+ * public MyCustomRequest(String param) {
+ * this.param = param;
+ * }
+ *
+ * @Override
+ * public ActionRequestValidationException validate() {
+ * ActionRequestValidationException validationException = null;
+ * if (param == null || param.isEmpty()) {
+ * validationException = ValidateActions.addValidationError(
+ * "param must not be null or empty",
+ * validationException
+ * );
+ * }
+ * return validationException;
+ * }
+ * }
+ * }
+ */
public abstract class ActionRequest extends AbstractTransportRequest {
+ /**
+ * Constructs a new action request with default settings.
+ */
public ActionRequest() {
super();
}
+ /**
+ * Constructs a new action request by reading its state from the provided stream input.
+ * This constructor is used for deserialization when receiving requests over the network.
+ *
+ * @param in the stream input to read the request state from
+ * @throws IOException if an I/O error occurs while reading from the stream
+ */
public ActionRequest(StreamInput in) throws IOException {
super(in);
}
+ /**
+ * Validates this action request and returns any validation errors. This method is called
+ * before the request is executed to ensure that all required parameters are present and valid.
+ *
+ * {@code
+ * // In an ActionRequest validate() method
+ * @Override
+ * public ActionRequestValidationException validate() {
+ * ActionRequestValidationException validationException = null;
+ *
+ * if (index == null) {
+ * validationException = ValidateActions.addValidationError(
+ * "index is missing",
+ * validationException
+ * );
+ * }
+ *
+ * if (size < 0) {
+ * validationException = ValidateActions.addValidationError(
+ * "size must be positive",
+ * validationException
+ * );
+ * }
+ *
+ * return validationException;
+ * }
+ * }
+ *
+ * @see ActionRequest#validate()
+ * @see org.elasticsearch.action.ValidateActions#addValidationError(String, ActionRequestValidationException)
+ */
public class ActionRequestValidationException extends ValidationException {}
diff --git a/server/src/main/java/org/elasticsearch/action/ActionResponse.java b/server/src/main/java/org/elasticsearch/action/ActionResponse.java
index 000756bc7465a..3e8c4dc275e21 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionResponse.java
@@ -16,21 +16,79 @@
import org.elasticsearch.xcontent.XContent;
/**
- * Base class for responses to action requests.
+ * Base class for all action responses in Elasticsearch. An action response represents the result
+ * of executing an action request, containing the data or status information returned by the operation.
+ *
+ * {@code
+ * // Example of a concrete action response
+ * public class MyCustomResponse extends ActionResponse implements ToXContent {
+ * private final String result;
+ *
+ * public MyCustomResponse(String result) {
+ * this.result = result;
+ * }
+ *
+ * public MyCustomResponse(StreamInput in) throws IOException {
+ * super();
+ * this.result = in.readString();
+ * }
+ *
+ * public String getResult() {
+ * return result;
+ * }
+ *
+ * @Override
+ * public void writeTo(StreamOutput out) throws IOException {
+ * out.writeString(result);
+ * }
+ *
+ * @Override
+ * public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ * builder.startObject();
+ * builder.field("result", result);
+ * builder.endObject();
+ * return builder;
+ * }
+ * }
+ * }
*/
public abstract class ActionResponse extends TransportResponse {
+ /**
+ * Constructs a new action response.
+ */
public ActionResponse() {}
/**
- * A response with no payload. This is deliberately not an implementation of {@link ToXContent} or similar because an empty response
- * has no valid {@link XContent} representation. Use {@link EmptyResponseListener} to convert this to a valid (plain-text) REST
- * response instead.
+ * A response with no payload. This is deliberately not an implementation of {@link ToXContent} or similar
+ * because an empty response has no valid {@link XContent} representation.
+ *
+ * {@code
+ * // Return an empty response from an action
+ * listener.onResponse(ActionResponse.Empty.INSTANCE);
+ *
+ * // Check if a response is empty
+ * if (response == ActionResponse.Empty.INSTANCE) {
+ * // Handle empty response
+ * }
+ * }
*/
public static final class Empty extends ActionResponse {
private Empty() { /* singleton */ }
+ /**
+ * The singleton instance of the empty response.
+ */
public static final ActionResponse.Empty INSTANCE = new ActionResponse.Empty();
@Override
@@ -38,6 +96,11 @@ public String toString() {
return "ActionResponse.Empty{}";
}
+ /**
+ * Writes this empty response to the stream. Since there is no payload, this method does nothing.
+ *
+ * @param out the stream output (unused for empty response)
+ */
@Override
public void writeTo(StreamOutput out) {}
}
diff --git a/server/src/main/java/org/elasticsearch/action/ActionRunnable.java b/server/src/main/java/org/elasticsearch/action/ActionRunnable.java
index 492102c153e45..be9283e7d0940 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionRunnable.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionRunnable.java
@@ -17,18 +17,54 @@
import org.elasticsearch.core.Releasable;
/**
- * Base class for {@link Runnable}s that need to call {@link ActionListener#onFailure(Exception)} in case an uncaught
- * exception or error is thrown while the actual action is run.
+ * Base class for {@link Runnable}s that need to call {@link ActionListener#onFailure(Exception)} in case
+ * an uncaught exception or error is thrown while the actual action is run.
+ *
+ * {@code
+ * // Execute a simple runnable that completes the listener with null
+ * ActionListener<Void> listener = ...;
+ * executor.execute(ActionRunnable.run(listener, () -> doWork()));
+ * }
*/
public abstract class ActionRunnable<Response> extends AbstractRunnable {
+ * {@code
+ * // Define an action type as a public constant
+ * public class MyCustomAction extends ActionType<MyCustomResponse> {
+ *
+ * @param <Response> the type of response this action produces
+ *
+ * Constructs a new action type with the given name.
+ * @param name The name of the action, which must be unique across all actions in the cluster.
*/
public ActionType(String name) {
this.name = name;
}
/**
- * The name of the action. Must be unique across actions.
+ * Returns the unique name of this action.
+ *
+ * @return the action name, which is unique across all actions
*/
public String name() {
return this.name;
}
+ /**
+ * Compares this action type with another object for equality. Two action types are equal
+ * if they have the same name.
+ *
+ * @param o the object to compare with
+ * @return {@code true} if the objects are equal, {@code false} otherwise
+ */
@Override
public boolean equals(Object o) {
return o instanceof ActionType<?> actionType && name.equals(actionType.name);
}
+ /**
+ * Returns a hash code value for this action type based on its name.
+ *
+ * @return the hash code value
+ */
@Override
public int hashCode() {
return name.hashCode();
}
+ /**
+ * Returns the string representation of this action type, which is its name.
+ *
+ * @return the action name
+ */
@Override
public String toString() {
return name;
diff --git a/server/src/main/java/org/elasticsearch/action/DelegatingActionListener.java b/server/src/main/java/org/elasticsearch/action/DelegatingActionListener.java
index 688bfd2617514..8ec1dcb845b8e 100644
--- a/server/src/main/java/org/elasticsearch/action/DelegatingActionListener.java
+++ b/server/src/main/java/org/elasticsearch/action/DelegatingActionListener.java
@@ -12,26 +12,84 @@
import static org.elasticsearch.action.ActionListenerImplementations.safeOnFailure;
/**
- * A wrapper around an {@link ActionListener} {@code L} that by default delegates failures to {@code L}'s {@link ActionListener#onFailure}
- * method. The wrapper also provides a {@link #toString()} implementation that describes this class and the delegate.
- * {@code
+ * // Create a custom delegating listener that transforms responses
+ * public class TransformingListener<T, R> extends DelegatingActionListener<T, R> {
+ *
+ * @param <Response> the type of response this listener accepts
+ * {@code
+ * // Throwing a FailedNodeException
+ * try {
+ * performNodeOperation();
+ * } catch (Exception e) {
+ * throw new FailedNodeException(
+ * nodeId,
+ * "Operation failed on node: " + nodeId,
+ * e
+ * );
+ * }
+ *
+ * // Handling FailedNodeExceptions
+ * try {
+ * executeMultiNodeOperation();
+ * } catch (FailedNodeException e) {
+ * logger.error("Operation failed on node {}: {}",
+ * e.nodeId(), e.getMessage());
+ * }
+ * }
+ */
public class FailedNodeException extends ElasticsearchException {
private final String nodeId;
+ /**
+ * Constructs a new failed node exception with the specified node ID, message, and cause.
+ *
+ * @param nodeId the ID of the node where the failure occurred
+ * @param msg the detail message explaining the failure
+ * @param cause the underlying cause of the failure, or {@code null} if none
+ */
public FailedNodeException(String nodeId, String msg, Throwable cause) {
super(msg, cause);
this.nodeId = nodeId;
}
+ /**
+ * Returns the ID of the node where the failure occurred.
+ *
+ * @return the node ID
+ */
public String nodeId() {
return this.nodeId;
}
+ /**
+ * Constructs a new failed node exception by reading from a stream input.
+ * This constructor is used for deserialization.
+ *
+ * @param in the stream input to read from
+ * @throws IOException if an I/O error occurs while reading from the stream
+ */
public FailedNodeException(StreamInput in) throws IOException {
super(in);
nodeId = in.readOptionalString();
}
+ /**
+ * Writes this exception to the specified stream output for serialization.
+ *
+ * @param out the stream output to write to
+ * @param nestedExceptionsWriter the writer for nested exceptions
+ * @throws IOException if an I/O error occurs while writing to the stream
+ */
@Override
protected void writeTo(StreamOutput out, Writer<Throwable> nestedExceptionsWriter) throws IOException {
+ * {@code
+ * // Implement IndicesRequest in a custom action request
+ * public class MyIndexRequest extends ActionRequest implements IndicesRequest {
+ * private String[] indices;
+ * private IndicesOptions options;
+ *
+ * @Override
+ * public String[] indices() {
+ * return indices;
+ * }
+ *
+ * @Override
+ * public IndicesOptions indicesOptions() {
+ * return options;
+ * }
+ * }
+ *
+ * // Use IndicesRequest methods
+ * IndicesRequest request = new MyIndexRequest();
+ * String[] targetIndices = request.indices();
+ * IndicesOptions options = request.indicesOptions();
+ * }
*/
public interface IndicesRequest {
/**
- * Returns the array of indices that the action relates to
+ * Returns the array of indices that this action relates to.
+ * The returned names may include wildcard expressions and aliases, which are resolved
+ * and validated according to {@link #indicesOptions()}.
+ *
+ * Returns the options that control how index names are resolved and how missing or
+ * closed indices are handled.
+ * @return the indices options for resolving and validating index names
*/
IndicesOptions indicesOptions();
/**
- * Determines whether the request should be applied to data streams. When {@code false}, none of the names or
- * wildcard expressions in {@link #indices} should be applied to or expanded to any data streams. All layers
- * involved in the request's fulfillment including security, name resolution, etc., should respect this flag.
+ * Determines whether the request should be applied to data streams. When {@code false}, none of
+ * the names or wildcard expressions in {@link #indices()} should be applied to or expanded to
+ * any data streams.
+ *
+ * @return {@code true} if data streams should be included, {@code false} otherwise
+ * {@code
+ * // Implement Replaceable in a request
+ * public class MyRequest extends ActionRequest implements IndicesRequest.Replaceable {
+ * private String[] indices;
+ *
+ * @Override
+ * public IndicesRequest indices(String... indices) {
+ * this.indices = indices;
+ * return this;
+ * }
+ *
+ * @Override
+ * public String[] indices() {
+ * return indices;
+ * }
+ * }
+ *
+ * // Replace indices during resolution
+ * IndicesRequest.Replaceable request = new MyRequest();
+ * request.indices("index-*"); // Original pattern
+ * // After resolution:
+ * request.indices("index-1", "index-2", "index-3"); // Concrete indices
+ * }
+ */
interface Replaceable extends IndicesRequest {
/**
- * Sets the indices that the action relates to.
+ * Sets the indices that this action relates to. This method is typically called during
+ * index resolution to replace wildcard patterns or aliases with concrete index names.
+ *
+ * @param indices the array of index names to set
+ * @return this request for method chaining
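+ *
+ * A minimal usage sketch; index names are illustrative:
+ * {@code
+ * request.indices("logs-2024-01", "logs-2024-02");
+ * }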
*/
IndicesRequest indices(String... indices);
/**
- * Record the results of index resolution. See {@link ResolvedIndexExpressions} for details.
- * Note: this method does not replace {@link #indices(String...)}. {@link #indices(String...)} must still be called to update
- * the actual list of indices the request relates to.
- * Note: the field is transient and not serialized.
+ * Records the results of index resolution for later inspection or auditing purposes.
+ * See {@link ResolvedIndexExpressions} for details on what information is recorded.
+ * Note that this method does not replace {@link #indices(String...)}, which must still be
+ * called to update the actual indices the request relates to; the recorded value is
+ * transient and not serialized.
+ *
+ * {@code
+ * // Throwing when a node is not found
+ * if (clusterState.nodes().get(nodeId) == null) {
+ * throw new NoSuchNodeException(nodeId);
+ * }
+ *
+ * // Handling NoSuchNodeException
+ * try {
+ * performOperationOnNode(nodeId);
+ * } catch (NoSuchNodeException e) {
+ * logger.warn("Node {} does not exist in the cluster", e.nodeId());
+ * // Handle missing node, perhaps retry with a different node
+ * }
+ * }
+ */
public class NoSuchNodeException extends FailedNodeException {
+ /**
+ * Constructs a new no such node exception for the specified node ID.
+ *
+ * @param nodeId the ID of the node that does not exist
+ */
public NoSuchNodeException(String nodeId) {
super(nodeId, "No such node [" + nodeId + "]", null);
}
+ /**
+ * Constructs a new no such node exception by reading from a stream input.
+ * This constructor is used for deserialization.
+ *
+ * @param in the stream input to read from
+ * @throws IOException if an I/O error occurs while reading from the stream
+ */
public NoSuchNodeException(StreamInput in) throws IOException {
super(in);
}
diff --git a/server/src/main/java/org/elasticsearch/action/RequestBuilder.java b/server/src/main/java/org/elasticsearch/action/RequestBuilder.java
index 4be8735c9605e..072bb7036c9a2 100644
--- a/server/src/main/java/org/elasticsearch/action/RequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/RequestBuilder.java
@@ -12,18 +12,89 @@
import org.elasticsearch.core.RefCounted;
import org.elasticsearch.core.TimeValue;
+/**
+ * Interface for request builders that provide a fluent API for constructing and executing action requests.
+ * Request builders offer a convenient way to build action requests with a chainable method syntax and
+ * execute them in various ways (synchronously, asynchronously, or with a future).
+ *
+ * {@code
+ * // Using a request builder for a search operation
+ * SearchRequestBuilder builder = client.prepareSearch("myindex");
+ * builder.setQuery(QueryBuilders.matchQuery("field", "value"))
+ * .setSize(10)
+ * .setFrom(0);
+ *
+ * // Execute synchronously
+ * SearchResponse response = builder.get();
+ *
+ * // Execute asynchronously with a listener
+ * builder.execute(ActionListener.wrap(
+ * response -> {
+ * // Handle successful response
+ * System.out.println("Found " + response.getHits().getTotalHits() + " hits");
+ * },
+ * e -> {
+ * // Handle failure
+ * System.err.println("Search failed: " + e.getMessage());
+ * }
+ * ));
+ *
+ * // Execute with a future
+ * ActionFuture<SearchResponse> future = builder.execute();
+ * }
+ *
+ * @param <Request> the type of the request being built
+ * {@code
+ * // Throwing when routing is missing
+ * if (requiresRouting && request.routing() == null) {
+ * throw new RoutingMissingException(index, documentId);
+ * }
+ *
+ * // Handling RoutingMissingException
+ * try {
+ * indexDocument(request);
+ * } catch (RoutingMissingException e) {
+ * logger.error("Routing required for index {} document {}: add routing parameter",
+ * e.getIndex().getName(), e.getId());
+ * // Return error to user instructing them to provide routing
+ * }
+ * }
+ */
public final class RoutingMissingException extends ElasticsearchException {
private final String id;
+ /**
+ * Constructs a new routing missing exception for the specified index and document ID.
+ *
+ * @param index the name of the index requiring routing
+ * @param id the ID of the document for which routing is missing
+ * @throws NullPointerException if index or id is null
+ */
public RoutingMissingException(String index, String id) {
super("routing is required for [" + index + "]/[" + id + "]");
Objects.requireNonNull(index, "index must not be null");
@@ -31,15 +64,34 @@ public RoutingMissingException(String index, String id) {
this.id = id;
}
+ /**
+ * Returns the ID of the document for which routing is missing.
+ *
+ * @return the document ID
+ */
public String getId() {
return id;
}
+ /**
+ * Returns the REST status code for this exception, which is {@link RestStatus#BAD_REQUEST}
+ * since this represents a client error (missing required parameter).
+ *
+ * @return {@link RestStatus#BAD_REQUEST}
+ */
@Override
public RestStatus status() {
return RestStatus.BAD_REQUEST;
}
+ /**
+ * Constructs a new routing missing exception by reading from a stream input.
+ * This constructor is used for deserialization. It handles backward compatibility
+ * with versions before 8.0.0 where type information was included.
+ *
+ * @param in the stream input to read from
+ * @throws IOException if an I/O error occurs while reading from the stream
+ */
public RoutingMissingException(StreamInput in) throws IOException {
super(in);
if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
@@ -48,6 +100,15 @@ public RoutingMissingException(StreamInput in) throws IOException {
id = in.readString();
}
+ /**
+ * Writes this exception to the specified stream output for serialization.
+ * This method handles backward compatibility with versions before 8.0.0
+ * where type information was included.
+ *
+ * @param out the stream output to write to
+ * @param nestedExceptionsWriter the writer for nested exceptions
+ * @throws IOException if an I/O error occurs while writing to the stream
+ */
@Override
protected void writeTo(StreamOutput out, Writer<Throwable> nestedExceptionsWriter) throws IOException {
+ * {@code
+ * // Creating a custom shard operation failure
+ * public class SearchShardFailure extends ShardOperationFailedException {
+ * public SearchShardFailure(String index, int shardId, String reason,
+ * RestStatus status, Throwable cause) {
+ * super(index, shardId, reason, status, cause);
+ * }
+ * // Implement serialization methods...
+ * }
+ *
+ * // Handling shard failures
+ * try {
+ * performShardOperation(shardId);
+ * } catch (Exception e) {
+ * ShardOperationFailedException failure = new SearchShardFailure(
+ * indexName, shardId, e.getMessage(), RestStatus.INTERNAL_SERVER_ERROR, e
+ * );
+ * collectFailure(failure);
+ * }
+ * }
*/
public abstract class ShardOperationFailedException extends Exception implements Writeable, ToXContentObject {
+ /**
+ * The name of the index where the operation failed.
+ */
protected String index;
+
+ /**
+ * The ID of the shard where the operation failed, or -1 if unknown.
+ */
protected int shardId = -1;
+
+ /**
+ * The reason describing why the operation failed.
+ */
protected String reason;
+
+ /**
+ * The HTTP status code representing the type of failure.
+ */
protected RestStatus status;
+
+ /**
+ * The underlying cause of the failure.
+ */
protected Throwable cause;
+ /**
+ * Default constructor for subclasses and deserialization.
+ */
protected ShardOperationFailedException() {
}
+ /**
+ * Constructs a new shard operation failed exception with full details.
+ *
+ * @param index the name of the index, or {@code null} if it can't be determined
+ * @param shardId the ID of the shard where the failure occurred
+ * @param reason the reason for the failure (must not be null)
+ * @param status the REST status representing the failure (must not be null)
+ * @param cause the underlying cause of the failure (must not be null)
+ * @throws NullPointerException if reason, status, or cause is null
+ */
protected ShardOperationFailedException(@Nullable String index, int shardId, String reason, RestStatus status, Throwable cause) {
this.index = index;
this.shardId = shardId;
@@ -41,7 +104,9 @@ protected ShardOperationFailedException(@Nullable String index, int shardId, Str
}
/**
- * The index the operation failed on. Might return {@code null} if it can't be derived.
+ * Returns the name of the index where the operation failed.
+ *
+ * @return the index name, or {@code null} if it cannot be determined
*/
@Nullable
public final String index() {
@@ -49,28 +114,36 @@ public final String index() {
}
/**
- * The index the operation failed on. Might return {@code -1} if it can't be derived.
+ * Returns the ID of the shard where the operation failed.
+ *
+ * @return the shard ID, or {@code -1} if it cannot be determined
*/
public final int shardId() {
return shardId;
}
/**
- * The reason of the failure.
+ * Returns the reason describing why the operation failed.
+ *
+ * @return the failure reason
*/
public final String reason() {
return reason;
}
/**
- * The status of the failure.
+ * Returns the REST status code representing the type of failure.
+ *
+ * @return the REST status
*/
public final RestStatus status() {
return status;
}
/**
- * The cause of this failure
+ * Returns the underlying cause of this failure.
+ *
+ * @return the cause throwable
*/
public final Throwable getCause() {
return cause;
diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java
index e2644b41519c1..84fd0d5ab47e6 100644
--- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java
+++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java
@@ -14,34 +14,88 @@
import java.util.Objects;
/**
- * Encapsulates a bootstrap check.
+ * Encapsulates a bootstrap check performed during Elasticsearch node startup.
+ * {@code
+ * public class CustomBootstrapCheck implements BootstrapCheck {
+ * @Override
+ * public BootstrapCheckResult check(BootstrapContext context) {
+ * if (isConfigurationValid(context)) {
+ * return BootstrapCheckResult.success();
+ * }
+ * return BootstrapCheckResult.failure("Configuration is invalid");
+ * }
+ *
+ * @Override
+ * public ReferenceDocs referenceDocs() {
+ * return ReferenceDocs.BOOTSTRAP_CHECKS;
+ * }
+ * }
+ * }
*/
public interface BootstrapCheck {
/**
- * Encapsulate the result of a bootstrap check.
+ * Encapsulates the result of a bootstrap check.
+ * {@code
+ * public BootstrapCheckResult check(BootstrapContext context) {
+ * Settings settings = context.settings();
+ * String nodeName = settings.get("node.name");
+ * // perform validation based on settings
+ * return BootstrapCheckResult.success();
+ * }
+ * }
+ *
+ * @param environment the node's environment containing paths and settings
+ * @param metadata the cluster metadata
*/
public record BootstrapContext(Environment environment, Metadata metadata) {
+ /**
+ * Returns the node's settings from the environment.
+ * {@code
+ * try {
+ * initializeNode();
+ * } catch (Exception e) {
+ * throw new StartupException(e);
+ * }
+ * }
*/
public final class StartupException extends Exception {
- /** maximum length of a stacktrace, before we truncate it */
+ /** Maximum length of a stacktrace before truncation */
static final int STACKTRACE_LIMIT = 30;
- /** all lines from this package are RLE-compressed */
+ /** All lines from this package are RLE-compressed */
static final String GUICE_PACKAGE = "org.elasticsearch.injection.guice";
+ /**
+ * Constructs a startup exception wrapping another throwable.
+ *
+ * @param cause the underlying cause of the startup failure
+ * @throws NullPointerException if cause is null
+ */
public StartupException(Throwable cause) {
super(Objects.requireNonNull(cause));
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java
index 5ae8be378082f..a86ea1ae92d05 100644
--- a/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java
+++ b/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java
@@ -45,6 +45,23 @@ public class ClusterChangedEvent {
private final ProjectsDelta projectsDelta;
+ /**
+ * Constructs a new cluster changed event with the specified source, new state, and previous state.
+ * This constructor automatically calculates the differences between the states for efficient comparison.
+ *
+ * {@code
+ * ClusterChangedEvent event = new ClusterChangedEvent("source", newState, oldState);
+ * if (event.routingTableChanged()) {
+ * // Handle routing changes
+ * }
+ * }
+ *
+ * @param source the descriptive source that caused this cluster state change (e.g., "reroute", "create-index")
+ * @param state the new cluster state after the change
+ * @param previousState the cluster state before the change
+ * @throws NullPointerException if any parameter is null
+ */
public ClusterChangedEvent(String source, ClusterState state, ClusterState previousState) {
Objects.requireNonNull(source, "source must not be null");
Objects.requireNonNull(state, "state must not be null");
@@ -57,30 +74,48 @@ public ClusterChangedEvent(String source, ClusterState state, ClusterState previ
}
/**
- * The source that caused this cluster event to be raised.
+ * Returns the source that caused this cluster event to be raised.
+ * The source is a descriptive string indicating the operation or action that triggered the cluster state change.
+ *
+ * @return the source description (e.g., "reroute", "create-index", "node-join")
*/
public String source() {
return this.source;
}
/**
- * The new cluster state that caused this change event.
+ * Returns the new cluster state that caused this change event.
+ * This represents the current state of the cluster after the change has been applied.
+ *
+ * @return the new cluster state
*/
public ClusterState state() {
return this.state;
}
/**
- * The previous cluster state for this change event.
+ * Returns the previous cluster state before this change event.
+ * This represents the state of the cluster immediately before the change was applied.
+ *
+ * @return the previous cluster state
*/
public ClusterState previousState() {
return this.previousState;
}
/**
- * Returns true if the routing tables (for all indices) have
- * changed between the previous cluster state and the current cluster state.
- * Note that this is an object reference equality test, not an equals test.
+ * Determines if the routing tables for all indices have changed between the previous and current cluster states.
+ * This uses object reference equality rather than deep equals comparison for efficiency, relying on the
+ * immutability of {@link GlobalRoutingTable}.
+ *
+ * {@code
+ * if (event.routingTableChanged()) {
+ * logger.info("Routing table has changed, updating shard allocations");
+ * }
+ * }
+ *
+ * @return {@code true} if the routing tables have changed, {@code false} otherwise
*/
public boolean routingTableChanged() {
// GlobalRoutingTable.routingTables is immutable, meaning that we can simply test the reference equality of the global routing
@@ -89,8 +124,20 @@ public boolean routingTableChanged() {
}
/**
- * Returns true iff the routing table has changed for the given index.
- * Note that this is an object reference equality test, not an equals test.
+ * Determines if the routing table has changed for the specified index.
+ * This uses object reference equality rather than deep equals comparison for efficiency.
+ *
+ * {@code
+ * Index myIndex = new Index("my-index", "uuid");
+ * if (event.indexRoutingTableChanged(myIndex)) {
+ * // Handle index-specific routing changes
+ * }
+ * }
+ *
+ * @param index the index to check for routing table changes
+ * @return {@code true} if the routing table for the specified index has changed, {@code false} otherwise
+ * @throws NullPointerException if index is null
*/
public boolean indexRoutingTableChanged(Index index) {
Objects.requireNonNull(index, "index must not be null");
@@ -106,7 +153,19 @@ public boolean indexRoutingTableChanged(Index index) {
}
/**
- * Returns the indices deleted in this event
+ * Returns the list of indices that were deleted in this cluster state change.
+ * The method determines deletions using either tombstones (if the cluster state is not fully recovered)
+ * or by comparing index metadata between the previous and current states.
+ *
+ * {@code
+ * List<Index> deletedIndices = event.indicesDeleted();
+ * }
+ *
+ * @return a list of deleted indices, or an empty list if no indices were deleted
*/
public List<Index> indicesDeleted() {
- * Returns true iff the metadata for the cluster has changed between
- * the previous cluster state and the new cluster state. Note that this is an object
- * reference equality test, not an equals test.
+ * Determines if the cluster metadata has changed between the previous and current cluster states.
+ * This uses object reference equality rather than deep equals comparison for efficiency, relying
+ * on the immutability of the {@link Metadata} objects.
+ *
+ * {@code
+ * if (event.metadataChanged()) {
+ * // Handle metadata changes such as index settings or mappings updates
+ * }
+ * }
+ *
+ * @return {@code true} if the cluster metadata has changed, {@code false} otherwise
*/
public boolean metadataChanged() {
return state.metadata() != previousState.metadata();
}
/**
- * Returns a set of custom meta data types when any custom metadata for the cluster has changed
- * between the previous cluster state and the new cluster state. custom meta data types are
- * returned iff they have been added, updated or removed between the previous and the current state
+ * Returns the set of custom metadata type names that have changed at the cluster level.
+ * A custom metadata type is included if it has been added, updated, or removed between
+ * the previous and current cluster states.
+ *
+ * {@code
+ * Set
+ *
+ * @return a set of custom metadata type names that have changed, or an empty set if none changed
*/
public Set
+ * {@code
+ * Set
+ *
+ * @return a set of custom metadata type names that have changed, or an empty set if none changed
*/
public Set
+ * {@code
+ * ProjectId projectId = ProjectId.DEFAULT;
+ * if (event.customMetadataChanged(projectId, "my-custom-type")) {
+ * // Handle the change to this specific custom metadata
+ * }
+ * }
+ *
+ * @param projectId the project identifier to check
+ * @param customMetadataType the custom metadata type name to check
+ * @return {@code true} if the custom metadata has changed for the specified project, {@code false} otherwise
*/
public boolean customMetadataChanged(ProjectId projectId, String customMetadataType) {
ProjectMetadata previousProject = previousState.metadata().projects().get(projectId);
@@ -200,9 +300,22 @@
}
/**
- * Returns true iff the {@link IndexMetadata} for a given index
- * has changed between the previous cluster state and the new cluster state.
- * Note that this is an object reference equality test, not an equals test.
+ * Determines if the {@link IndexMetadata} has changed between two metadata instances.
+ * This uses object reference equality rather than deep equals comparison for efficiency.
+ *
+ * {@code
+ * IndexMetadata oldMeta = previousState.metadata().index("my-index");
+ * IndexMetadata newMeta = currentState.metadata().index("my-index");
+ * if (ClusterChangedEvent.indexMetadataChanged(oldMeta, newMeta)) {
+ * // Handle index metadata changes
+ * }
+ * }
+ *
+ * @param metadata1 the first index metadata instance
+ * @param metadata2 the second index metadata instance
+ * @return {@code true} if the metadata instances are different, {@code false} if they are the same
+ * @throws AssertionError if either metadata parameter is null (when assertions are enabled)
*/
public static boolean indexMetadataChanged(IndexMetadata metadata1, IndexMetadata metadata2) {
assert metadata1 != null && metadata2 != null;
@@ -212,62 +325,102 @@ public static boolean indexMetadataChanged(IndexMetadata metadata1, IndexMetadata metadata2) {
}
/**
- * Returns true iff the cluster level blocks have changed between cluster states.
- * Note that this is an object reference equality test, not an equals test.
+ * Determines if the cluster-level blocks have changed between the previous and current cluster states.
+ * This uses object reference equality rather than deep equals comparison for efficiency.
+ *
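+ * A usage sketch (assuming a {@code ClusterChangedEvent} named {@code event}):
+ * {@code
+ * if (event.blocksChanged()) {
+ *     // Re-check which operations are currently blocked
+ * }
+ * }
+ *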
+ * @return {@code true} if the cluster blocks have changed, {@code false} otherwise
*/
public boolean blocksChanged() {
return state.blocks() != previousState.blocks();
}
/**
- * Returns true iff the local node is the master node of the cluster.
+ * Determines if the local node is the elected master node of the cluster in the current state.
+ *
+ * {@code
+ * if (event.localNodeMaster()) {
+ * // Execute master-specific logic
+ * }
+ * }
+ *
+ * @return {@code true} if the local node is the elected master, {@code false} otherwise
*/
public boolean localNodeMaster() {
return state.nodes().isLocalNodeElectedMaster();
}
/**
- * Returns the {@link org.elasticsearch.cluster.node.DiscoveryNodes.Delta} between
- * the previous cluster state and the new cluster state.
+ * Returns the delta of node changes between the previous and current cluster states.
+ * The delta includes information about nodes that were added, removed, or had their master status change.
+ *
+ * {@code
+ * DiscoveryNodes.Delta delta = event.nodesDelta();
+ * for (DiscoveryNode node : delta.addedNodes()) {
+ * logger.info("Node {} joined the cluster", node.getName());
+ * }
+ * }
+ *
+ * @return the discovery nodes delta representing node changes
*/
public DiscoveryNodes.Delta nodesDelta() {
return this.nodesDelta;
}
/**
- * Returns true iff nodes have been removed from the cluster since the last cluster state.
+ * Determines if any nodes have been removed from the cluster since the previous cluster state.
+ *
+ * @return {@code true} if one or more nodes were removed, {@code false} otherwise
*/
public boolean nodesRemoved() {
return nodesDelta.removed();
}
/**
- * Returns true iff nodes have been added from the cluster since the last cluster state.
+ * Determines if any nodes have been added to the cluster since the previous cluster state.
+ *
+ * @return {@code true} if one or more nodes were added, {@code false} otherwise
*/
public boolean nodesAdded() {
return nodesDelta.added();
}
/**
- * Returns true iff nodes have been changed (added or removed) from the cluster since the last cluster state.
+ * Determines if the set of nodes in the cluster has changed (either added or removed)
+ * since the previous cluster state.
+ *
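+ * A usage sketch (assuming a {@code ClusterChangedEvent} named {@code event}):
+ * {@code
+ * if (event.nodesChanged()) {
+ *     // Refresh any state keyed by node id
+ * }
+ * }
+ *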
+ * @return {@code true} if any nodes were added or removed, {@code false} otherwise
*/
public boolean nodesChanged() {
return nodesRemoved() || nodesAdded();
}
/**
- * Returns the {@link ProjectsDelta} between the previous cluster state and the new cluster state.
+ * Returns the delta of project changes between the previous and current cluster states.
+ * The delta includes information about projects that were added or removed.
+ *
+ * @return the projects delta representing project changes
*/
public ProjectsDelta projectDelta() {
return projectsDelta;
}
/**
- * Determines whether or not the current cluster state represents an entirely
- * new cluster, either when a node joins a cluster for the first time or when
- * the node receives a cluster state update from a brand new cluster (different
- * UUID from the previous cluster), which will happen when a master node is
- * elected that has never been part of the cluster before.
+ * Determines if the current cluster state represents an entirely new cluster.
+ * This occurs when a node joins a cluster for the first time, or when the node receives
+ * a cluster state update from a brand new cluster with a different UUID. This typically
+ * happens when a master node is elected that has never been part of the cluster before
+ * or has had its data directory wiped.
+ *
+ * {@code
+ * if (event.isNewCluster()) {
+ * logger.warn("New cluster detected, previous cluster UUID was lost");
+ * }
+ * }
+ *
+ * @return {@code true} if this represents a new cluster with a different UUID, {@code false} otherwise
*/
public boolean isNewCluster() {
final String prevClusterUUID = previousState.metadata().clusterUUID();
diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterName.java b/server/src/main/java/org/elasticsearch/cluster/ClusterName.java
index 8ee90fe713565..6334ad5756f8a 100644
--- a/server/src/main/java/org/elasticsearch/cluster/ClusterName.java
+++ b/server/src/main/java/org/elasticsearch/cluster/ClusterName.java
@@ -35,15 +35,39 @@ public class ClusterName implements Writeable {
private final String value;
+ /**
+ * Constructs a new {@link ClusterName} instance by reading from a stream input.
+ *
+ * @param input the stream input to read from
+ * @throws IOException if an I/O error occurs during reading
+ */
public ClusterName(StreamInput input) throws IOException {
this(input.readString());
}
+ /**
+ * Constructs a new {@link ClusterName} instance with the specified name value.
+ * The value is interned to optimize memory usage since cluster names are typically
+ * used as part of settings.
+ *
+ * {@code
+ * ClusterName clusterName = new ClusterName("my-cluster");
+ * }
+ *
+ * @param value the cluster name value
+ * @throws IllegalArgumentException if the value is empty or contains ':'
+ */
public ClusterName(String value) {
// cluster name string is most likely part of a setting so we can speed things up over outright interning here
this.value = Settings.internKeyOrValue(value);
}
+ /**
+ * Returns the cluster name value.
+ *
+ * @return the cluster name as a string
+ */
public String value() {
return this.value;
}
@@ -73,6 +97,19 @@ public String toString() {
return "Cluster [" + value + "]";
}
+ /**
+ * Returns a predicate that tests for equality with this cluster name.
+ * The predicate can be used to filter or validate cluster names against this instance.
+ *
+ * {@code
+ * ClusterName localCluster = new ClusterName("my-cluster");
+ * Predicate<ClusterName> predicate = localCluster.getEqualityPredicate();
+ * }
+ *
+ * @return a predicate that returns {@code true} if the tested cluster name equals this instance
+ */
public Predicate<ClusterName> getEqualityPredicate() {
diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateApplier.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateApplier.java
/**
+ * A component that applies an incoming cluster state to the node's internal data structures.
+ * Appliers run before the new state becomes visible via {@link ClusterService#state()}.
+ *
+ * {@code
+ * ClusterStateApplier applier = event -> {
+ * // Update internal data structures before state becomes visible
+ * if (event.routingTableChanged()) {
+ * internalCache.update(event.state().routingTable());
+ * }
+ * };
+ * clusterService.addStateApplier(applier);
+ * }
+ *
+ * @see ClusterStateListener
+ * @see ClusterChangedEvent
*/
public interface ClusterStateApplier {
/**
- * Called when a new cluster state ({@link ClusterChangedEvent#state()} needs to be applied. The cluster state to be applied is already
- * committed when this method is called, so an applier must therefore be prepared to deal with any state it receives without throwing an
- * exception. Throwing an exception from an applier is very bad because it will stop the application of this state before it has reached
- * all the other appliers, and will likely result in another attempt to apply the same (or very similar) cluster state which might
- * continue until this node is removed from the cluster.
- *
+ * Called when a new cluster state needs to be applied. The state is already committed
+ * when this method is called, so implementations must be prepared to handle any state
+ * they receive without throwing exceptions.
+ *
+ * {@code
+ * public void applyClusterState(ClusterChangedEvent event) {
+ * try {
+ * // Safely update internal structures
+ * if (event.metadataChanged()) {
+ * updateInternalMetadata(event.state().metadata());
+ * }
+ * } catch (Exception e) {
+ * // MUST handle all exceptions internally
+ * logger.error("Failed to apply cluster state", e);
+ * }
+ * }
+ * }
+ *
+ * @param event the cluster changed event containing the new state to apply
*/
void applyClusterState(ClusterChangedEvent event);
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateListener.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateListener.java
index bb2f2caddbeb1..83fcea0fbcb85 100644
--- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateListener.java
+++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateListener.java
@@ -12,16 +12,57 @@
import org.elasticsearch.cluster.service.ClusterService;
/**
- * A listener to be notified when a cluster state changes. The {@link #clusterChanged} method is called after the cluster state becomes
- * visible via {@link ClusterService#state()}. See also {@link ClusterStateApplier}.
+ * A listener interface for receiving notifications when cluster state changes occur.
+ * The {@link #clusterChanged} method is invoked after the cluster state becomes visible
+ * via {@link ClusterService#state()}, allowing implementations to react to state changes.
+ *
+ * {@code
+ * ClusterStateListener listener = event -> {
+ * if (event.routingTableChanged()) {
+ * // React to routing table changes
+ * logger.info("Routing table changed in cluster state version {}", event.state().version());
+ * }
+ * };
+ * clusterService.addListener(listener);
+ * }
+ *
+ * @see ClusterStateApplier
+ * @see ClusterChangedEvent
*/
public interface ClusterStateListener {
/**
- * Called when cluster state changes.
- *
+ * Called when the cluster state changes. The event provides access to both the new
+ * and previous cluster states.
+ *
+ * {@code
+ * public void clusterChanged(ClusterChangedEvent event) {
+ * if (event.nodesAdded()) {
+ * for (DiscoveryNode node : event.nodesDelta().addedNodes()) {
+ * logger.info("Node {} joined the cluster", node.getName());
+ * }
+ * }
+ * }
+ * }
+ *
+ * @param event the cluster changed event containing both the new and previous states
*/
void clusterChanged(ClusterChangedEvent event);
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java
index 3979e5c7a4683..144c37eeda65c 100644
--- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java
+++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java
@@ -14,7 +14,34 @@
import org.elasticsearch.core.TimeValue;
/**
- * A task that can update the cluster state.
+ * An abstract base class for tasks that can update the cluster state.
+ * Implementations define the actual state transformation logic in the {@link #execute} method
+ * and can optionally be notified when the update completes via {@link #clusterStateProcessed}.
+ *
+ * {@code
+ * ClusterStateUpdateTask task = new ClusterStateUpdateTask(Priority.URGENT) {
+ * {@literal @}Override
+ * public ClusterState execute(ClusterState currentState) {
+ * // Return a new state or the same instance if no changes needed
+ * return ClusterState.builder(currentState)
+ * .metadata(updatedMetadata)
+ * .build();
+ * }
+ *
+ * {@literal @}Override
+ * public void onFailure(Exception e) {
+ * logger.error("Cluster state update failed", e);
+ * }
+ * };
+ * clusterService.submitStateUpdateTask("source", task);
+ * }
+ *
+ * @see ClusterStateTaskListener
+ * @see Priority
*/
public abstract class ClusterStateUpdateTask implements ClusterStateTaskListener {
@@ -23,50 +50,124 @@ public abstract class ClusterStateUpdateTask implements ClusterStateTaskListener
@Nullable
private final TimeValue timeout;
+ /**
+ * Constructs a cluster state update task with {@link Priority#NORMAL} priority and no timeout.
+ */
public ClusterStateUpdateTask() {
this(Priority.NORMAL);
}
+ /**
+ * Constructs a cluster state update task with the specified priority and no timeout.
+ *
+ * @param priority the execution priority for this task
+ */
public ClusterStateUpdateTask(Priority priority) {
this(priority, null);
}
+ /**
+ * Constructs a cluster state update task with {@link Priority#NORMAL} priority and the specified timeout.
+ *
+ * @param timeout the maximum time to wait for execution, or null for no timeout
+ */
public ClusterStateUpdateTask(TimeValue timeout) {
this(Priority.NORMAL, timeout);
}
+ /**
+ * Constructs a cluster state update task with the specified priority and timeout.
+ *
+ * {@code
+ * // High priority task with 30 second timeout
+ * ClusterStateUpdateTask task = new ClusterStateUpdateTask(
+ * Priority.HIGH,
+ * TimeValue.timeValueSeconds(30)
+ * ) {
+ * // ... implementation
+ * };
+ * }
+ *
+ * @param priority the execution priority for this task
+ * @param timeout the maximum time to wait for execution, or null for no timeout
+ */
public ClusterStateUpdateTask(Priority priority, TimeValue timeout) {
this.priority = priority;
this.timeout = timeout;
}
/**
- * Computes the cluster state that results from executing this task on the given state. Returns the *same instance* if no change is
- * required, which is an important and valuable optimisation since it short-circuits the whole publication process and saves a bunch of
- * time and effort.
+ * Computes and returns the new cluster state that results from executing this task.
+ *
+ * {@code
+ * public ClusterState execute(ClusterState currentState) throws Exception {
+ * // Check if update is actually needed
+ * if (currentState.metadata().hasIndex("my-index")) {
+ * return currentState; // Return same instance - no change needed
+ * }
+ *
+ * // Build and return new state
+ * return ClusterState.builder(currentState)
+ * .metadata(Metadata.builder(currentState.metadata())
+ * .put(newIndexMetadata)
+ * .build())
+ * .build();
+ * }
+ * }
+ *
+ * @param currentState the current cluster state before this task executes
+ * @return the new cluster state, or the same instance if no changes are needed
+ * @throws Exception if the state update cannot be computed
*/
public abstract ClusterState execute(ClusterState currentState) throws Exception;
/**
- * Called when the result of the {@link #execute} method has been processed properly by all listeners.
+ * Called after the result of {@link #execute} has been successfully processed by all listeners.
+ * This callback indicates that the cluster state update has been fully applied.
*
- * The {@param newState} parameter is the state that was ultimately published.
+ * The {@code newState} parameter is the state that was ultimately published.
+ * Implementations of this callback must not throw exceptions; any exception thrown here
+ * is logged at {@code ERROR} level and otherwise ignored.
*
- * Implementations of this callback must not throw exceptions: an exception thrown here is logged by the master service at {@code ERROR}
- * level and otherwise ignored, except in tests where it raises an {@link AssertionError}. If log-and-ignore is the right behaviour then
- * implementations must do so themselves, typically using a more specific logger and at a less dramatic log level.
+ * {@code
+ * public void clusterStateProcessed(ClusterState initialState, ClusterState newState) {
+ * logger.info("Cluster state updated from version {} to {}",
+ * initialState.version(), newState.version());
+ * // Notify completion handlers, update metrics, etc.
+ * }
+ * }
+ *
+ * @param initialState the cluster state before the update
+ * @param newState the cluster state that was ultimately published
*/
public void clusterStateProcessed(ClusterState initialState, ClusterState newState) {}
/**
- * If the cluster state update task wasn't processed by the provided timeout, call
- * {@link ClusterStateTaskListener#onFailure(Exception)}. May return null to indicate no timeout is needed (default).
+ * Returns the timeout for this task. If the task is not processed within this time,
+ * {@link ClusterStateTaskListener#onFailure(Exception)} will be called.
+ *
+ * @return the timeout value, or {@code null} if no timeout is configured
*/
@Nullable
public final TimeValue timeout() {
return timeout;
}
+ /**
+ * Returns the execution priority for this task. Higher priority tasks are processed
+ * before lower priority tasks.
+ *
+ * @return the priority level for this task
+ */
public final Priority priority() {
return priority;
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/Diff.java b/server/src/main/java/org/elasticsearch/cluster/Diff.java
index 1b4f33e4d15ee..dc2c07ef71cb1 100644
--- a/server/src/main/java/org/elasticsearch/cluster/Diff.java
+++ b/server/src/main/java/org/elasticsearch/cluster/Diff.java
@@ -12,12 +12,31 @@
import org.elasticsearch.common.io.stream.Writeable;
/**
- * Represents difference between states of cluster state parts
+ * Represents a serializable difference between two states of a cluster state component.
+ * A diff object captures the changes needed to transform one state into another,
+ * enabling efficient transmission and application of cluster state updates.
+ *
+ * {@code
+ * // Applying a diff to reconstruct a state
+ * ClusterState previousState = getPreviousState();
+ * Diff<ClusterState> diff = currentState.diff(previousState);
+ * ClusterState rebuilt = diff.apply(previousState);
+ * }
+ *
+ * @param <T> the type of the part being diffed
 */
public interface Diff<T> extends Writeable {
diff --git a/server/src/main/java/org/elasticsearch/cluster/Diffable.java b/server/src/main/java/org/elasticsearch/cluster/Diffable.java
/**
+ * A cluster state part from which a {@link Diff} can be computed.
+ *
+ * {@code
+ * // Creating a diff between two cluster states
+ * ClusterState previous = getPreviousState();
+ * ClusterState current = getCurrentState();
+ * Diff<ClusterState> diff = current.diff(previous);
+ * }
+ *
+ * @param <T> the type of the diffable object
 */
diff --git a/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java b/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java
/**
+ * Encapsulates the total and free disk space of a single path on a cluster node.
+ *
+ * {@code
+ * DiskUsage diskUsage = new DiskUsage("node-1", "my-node", "/data", 1000000000L, 500000000L);
+ * double freePercent = diskUsage.freeDiskAsPercentage(); // Returns 50.0
+ * long usedBytes = diskUsage.usedBytes(); // Returns 500000000L
+ * }
+ *
+ * @param nodeId the unique identifier of the node
+ * @param nodeName the human-readable name of the node
+ * @param path the filesystem path being measured
+ * @param totalBytes the total size of the disk in bytes
+ * @param freeBytes the available free space in bytes
*/
public record DiskUsage(String nodeId, String nodeName, String path, long totalBytes, long freeBytes)
implements ToXContentFragment, Writeable {
@@ -34,6 +49,12 @@ public record DiskUsage(String nodeId, String nodeName, String path, long totalB
private static final Logger logger = LogManager.getLogger(DiskUsage.class);
+ /**
+ * Constructs a {@link DiskUsage} instance by reading from a stream input.
+ *
+ * @param in the stream input to read from
+ * @throws IOException if an I/O error occurs during reading
+ */
public DiskUsage(StreamInput in) throws IOException {
this(in.readString(), in.readString(), in.readString(), in.readVLong(), in.readVLong());
}
@@ -68,6 +89,18 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
return builder;
}
+ /**
+ * Calculates the percentage of disk space that is free.
+ * If total bytes is zero, returns 100.0% to fail "open" (as if we don't know disk usage).
+ *
+ * {@code
+ * DiskUsage diskUsage = new DiskUsage("node-1", "my-node", "/data", 1000L, 400L);
+ * double freePct = diskUsage.freeDiskAsPercentage(); // Returns 40.0
+ * }
+ *
+ * @return the percentage of free disk space (0.0 to 100.0), or 100.0 if total bytes is zero
+ */
public double freeDiskAsPercentage() {
// We return 100.0% in order to fail "open", in that if we have invalid
// numbers for the total bytes, it's as if we don't know disk usage.
@@ -77,10 +110,21 @@ public double freeDiskAsPercentage() {
return 100.0 * freeBytes / totalBytes;
}
+ /**
+ * Calculates the percentage of disk space that is used.
+ * This is simply 100.0 minus the free percentage.
+ *
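+ * A small worked example (hypothetical values):
+ * {@code
+ * DiskUsage usage = new DiskUsage("node-1", "my-node", "/data", 1000L, 400L);
+ * double usedPct = usage.usedDiskAsPercentage(); // 100.0 - 40.0 = 60.0
+ * }
+ *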
+ * @return the percentage of used disk space (0.0 to 100.0)
+ */
public double usedDiskAsPercentage() {
return 100.0 - freeDiskAsPercentage();
}
+ /**
+ * Calculates the number of bytes currently in use.
+ *
+ * @return the number of used bytes (total bytes minus free bytes)
+ */
public long usedBytes() {
return totalBytes - freeBytes;
}
@@ -100,13 +144,40 @@ public String toString() {
+ "]";
}
+ /**
+ * Creates a copy of this {@link DiskUsage} with updated free bytes while preserving all other fields.
+ *
+ * {@code
+ * DiskUsage original = new DiskUsage("node-1", "my-node", "/data", 1000L, 400L);
+ * DiskUsage updated = original.copyWithFreeBytes(300L);
+ * }
+ *
+ * @param freeBytes the new free bytes value to use
+ * @return a new {@link DiskUsage} instance with updated free bytes
+ */
public DiskUsage copyWithFreeBytes(long freeBytes) {
return new DiskUsage(nodeId, nodeName, path, totalBytes, freeBytes);
}
/**
- * Finds the path with the least available disk space and returns its disk usage. It returns null if there is no
- * file system data in the NodeStats or if the total bytes are a negative number.
+ * Finds the filesystem path with the least available disk space on the specified node.
+ * This method examines all filesystem paths reported in the node statistics and returns
+ * the one with the smallest amount of free space.
+ *
+ * {@code
+ * NodeStats nodeStats = getNodeStats("node-1");
+ * DiskUsage leastAvailable = DiskUsage.findLeastAvailablePath(nodeStats);
+ * if (leastAvailable != null) {
+ * logger.info("Least available path: {} with {}% free",
+ * leastAvailable.path(), leastAvailable.freeDiskAsPercentage());
+ * }
+ * }
+ *
+ * @param nodeStats the node statistics containing filesystem information
+ * @return the {@link DiskUsage} for the path with least available space, or {@code null} if no valid
+ * filesystem data is available or if total bytes is negative
*/
@Nullable
public static DiskUsage findLeastAvailablePath(NodeStats nodeStats) {
@@ -159,8 +230,23 @@ public static DiskUsage findLeastAvailablePath(NodeStats nodeStats) {
}
/**
- * Finds the path with the most available disk space and returns its disk usage. It returns null if there are no
- * file system data in the node stats or if the total bytes are a negative number.
+ * Finds the filesystem path with the most available disk space on the specified node.
+ * This method examines all filesystem paths reported in the node statistics and returns
+ * the one with the largest amount of free space.
+ *
+ * {@code
+ * NodeStats nodeStats = getNodeStats("node-1");
+ * DiskUsage mostAvailable = DiskUsage.findMostAvailable(nodeStats);
+ * if (mostAvailable != null) {
+ * logger.info("Most available path: {} with {}% free",
+ * mostAvailable.path(), mostAvailable.freeDiskAsPercentage());
+ * }
+ * }
+ *
+ * @param nodeStats the node statistics containing filesystem information
+ * @return the {@link DiskUsage} for the path with most available space, or {@code null} if no valid
+ * filesystem data is available or if total bytes is negative
*/
@Nullable
public static DiskUsage findMostAvailable(NodeStats nodeStats) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java
index a3955845c34fc..8517e0f97c75e 100644
--- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java
+++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java
@@ -24,6 +24,32 @@
import java.util.Objects;
import java.util.function.Predicate;
+/**
+ * Represents a cluster block that restricts certain operations at specified levels
+ * (read, write, metadata, etc.). Cluster blocks are used to prevent operations during
+ * critical cluster states such as recovery, snapshot operations, or when the cluster
+ * is in a degraded state.
+ *
+ * {@code
+ * // Create a write block for an index
+ * ClusterBlock writeBlock = new ClusterBlock(
+ * 1,
+ * "index-read-only",
+ * false, // not retryable
+ * false, // don't disable state persistence
+ * false, // don't allow release resources
+ * RestStatus.FORBIDDEN,
+ * EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)
+ * );
+ * }
+ *
+ * @see ClusterBlockLevel
+ * @see org.elasticsearch.cluster.block.ClusterBlocks
+ */
public class ClusterBlock implements Writeable, ToXContentFragment {
private final int id;
@@ -36,6 +62,12 @@ public class ClusterBlock implements Writeable, ToXContentFragment {
private final boolean allowReleaseResources;
private final RestStatus status;
+ /**
+ * Constructs a {@link ClusterBlock} by reading from a stream input.
+ *
+ * @param in the stream input to read from
+ * @throws IOException if an I/O error occurs during reading
+ */
public ClusterBlock(StreamInput in) throws IOException {
id = in.readVInt();
uuid = in.readOptionalString();
@@ -47,6 +79,17 @@ public ClusterBlock(StreamInput in) throws IOException {
allowReleaseResources = in.readBoolean();
}
+ /**
+ * Constructs a new cluster block without a UUID.
+ *
+ * @param id the unique identifier for this block
+ * @param description a human-readable description of why this block exists
+ * @param retryable whether operations should retry when encountering this block
+ * @param disableStatePersistence whether to disable state persistence (global blocks only)
+ * @param allowReleaseResources whether to allow resource release operations
+ * @param status the REST status code to return when this block prevents an operation
+ * @param levels the operation levels at which this block applies
+ */
public ClusterBlock(
int id,
String description,
@@ -59,6 +102,32 @@ public ClusterBlock(
this(id, null, description, retryable, disableStatePersistence, allowReleaseResources, status, levels);
}
+ /**
+ * Constructs a new cluster block with a UUID for index-specific blocks.
+ *
+ * {@code
+ * ClusterBlock block = new ClusterBlock(
+ * 10,
+ * indexUUID,
+ * "Index is read-only",
+ * false,
+ * false,
+ * false,
+ * RestStatus.FORBIDDEN,
+ * EnumSet.of(ClusterBlockLevel.WRITE)
+ * );
+ * }
+ *
+ * @param id the unique identifier for this block
+ * @param uuid optional UUID for index-specific blocks
+ * @param description a human-readable description of why this block exists
+ * @param retryable whether operations should retry when encountering this block
+ * @param disableStatePersistence whether to disable state persistence (global blocks only)
+ * @param allowReleaseResources whether to allow resource release operations
+ * @param status the REST status code to return when this block prevents an operation
+ * @param levels the operation levels at which this block applies
+ */
public ClusterBlock(
int id,
String uuid,
@@ -79,41 +148,86 @@ public ClusterBlock(
this.allowReleaseResources = allowReleaseResources;
}
+ /**
+ * Returns the unique identifier for this cluster block.
+ *
+ * @return the block ID
+ */
public int id() {
return this.id;
}
+ /**
+ * Returns the optional UUID associated with this block, typically used for index-specific blocks.
+ *
+ * @return the block UUID, or {@code null} if not set
+ */
@Nullable
public String uuid() {
return uuid;
}
+ /**
+ * Returns the human-readable description explaining why this block exists.
+ *
+ * @return the block description
+ */
public String description() {
return this.description;
}
+ /**
+ * Returns the REST status code that should be returned when this block prevents an operation.
+ *
+ * @return the HTTP status code
+ */
public RestStatus status() {
return this.status;
}
+ /**
+ * Returns the set of operation levels at which this block applies.
+ *
+ * @return the set of blocked operation levels
+ */
public EnumSet<ClusterBlockLevel> levels() {
}
/**
+ * Determines if this block applies at the specified operation level.
+ *
+ * {@code
+ * ClusterBlock block = getClusterBlock();
+ * if (block.contains(ClusterBlockLevel.WRITE)) {
+ * // This block prevents write operations
+ * }
+ * }
+ *
+ * @param level the operation level to check
+ * @return {@code true} if this block applies to the specified level, {@code false} otherwise
+ */
public boolean contains(ClusterBlockLevel level) {
return levels.contains(level);
}
/**
- * Should operations get into retry state if this block is present.
+ * Determines if operations should enter a retry state when encountering this block.
+ * Retryable blocks indicate temporary conditions that may be resolved automatically.
+ *
+ * @return {@code true} if operations should retry, {@code false} otherwise
*/
public boolean retryable() {
return this.retryable;
}
/**
- * Should global state persistence be disabled when this block is present. Note,
- * only relevant for global blocks.
+ * Determines if global cluster state persistence should be disabled when this block is present.
+ * This flag is only relevant for global blocks and is used to prevent state persistence during
+ * critical cluster operations.
+ *
+ * @return {@code true} if state persistence should be disabled, {@code false} otherwise
*/
public boolean disableStatePersistence() {
return this.disableStatePersistence;
@@ -189,6 +303,12 @@ public int hashCode() {
return 31 * Integer.hashCode(id) + Objects.hashCode(uuid);
}
+ /**
+ * Determines if resource release operations are allowed even when this block is active.
+ * This is important for allowing cleanup operations to proceed during blocked states.
+ *
+ * @return {@code true} if resource release is allowed, {@code false} otherwise
+ */
public boolean isAllowReleaseResources() {
return allowReleaseResources;
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java
index 262044b091ac7..df784d848b6ff 100644
--- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java
+++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java
@@ -11,13 +11,52 @@
import java.util.EnumSet;
+/**
+ * Defines the different levels at which cluster blocks can restrict operations.
+ * Each level represents a category of operations that can be independently blocked.
+ *
+ *
+ * {@code
+ * // Create a block for write operations only
+ * EnumSet<ClusterBlockLevel> levels = EnumSet.of(ClusterBlockLevel.WRITE);
+ * }
+ *
+ * @see org.elasticsearch.cluster.block.ClusterBlock
+ */
public enum ClusterBlockLevel {
+ /** Blocks data read operations such as searches and get requests */
READ,
+
+ /** Blocks data write operations such as indexing, updates, and deletes */
WRITE,
+
+ /** Blocks metadata read operations */
METADATA_READ,
+
+ /** Blocks metadata write operations such as mapping and settings updates */
METADATA_WRITE,
+
+ /** Blocks refresh operations */
REFRESH;
+ /** A set containing all possible cluster block levels */
public static final EnumSet<ClusterBlockLevel> ALL = EnumSet.allOf(ClusterBlockLevel.class);
diff --git a/server/src/main/java/org/elasticsearch/common/AsyncBiFunction.java b/server/src/main/java/org/elasticsearch/common/AsyncBiFunction.java
/**
+ * {@code
+ * AsyncBiFunction<String, String, Boolean> matcher = ...;
+ * }
 */
diff --git a/server/src/main/java/org/elasticsearch/common/CheckedBiConsumer.java b/server/src/main/java/org/elasticsearch/common/CheckedBiConsumer.java
/**
+ * {@code
+ * CheckedBiConsumer<String, Integer, IOException> consumer = ...;
+ * }
 */
diff --git a/server/src/main/java/org/elasticsearch/common/CheckedBiFunction.java b/server/src/main/java/org/elasticsearch/common/CheckedBiFunction.java
/**
+ * {@code
+ * CheckedBiFunction<String, Integer, Long, IOException> function = ...;
+ * }
 */
diff --git a/server/src/main/java/org/elasticsearch/common/CheckedIntFunction.java b/server/src/main/java/org/elasticsearch/common/CheckedIntFunction.java
/**
+ * {@code
+ * CheckedIntFunction<String, IOException> function = ...;
+ * }
 */
diff --git a/server/src/main/java/org/elasticsearch/common/CheckedSupplier.java b/server/src/main/java/org/elasticsearch/common/CheckedSupplier.java
/**
+ * {@code
+ * CheckedSupplier<String, IOException> supplier = ...;
+ * }
 */
diff --git a/server/src/main/java/org/elasticsearch/common/Classes.java b/server/src/main/java/org/elasticsearch/common/Classes.java
/**
+ * Utility methods for inspecting {@link Class} objects.
+ *
+ * {@code
+ * // Check if a class is an inner (non-static nested) class
+ * boolean isInner = Classes.isInnerClass(MyClass.InnerClass.class);
+ *
+ * // Check if a class is concrete (not interface and not abstract)
+ * boolean isConcrete = Classes.isConcrete(MyImplementation.class);
+ * }
+ */
public class Classes {
+ /**
+ * Determines whether the specified class is a non-static inner class.
+ * A class is considered an inner class if it is not static and has an enclosing class.
+ *
+ * @param clazz the class to check
+ * @return {@code true} if the class is a non-static inner class, {@code false} otherwise
+ */
public static boolean isInnerClass(Class<?> clazz) {
return Modifier.isStatic(clazz.getModifiers()) == false && clazz.getEnclosingClass() != null;
}
+ /**
+ * Determines whether the specified class is concrete.
+ * A class is considered concrete if it is not an interface and not abstract.
+ *
+ * @param clazz the class to check
+ * @return {@code true} if the class is concrete (not an interface and not abstract), {@code false} otherwise
+ */
public static boolean isConcrete(Class<?> clazz) {
int modifiers = clazz.getModifiers();
return clazz.isInterface() == false && Modifier.isAbstract(modifiers) == false;
diff --git a/server/src/main/java/org/elasticsearch/common/Explicit.java b/server/src/main/java/org/elasticsearch/common/Explicit.java
index bbd586e236bea..9ec5427215479 100644
--- a/server/src/main/java/org/elasticsearch/common/Explicit.java
+++ b/server/src/main/java/org/elasticsearch/common/Explicit.java
@@ -16,10 +16,27 @@
* a) set implicitly e.g. through some default value
* b) set explicitly e.g. from a user selection
*
- * When merging conflicting configuration settings such as
+ * {@code
+ * // Creating explicit and implicit values
+ * Explicit<Boolean> userChoice = new Explicit<>(true, true);   // set explicitly
+ * Explicit<Boolean> defaulted = new Explicit<>(false, false);  // set implicitly
+ * }
+ *
+ * @param <T> the type of the configuration value
 */
diff --git a/server/src/main/java/org/elasticsearch/common/ExponentiallyWeightedMovingAverage.java b/server/src/main/java/org/elasticsearch/common/ExponentiallyWeightedMovingAverage.java
/**
+ * Implements an exponentially weighted moving average (EWMA) where each new value
+ * is combined with the current average as:
+ *
+ * newAvg = (alpha * newValue) + ((1 - alpha) * currentAvg)
+ *
+ *
+ * {@code
+ * // Create EWMA with alpha=0.2 and initial average of 100
+ * ExponentiallyWeightedMovingAverage ewma = new ExponentiallyWeightedMovingAverage(0.2, 100.0);
+ *
+ * // Add new values
+ * ewma.addValue(110.0);
+ * ewma.addValue(105.0);
+ *
+ * // Get current average
+ * double average = ewma.getAverage();
+ * }
*/
public class ExponentiallyWeightedMovingAverage {
@@ -21,9 +39,13 @@ public class ExponentiallyWeightedMovingAverage {
private final AtomicLong averageBits;
/**
- * Create a new EWMA with a given {@code alpha} and {@code initialAvg}. A smaller alpha means
- * that new data points will have less weight, where a high alpha means older data points will
- * have a lower influence.
+ * Creates a new EWMA with the specified smoothing factor and initial average.
+ * A smaller alpha gives less weight to new data points (slower response to changes),
+ * while a higher alpha gives more weight to new data points (faster response to changes).
+ *
+ * @param alpha the smoothing factor, must be between 0 and 1 (inclusive)
+ * @param initialAvg the initial average value
+ * @throws IllegalArgumentException if alpha is not between 0 and 1
*/
public ExponentiallyWeightedMovingAverage(double alpha, double initialAvg) {
if (alpha < 0 || alpha > 1) {
@@ -33,10 +55,21 @@ public ExponentiallyWeightedMovingAverage(double alpha, double initialAvg) {
this.averageBits = new AtomicLong(Double.doubleToLongBits(initialAvg));
}
+ /**
+ * Returns the current exponentially weighted moving average.
+ *
+ * @return the current average value
+ */
public double getAverage() {
return Double.longBitsToDouble(this.averageBits.get());
}
+ /**
+ * Adds a new value to the moving average calculation. This method updates the average
+ * using a lock-free compare-and-set operation, making it thread-safe.
+ *
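+ * A sketch of the effect of one update (assuming alpha = 0.5 and a current average of 10.0):
+ * {@code
+ * ewma.addValue(20.0); // new average = 0.5 * 20.0 + 0.5 * 10.0 = 15.0
+ * }
+ *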
+ * @param newValue the new value to incorporate into the moving average
+ */
public void addValue(double newValue) {
boolean successful = false;
do {
diff --git a/server/src/main/java/org/elasticsearch/common/Numbers.java b/server/src/main/java/org/elasticsearch/common/Numbers.java
index 6e86604adad89..9e42c92a23e4e 100644
--- a/server/src/main/java/org/elasticsearch/common/Numbers.java
+++ b/server/src/main/java/org/elasticsearch/common/Numbers.java
@@ -16,7 +16,24 @@
import java.math.BigInteger;
/**
- * A set of utilities for numbers.
+ * A set of utilities for numbers. This class provides methods for converting between
+ * primitive numeric types and byte arrays, as well as performing exact numeric conversions
+ * with overflow checking.
+ *
+ * {@code
+ * // Convert primitives to byte arrays
+ * byte[] intBytes = Numbers.intToBytes(12345);
+ * byte[] longBytes = Numbers.longToBytes(123456789L);
+ *
+ * // Convert byte arrays to primitives
+ * int value = Numbers.bytesToInt(intBytes, 0);
+ * long longValue = Numbers.bytesToLong(longBytes, 0);
+ *
+ * // Exact conversions with overflow checking
+ * Number num = BigDecimal.valueOf(123.0);
+ * long exactValue = Numbers.toLongExact(num);
+ * }
*/
public final class Numbers {
private static final BigInteger MAX_LONG_VALUE = BigInteger.valueOf(Long.MAX_VALUE);
@@ -24,18 +41,45 @@ public final class Numbers {
private Numbers() {}
+ /**
+ * Converts a byte array to a short value using big-endian byte order.
+ *
+ * @param bytes the byte array to read from
+ * @param offset the offset in the array to start reading from
+ * @return the short value read from the byte array
+ */
public static short bytesToShort(byte[] bytes, int offset) {
return ByteUtils.readShortBE(bytes, offset);
}
+ /**
+ * Converts a byte array to an int value using big-endian byte order.
+ *
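+ * A small worked example (hypothetical bytes):
+ * {@code
+ * byte[] bytes = {0x00, 0x00, 0x01, 0x00};
+ * int value = Numbers.bytesToInt(bytes, 0); // 256 in big-endian order
+ * }
+ *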
+ * @param bytes the byte array to read from
+ * @param offset the offset in the array to start reading from
+ * @return the int value read from the byte array
+ */
public static int bytesToInt(byte[] bytes, int offset) {
return ByteUtils.readIntBE(bytes, offset);
}
+ /**
+ * Converts a byte array to a long value using big-endian byte order.
+ *
+ * @param bytes the byte array to read from
+ * @param offset the offset in the array to start reading from
+ * @return the long value read from the byte array
+ */
public static long bytesToLong(byte[] bytes, int offset) {
return ByteUtils.readLongBE(bytes, offset);
}
+ /**
+ * Converts a {@link BytesRef} to a long value using big-endian byte order.
+ *
+ * @param bytes the BytesRef containing the bytes to convert
+ * @return the long value read from the BytesRef
+ */
public static long bytesToLong(BytesRef bytes) {
return bytesToLong(bytes.bytes, bytes.offset);
}
diff --git a/server/src/main/java/org/elasticsearch/common/Priority.java b/server/src/main/java/org/elasticsearch/common/Priority.java
index 0057b6ffa8f81..d67cda0926805 100644
--- a/server/src/main/java/org/elasticsearch/common/Priority.java
+++ b/server/src/main/java/org/elasticsearch/common/Priority.java
@@ -14,6 +14,32 @@
import java.io.IOException;
+/**
+ * Represents the priority levels for tasks and operations in Elasticsearch.
+ * Priority determines the order in which tasks are executed, with higher priority
+ * tasks running before lower priority ones.
+ *
+ * {@code
+ * // Set task priority
+ * Priority priority = Priority.HIGH;
+ *
+ * // Compare priorities
+ * if (priority.after(Priority.NORMAL)) {
+ * // This priority runs after NORMAL priority
+ * }
+ *
+ * // Serialize/deserialize
+ * StreamOutput out = ...;
+ * Priority.writeTo(Priority.URGENT, out);
+ *
+ * StreamInput in = ...;
+ * Priority p = Priority.readFrom(in);
+ * }
+ */
public enum Priority {
IMMEDIATE((byte) 0),
@@ -23,14 +49,35 @@ public enum Priority {
LOW((byte) 4),
LANGUID((byte) 5);
+ /**
+ * Reads a Priority value from the stream input.
+ *
+ * @param input the stream to read from
+ * @return the Priority read from the stream
+ * @throws IOException if an I/O error occurs
+ */
public static Priority readFrom(StreamInput input) throws IOException {
return fromByte(input.readByte());
}
+ /**
+ * Writes a Priority value to the stream output.
+ *
+ * @param priority the priority to write
+ * @param output the stream to write to
+ * @throws IOException if an I/O error occurs
+ */
public static void writeTo(Priority priority, StreamOutput output) throws IOException {
output.writeByte(priority.value);
}
+ /**
+ * Converts a byte value to its corresponding Priority.
+ *
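+ * For example:
+ * {@code
+ * Priority p = Priority.fromByte((byte) 0); // Priority.IMMEDIATE
+ * }
+ *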
+ * @param b the byte value (0-5)
+ * @return the Priority corresponding to the byte value
+ * @throws IllegalArgumentException if the byte value does not correspond to a valid Priority
+ */
public static Priority fromByte(byte b) {
return switch (b) {
case 0 -> IMMEDIATE;
diff --git a/server/src/main/java/org/elasticsearch/common/StopWatch.java b/server/src/main/java/org/elasticsearch/common/StopWatch.java
index 67e59f8a393bd..d426d822a465b 100644
--- a/server/src/main/java/org/elasticsearch/common/StopWatch.java
+++ b/server/src/main/java/org/elasticsearch/common/StopWatch.java
@@ -30,7 +30,22 @@
* This class is normally used to verify performance during proof-of-concepts
* and in development, rather than as part of production applications.
*
+ * {@code
+ * StopWatch stopWatch = new StopWatch("My Operations");
*
+ * stopWatch.start("Task 1");
+ * // ... perform task 1
+ * stopWatch.stop();
+ *
+ * stopWatch.start("Task 2");
+ * // ... perform task 2
+ * stopWatch.stop();
+ *
+ * System.out.println(stopWatch.prettyPrint());
+ * System.out.println("Total time: " + stopWatch.totalTime());
+ * System.out.println("Last task time: " + stopWatch.lastTaskTime());
+ * }
*/
public class StopWatch {
diff --git a/server/src/main/java/org/elasticsearch/common/TriConsumer.java b/server/src/main/java/org/elasticsearch/common/TriConsumer.java
index 7e9445cddcce0..7d41314561455 100644
--- a/server/src/main/java/org/elasticsearch/common/TriConsumer.java
+++ b/server/src/main/java/org/elasticsearch/common/TriConsumer.java
@@ -12,6 +12,21 @@
/**
* Represents an operation that accepts three arguments and returns no result.
+ * This is a three-arity specialization of {@link java.util.function.Consumer}.
+ * Unlike most other functional interfaces, {@code TriConsumer} is expected to operate via side-effects.
+ *
+ * {@code
+ * TriConsumer<String, Integer, Boolean> logEntry = (name, count, flag) ->
+ *     System.out.println(name + ":" + count + ":" + flag);
+ * logEntry.apply("requests", 42, true);
+ * }
*
* @param <S> the type of the first argument
* @param <T> the type of the second argument
* @param <U> the type of the third argument
*/
public interface TriConsumer<S, T, U> {
/**
- * Applies this function to the given arguments.
+ * Performs this operation on the given arguments.
*
- * @param s the first function argument
- * @param t the second function argument
- * @param u the third function argument
+ * @param s the first input argument
+ * @param t the second input argument
+ * @param u the third input argument
*/
void apply(S s, T t, U u);
}
diff --git a/server/src/main/java/org/elasticsearch/common/TriFunction.java b/server/src/main/java/org/elasticsearch/common/TriFunction.java
index f833df5afe16c..67cfe37571efb 100644
--- a/server/src/main/java/org/elasticsearch/common/TriFunction.java
+++ b/server/src/main/java/org/elasticsearch/common/TriFunction.java
@@ -12,6 +12,18 @@
/**
* Represents a function that accepts three arguments and produces a result.
+ * This is a three-arity specialization of {@link java.util.function.Function}.
+ *
+ * {@code
+ * TriFunction<Integer, Integer, Integer, Integer> sum3 = (a, b, c) -> a + b + c;
+ * Integer result = sum3.apply(1, 2, 3); // 6
+ * }
*
* @param <S> the type of the first argument
* @param <T> the type of the second argument
* @param <U> the type of the third argument
* @param <R> the type of the result
*/
public interface TriFunction<S, T, U, R> {
/**
* Applies this function to the given arguments.
*
* @param s the first function argument
* @param t the second function argument
* @param u the third function argument
- * @return the result
+ * @return the result of applying this function
*/
R apply(S s, T t, U u);
}
diff --git a/server/src/main/java/org/elasticsearch/common/UUIDs.java b/server/src/main/java/org/elasticsearch/common/UUIDs.java
index 6b19fcddb87ca..a5957238e9db5 100644
--- a/server/src/main/java/org/elasticsearch/common/UUIDs.java
+++ b/server/src/main/java/org/elasticsearch/common/UUIDs.java
@@ -17,7 +17,28 @@
import java.util.function.Supplier;
/**
- * Utility class for generating various types of UUIDs.
+ * Utility class for generating various types of UUIDs. This class provides methods for generating
+ * both time-based and random UUIDs, with Base64 encoding for compact string representations.
+ *
+ * {@code
+ * // Generate a time-based UUID for use as a Lucene document ID
+ * String documentId = UUIDs.base64UUID();
+ *
+ * // Generate a random UUID
+ * String randomId = UUIDs.randomBase64UUID();
+ *
+ * // Generate a random UUID with a specific Random instance
+ * Random random = new Random();
+ * String customRandomId = UUIDs.randomBase64UUID(random);
+ *
+ * // Generate a secure random UUID as a SecureString
+ * SecureString secureId = UUIDs.randomBase64UUIDSecureString();
+ * }
*/
public class UUIDs {
private static final AtomicInteger sequenceNumber = new AtomicInteger(SecureRandomHolder.INSTANCE.nextInt());
diff --git a/server/src/main/java/org/elasticsearch/common/ValidationException.java b/server/src/main/java/org/elasticsearch/common/ValidationException.java
index aad91dbac9b4a..718bcf6de7440 100644
--- a/server/src/main/java/org/elasticsearch/common/ValidationException.java
+++ b/server/src/main/java/org/elasticsearch/common/ValidationException.java
@@ -13,7 +13,29 @@
import java.util.List;
/**
- * Encapsulates an accumulation of validation errors
+ * Encapsulates an accumulation of validation errors. This exception allows multiple validation
+ * errors to be collected and reported together, making it easier to provide comprehensive
+ * feedback about what went wrong during validation.
+ *
+ * {@code
+ * ValidationException validationException = new ValidationException();
+ *
+ * if (name == null || name.isEmpty()) {
+ * validationException.addValidationError("name cannot be null or empty");
+ * }
+ * if (age < 0) {
+ * validationException.addValidationError("age must be positive");
+ * }
+ *
+ * // Throw if any validation errors were found
+ * validationException.throwIfValidationErrorsExist();
+ *
+ * // Or manually check
+ * if (!validationException.validationErrors().isEmpty()) {
+ * throw validationException;
+ * }
+ * }
*/
public class ValidationException extends IllegalArgumentException {
private final List<String> validationErrors = new ArrayList<>();
diff --git a/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java
/**
+ * A reference to bytes that may be backed by a byte array or another container,
+ * providing random access and streaming reads without copying.
+ *
+ * {@code
+ * // Create from byte array
+ * BytesReference ref = new BytesArray(new byte[]{1, 2, 3, 4});
+ *
+ * // Create from XContentBuilder
+ * XContentBuilder builder = XContentFactory.jsonBuilder().startObject().field("key", "value").endObject();
+ * BytesReference jsonBytes = BytesReference.bytes(builder);
+ *
+ * // Access bytes
+ * byte b = ref.get(0);
+ * int length = ref.length();
+ *
+ * // Convert to byte array
+ * byte[] array = BytesReference.toBytes(ref);
+ *
+ * // Create stream input for reading
+ * StreamInput input = ref.streamInput();
+ * }
*/
public interface BytesReference extends Comparable<BytesReference>, ToXContentFragment {
diff --git a/server/src/main/java/org/elasticsearch/common/cache/Cache.java b/server/src/main/java/org/elasticsearch/common/cache/Cache.java
/**
+ * {@code
+ * // Create a simple cache with maximum weight
+ * Cache<String, String> cache = CacheBuilder.<String, String>builder()
+ *     .setMaximumWeight(1000)
+ *     .build();
+ * }
+ *
+ * @param <K> the type of cache keys
+ * @param <V> the type of cache values
 */
diff --git a/server/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java b/server/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java
/**
+ * {@code
+ * Cache<String, Long> cache = CacheBuilder.<String, Long>builder().build();
+ * }
+ *
+ * @param <K> the type of cache keys
+ * @param <V> the type of cache values
 */
diff --git a/server/src/main/java/org/elasticsearch/common/cache/RemovalListener.java b/server/src/main/java/org/elasticsearch/common/cache/RemovalListener.java
/**
+ * {@code
+ * RemovalListener<String, String> listener = notification -> { ... };
+ * }
+ *
+ * @param <K> the type of cache keys
+ * @param <V> the type of cache values
 */
diff --git a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java
/**
+ * Utility methods for working with {@link java.util.Iterator}s.
+ *
+ * {@code
+ * // Create a single-element iterator
+ * Iterator<String> it = Iterators.single("value");
+ * }
+ */
public class Iterators {
/**
- * Returns a single element iterator over the supplied value.
+ * Returns a single-element iterator over the supplied value.
+ * The returned iterator will produce exactly one element before being exhausted.
+ *
+ * @param element the single element to iterate over (must not be null)
+ * @param <T> the type of the element
+ * @return an iterator over the single supplied element
 */
diff --git a/server/src/main/java/org/elasticsearch/env/Environment.java b/server/src/main/java/org/elasticsearch/env/Environment.java
/**
+ * Represents the environment in which the node runs, resolving the home, config,
+ * data, and log paths from settings.
+ *
+ * {@code
+ * Settings settings = Settings.builder()
+ * .put(Environment.PATH_HOME_SETTING.getKey(), "/opt/elasticsearch")
+ * .putList(Environment.PATH_DATA_SETTING.getKey(), "/data1", "/data2")
+ * .build();
+ * Environment env = new Environment(settings, configPath);
+ * Path[] dataDirs = env.dataDirs();
+ * }
*/
@SuppressForbidden(reason = "configures paths for the system")
// TODO: move PathUtils to be package-private here instead of
diff --git a/server/src/main/java/org/elasticsearch/env/ShardLock.java b/server/src/main/java/org/elasticsearch/env/ShardLock.java
index a522cf5176ca0..e7dca42a49a38 100644
--- a/server/src/main/java/org/elasticsearch/env/ShardLock.java
+++ b/server/src/main/java/org/elasticsearch/env/ShardLock.java
@@ -15,9 +15,24 @@
import java.util.concurrent.atomic.AtomicBoolean;
/**
- * A shard lock guarantees exclusive access to a shards data
- * directory. Internal processes should acquire a lock on a shard
- * before executing any write operations on the shards data directory.
+ * Provides exclusive access to a shard's data directory through a locking mechanism.
+ * {@code
+ * ShardId shardId = new ShardId("myindex", "_na_", 0);
+ * try (ShardLock lock = nodeEnvironment.shardLock(shardId, "operation description")) {
+ * // perform write operations on shard data directory
+ * modifyShardData(shardId);
+ * } // lock automatically released
+ * }
*
* @see NodeEnvironment
*/
@@ -26,17 +41,31 @@ public abstract class ShardLock implements Closeable {
private final ShardId shardId;
private final AtomicBoolean closed = new AtomicBoolean(false);
+ /**
+ * Constructs a shard lock for the specified shard ID.
+ *
+ * @param id the shard identifier for this lock
+ */
public ShardLock(ShardId id) {
this.shardId = id;
}
/**
- * Returns the locks shards Id.
+ * Returns the shard ID protected by this lock.
+ *
+ * @return the shard identifier
*/
public final ShardId getShardId() {
return shardId;
}
+ /**
+ * Releases the shard lock.
+ */
diff --git a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java
+ /**
+ * Retrieves the index this component belongs to.
+ *
+ * @return the {@link Index} associated with this component
+ *
+ * {@code
+ * AbstractIndexComponent component = ...;
+ * Index index = component.index();
+ * String indexName = index.getName();
+ * String indexUuid = index.getUUID();
+ * }
+ */
public Index index() {
return indexSettings.getIndex();
}
+ /**
+ * Retrieves the index settings for this component.
+ *
+ * @return the {@link IndexSettings} containing all configuration and settings for the index
+ *
+ * {@code
+ * AbstractIndexComponent component = ...;
+ * IndexSettings settings = component.getIndexSettings();
+ * int numberOfShards = settings.getNumberOfShards();
+ * int numberOfReplicas = settings.getNumberOfReplicas();
+ * }
+ */
public IndexSettings getIndexSettings() {
return indexSettings;
}
diff --git a/server/src/main/java/org/elasticsearch/index/CloseUtils.java b/server/src/main/java/org/elasticsearch/index/CloseUtils.java
index 86698a0f4a295..9a92e4a4de76e 100644
--- a/server/src/main/java/org/elasticsearch/index/CloseUtils.java
+++ b/server/src/main/java/org/elasticsearch/index/CloseUtils.java
@@ -34,8 +34,20 @@ public synchronized Throwable fillInStackTrace() {
};
/**
- * Execute a naturally-async action (e.g. to close a shard) but using the current thread so that it completes synchronously, re-throwing
- * any exception that might be passed to its listener.
+ * Executes a naturally-async action synchronously on the current thread, blocking until completion.
+ * This method is useful for closing shards or performing other async operations that must complete
+ * before proceeding. Any exception passed to the listener is re-thrown.
+ *
+ * @param action the async action to execute, accepting an ActionListener and potentially throwing IOException
+ * @throws IOException if the action completes with an IOException
+ * @throws RuntimeException if the action completes with a RuntimeException
+ *
+ * {@code
+ * CloseUtils.executeDirectly(listener -> {
+ * shard.close(listener);
+ * });
+ * }
*/
public static void executeDirectly(CheckedConsumer<ActionListener<Void>, IOException> action) throws IOException {
diff --git a/server/src/main/java/org/elasticsearch/index/Index.java b/server/src/main/java/org/elasticsearch/index/Index.java
+ /**
+ * Constructs a new Index with the given name and UUID.
+ *
+ * @param name the index name, must not be null
+ * @param uuid the index UUID, must not be null
+ *
+ * {@code
+ * Index index = new Index("my-index", "abc123-def456");
+ * }
+ */
public Index(String name, String uuid) {
this.name = Objects.requireNonNull(name);
this.uuid = Objects.requireNonNull(uuid);
}
/**
- * Read from a stream.
+ * Constructs an Index by reading from a stream.
+ * Deserializes the index name and UUID from the provided input stream.
+ *
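+ * A round-trip sketch (assuming {@code out} is a {@code BytesStreamOutput} populated by {@link #writeTo}):
+ * {@code
+ * Index copy = new Index(out.bytes().streamInput());
+ * }
+ *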
+ * @param in the stream to read from
+ * @throws IOException if an I/O error occurs while reading from the stream
*/
public Index(StreamInput in) throws IOException {
this.name = in.readString();
this.uuid = in.readString();
}
+ /**
+ * Retrieves the name of this index.
+ *
+ * @return the index name
+ *
+ * {@code
+ * Index index = new Index("my-index", "abc123");
+ * String name = index.getName(); // Returns "my-index"
+ * }
+ */
public String getName() {
return this.name;
}
+ /**
+ * Retrieves the unique identifier (UUID) of this index.
+ *
+ * @return the index UUID
+ *
+ * {@code
+ * Index index = new Index("my-index", "abc123");
+ * String uuid = index.getUUID(); // Returns "abc123"
+ * }
+ */
public String getUUID() {
return uuid;
}
@@ -95,12 +133,36 @@ public int hashCode() {
return result;
}
+ /**
+ * Serializes this index to the provided output stream.
+ * Writes the index name and UUID in order.
+ *
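+ * A usage sketch (assuming a {@code BytesStreamOutput} named {@code out}):
+ * {@code
+ * index.writeTo(out);
+ * }
+ *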
+ * @param out the output stream to write to
+ * @throws IOException if an I/O error occurs during serialization
+ */
@Override
public void writeTo(final StreamOutput out) throws IOException {
out.writeString(name);
out.writeString(uuid);
}
+ /**
+ * Converts this index to XContent format as a complete object.
+ * The output includes both the index name and UUID.
+ *
+ * @param builder the XContent builder to write to
+ * @param params additional parameters for the conversion (unused)
+ * @return the XContent builder for method chaining
+ * @throws IOException if an I/O error occurs during conversion
+ *
+ * {@code
+ * Index index = new Index("my-index", "abc123");
+ * XContentBuilder builder = XContentFactory.jsonBuilder();
+ * index.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ * // Result: {"index_name":"my-index","index_uuid":"abc123"}
+ * }
+ */
@Override
public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
builder.startObject();
@@ -108,12 +170,42 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa
return builder.endObject();
}
+ /**
+ * Converts this index to XContent format as a fragment (without wrapping object).
+ * Useful when embedding index information within a larger XContent structure.
+ *
+ * @param builder the XContent builder to write to
+ * @return the XContent builder for method chaining
+ * @throws IOException if an I/O error occurs during conversion
+ *
+ * {@code
+ * Index index = new Index("my-index", "abc123");
+ * XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+ * index.toXContentFragment(builder);
+ * builder.endObject();
+ * }
+ */
public XContentBuilder toXContentFragment(final XContentBuilder builder) throws IOException {
builder.field(INDEX_NAME_KEY, name);
builder.field(INDEX_UUID_KEY, uuid);
return builder;
}
+ /**
+ * Parses an Index from XContent format.
+ * Expects the XContent to contain both "index_name" and "index_uuid" fields.
+ *
+ * @param parser the XContent parser to read from
+ * @return the parsed Index object
+ * @throws IOException if an I/O error occurs or the content is malformed
+ *
+ * {@code
+ * XContentParser parser = ... // parser with content {"index_name":"my-index","index_uuid":"abc123"}
+ * Index index = Index.fromXContent(parser);
+ * }
+ */
public static Index fromXContent(final XContentParser parser) throws IOException {
return INDEX_PARSER.parse(parser, null).build();
}
diff --git a/server/src/main/java/org/elasticsearch/index/IndexFeatures.java b/server/src/main/java/org/elasticsearch/index/IndexFeatures.java
index 051e746af00ee..8da43c302c410 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexFeatures.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexFeatures.java
@@ -14,25 +14,52 @@
import java.util.Set;
+/**
+ * Defines the set of features supported by Elasticsearch indices.
+ * This class provides both production and test features for index functionality.
+ */
public class IndexFeatures implements FeatureSpecification {
+ /**
+ * Retrieves the set of production node features supported by indices.
+ *
+ * @return an empty set as all index features are currently test-only
+ */
@Override
public Set<NodeFeature> getFeatures() {
diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java
+ /**
+ * Retrieves the name of this index mode as used in index settings.
+ *
+ * @return the mode name
+ *
+ * {@code
+ * IndexMode mode = IndexMode.TIME_SERIES;
+ * String name = mode.getName(); // Returns "time_series"
+ * }
+ */
public String getName() {
return name;
}
@@ -560,19 +571,43 @@ public String getName() {
*/
public abstract SourceFieldMapper.Mode defaultSourceMode();
+ /**
+ * Retrieves the default codec for this index mode.
+ *
+ * @return the codec name to use for indices in this mode
+ *
+ * {@code
+ * IndexMode mode = IndexMode.LOGSDB;
+ * String codec = mode.getDefaultCodec(); // Returns "best_compression"
+ * }
+ */
public String getDefaultCodec() {
return CodecService.DEFAULT_CODEC;
}
/**
- * Whether the default posting format (for inverted indices) from Lucene should be used.
+ * Determines whether the default posting format from Lucene should be used.
+ * By default, most index modes use custom postings formats.
+ *
+ * @return true if the default Lucene postings format should be used, false otherwise
*/
public boolean useDefaultPostingsFormat() {
return false;
}
/**
- * Parse a string into an {@link IndexMode}.
+ * Parses a string value into the corresponding IndexMode.
+ *
+ * @param value the string representation of the index mode (e.g., "standard", "time_series", "logsdb", "lookup")
+ * @return the corresponding IndexMode enum value
+ * @throws IllegalArgumentException if the value does not match any valid index mode
+ *
+ * {@code
+ * IndexMode mode = IndexMode.fromString("time_series");
+ * // mode is IndexMode.TIME_SERIES
+ * }
*/
public static IndexMode fromString(String value) {
return switch (value) {
@@ -590,6 +625,20 @@ public static IndexMode fromString(String value) {
};
}
+ /**
+ * Deserializes an IndexMode from a stream input.
+ *
+ * @param in the stream to read from
+ * @return the deserialized IndexMode
+ * @throws IOException if an I/O error occurs while reading
+ * @throws IllegalStateException if the read value does not correspond to a valid index mode
+ *
+ * {@code
+ * StreamInput in = ...;
+ * IndexMode mode = IndexMode.readFrom(in);
+ * }
+ */
public static IndexMode readFrom(StreamInput in) throws IOException {
int mode = in.readByte();
return switch (mode) {
@@ -601,6 +650,21 @@ public static IndexMode readFrom(StreamInput in) throws IOException {
};
}
+ /**
+ * Serializes an IndexMode to a stream output.
+ * Handles backwards compatibility by mapping LOOKUP mode to STANDARD for older transport versions.
+ *
+ * @param indexMode the IndexMode to serialize
+ * @param out the stream to write to
+ * @throws IOException if an I/O error occurs while writing
+ *
+ * {@code
+ * IndexMode mode = IndexMode.TIME_SERIES;
+ * StreamOutput out = ...;
+ * IndexMode.writeTo(mode, out);
+ * }
+ */
public static void writeTo(IndexMode indexMode, StreamOutput out) throws IOException {
final int code = switch (indexMode) {
case STANDARD -> 0;
diff --git a/server/src/main/java/org/elasticsearch/index/IndexNotFoundException.java b/server/src/main/java/org/elasticsearch/index/IndexNotFoundException.java
index 28dbeb8f827c3..33770c08c654f 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexNotFoundException.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexNotFoundException.java
@@ -14,9 +14,23 @@
import java.io.IOException;
+/**
+ * Exception thrown when a requested index cannot be found in the cluster.
+ * This exception is used to indicate that an operation failed because the specified index does not exist.
+ */
public final class IndexNotFoundException extends ResourceNotFoundException {
/**
- * Construct with a custom message.
+ * Constructs an IndexNotFoundException with a custom message and index name.
+ * The final message will be formatted as "no such index [indexName] and customMessage".
+ *
+ * @param message additional context message to append to the standard error message
+ * @param index the name of the index that was not found
+ *
+ * {@code
+ * throw new IndexNotFoundException("it was deleted", "my-index");
+ * // Message: "no such index [my-index] and it was deleted"
+ * }
*/
public IndexNotFoundException(String message, String index) {
super("no such index [" + index + "] and " + message);
@@ -24,34 +38,121 @@ public IndexNotFoundException(String message, String index) {
}
+ /**
+ * Constructs an IndexNotFoundException with a custom message and Index object.
+ * The final message will be formatted as "no such index [indexName] and customMessage".
+ *
+ * @param message additional context message to append to the standard error message
+ * @param index the Index object that was not found
+ *
+ * {@code
+ * Index index = new Index("my-index", "abc123");
+ * throw new IndexNotFoundException("it was deleted", index);
+ * }
+ */
public IndexNotFoundException(String message, Index index) {
super("no such index [" + index + "] and " + message);
setIndex(index);
}
+ /**
+ * Constructs an IndexNotFoundException with just an index name.
+ *
+ * @param index the name of the index that was not found
+ *
+ * {@code
+ * throw new IndexNotFoundException("my-index");
+ * // Message: "no such index [my-index]"
+ * }
+ */
public IndexNotFoundException(String index) {
this(index, (Throwable) null);
}
+ /**
+ * Constructs an IndexNotFoundException with an index name and a cause.
+ *
+ * @param index the name of the index that was not found
+ * @param cause the underlying cause of this exception (may be null)
+ *
+ * {@code
+ * try {
+ * // some operation
+ * } catch (IOException e) {
+ * throw new IndexNotFoundException("my-index", e);
+ * }
+ * }
+ */
public IndexNotFoundException(String index, Throwable cause) {
super("no such index [" + index + "]", cause);
setIndex(index);
}
+ /**
+ * Constructs an IndexNotFoundException for an index within a specific project.
+ * The message will indicate both the index and the project that were searched.
+ *
+ * @param index the Index object that was not found
+ * @param id the project identifier where the index was searched
+ *
+ * {@code
+ * Index index = new Index("my-index", "abc123");
+ * ProjectId projectId = new ProjectId("project-1");
+ * throw new IndexNotFoundException(index, projectId);
+ * // Message: "no such index [my-index] in project [project-1]"
+ * }
+ */
public IndexNotFoundException(Index index, ProjectId id) {
super("no such index [" + index.getName() + "] in project [" + id + "]");
setIndex(index);
}
+ /**
+ * Constructs an IndexNotFoundException with an Index object.
+ *
+ * @param index the Index object that was not found
+ *
+ * {@code
+ * Index index = new Index("my-index", "abc123");
+ * throw new IndexNotFoundException(index);
+ * }
+ */
public IndexNotFoundException(Index index) {
this(index, (Throwable) null);
}
+ /**
+ * Constructs an IndexNotFoundException with an Index object and a cause.
+ *
+ * @param index the Index object that was not found
+ * @param cause the underlying cause of this exception (may be null)
+ *
+ * {@code
+ * Index index = new Index("my-index", "abc123");
+ * try {
+ * // some operation
+ * } catch (IOException e) {
+ * throw new IndexNotFoundException(index, e);
+ * }
+ * }
+ */
public IndexNotFoundException(Index index, Throwable cause) {
super("no such index [" + index.getName() + "]", cause);
setIndex(index);
}
+ /**
+ * Deserializes an IndexNotFoundException from a stream input.
+ *
+ * @param in the stream to read from
+ * @throws IOException if an I/O error occurs while reading from the stream
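+ *
+ * A minimal usage sketch:
+ * {@code
+ * StreamInput in = ...;
+ * IndexNotFoundException e = new IndexNotFoundException(in);
+ * }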
+ */
public IndexNotFoundException(StreamInput in) throws IOException {
super(in);
}
diff --git a/server/src/main/java/org/elasticsearch/index/SlowLogLevel.java b/server/src/main/java/org/elasticsearch/index/SlowLogLevel.java
index cc830926d0058..35e266f66cad4 100644
--- a/server/src/main/java/org/elasticsearch/index/SlowLogLevel.java
+++ b/server/src/main/java/org/elasticsearch/index/SlowLogLevel.java
@@ -12,15 +12,23 @@
import java.util.Locale;
/**
- * Legacy enum class for index settings, kept for 7.x BWC compatibility. Do not use.
+ * Legacy enum class for slow log level settings in index operations.
+ * Kept for 7.x backwards compatibility. Do not use in new code.
+ *
+ * @deprecated Will be removed in version 9.0; use standard log4j logging levels instead.
* TODO: Remove in 9.0
*/
@Deprecated
public enum SlowLogLevel {
- WARN(3), // most specific - little logging
+ /** Warning level - most specific, minimal logging */
+ WARN(3),
+ /** Info level - moderate logging */
INFO(2),
+ /** Debug level - detailed logging */
DEBUG(1),
- TRACE(0); // least specific - lots of logging
+ /** Trace level - least specific, maximum logging */
+ TRACE(0);
private final int specificity;
@@ -28,10 +36,37 @@ public enum SlowLogLevel {
this.specificity = specificity;
}
+ /**
+ * Parses a string into a SlowLogLevel enum value.
+ *
+ * @param level the string representation of the log level (case-insensitive)
+ * @return the corresponding SlowLogLevel
+ * @throws IllegalArgumentException if the level string doesn't match any enum value
+ *
+ * {@code
+ * SlowLogLevel level = SlowLogLevel.parse("warn");
+ * // level is SlowLogLevel.WARN
+ * }
+ */
public static SlowLogLevel parse(String level) {
return valueOf(level.toUpperCase(Locale.ROOT));
}
+ /**
+ * Determines whether this log level is enabled when logging at the given level.
+ * A level is enabled if its specificity is less than or equal to that of the level to be used.
+ *
+ * @param levelToBeUsed the log level to check against
+ * @return true if this level is enabled for the given level, false otherwise
+ *
+ * {@code
+ * // INFO tries to log with WARN level - should allow
+ * boolean enabled = SlowLogLevel.INFO.isLevelEnabledFor(SlowLogLevel.WARN);
+ * // returns true because INFO (2) <= WARN (3)
+ * }
+ */
boolean isLevelEnabledFor(SlowLogLevel levelToBeUsed) {
// example: this.info(2) tries to log with levelToBeUsed.warn(3) - should allow
return this.specificity <= levelToBeUsed.specificity;
diff --git a/server/src/main/java/org/elasticsearch/index/TimestampBounds.java b/server/src/main/java/org/elasticsearch/index/TimestampBounds.java
index ca07bb4b598e7..bdb1624a50f0f 100644
--- a/server/src/main/java/org/elasticsearch/index/TimestampBounds.java
+++ b/server/src/main/java/org/elasticsearch/index/TimestampBounds.java
@@ -11,12 +11,31 @@
import java.time.Instant;
/**
- * Bounds for the {@code @timestamp} field on this index.
+ * Represents the time bounds for the {@code @timestamp} field on an index.
+ * Used primarily for time-series indices to enforce temporal boundaries on documents.
*/
public class TimestampBounds {
/**
- * @return an updated instance based on current instance with a new end time.
+ * Creates a new TimestampBounds instance with an updated end time.
+ * The new end time must be greater than the current end time.
+ *
+ * @param current the current TimestampBounds instance
+ * @param newEndTime the new end time to set
+ * @return a new TimestampBounds instance with the updated end time
+ * @throws IllegalArgumentException if the new end time is not greater than the current end time
+ *
+ * {@code
+ * TimestampBounds current = new TimestampBounds(
+ * Instant.parse("2024-01-01T00:00:00Z"),
+ * Instant.parse("2024-02-01T00:00:00Z")
+ * );
+ * TimestampBounds updated = TimestampBounds.updateEndTime(
+ * current,
+ * Instant.parse("2024-03-01T00:00:00Z")
+ * );
+ * }
*/
public static TimestampBounds updateEndTime(TimestampBounds current, Instant newEndTime) {
long newEndTimeMillis = newEndTime.toEpochMilli();
@@ -31,6 +50,20 @@ public static TimestampBounds updateEndTime(TimestampBounds current, Instant new
private final long startTime;
private final long endTime;
+ /**
+ * Constructs a TimestampBounds with the specified start and end times.
+ *
+ * @param startTime the first valid timestamp for the index
+ * @param endTime the first invalid timestamp for the index (exclusive upper bound)
+ *
+ * {@code
+ * TimestampBounds bounds = new TimestampBounds(
+ * Instant.parse("2024-01-01T00:00:00Z"),
+ * Instant.parse("2024-02-01T00:00:00Z")
+ * );
+ * }
+ */
public TimestampBounds(Instant startTime, Instant endTime) {
this(startTime.toEpochMilli(), endTime.toEpochMilli());
}
@@ -41,14 +74,18 @@ private TimestampBounds(long startTime, long endTime) {
}
/**
- * The first valid {@code @timestamp} for the index.
+ * Retrieves the first valid {@code @timestamp} for the index in milliseconds since epoch.
+ *
+ * @return the start time in milliseconds since epoch (inclusive lower bound)
*/
public long startTime() {
return startTime;
}
/**
- * The first invalid {@code @timestamp} for the index.
+ * Retrieves the first invalid {@code @timestamp} for the index in milliseconds since epoch.
+ *
+ * @return the end time in milliseconds since epoch (exclusive upper bound)
*/
public long endTime() {
return endTime;
diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisMode.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisMode.java
index b81beb2184ec4..fb34dc8ae65e8 100644
--- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisMode.java
+++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisMode.java
@@ -56,17 +56,43 @@ public AnalysisMode merge(AnalysisMode other) {
this.readableName = name;
}
+ /**
+ * Retrieves the human-readable name of this analysis mode.
+ *
+ * @return the readable name (e.g., "index time", "search time", "all")
+ *
+ * {@code
+ * AnalysisMode mode = AnalysisMode.INDEX_TIME;
+ * String name = mode.getReadableName(); // Returns "index time"
+ * }
+ */
public String getReadableName() {
return this.readableName;
}
/**
- * Returns a mode that is compatible with both this mode and the other mode, that is:
+ * Merges this analysis mode with another mode, returning a mode compatible with both.
+ * The merge rules are:
 *
+ * - {@code ALL} merged with another mode yields the other mode
+ * - a mode merged with itself yields the same mode
+ * - {@code INDEX_TIME} merged with {@code SEARCH_TIME} is incompatible
+ *
+ * @param other the analysis mode to merge with
+ * @return the merged analysis mode
+ * @throws IllegalStateException if attempting to merge incompatible modes (INDEX_TIME with SEARCH_TIME)
+ *
+ * {@code
+ * AnalysisMode merged = AnalysisMode.ALL.merge(AnalysisMode.INDEX_TIME);
+ * // merged is INDEX_TIME
+ *
+ * // This will throw IllegalStateException:
+ * AnalysisMode invalid = AnalysisMode.INDEX_TIME.merge(AnalysisMode.SEARCH_TIME);
+ * }
*/
public abstract AnalysisMode merge(AnalysisMode other);
}
diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalyzerProvider.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalyzerProvider.java
index 757c22ca7d8b5..ee21138c4cd69 100644
--- a/server/src/main/java/org/elasticsearch/index/analysis/AnalyzerProvider.java
+++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalyzerProvider.java
@@ -12,12 +12,53 @@
import org.apache.lucene.analysis.Analyzer;
import org.elasticsearch.injection.guice.Provider;
+/**
+ * Provides instances of Lucene {@link Analyzer} implementations for index analysis.
+ * This interface defines the contract for creating and managing analyzers used in text analysis operations.
+ *
+ * @param <T> the type of {@link Analyzer} created by this provider
+ */
 public interface AnalyzerProvider<T extends Analyzer> extends Provider<T> {

+ /**
+ * Retrieves the registered name of this analyzer.
+ *
+ * @return the analyzer name
+ *
+ * {@code
+ * AnalyzerProvider<?> provider = ...;
+ * String name = provider.name();
+ * }
+ */
String name();
+ /**
+ * Retrieves the scope of this analyzer, indicating whether it is index-specific or global.
+ *
+ * @return the {@link AnalyzerScope} defining the analyzer's visibility
+ *
+ * {@code
+ * AnalyzerProvider<?> provider = ...;
+ * AnalyzerScope scope = provider.scope();
+ * }
+ */
AnalyzerScope scope();
+ /**
+ * Retrieves an instance of the analyzer.
+ * This method may return a new instance or a cached instance depending on the implementation.
+ *
+ * @return the Analyzer instance
+ *
+ * {@code
+ * AnalyzerProvider<?> provider = ...;
+ * Analyzer analyzer = provider.get();
+ * }
+ */
@Override
T get();
}
diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalyzerScope.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalyzerScope.java
index 3b9dec6deee25..91bcc5d236db4 100644
--- a/server/src/main/java/org/elasticsearch/index/analysis/AnalyzerScope.java
+++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalyzerScope.java
@@ -9,8 +9,40 @@
package org.elasticsearch.index.analysis;
+/**
+ * Defines the scope of an analyzer, indicating its visibility and lifecycle within Elasticsearch.
+ * Analyzers can be scoped to a single index, multiple indices, or globally across the cluster.
+ */
public enum AnalyzerScope {
+ /**
+ * Analyzer is scoped to a single specific index.
+ * The analyzer lifecycle is tied to the index and is not shared.
+ *
+ * {@code
+ * // Index-scoped analyzers are defined in index settings
+ * Settings settings = Settings.builder()
+ * .put("index.analysis.analyzer.my_analyzer.type", "standard")
+ * .build();
+ * }
+ */
INDEX,
+
+ /**
+ * Analyzer is scoped to multiple indices.
+ * The analyzer can be shared across different indices.
+ */
INDICES,
+
+ /**
+ * Analyzer is scoped globally across the entire Elasticsearch cluster.
+ * These are typically built-in analyzers like "standard" or "keyword".
+ *
+ * {@code
+ * // Global analyzers are available to all indices without configuration
+ * // Examples: "standard", "simple", "whitespace", "keyword"
+ * }
+ */
GLOBAL
}
diff --git a/server/src/main/java/org/elasticsearch/index/analysis/CharFilterFactory.java b/server/src/main/java/org/elasticsearch/index/analysis/CharFilterFactory.java
index 1b9ca599fa9c6..a90e648c80ac8 100644
--- a/server/src/main/java/org/elasticsearch/index/analysis/CharFilterFactory.java
+++ b/server/src/main/java/org/elasticsearch/index/analysis/CharFilterFactory.java
@@ -11,12 +11,56 @@
import java.io.Reader;
+/**
+ * Factory interface for creating character filters in the analysis chain.
+ * Character filters process the input text before tokenization, performing operations
+ * such as HTML stripping, pattern replacement, or character mapping.
+ */
public interface CharFilterFactory {
+ /**
+ * Retrieves the name of this character filter factory.
+ *
+ * @return the character filter name
+ *
+ * {@code
+ * CharFilterFactory factory = ...;
+ * String name = factory.name(); // e.g., "html_strip"
+ * }
+ */
String name();
+ /**
+ * Creates a character filter that wraps the provided reader.
+ * This method is called during the analysis process to build the character filtering chain.
+ *
+ * @param reader the input reader to filter
+ * @return a new Reader that filters the input
+ *
+ * {@code
+ * CharFilterFactory factory = ...;
+ * Reader input = new StringReader("text");
+ * Reader filtered = factory.create(input);
+ * }
+ */
Reader create(Reader reader);
+ /**
+ * Normalizes a reader for use in multi-term queries.
+ * The default implementation returns the reader unchanged.
+ *
+ * @param reader the input reader to normalize
+ * @return a normalized Reader, by default the same reader
+ *
+ * {@code
+ * CharFilterFactory factory = ...;
+ * Reader input = new StringReader("text");
+ * Reader normalized = factory.normalize(input);
+ * }
+ */
default Reader normalize(Reader reader) {
return reader;
}
diff --git a/server/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactory.java b/server/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactory.java
index 0777009f72996..3842f1ed1a286 100644
--- a/server/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactory.java
+++ b/server/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactory.java
@@ -18,9 +18,39 @@
import java.util.List;
import java.util.function.Function;
+/**
+ * Factory interface for creating token filters in the analysis chain.
+ * Token filters process token streams produced by tokenizers, performing transformations
+ * such as lowercasing, stemming, synonym expansion, or stop word removal.
+ */
public interface TokenFilterFactory {
+ /**
+ * Retrieves the name of this token filter factory.
+ *
+ * @return the token filter name
+ *
+ * {@code
+ * TokenFilterFactory factory = ...;
+ * String name = factory.name(); // e.g., "lowercase"
+ * }
+ */
String name();
+ /**
+ * Creates a token filter that wraps the provided token stream.
+ * This method is called during the analysis process to build the analysis chain.
+ *
+ * @param tokenStream the input token stream to filter
+ * @return a new TokenStream that filters the input
+ *
+ * {@code
+ * TokenFilterFactory factory = ...;
+ * TokenStream input = tokenizer.create();
+ * TokenStream filtered = factory.create(input);
+ * }
+ */
TokenStream create(TokenStream tokenStream);
/**
diff --git a/server/src/main/java/org/elasticsearch/index/analysis/TokenizerFactory.java b/server/src/main/java/org/elasticsearch/index/analysis/TokenizerFactory.java
index ab492d65b57b0..87fd1fbb47250 100644
--- a/server/src/main/java/org/elasticsearch/index/analysis/TokenizerFactory.java
+++ b/server/src/main/java/org/elasticsearch/index/analysis/TokenizerFactory.java
@@ -13,12 +13,56 @@
import java.util.function.Supplier;
+/**
+ * Factory interface for creating tokenizers in the analysis chain.
+ * Tokenizers break text into tokens and form the first stage of text analysis.
+ */
public interface TokenizerFactory {
+ /**
+ * Retrieves the name of this tokenizer factory.
+ *
+ * @return the tokenizer name
+ *
+ * {@code
+ * TokenizerFactory factory = ...;
+ * String name = factory.name(); // e.g., "standard"
+ * }
+ */
String name();
+ /**
+ * Creates a new tokenizer instance.
+ * Each call should return a new instance suitable for tokenizing a single document.
+ *
+ * @return a new Tokenizer instance
+ *
+ * {@code
+ * TokenizerFactory factory = ...;
+ * Tokenizer tokenizer = factory.create();
+ * tokenizer.setReader(new StringReader("text to tokenize"));
+ * }
+ */
Tokenizer create();
+ /**
+ * Creates a simple TokenizerFactory from a name and supplier.
+ * Useful for creating lightweight tokenizer factories without implementing the full interface.
+ *
+ * @param name the name of the tokenizer
+ * @param supplier a supplier that creates new Tokenizer instances
+ * @return a new TokenizerFactory implementation
+ *
+ * {@code
+ * TokenizerFactory factory = TokenizerFactory.newFactory(
+ * "custom",
+ * () -> new StandardTokenizer()
+ * );
+ * }
+ */
    static TokenizerFactory newFactory(String name, Supplier<Tokenizer> supplier) {

diff --git a/server/src/main/java/org/elasticsearch/index/bulk/stats/BulkOperationListener.java b/server/src/main/java/org/elasticsearch/index/bulk/stats/BulkOperationListener.java

+/**
+ * Listener for shard-level bulk operations, notified after each bulk completes.
+ */
 public interface BulkOperationListener {
+ /**
+ * Called after a bulk operation completes on a shard.
+ * The default implementation does nothing.
+ *
+ * @param bulkShardSizeInBytes the size of the bulk request in bytes
+ * @param tookInNanos the time the bulk operation took in nanoseconds
+ *
+ * {@code
+ * BulkOperationListener listener = new BulkOperationListener() {
+ * @Override
+ * public void afterBulk(long bulkShardSizeInBytes, long tookInNanos) {
+ * logger.info("Bulk operation completed: {} bytes in {} ns",
+ * bulkShardSizeInBytes, tookInNanos);
+ * }
+ * };
+ * }
*/
default void afterBulk(long bulkShardSizeInBytes, long tookInNanos) {}
}
diff --git a/server/src/main/java/org/elasticsearch/index/bulk/stats/BulkStats.java b/server/src/main/java/org/elasticsearch/index/bulk/stats/BulkStats.java
index ea7287460013d..1c2566021747e 100644
--- a/server/src/main/java/org/elasticsearch/index/bulk/stats/BulkStats.java
+++ b/server/src/main/java/org/elasticsearch/index/bulk/stats/BulkStats.java
@@ -32,10 +32,19 @@ public class BulkStats implements Writeable, ToXContentFragment {
private long avgTimeInMillis = 0;
private long avgSizeInBytes = 0;
+ /**
+ * Constructs a new BulkStats instance with all statistics initialized to zero.
+ */
public BulkStats() {
}
+ /**
+ * Deserializes a BulkStats instance from a stream input.
+ *
+ * @param in the stream to read from
+ * @throws IOException if an I/O error occurs while reading from the stream
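+ *
+ * A minimal usage sketch:
+ * {@code
+ * StreamInput in = ...;
+ * BulkStats stats = new BulkStats(in);
+ * }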
+ */
public BulkStats(StreamInput in) throws IOException {
totalOperations = in.readVLong();
totalTimeInMillis = in.readVLong();
@@ -44,6 +53,20 @@ public BulkStats(StreamInput in) throws IOException {
avgSizeInBytes = in.readVLong();
}
+ /**
+ * Constructs a BulkStats instance with specified values.
+ *
+ * @param totalOperations the total number of bulk operations
+ * @param totalTimeInMillis the total time spent on bulk operations in milliseconds
+ * @param totalSizeInBytes the total size of bulk operations in bytes
+ * @param avgTimeInMillis the average time per bulk operation in milliseconds
+ * @param avgSizeInBytes the average size per bulk operation in bytes
+ *
+ * {@code
+ * BulkStats stats = new BulkStats(100, 5000, 1024000, 50, 10240);
+ * }
+ */
public BulkStats(long totalOperations, long totalTimeInMillis, long totalSizeInBytes, long avgTimeInMillis, long avgSizeInBytes) {
this.totalOperations = totalOperations;
this.totalTimeInMillis = totalTimeInMillis;
@@ -52,10 +75,28 @@ public BulkStats(long totalOperations, long totalTimeInMillis, long totalSizeInB
this.avgSizeInBytes = avgSizeInBytes;
}
+ /**
+ * Adds the statistics from another BulkStats instance to this one.
+ *
+ * @param bulkStats the BulkStats to add
+ *
+ * {@code
+ * BulkStats stats1 = new BulkStats(100, 5000, 1024000, 50, 10240);
+ * BulkStats stats2 = new BulkStats(50, 2500, 512000, 50, 10240);
+ * stats1.add(stats2); // Combines the statistics
+ * }
+ */
public void add(BulkStats bulkStats) {
addTotals(bulkStats);
}
+ /**
+ * Adds the total statistics from another BulkStats instance to this one.
+ * Recalculates averages based on the combined totals.
+ *
+ * @param bulkStats the BulkStats to add, or null (which is ignored)
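+ *
+ * A minimal usage sketch (hypothetical values):
+ * {@code
+ * BulkStats totals = new BulkStats(100, 5000, 1024000, 50, 10240);
+ * totals.addTotals(new BulkStats(50, 2500, 512000, 50, 10240));
+ * // totals now reports 150 operations; averages are recomputed from the new totals
+ * }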
+ */
public void addTotals(BulkStats bulkStats) {
if (bulkStats == null) {
return;
@@ -71,30 +112,65 @@ public void addTotals(BulkStats bulkStats) {
this.totalSizeInBytes += bulkStats.totalSizeInBytes;
}
+ /**
+ * Retrieves the total size of all bulk operations in bytes.
+ *
+ * @return the total size in bytes
+ */
public long getTotalSizeInBytes() {
return totalSizeInBytes;
}
+ /**
+ * Retrieves the total number of bulk operations.
+ *
+ * @return the total operation count
+ */
public long getTotalOperations() {
return totalOperations;
}
+ /**
+ * Retrieves the total time spent on bulk operations as a TimeValue.
+ *
+ * @return the total time as a TimeValue
+ */
public TimeValue getTotalTime() {
return new TimeValue(totalTimeInMillis);
}
+ /**
+ * Retrieves the average time per bulk operation as a TimeValue.
+ *
+ * @return the average time as a TimeValue
+ */
public TimeValue getAvgTime() {
return new TimeValue(avgTimeInMillis);
}
+ /**
+ * Retrieves the total time spent on bulk operations in milliseconds.
+ *
+ * @return the total time in milliseconds
+ */
public long getTotalTimeInMillis() {
return totalTimeInMillis;
}
+ /**
+ * Retrieves the average time per bulk operation in milliseconds.
+ *
+ * @return the average time in milliseconds
+ */
public long getAvgTimeInMillis() {
return avgTimeInMillis;
}
+ /**
+ * Retrieves the average size per bulk operation in bytes.
+ *
+ * @return the average size in bytes
+ */
public long getAvgSizeInBytes() {
return avgSizeInBytes;
}
diff --git a/server/src/main/java/org/elasticsearch/index/bulk/stats/ShardBulkStats.java b/server/src/main/java/org/elasticsearch/index/bulk/stats/ShardBulkStats.java
index ad05e09dca021..e152ab08aa69f 100644
--- a/server/src/main/java/org/elasticsearch/index/bulk/stats/ShardBulkStats.java
+++ b/server/src/main/java/org/elasticsearch/index/bulk/stats/ShardBulkStats.java
@@ -17,18 +17,41 @@
import java.util.concurrent.TimeUnit;
/**
- * Internal class that maintains relevant shard bulk statistics / metrics.
+ * Internal class that maintains relevant shard bulk statistics and metrics.
+ * Tracks bulk operation counts, timings, and sizes using exponentially weighted moving averages.
+ *
* @see IndexShard
*/
public class ShardBulkStats implements BulkOperationListener {
private final StatsHolder totalStats = new StatsHolder();
+ /** Alpha value for exponentially weighted moving average calculation */
private static final double ALPHA = 0.1;
+ /**
+ * Retrieves a snapshot of the current bulk statistics.
+ *
+ * @return a BulkStats object containing aggregated statistics
+ *
+ * {@code
+ * ShardBulkStats shardStats = new ShardBulkStats();
+ * // ... bulk operations occur ...
+ * BulkStats stats = shardStats.stats();
+ * long totalOps = stats.getTotalOperations();
+ * }
+ */
public BulkStats stats() {
return totalStats.stats();
}
+ /**
+ * Called after a bulk operation completes.
+ * Updates all metrics including counts, sizes, and moving averages.
+ *
+ * @param shardBulkSizeInBytes the size of the bulk operation in bytes
+ * @param tookInNanos the time taken for the bulk operation in nanoseconds
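+ *
+ * A minimal usage sketch (hypothetical values):
+ * {@code
+ * ShardBulkStats stats = new ShardBulkStats();
+ * stats.afterBulk(2048, TimeUnit.MILLISECONDS.toNanos(15));
+ * }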
+ */
@Override
public void afterBulk(long shardBulkSizeInBytes, long tookInNanos) {
totalStats.totalSizeInBytes.inc(shardBulkSizeInBytes);
diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java
index 31429dd91d9a9..4b9f4fce91419 100644
--- a/server/src/main/java/org/elasticsearch/search/SearchHit.java
+++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java
@@ -114,14 +114,48 @@ public final class SearchHit implements Writeable, ToXContentObject, RefCounted
private final RefCounted refCounted;
+ /**
+ * Constructs a new SearchHit with the specified document ID.
+ *
+ * @param docId the Lucene document ID
+ *
+ * {@code
+ * SearchHit hit = new SearchHit(123);
+ * }
+ */
public SearchHit(int docId) {
this(docId, null);
}
+ /**
+ * Constructs a new SearchHit with the specified document ID and document identifier.
+ *
+ * @param docId the Lucene document ID
+ * @param id the document identifier (may be null)
+ *
+ * {@code
+ * SearchHit hit = new SearchHit(123, "doc-id-456");
+ * }
+ */
public SearchHit(int docId, String id) {
this(docId, id, null);
}
+ /**
+ * Constructs a new SearchHit with the specified document ID, identifier, and nested identity.
+ *
+ * @param nestedTopDocId the Lucene document ID of the nested top document
+ * @param id the document identifier (may be null)
+ * @param nestedIdentity the nested document identity information (may be null)
+ *
+ * {@code
+ * NestedIdentity identity = new NestedIdentity("nested_field", 0, null);
+ * SearchHit hit = new SearchHit(123, "doc-id-456", identity);
+ * }
+ */
public SearchHit(int nestedTopDocId, String id, NestedIdentity nestedIdentity) {
this(nestedTopDocId, id, nestedIdentity, null);
}
@@ -195,6 +229,20 @@ public SearchHit(
this.refCounted = refCounted == null ? LeakTracker.wrap(new SimpleRefCounted()) : refCounted;
}
+ /**
+ * Reads a SearchHit from the provided stream input.
+ *
+ * @param in the stream input to read from
+ * @param pooled whether to use pooled (ref-counted) instances for memory efficiency
+ * @return the SearchHit instance read from the stream
+ * @throws IOException if an I/O error occurs during deserialization
+ *
+ * {@code
+ * StreamInput in = ...;
+ * SearchHit hit = SearchHit.readFrom(in, true);
+ * }
+ */
public static SearchHit readFrom(StreamInput in, boolean pooled) throws IOException {
final float score = in.readFloat();
final int rank;
@@ -293,14 +341,57 @@ public static SearchHit readFrom(StreamInput in, boolean pooled) throws IOExcept
);
}
+ /**
+ * Creates an unpooled SearchHit that doesn't require manual reference counting.
+ * Unpooled hits are not automatically deallocated and are suitable for long-lived objects.
+ *
+ * @param docId the Lucene document ID
+ * @return an unpooled SearchHit instance
+ *
+ * {@code
+ * SearchHit hit = SearchHit.unpooled(123);
+ * // No need to call decRef() on unpooled hits
+ * }
+ */
public static SearchHit unpooled(int docId) {
return unpooled(docId, null);
}
+ /**
+ * Creates an unpooled SearchHit with the specified document ID and identifier.
+ * Unpooled hits are not automatically deallocated and are suitable for long-lived objects.
+ *
+ * @param docId the Lucene document ID
+ * @param id the document identifier (may be null)
+ * @return an unpooled SearchHit instance
+ *
+ * {@code
+ * SearchHit hit = SearchHit.unpooled(123, "doc-id-456");
+ * // No need to call decRef() on unpooled hits
+ * }
+ */
public static SearchHit unpooled(int docId, String id) {
return unpooled(docId, id, null);
}
+ /**
+ * Creates an unpooled SearchHit with full specification of document identity.
+ * Unpooled hits are not automatically deallocated and are suitable for long-lived objects.
+ *
+ * @param nestedTopDocId the Lucene document ID of the nested top document
+ * @param id the document identifier (may be null)
+ * @param nestedIdentity the nested document identity information (may be null)
+ * @return an unpooled SearchHit instance
+ *
+ * {@code
+ * NestedIdentity identity = new NestedIdentity("nested_field", 0, null);
+ * SearchHit hit = SearchHit.unpooled(123, "doc-id-456", identity);
+ * // No need to call decRef() on unpooled hits
+ * }
+ */
public static SearchHit unpooled(int nestedTopDocId, String id, NestedIdentity nestedIdentity) {
// always referenced search hits do NOT call #deallocate
return new SearchHit(nestedTopDocId, id, nestedIdentity, ALWAYS_REFERENCED);
@@ -359,44 +450,127 @@ public void writeTo(StreamOutput out) throws IOException {
}
}
+ /**
+ * Returns the Lucene document ID for this search hit.
+ *
+ * @return the document ID
+ */
public int docId() {
return this.docId;
}
+ /**
+ * Sets the score for this search hit.
+ *
+ * @param score the relevance score to set
+ *
+ * {@code
+ * SearchHit hit = new SearchHit(123);
+ * hit.score(0.85f);
+ * }
+ */
public void score(float score) {
this.score = score;
}
/**
- * The score.
+ * Returns the relevance score of this search hit.
+ * Returns {@link Float#NaN} if scoring was disabled for the query.
+ *
+ * @return the score, or {@link Float#NaN} if not scored
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * float score = hit.getScore();
+ * if (!Float.isNaN(score)) {
+ * // Process the score
+ * }
+ * }
*/
public float getScore() {
return this.score;
}
+ /**
+ * Sets the rank position of this hit in the search results.
+ *
+ * @param rank the rank position
+ *
+ * {@code
+ * SearchHit hit = new SearchHit(123);
+ * hit.setRank(5);
+ * }
+ */
public void setRank(int rank) {
this.rank = rank;
}
+ /**
+ * Returns the rank position of this hit in the search results.
+ * Returns {@link #NO_RANK} (-1) if ranking was not applied.
+ *
+ * @return the rank position, or {@link #NO_RANK} if not ranked
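+ *
+ * A minimal usage sketch:
+ * {@code
+ * int rank = hit.getRank();
+ * if (rank != SearchHit.NO_RANK) {
+ *     // ranking was applied to this hit
+ * }
+ * }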
+ */
public int getRank() {
return this.rank;
}
+ /**
+ * Sets the document version for this search hit.
+ *
+ * @param version the document version
+ *
+ * {@code
+ * SearchHit hit = new SearchHit(123);
+ * hit.version(3L); // Document is at version 3
+ * }
+ */
public void version(long version) {
this.version = version;
}
/**
- * The version of the hit.
+ * Returns the document version of this hit.
+ * Returns -1 if version was not requested or not available.
+ *
+ * @return the version, or -1 if not available
*/
public long getVersion() {
return this.version;
}
+ /**
+ * Sets the sequence number for this document.
+ * The sequence number is used for optimistic concurrency control.
+ *
+ * @param seqNo the sequence number
+ *
+ * {@code
+ * SearchHit hit = new SearchHit(123);
+ * hit.setSeqNo(42L);
+ * }
+ */
public void setSeqNo(long seqNo) {
this.seqNo = seqNo;
}
+ /**
+ * Sets the primary term for this document.
+ * The primary term is used for optimistic concurrency control.
+ *
+ * @param primaryTerm the primary term
+ *
+ * {@code
+ * SearchHit hit = new SearchHit(123);
+ * hit.setPrimaryTerm(1L);
+ * }
+ */
public void setPrimaryTerm(long primaryTerm) {
this.primaryTerm = primaryTerm;
}
@@ -417,28 +591,71 @@ public long getPrimaryTerm() {
}
/**
- * The index of the hit.
+ * Returns the name of the index this hit belongs to.
+ *
+ * @return the index name, or null if not set
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * String indexName = hit.getIndex();
+ * System.out.println("Hit from index: " + indexName);
+ * }
*/
public String getIndex() {
return this.index;
}
/**
- * The id of the document.
+ * Returns the unique identifier of the document.
+ *
+ * @return the document ID, or null if not set
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * String docId = hit.getId();
+ * System.out.println("Document ID: " + docId);
+ * }
*/
public String getId() {
return id != null ? id.string() : null;
}
/**
- * If this is a nested hit then nested reference information is returned otherwise null is returned.
+ * Returns the nested document identity information if this is a nested hit.
+ *
+ * @return the nested identity, or null if this is not a nested document
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * NestedIdentity nested = hit.getNestedIdentity();
+ * if (nested != null) {
+ * String field = nested.getField().string();
+ * int offset = nested.getOffset();
+ * }
+ * }
*/
public NestedIdentity getNestedIdentity() {
return nestedIdentity;
}
/**
- * Returns bytes reference, also uncompress the source if needed.
+ * Returns the source document as a bytes reference, decompressing if necessary.
+ * The source is the original JSON document that was indexed.
+ *
+ * @return the source bytes reference, or null if source is not available
+ * @throws ElasticsearchParseException if decompression fails
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * BytesReference sourceRef = hit.getSourceRef();
+ * if (sourceRef != null) {
+ * // Process source bytes
+ * }
+ * }
*/
public BytesReference getSourceRef() {
assert hasReferences();
@@ -455,7 +672,17 @@ public BytesReference getSourceRef() {
}
/**
- * Sets representation, might be compressed....
+ * Sets the source document for this hit. The source may be compressed.
+ *
+ * @param source the source bytes reference (may be compressed)
+ * @return this SearchHit instance for method chaining
+ *
+ * {@code
+ * SearchHit hit = new SearchHit(123);
+ * BytesReference source = ...;
+ * hit.sourceRef(source);
+ * }
*/
public SearchHit sourceRef(BytesReference source) {
this.source = source;
@@ -463,8 +690,18 @@ public SearchHit sourceRef(BytesReference source) {
}
/**
- * Is the source available or not. A source with no fields will return true. This will return false if {@code fields} doesn't contain
- * {@code _source} or if source is disabled in the mapping.
+ * Checks whether the source is available for this hit.
+ * Returns false if the _source field was not requested or if source is disabled in the mapping.
+ *
+ * @return true if source is available, false otherwise
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * if (hit.hasSource()) {
+ * Map<String, Object> source = hit.getSourceAsMap();
+ * }
+ * }
*/
public boolean hasSource() {
assert hasReferences();
@@ -472,7 +709,17 @@ public boolean hasSource() {
}
/**
- * The source of the document as string (can be {@code null}).
+ * Returns the source document as a JSON string.
+ *
+ * @return the source as a JSON string, or null if source is not available
+ * @throws ElasticsearchParseException if conversion to JSON fails
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * String jsonSource = hit.getSourceAsString();
+ * System.out.println("Source: " + jsonSource);
+ * }
*/
public String getSourceAsString() {
assert hasReferences();
@@ -487,9 +734,21 @@ public String getSourceAsString() {
}
/**
- * The source of the document as a map (can be {@code null}). This method is expected
- * to be called at most once during the lifetime of the object as the generated map
- * is expensive to generate and it does not get cache.
+ * Returns the source document as a map (can be {@code null}).
+ * This method should be called at most once per hit, as the generated map is
+ * expensive to create and is not cached.
+ *
+ * @return the source as a map, or null if source is not available
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * Map<String, Object> source = hit.getSourceAsMap();
+ * }
*/
    public Map<String, Object> getSourceAsMap() {

+ /**
+ * Returns the field with the given name, or null if that field is not present in this hit.
+ *
+ * @param fieldName the name of the field to retrieve
+ * @return the DocumentField, or null if not found
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * DocumentField field = hit.field("my_field");
+ * if (field != null) {
+ *     List<Object> values = field.getValues();
+ * }
+ * }
*/
public DocumentField field(String fieldName) {
assert hasReferences();
@@ -514,25 +786,71 @@ public DocumentField field(String fieldName) {
}
}
- /*
- * Adds a new DocumentField to the map in case both parameters are not null.
- * */
+ /**
+ * Adds a DocumentField to this hit.
+ * Does nothing if the field parameter is null.
+ *
+ * @param field the DocumentField to add
+ *
+ * {@code
+ * SearchHit hit = new SearchHit(123);
+ * DocumentField field = new DocumentField("my_field", List.of("value1", "value2"));
+ * hit.setDocumentField(field);
+ * }
+ */
public void setDocumentField(DocumentField field) {
if (field == null) return;
this.documentFields.put(field.getName(), field);
}
+ /**
+ * Adds multiple document fields and metadata fields to this hit.
+ *
+ * @param docFields the document fields to add
+ * @param metaFields the metadata fields to add
+ *
+ * {@code
+ * SearchHit hit = new SearchHit(123);
+ * Map<String, DocumentField> docFields = ...;
+ * Map<String, DocumentField> metaFields = ...;
+ * hit.addDocumentFields(docFields, metaFields);
+ * }
+ */
    public void addDocumentFields(Map<String, DocumentField> docFields, Map<String, DocumentField> metaFields) {

+ /**
+ * Removes the document field with the given name from this hit.
+ *
+ * @param field the name of the field to remove
+ * @return the removed DocumentField, or null if it was not present
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * DocumentField removed = hit.removeDocumentField("my_field");
+ * }
+ */
public DocumentField removeDocumentField(String field) {
return documentFields.remove(field);
}
/**
- * @return a map of metadata fields for this hit
+ * Returns an unmodifiable map of metadata fields for this hit.
+ * Metadata fields include system fields like _index, _id, _version, etc.
+ *
+ * @return an unmodifiable map of metadata fields
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * Map<String, DocumentField> metaFields = hit.getMetadataFields();
+ * }
+ */
    public Map<String, DocumentField> getMetadataFields() {

+ /**
+ * Returns a map of the document (non-metadata) fields for this hit.
+ *
+ * @return a map of field names to document fields
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * Map<String, DocumentField> docFields = hit.getDocumentFields();
+ * }
+ */
    public Map<String, DocumentField> getDocumentFields() {

+ /**
+ * Returns a map containing both document fields and metadata fields for this hit.
+ *
+ * @return a combined map of all fields
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * Map<String, DocumentField> fields = hit.getFields();
+ * }
+ */
    public Map<String, DocumentField> getFields() {

+ /**
+ * Checks whether any fields of this hit contain unresolved lookup fields.
+ *
+ * @return true if at least one lookup field needs to be resolved, false otherwise
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * if (hit.hasLookupFields()) {
+ * // Resolve lookup fields
+ * Map<LookupField, List<Object>> results = ...;
+ * hit.resolveLookupFields(results);
+ * }
+ * }
*/
public boolean hasLookupFields() {
return getDocumentFields().values().stream().anyMatch(doc -> doc.getLookupFields().isEmpty() == false);
}
/**
- * Resolve the lookup fields with the given results and merge them as regular fetch fields.
+ * Resolves lookup fields with the given results and merges them as regular fetch fields.
+ * Lookup fields are replaced with their resolved values.
+ *
+ * @param lookupResults a map of lookup fields to their resolved values
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * Map<LookupField, List<Object>> lookupResults = ...;
+ * hit.resolveLookupFields(lookupResults);
+ * }
+ */
    public void resolveLookupFields(Map<LookupField, List<Object>> lookupResults) {

+ /**
+ * Returns the highlighted fields for this hit.
+ *
+ * @return a map of field names to highlight results
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * Map<String, HighlightField> highlights = hit.getHighlightFields();
+ * }
+ */
    public Map<String, HighlightField> getHighlightFields() {

+ /**
+ * Sets the highlighted fields for this hit.
+ *
+ * @param highlightFields a map of field names to highlight results
+ *
+ * {@code
+ * SearchHit hit = new SearchHit(123);
+ * Map<String, HighlightField> highlights = ...;
+ * hit.highlightFields(highlights);
+ * }
+ */
    public void highlightFields(Map<String, HighlightField> highlightFields) {

+ /**
+ * Sets the sort values for this hit from raw values and their corresponding formats.
+ *
+ * @param sortValues the raw sort values
+ * @param sortValueFormats the formats used to render the sort values
+ *
+ * {@code
+ * SearchHit hit = new SearchHit(123);
+ * Object[] values = new Object[]{100, "text"};
+ * DocValueFormat[] formats = new DocValueFormat[]{DocValueFormat.RAW, DocValueFormat.RAW};
+ * hit.sortValues(values, formats);
+ * }
+ */
public void sortValues(Object[] sortValues, DocValueFormat[] sortValueFormats) {
sortValues(new SearchSortValues(sortValues, sortValueFormats));
}
+ /**
+ * Sets the sort values for this hit.
+ *
+ * @param sortValues the SearchSortValues instance containing sort information
+ *
+ * {@code
+ * SearchHit hit = new SearchHit(123);
+ * SearchSortValues sortValues = ...;
+ * hit.sortValues(sortValues);
+ * }
+ */
public void sortValues(SearchSortValues sortValues) {
this.sortValues = sortValues;
}
/**
- * An array of the (formatted) sort values used.
+ * Returns an array of the formatted sort values used to sort this hit.
+ * These are the human-readable versions of the sort values.
+ *
+ * @return an array of formatted sort values
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * Object[] sortValues = hit.getSortValues();
+ * for (Object value : sortValues) {
+ * System.out.println("Sort value: " + value);
+ * }
+ * }
*/
public Object[] getSortValues() {
return sortValues.getFormattedSortValues();
}
/**
- * An array of the (raw) sort values used.
+ * Returns an array of the raw (unformatted) sort values used to sort this hit.
+ * These are the internal representation of sort values.
+ *
+ * @return an array of raw sort values
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * Object[] rawSortValues = hit.getRawSortValues();
+ * }
*/
public Object[] getRawSortValues() {
return sortValues.getRawSortValues();
}
/**
- * If enabled, the explanation of the search hit.
+ * Returns the explanation of why this document matched the query, if requested.
+ * The explanation provides detailed information about the scoring process.
+ *
+ * @return the Explanation object, or null if explanation was not requested
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * Explanation explanation = hit.getExplanation();
+ * if (explanation != null) {
+ * System.out.println("Score explanation: " + explanation.getDescription());
+ * System.out.println("Score value: " + explanation.getValue());
+ * }
+ * }
*/
public Explanation getExplanation() {
return explanation;
}
+ /**
+ * Sets the explanation for this hit.
+ *
+ * @param explanation the Lucene Explanation object
+ *
+ * {@code
+ * SearchHit hit = new SearchHit(123);
+ * Explanation explanation = ...;
+ * hit.explanation(explanation);
+ * }
+ */
public void explanation(Explanation explanation) {
this.explanation = explanation;
}
/**
- * The shard of the search hit.
+ * Returns the shard target information for this search hit.
+ * This indicates which shard and node the hit came from.
+ *
+ * @return the SearchShardTarget, or null if not set
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * SearchShardTarget shard = hit.getShard();
+ * if (shard != null) {
+ * System.out.println("Shard: " + shard.getShardId());
+ * System.out.println("Node: " + shard.getNodeId());
+ * }
+ * }
*/
public SearchShardTarget getShard() {
return shard;
}
+ /**
+ * Sets the shard target information for this hit and all its inner hits.
+ *
+ * @param target the SearchShardTarget to set
+ *
+ * {@code
+ * SearchHit hit = new SearchHit(123);
+ * SearchShardTarget target = new SearchShardTarget("node1", shardId, null);
+ * hit.shard(target);
+ * }
+ */
public void shard(SearchShardTarget target) {
if (innerHits != null) {
for (SearchHits innerHits : innerHits.values()) {
@@ -669,45 +1156,133 @@ public void shard(SearchShardTarget target) {
}
/**
- * Returns the cluster alias this hit comes from or null if it comes from a local cluster
+ * Returns the cluster alias this hit comes from, or null if it comes from a local cluster.
+ * Used in cross-cluster search scenarios.
+ *
+ * @return the cluster alias, or null if from local cluster
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * String cluster = hit.getClusterAlias();
+ * if (cluster != null) {
+ * System.out.println("Hit from remote cluster: " + cluster);
+ * }
+ * }
*/
public String getClusterAlias() {
return clusterAlias;
}
+ /**
+ * Sets the map of matched named queries and their scores.
+ *
+ * @param matchedQueries the map of query names to scores
+ *
+ * {@code
+ * SearchHit hit = new SearchHit(123);
+ * Map<String, Float> matched = Map.of("my_query", 1.5f);
+ * hit.matchedQueries(matched);
+ * }
+ */
    public void matchedQueries(Map<String, Float> matchedQueries) {

+ /**
+ * Returns the names of the stored queries that matched this document.
+ *
+ * @return an array of matched query names, or an empty array if none matched
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * String[] matchedQueries = hit.getMatchedQueries();
+ * for (String queryName : matchedQueries) {
+ * System.out.println("Matched query: " + queryName);
+ * }
+ * }
*/
public String[] getMatchedQueries() {
return matchedQueries == null ? new String[0] : matchedQueries.keySet().toArray(new String[0]);
}
/**
- * @return The score of the provided named query if it matches, {@code null} otherwise.
+ * Returns the score of a specific named query if it matched this document.
+ *
+ * @param name the name of the query
+ * @return the score of the named query, or null if the query didn't match
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * Float score = hit.getMatchedQueryScore("my_query");
+ * if (score != null) {
+ * System.out.println("Query score: " + score);
+ * }
+ * }
*/
public Float getMatchedQueryScore(String name) {
return getMatchedQueriesAndScores().get(name);
}
/**
- * @return The map of the named queries that matched and their associated score.
+ * Returns a map of all matched named queries and their associated scores.
+ *
+ * @return a map of query names to scores, or an empty map if no queries matched
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * Map<String, Float> scores = hit.getMatchedQueriesAndScores();
+ * }
+ */
    public Map<String, Float> getMatchedQueriesAndScores() {

    /**
+ * Returns the inner hits (nested or parent-child) associated with this hit.
+ * Inner hits allow retrieving nested documents or related documents.
+ *
+ * @return a map of inner hit names to SearchHits, or null if there are no inner hits
+ *
+ * {@code
+ * SearchHit hit = ...;
+ * Map<String, SearchHits> innerHits = hit.getInnerHits();
+ * }
+ */
    public Map<String, SearchHits> getInnerHits() {

+ /**
+ * Sets the inner hits for this hit.
+ *
+ * @param innerHits a map of inner hit names to SearchHits
+ *
+ * {@code
+ * SearchHit hit = new SearchHit(123);
+ * Map<String, SearchHits> innerHits = ...;
+ * hit.setInnerHits(innerHits);
+ * }
+ */
    public void setInnerHits(Map<String, SearchHits> innerHits) {

diff --git a/server/src/main/java/org/elasticsearch/search/SearchHits.java b/server/src/main/java/org/elasticsearch/search/SearchHits.java

+ /**
+ * Returns an empty SearchHits instance with the provided total hits and maximum score.
+ *
+ * @param totalHits the total number of hits (may be null if tracking is disabled)
+ * @param maxScore the maximum score
+ * @return an empty SearchHits instance
+ *
+ * {@code
+ * TotalHits totalHits = new TotalHits(100, TotalHits.Relation.EQUAL_TO);
+ * SearchHits emptyHits = SearchHits.empty(totalHits, 1.5f);
+ * }
+ */
public static SearchHits empty(@Nullable TotalHits totalHits, float maxScore) {
return new SearchHits(EMPTY, totalHits, maxScore);
}
+ /**
+ * Constructs a new SearchHits with the provided hits, total hits information, and maximum score.
+ *
+ * @param hits the array of SearchHit instances
+ * @param totalHits the total number of hits (may be null if tracking is disabled)
+ * @param maxScore the maximum score across all hits
+ *
+ * {@code
+ * SearchHit[] hits = new SearchHit[]{hit1, hit2, hit3};
+ * TotalHits totalHits = new TotalHits(100, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
+ * SearchHits searchHits = new SearchHits(hits, totalHits, 2.5f);
+ * }
+ */
public SearchHits(SearchHit[] hits, @Nullable TotalHits totalHits, float maxScore) {
this(hits, totalHits, maxScore, null, null, null);
}
@@ -95,6 +122,23 @@ private SearchHits(
this.refCounted = refCounted;
}
+ /**
+ * Creates an unpooled SearchHits instance that doesn't require manual reference counting.
+ * Unpooled instances are not automatically deallocated and are suitable for long-lived objects.
+ *
+ * @param hits the array of SearchHit instances
+ * @param totalHits the total number of hits (may be null if tracking is disabled)
+ * @param maxScore the maximum score across all hits
+ * @return an unpooled SearchHits instance
+ *
+ * {@code
+ * SearchHit[] hits = new SearchHit[]{hit1, hit2, hit3};
+ * TotalHits totalHits = new TotalHits(100, TotalHits.Relation.EQUAL_TO);
+ * SearchHits searchHits = SearchHits.unpooled(hits, totalHits, 2.5f);
+ * // No need to call decRef() on unpooled hits
+ * }
+ */
public static SearchHits unpooled(SearchHit[] hits, @Nullable TotalHits totalHits, float maxScore) {
return unpooled(hits, totalHits, maxScore, null, null, null);
}
@@ -118,6 +162,20 @@ private static boolean assertUnpooled(SearchHit[] searchHits) {
return true;
}
+ /**
+ * Reads SearchHits from the provided stream input.
+ *
+ * @param in the stream input to read from
+ * @param pooled whether to use pooled (ref-counted) instances for memory efficiency
+ * @return the SearchHits instance read from the stream
+ * @throws IOException if an I/O error occurs during deserialization
+ *
+ * {@code
+ * StreamInput in = ...;
+ * SearchHits hits = SearchHits.readFrom(in, true);
+ * }
+ */
public static SearchHits readFrom(StreamInput in, boolean pooled) throws IOException {
final TotalHits totalHits;
if (in.readBoolean()) {
@@ -150,6 +208,24 @@ public static SearchHits readFrom(StreamInput in, boolean pooled) throws IOExcep
}
}
+ /**
+ * Checks whether this SearchHits instance is pooled (uses reference counting).
+ * Pooled instances require manual reference counting via incRef()/decRef().
+ *
+ * @return true if this instance is pooled, false otherwise
+ *
+ * {@code
+ * SearchHits hits = ...;
+ * if (hits.isPooled()) {
+ * try {
+ * // Process hits
+ * } finally {
+ * hits.decRef();
+ * }
+ * }
+ * }
+ */
public boolean isPooled() {
return refCounted != ALWAYS_REFERENCED;
}
@@ -170,8 +246,21 @@ public void writeTo(StreamOutput out) throws IOException {
}
/**
- * The total number of hits for the query or null if the tracking of total hits
- * is disabled in the request.
+ * Returns the total number of hits for the query.
+ * Returns null if total hits tracking is disabled in the request.
+ *
+ * @return the TotalHits object containing the total count and relation, or null if not tracked
+ *
+ * {@code
+ * SearchHits hits = ...;
+ * TotalHits totalHits = hits.getTotalHits();
+ * if (totalHits != null) {
+ * long count = totalHits.value();
+ * TotalHits.Relation relation = totalHits.relation();
+ * System.out.println("Total hits: " + count + " (" + relation + ")");
+ * }
+ * }
*/
@Nullable
public TotalHits getTotalHits() {
@@ -179,14 +268,38 @@ public TotalHits getTotalHits() {
}
/**
- * The maximum score of this query.
+ * Returns the maximum score across all hits in this result set.
+ * Returns {@link Float#NaN} if scoring was disabled.
+ *
+ * @return the maximum score, or {@link Float#NaN} if not scored
+ *
+ * {@code
+ * SearchHits hits = ...;
+ * float maxScore = hits.getMaxScore();
+ * if (!Float.isNaN(maxScore)) {
+ * System.out.println("Max score: " + maxScore);
+ * }
+ * }
*/
public float getMaxScore() {
return maxScore;
}
/**
- * The hits of the search request (based on the search type, and from / size provided).
+ * Returns the array of search hits for this result set.
+ * The hits returned are based on the search type and from/size parameters.
+ *
+ * @return an array of SearchHit instances
+ *
+ * {@code
+ * SearchHits searchHits = ...;
+ * SearchHit[] hits = searchHits.getHits();
+ * for (SearchHit hit : hits) {
+ * System.out.println("ID: " + hit.getId() + ", Score: " + hit.getScore());
+ * }
+ * }
*/
public SearchHit[] getHits() {
assert hasReferences();
@@ -194,7 +307,18 @@ public SearchHit[] getHits() {
}
/**
- * Return the hit as the provided position.
+ * Returns the hit at the specified position in the hits array.
+ *
+ * @param position the zero-based index of the hit to retrieve
+ * @return the SearchHit at the specified position
+ * @throws ArrayIndexOutOfBoundsException if the position is out of bounds
+ *
+ * {@code
+ * SearchHits hits = ...;
+ * SearchHit firstHit = hits.getAt(0);
+ * SearchHit secondHit = hits.getAt(1);
+ * }
*/
public SearchHit getAt(int position) {
assert hasReferences();
@@ -202,8 +326,22 @@ public SearchHit getAt(int position) {
}
/**
- * In case documents were sorted by field(s), returns information about such field(s), null otherwise
+ * Returns information about the fields used for sorting, if any.
+ * Returns null if the results were not sorted by field.
+ *
+ * @return an array of SortField instances, or null if not sorted by field
* @see SortField
+ *
+ * {@code
+ * SearchHits hits = ...;
+ * SortField[] sortFields = hits.getSortFields();
+ * if (sortFields != null) {
+ * for (SortField field : sortFields) {
+ * System.out.println("Sorted by: " + field.getField());
+ * }
+ * }
+ * }
*/
@Nullable
public SortField[] getSortFields() {
@@ -211,7 +349,19 @@ public SortField[] getSortFields() {
}
/**
- * In case field collapsing was performed, returns the field used for field collapsing, null otherwise
+ * Returns the field name used for field collapsing, if any.
+ * Returns null if field collapsing was not performed.
+ *
+ * @return the collapse field name, or null if no collapsing was performed
+ *
+ * {@code
+ * SearchHits hits = ...;
+ * String collapseField = hits.getCollapseField();
+ * if (collapseField != null) {
+ * System.out.println("Collapsed by field: " + collapseField);
+ * }
+ * }
*/
@Nullable
public String getCollapseField() {
@@ -219,7 +369,21 @@ public String getCollapseField() {
}
/**
- * In case field collapsing was performed, returns the values of the field that field collapsing was performed on, null otherwise
+ * Returns the values of the collapse field for the collapsed results.
+ * Returns null if field collapsing was not performed.
+ *
+ * @return an array of collapse field values, or null if no collapsing was performed
+ *
+ * {@code
+ * SearchHits hits = ...;
+ * Object[] collapseValues = hits.getCollapseValues();
+ * if (collapseValues != null) {
+ * for (Object value : collapseValues) {
+ * System.out.println("Collapse value: " + value);
+ * }
+ * }
+ * }
*/
@Nullable
public Object[] getCollapseValues() {
diff --git a/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java
index 41664c30cc1b4..10721aa0c0cf6 100644
--- a/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java
+++ b/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java
@@ -28,6 +28,18 @@ public final class SearchShardTarget implements Writeable, Comparable<SearchShardTarget> {
+ /**
+ * Deserializes a SearchShardTarget from a stream input.
+ *
+ * @param in the stream to read from
+ * @throws IOException if an I/O error occurs while reading from the stream
+ *
+ * {@code
+ * StreamInput in = ...;
+ * SearchShardTarget target = new SearchShardTarget(in);
+ * }
+ */
public SearchShardTarget(StreamInput in) throws IOException {
if (in.readBoolean()) {
nodeId = in.readText();
@@ -38,36 +50,118 @@ public SearchShardTarget(StreamInput in) throws IOException {
clusterAlias = in.readOptionalString();
}
+ /**
+ * Constructs a new SearchShardTarget with the specified node ID, shard ID, and cluster alias.
+ *
+ * @param nodeId the node identifier (may be null)
+ * @param shardId the shard identifier
+ * @param clusterAlias the cluster alias for cross-cluster search (may be null for local clusters)
+ *
+ * {@code
+ * ShardId shardId = new ShardId("my_index", "_na_", 0);
+ * SearchShardTarget target = new SearchShardTarget("node1", shardId, null);
+ * }
+ */
public SearchShardTarget(String nodeId, ShardId shardId, @Nullable String clusterAlias) {
this.nodeId = nodeId == null ? null : new Text(nodeId);
this.shardId = shardId;
this.clusterAlias = clusterAlias;
}
+ /**
+ * Returns the node identifier where this shard resides.
+ *
+ * @return the node ID, or null if not set
+ *
+ * {@code
+ * SearchShardTarget target = ...;
+ * String nodeId = target.getNodeId();
+ * System.out.println("Shard on node: " + nodeId);
+ * }
+ */
@Nullable
public String getNodeId() {
return nodeId != null ? nodeId.string() : null;
}
+ /**
+ * Returns the node identifier as a Text object.
+ *
+ * @return the node ID as Text, or null if not set
+ */
public Text getNodeIdText() {
return this.nodeId;
}
+ /**
+ * Returns the name of the index this shard belongs to.
+ *
+ * @return the index name
+ *
+ * {@code
+ * SearchShardTarget target = ...;
+ * String indexName = target.getIndex();
+ * System.out.println("Index: " + indexName);
+ * }
+ */
public String getIndex() {
return shardId.getIndexName();
}
+ /**
+ * Returns the shard identifier for this target.
+ *
+ * @return the ShardId
+ *
+ * {@code
+ * SearchShardTarget target = ...;
+ * ShardId shardId = target.getShardId();
+ * System.out.println("Shard ID: " + shardId.getId());
+ * System.out.println("Index: " + shardId.getIndexName());
+ * }
+ */
public ShardId getShardId() {
return shardId;
}
+ /**
+ * Returns the cluster alias for cross-cluster search scenarios.
+ *
+ * @return the cluster alias, or null for local clusters
+ *
+ * {@code
+ * SearchShardTarget target = ...;
+ * String cluster = target.getClusterAlias();
+ * if (cluster != null) {
+ * System.out.println("From remote cluster: " + cluster);
+ * }
+ * }
+ */
@Nullable
public String getClusterAlias() {
return clusterAlias;
}
/**
- * Returns the fully qualified index name, including the index prefix that indicates which cluster results come from.
+ * Returns the fully qualified index name, including the cluster prefix for remote clusters.
+ * For local clusters, this returns just the index name.
+ * For remote clusters, this returns "cluster:index".
+ *
+ * @return the fully qualified index name
+ *
+ * {@code
+ * SearchShardTarget target = ...;
+ * String fqIndexName = target.getFullyQualifiedIndexName();
+ * // For local: "my_index"
+ * // For remote: "remote_cluster:my_index"
+ * System.out.println("Fully qualified index: " + fqIndexName);
+ * }
*/
public String getFullyQualifiedIndexName() {
return RemoteClusterAware.buildRemoteIndexName(clusterAlias, getIndex());
diff --git a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java
index 0be95c337838a..469e0be19df1f 100644
--- a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java
+++ b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java
@@ -21,6 +21,14 @@
import java.util.Collections;
import java.util.List;
+/**
+ * Plugin for APM (Application Performance Monitoring) data management in Elasticsearch.
+ */

diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearch.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearch.java
+/**
+ * Plugin for asynchronous search in Elasticsearch.
+ * <p>
+ * Allows long-running searches to execute in the background: the initial request returns
+ * an ID that can be used to retrieve partial or final results later.
+ *
+ * Usage Example:
+ * {@code
+ * POST /my-index/_async_search?wait_for_completion_timeout=2s
+ * {
+ *   "query": {
+ *     "match_all": {}
+ *   }
+ * }
+ *
+ * // Returns an ID to retrieve results later
+ * GET /_async_search/<id>
+ * }
+ */
public final class AsyncSearch extends Plugin implements ActionPlugin {
+ /**
+ * Returns the list of action handlers provided by this plugin.
+ *
+ * @return a list of action handlers for async search operations
+ */

diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/common/Failure.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/common/Failure.java
+/**
+ * Represents a verification failure, associating a query tree node with a
+ * descriptive error message.
+ *
+ * Usage Example:
+ * {@code
+ * // Creating a failure for a node with a formatted message
+ * Failure failure = Failure.fail(node, "Column {} not found in table {}", columnName, tableName);
+ *
+ * // Creating a failure directly
+ * Failure failure = new Failure(node, "Invalid operation on this node");
+ *
+ * // Collecting failures during verification
+ * Collection<Failure> failures = new ArrayList<>();
+ * }
+ */
public class Failure {
    private final Node<?> node;
private final String message;
+ /**
+ * Constructs a failure associated with a specific node.
+ *
+ * @param node the query tree node where the failure occurred
+ * @param message the failure message describing the problem
+ */
    public Failure(Node<?> node, String message) {
this.node = node;
this.message = message;
}
+ /**
+ * Returns the node associated with this failure.
+ *
+ * @return the query tree node where the failure occurred
+ */
    public Node<?> node() {
return node;
}
+ /**
+ * Returns the failure message.
+ *
+ * @return the message describing the failure
+ */
public String message() {
return message;
}
@@ -69,15 +111,51 @@ public boolean equals(Object obj) {
return Objects.equals(node, other.node);
}
+ /**
+ * Returns the string representation of this failure.
+ * <p>
+ * The resulting message is suitable for displaying to end users.
+ *
+ * @return the failure message
+ */

diff --git a/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/FrozenIndices.java b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/FrozenIndices.java
+/**
+ * Plugin for frozen indices support in Elasticsearch.
+ * <p>
+ * This plugin provides support for frozen indices, which are read-only indices optimized
+ * for reduced memory footprint. It registers the necessary transport actions for tracking
+ * frozen indices usage statistics.
+ */
 public class FrozenIndices extends Plugin implements ActionPlugin {

+ /**
+ * Returns the list of action handlers provided by this plugin.
+ * <p>
+ * Registers the {@link FrozenIndicesUsageTransportAction} for tracking usage
+ * statistics of the frozen indices feature.
+ *
+ * @return a list of action handlers for frozen indices operations
+ */
 @Override
 public List<ActionHandler> getActions() {

diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/Graph.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/Graph.java
+/**
+ * Plugin for the Graph exploration API in Elasticsearch.
+ * <p>
+ * This plugin provides the Graph API, which enables exploration of relationships
+ * in data through relevance-based graph analysis. Graph can discover how items
+ * are related using the {@code _explore} API endpoint. This feature requires a
+ * Platinum or Enterprise license.
+ *
+ * Usage Example:
+ * {@code
+ * POST /my-index/_graph/explore
+ * {
+ * "query": {
+ * "match": {
+ * "field": "value"
+ * }
+ * },
+ * "vertices": [
+ * {
+ * "field": "user"
+ * }
+ * ],
+ * "connections": {
+ * "vertices": [
+ * {
+ * "field": "product"
+ * }
+ * ]
+ * }
+ * }
+ * }
+ */
public class Graph extends Plugin implements ActionPlugin {
+ /**
+ * Licensed feature definition for Graph functionality.
+ * Requires a Platinum or Enterprise license.
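+ *
+ * <p>A sketch of how a momentary feature flag is typically consulted before serving a
+ * request; the {@code licenseState} variable is hypothetical:</p>
+ * {@code
+ * if (Graph.GRAPH_FEATURE.check(licenseState) == false) {
+ *     throw LicenseUtils.newComplianceException("graph");
+ * }
+ * }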
+ */
public static final LicensedFeature.Momentary GRAPH_FEATURE = LicensedFeature.momentary(null, "graph", License.OperationMode.PLATINUM);
protected final boolean enabled;
+ /**
+ * Constructs a new Graph plugin with the specified settings.
+ *
+ * @param settings the node settings used to determine if Graph is enabled
+ */
public Graph(Settings settings) {
this.enabled = XPackSettings.GRAPH_ENABLED.get(settings);
}
+ /**
+ * Returns the list of action handlers provided by this plugin.
+ * <p>
+ * Registers the Graph explore action along with usage and info actions.
+ * If the plugin is disabled, only the usage and info actions are registered.
+ *
+ * @return a list of action handlers for graph operations
+ */
 @Override
 public List<ActionHandler> getActions() {

+ /**
+ * Returns the REST handlers provided by this plugin.
+ * <p>
+ * Registers the REST endpoint for graph exploration at {@code /_graph/explore}.
+ * If the plugin is disabled, no REST handlers are registered.
+ *
+ * @param settings the node settings
+ * @param namedWriteableRegistry the named writeable registry
+ * @param restController the REST controller
+ * @param clusterSettings the cluster settings
+ * @param indexScopedSettings the index-scoped settings
+ * @param settingsFilter the settings filter
+ * @param indexNameExpressionResolver the index name expression resolver
+ * @param nodesInCluster supplier for discovery nodes
+ * @param clusterSupportsFeature predicate to check feature support
+ * @return a list containing the graph REST handler if enabled, empty otherwise
+ */
 @Override
 public List<RestHandler> getRestHandlers(

diff --git a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/ConstantKeywordMapperPlugin.java b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/ConstantKeywordMapperPlugin.java
+/**
+ * Plugin for constant keyword field mapping in Elasticsearch.
+ * <p>
+ * This plugin provides the {@code constant_keyword} field type, which is optimized for
+ * fields that have the same value across all documents in an index. This field type uses
+ * minimal storage and provides efficient query performance by storing the value once in
+ * metadata rather than for each document.
+ *
+ * Usage Example:
+ * {@code
+ * PUT /my-index
+ * {
+ * "mappings": {
+ * "properties": {
+ * "environment": {
+ * "type": "constant_keyword",
+ * "value": "production"
+ * }
+ * }
+ * }
+ * }
+ * }
+ */
public class ConstantKeywordMapperPlugin extends Plugin implements MapperPlugin {
+ /**
+ * Returns the field mappers provided by this plugin.
+ *
+ * @return a map containing the constant keyword field type parser
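+ *
+ * <p>Illustrative query behavior, reusing the hypothetical index from the class example:
+ * a term query on a {@code constant_keyword} field matches either all documents or none,
+ * without reading per-document values:</p>
+ * {@code
+ * GET /my-index/_search
+ * {
+ *   "query": { "term": { "environment": "production" } }
+ * }
+ * }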
+ */
@Override
    public Map<String, Mapper.TypeParser> getMappers() {

diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordMapperPlugin.java b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordMapperPlugin.java
 /**
  * <p>This plugin adds two associated features:</p>
  * <ul>
  * <li>A mapping type {@code counted_keyword} that behaves like {@code keyword} except that it counts duplicate values.</li>
  * <li>A {@code counted_terms} aggregation that operates on fields mapped as {@code counted_keyword} and considers
  * duplicate values in the doc_count that it returns.</li>
  * </ul>
  * <p>Both features are considered a tech preview and are thus intentionally undocumented.</p>
  */
 public class CountedKeywordMapperPlugin extends Plugin implements MapperPlugin, SearchPlugin {

+ /**
+ * Returns the field mappers provided by this plugin.
+ * <p>
+ * Registers the {@link CountedKeywordFieldMapper} which provides the {@code counted_keyword}
+ * field type that tracks duplicate value counts.
+ *
+ * @return a map containing the counted keyword field type parser
+ */
 @Override
 public Map<String, Mapper.TypeParser> getMappers() {

+ /**
+ * Returns the aggregations provided by this plugin.
+ * <p>
+ * Registers the {@code counted_terms} aggregation which works specifically with
+ * {@code counted_keyword} fields to provide accurate document counts that include
+ * duplicate value frequencies.
+ *
+ * @return a list containing the counted terms aggregation specification
+ */
 @Override
 public List<AggregationSpec> getAggregations() {

diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongMapperPlugin.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongMapperPlugin.java
+/**
+ * Plugin for unsigned long field mapping in Elasticsearch.
+ * <p>
+ * This plugin provides the {@code unsigned_long} field type, which supports storing
+ * and querying unsigned 64-bit integers (range: 0 to 2^64-1). This is useful for
+ * fields that store values exceeding the signed long range (2^63-1), such as large
+ * counters, timestamps in nanoseconds, or unsigned identifiers.
+ *
+ * Usage Example:
+ * {@code
+ * PUT /my-index
+ * {
+ * "mappings": {
+ * "properties": {
+ * "counter": {
+ * "type": "unsigned_long"
+ * }
+ * }
+ * }
+ * }
+ *
+ * POST /my-index/_doc
+ * {
+ * "counter": 18446744073709551615
+ * }
+ * }
+ */
public class UnsignedLongMapperPlugin extends Plugin implements MapperPlugin {
+ /**
+ * Returns the field mappers provided by this plugin.
+ *
+ * @return a map containing the unsigned long field type parser
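+ *
+ * <p>Illustrative query, reusing the mapping from the class example above: values are
+ * queried like any other numeric field, including ranges beyond the signed long maximum:</p>
+ * {@code
+ * GET /my-index/_search
+ * {
+ *   "query": { "range": { "counter": { "gte": 9223372036854775808 } } }
+ * }
+ * }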
+ */
@Override
    public Map<String, Mapper.TypeParser> getMappers() {

diff --git a/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionFieldPlugin.java b/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionFieldPlugin.java
+/**
+ * Plugin for version field mapping in Elasticsearch.
+ * <p>
+ * This plugin provides the {@code version} field type, which is optimized for storing
+ * and querying software version strings (e.g., "1.2.3", "2.0.0-beta1"). The field type
+ * uses a specialized encoding that enables efficient sorting and range queries on version
+ * values while understanding semantic versioning conventions.
+ *
+ * Usage Example:
+ * {@code
+ * PUT /my-index
+ * {
+ * "mappings": {
+ * "properties": {
+ * "software_version": {
+ * "type": "version"
+ * }
+ * }
+ * }
+ * }
+ * }
+ */
public class VersionFieldPlugin extends Plugin implements MapperPlugin {
+ /**
+ * Constructs a new VersionFieldPlugin with the specified settings.
+ *
+ * @param settings the node settings (not used by this plugin)
+ */
public VersionFieldPlugin(Settings settings) {}
+ /**
+ * Returns the field mappers provided by this plugin.
+ *
+ * @return a map containing the version field type parser
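+ *
+ * <p>Illustrative query, reusing the field from the class example: range semantics follow
+ * version ordering, so {@code 1.10.0} sorts after {@code 1.2.0}:</p>
+ * {@code
+ * GET /my-index/_search
+ * {
+ *   "query": { "range": { "software_version": { "gte": "1.2.0", "lt": "2.0.0" } } }
+ * }
+ * }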
+ */
@Override
    public Map<String, Mapper.TypeParser> getMappers() {

+ /**
+ * Returns the named writeables provided by this plugin.
+ * <p>
+ * Registers the version-specific doc value format for serialization
+ * of version field values across the cluster.
+ *
+ * @return a list containing the version doc value format entry
+ */
 @Override
 public List<NamedWriteableRegistry.Entry> getNamedWriteables() {

diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/MachineLearningPackageLoader.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/MachineLearningPackageLoader.java
+/**
+ * Plugin for loading machine learning model packages in Elasticsearch.
+ * <p>
+ * This plugin provides functionality for downloading and importing trained ML models
+ * from a configured repository (default: https://ml-models.elastic.co). It manages
+ * model package downloads, including support for air-gapped installations using local
+ * file repositories.
+ * <p>
+ * The plugin creates a dedicated thread pool for parallel model downloads and validates
+ * the model repository configuration at bootstrap time.
+ */
 public class MachineLearningPackageLoader extends Plugin implements ActionPlugin {

+ /**
+ * The default URL for the Elastic ML models repository.
+ */
 public static final String DEFAULT_ML_MODELS_REPOSITORY = "https://ml-models.elastic.co";

+ /**
+ * Setting for configuring the ML model repository location.
+ * <p>
+ * This can be an HTTP/HTTPS URL or a file:// URI pointing to a local directory
+ * under the Elasticsearch config directory. This setting is dynamic and node-scoped.
+ */
 public static final Setting<String> MODEL_REPOSITORY =

+ /**
+ * Returns the list of action handlers provided by this plugin.
+ * <p>
+ * These are internal actions with no REST endpoints, used for model
+ * package configuration retrieval and loading operations.
+ *
+ * @return a list of action handlers for ML package operations
+ */
 @Override
 public List<ActionHandler> getActions() {

+ /**
+ * Returns the named writeables provided by this plugin.
+ * <p>
+ * Registers the model download task status for serialization across the cluster.
+ *
+ * @return a list containing the model download status entry
+ */
 @Override
 public List<NamedWriteableRegistry.Entry> getNamedWriteables() {

+ /**
+ * Returns the executor builders for this plugin's thread pools.
+ * <p>
+ * Creates a dedicated thread pool for parallel model file downloads.
+ *
+ * @param settings the node settings
+ * @return a list containing the model download executor builder
+ */
 @Override
 public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) {

+ /**
+ * Creates the executor builder for model downloads.
+ * <p>
+ * Creates a fixed-size thread pool with an unbounded queue for downloading
+ * model definition files in parallel streams.
+ *
+ * @param settings the node settings
+ * @return the model download executor builder
+ */
 public static FixedExecutorBuilder modelDownloadExecutor(Settings settings) {
     // Threadpool with a fixed number of threads for
     // downloading the model definition files
@@ -101,6 +172,16 @@ public static FixedExecutorBuilder modelDownloadExecutor(Settings settings) {
 }

+ /**
+ * Returns the bootstrap checks for this plugin.
+ * <p>
+ * Validates the model repository configuration at startup, ensuring it uses
+ * a supported scheme (http, https, or file) and meets security requirements.
+ * This check is always enforced.
+ *
+ * @return a list containing the model repository validation check
+ */
 @Override
 public List<BootstrapCheck> getBootstrapChecks() {

+ /**
+ * Validates the configured model repository location.
+ * <p>
+ * Ensures the repository URI uses a supported scheme (http, https, or file),
+ * does not contain authentication credentials, and if using file://, points
+ * to a location under the Elasticsearch config directory.
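+ *
+ * <p>Illustrative calls; the paths are invented:</p>
+ * {@code
+ * validateModelRepository("https://ml-models.elastic.co", configPath);     // accepted
+ * validateModelRepository("file:///etc/elasticsearch/models", configPath); // accepted if under the config directory
+ * validateModelRepository("ftp://example.com/models", configPath);         // rejected: unsupported scheme
+ * }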
+ *
+ * @param repository the repository URI string to validate
+ * @param configPath the Elasticsearch configuration directory path
+ * @throws URISyntaxException if the repository URI is malformed
+ * @throws IllegalArgumentException if the repository configuration is invalid
+ */
 static void validateModelRepository(String repository, Path configPath) throws URISyntaxException {
     URI baseUri = new URI(repository.endsWith("/") ? repository : repository + "/").normalize();
     URI normalizedConfigUri = configPath.toUri().normalize();

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java
index f46652978753c..846f2bd0bc722 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java
@@ -11,25 +11,141 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.xpack.ml.autoscaling.AbstractNodeAvailabilityZoneMapper;

+/**
+ * Extension interface for customizing Machine Learning plugin behavior.
+ * <p>
+ * This interface allows external implementations to configure and control various aspects
+ * of the Machine Learning plugin, including feature enablement, lifecycle management,
+ * and infrastructure configuration.
+ *
+ * Usage Examples:
+ * {@code
+ * // Implementing a custom ML extension
+ * public class CustomMlExtension implements MachineLearningExtension {
+ *
+ * {@literal @}Override
+ * public void configure(Settings settings) {
+ * // Custom configuration logic
+ * }
+ *
+ * {@literal @}Override
+ * public boolean useIlm() {
+ * return true; // Enable Index Lifecycle Management
+ * }
+ *
+ * {@literal @}Override
+ * public boolean isAnomalyDetectionEnabled() {
+ * return true; // Enable anomaly detection feature
+ * }
+ *
+ * {@literal @}Override
+ * public boolean isNlpEnabled() {
+ * return true; // Enable NLP capabilities
+ * }
+ * }
+ * }
+ */
public interface MachineLearningExtension {
+ /**
+ * Configures the extension with the provided settings.
+ * <p>
+ * This method is called during initialization to allow the extension to
+ * configure itself based on cluster settings.
+ *
+ * @param settings the cluster settings
+ */
 default void configure(Settings settings) {}

+ /**
+ * Indicates whether to use Index Lifecycle Management (ILM) for ML indices.
+ * <p>
+ * When enabled, ML indices will be managed by ILM policies for automatic
+ * lifecycle management including rollover, retention, and deletion.
+ *
+ * @return true if ILM should be used for ML indices, false otherwise
+ */
 boolean useIlm();

+ /**
+ * Indicates whether to include node information in ML audit messages.
+ * <p>
+ * When enabled, audit messages will include the names of nodes where
+ * ML tasks are assigned or running.
+ *
+ * @return true if node information should be included in audit messages, false otherwise
+ */
 boolean includeNodeInfo();

+ /**
+ * Indicates whether anomaly detection features are enabled.
+ * <p>
+ * Controls the availability of anomaly detection jobs and related functionality.
+ *
+ * @return true if anomaly detection is enabled, false otherwise
+ */
 boolean isAnomalyDetectionEnabled();

+ /**
+ * Indicates whether data frame analytics features are enabled.
+ * <p>
+ * Controls the availability of data frame analytics jobs including regression,
+ * classification, and outlier detection.
+ *
+ * @return true if data frame analytics is enabled, false otherwise
+ */
 boolean isDataFrameAnalyticsEnabled();

+ /**
+ * Indicates whether Natural Language Processing (NLP) features are enabled.
+ * <p>
+ * Controls the availability of NLP models and inference capabilities.
+ *
+ * @return true if NLP is enabled, false otherwise
+ */
 boolean isNlpEnabled();

+ /**
+ * Indicates whether the inference process cache should be disabled.
+ * <p>
+ * When true, the inference process cache will not be used, which may impact
+ * performance but can be useful in certain deployment scenarios.
+ *
+ * @return true if the inference process cache should be disabled, false otherwise
+ */
 default boolean disableInferenceProcessCache() {
     return false;
 }

+ /**
+ * Returns the list of allowed settings for analytics destination indices.
+ * <p>
+ * These settings can be specified when creating destination indices for
+ * data frame analytics jobs.
+ *
+ * @return an array of allowed setting names
+ */
 String[] getAnalyticsDestIndexAllowedSettings();

+ /**
+ * Creates a node availability zone mapper for the given settings.
+ * <p>
+ * The availability zone mapper is used for autoscaling and determining
+ * node placement across availability zones.
+ *
+ * @param settings the cluster settings
+ * @param clusterSettings the cluster settings service
+ * @return a configured availability zone mapper
+ */
 AbstractNodeAvailabilityZoneMapper getNodeAvailabilityZoneMapper(Settings settings, ClusterSettings clusterSettings);
 }

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java
index 9b66ecfcd9875..ed74f45145421 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java
@@ -42,6 +42,37 @@
 import java.util.Map;
 import java.util.Objects;

+/**
+ * Monitors and audits Machine Learning task assignments in the cluster.
+ * <p>
+ * This class listens to cluster state changes and generates audit messages when ML tasks
+ * (jobs, datafeeds, and data frame analytics) are assigned, unassigned, or relocated across
+ * cluster nodes. It also monitors and logs warnings for tasks that remain unassigned for
+ * extended periods.
+ * <p>
+ * The notifier tracks assignment state and implements throttling to avoid excessive logging
+ * and audit message spam. It maintains in-memory state about unassigned tasks to determine
+ * how long they've been unassigned and when they were last reported.
+ *
+ * Usage Examples:
+ * {@code
+ * // Creating an assignment notifier (typically done by the ML plugin)
+ * MlAssignmentNotifier notifier = new MlAssignmentNotifier(
+ * anomalyDetectionAuditor,
+ * dataFrameAnalyticsAuditor,
+ * threadPool,
+ * clusterService
+ * );
+ *
+ * // The notifier automatically listens to cluster state changes
+ * // and generates audit messages when assignments change
+ *
+ * // Manually trigger an audit of unassigned tasks
+ * notifier.auditUnassignedMlTasks(projectId, nodes, tasks);
+ * }
+ */
public class MlAssignmentNotifier implements ClusterStateListener {
private static final Logger logger = LogManager.getLogger(MlAssignmentNotifier.class);
@@ -124,9 +155,24 @@ private void auditChangesToMlTasks(ClusterChangedEvent event) {
}
/**
- * Creates an audit warning for all currently unassigned ML
- * tasks, even if a previous audit warning has been created.
- * Care must be taken not to call this method frequently.
+ * Creates audit warnings for all currently unassigned ML tasks.
+ * <p>
+ * This method generates audit messages for all unassigned tasks, regardless of whether
+ * they have been previously audited. It should be used sparingly to avoid flooding
+ * the audit log; it is typically invoked for explicit, on-demand reporting rather than
+ * on every cluster state change.
+ *
+ * @param projectId the project identifier
+ * @param nodes the current discovery nodes in the cluster
+ * @param tasks the persistent tasks metadata
+ */

+ /**
+ * Returns the human-readable name of a node, falling back to its ID.
+ * <p>
+ * This method attempts to retrieve the human-readable name of a node from the cluster
+ * state. If the node is no longer in the cluster or doesn't have a name configured,
+ * the method falls back to returning the node ID.
+ * <p>
+ * This fallback behavior keeps audit messages meaningful even when the referenced
+ * node has already left the cluster.
+ */

diff --git a/x-pack/plugin/rank-vectors/src/main/java/org/elasticsearch/xpack/rank/vectors/RankVectorsPlugin.java b/x-pack/plugin/rank-vectors/src/main/java/org/elasticsearch/xpack/rank/vectors/RankVectorsPlugin.java
+/**
+ * Plugin for rank vectors field mapping in Elasticsearch.
+ * <p>
+ * This plugin provides the {@code rank_vectors} field type, which is optimized for storing
+ * and querying vectors used in ranking and similarity search operations. The field type
+ * stores vectors in a memory-efficient format suitable for large-scale retrieval tasks.
+ * This feature requires an Enterprise license.
+ *
+ * Usage Example:
+ * {@code
+ * PUT /my-index
+ * {
+ * "mappings": {
+ * "properties": {
+ * "embedding": {
+ * "type": "rank_vectors",
+ * "dims": 128
+ * }
+ * }
+ * }
+ * }
+ * }
+ */
public class RankVectorsPlugin extends Plugin implements MapperPlugin {
+ /**
+ * Licensed feature definition for rank vectors functionality.
+ * Requires an Enterprise license.
+ */
public static final LicensedFeature.Momentary RANK_VECTORS_FEATURE = LicensedFeature.momentary(
null,
"rank-vectors",
License.OperationMode.ENTERPRISE
);
+ /**
+ * Returns the field mappers provided by this plugin.
+ * + * Registers the {@link RankVectorsFieldMapper} with license checking. + * The mapper cannot be used in multi-fields and requires an active Enterprise license. + *
+ * <p>
+ * Registers the {@link RankVectorsFieldMapper} with license checking.
+ * The mapper cannot be used in multi-fields and requires an active Enterprise license.
+ *
+ * @return a map containing the rank vectors field type parser
+ */
 @Override
 public Map<String, Mapper.TypeParser> getMappers() {

diff --git a/x-pack/plugin/redact/src/main/java/org/elasticsearch/xpack/redact/RedactPlugin.java b/x-pack/plugin/redact/src/main/java/org/elasticsearch/xpack/redact/RedactPlugin.java
+/**
+ * Plugin for the redact ingest processor in Elasticsearch.
+ * <p>
+ * This plugin provides an ingest processor that can redact sensitive information
+ * from documents during ingestion. The redaction processor uses pattern matching
+ * to identify and replace sensitive data such as credit card numbers, email addresses,
+ * or custom patterns.
+ */
 public class RedactPlugin extends Plugin implements IngestPlugin {

     private final Settings settings;

+ /**
+ * Constructs a new RedactPlugin with the specified settings.
+ *
+ * @param settings the node settings
+ */
 public RedactPlugin(final Settings settings) {
     this.settings = settings;
 }

+ /**
+ * Returns the ingest processors provided by this plugin.
+ * <p>
+ * Registers the {@link RedactProcessor} which can be used in ingest pipelines
+ * to redact sensitive information from documents. The processor is license-aware
+ * and uses the matcher watchdog to prevent runaway regex operations.
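+ *
+ * <p>A sketch of a pipeline using the processor; the Grok-style pattern name is
+ * illustrative:</p>
+ * {@code
+ * PUT /_ingest/pipeline/redact-pii
+ * {
+ *   "processors": [
+ *     { "redact": { "field": "message", "patterns": ["%{EMAILADDRESS:EMAIL}"] } }
+ *   ]
+ * }
+ * }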
+ *
+ * @param parameters the processor parameters including the matcher watchdog
+ * @return a map containing the redact processor factory
+ */
 @Override
 public Map<String, Processor.Factory> getProcessors(Processor.Parameters parameters) {

+ /**
+ * Returns the X-Pack license state.
+ * <p>
+ * The redact processor may require specific license levels to operate.
+ *
+ * @return the shared X-Pack license state
+ */
 protected XPackLicenseState getLicenseState() {
     return XPackPlugin.getSharedLicenseState();
 }

diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/ShutdownPlugin.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/ShutdownPlugin.java
index c0fb3c353ecbc..ebbec1f446da4 100644
--- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/ShutdownPlugin.java
+++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/ShutdownPlugin.java
@@ -26,7 +26,41 @@
 import java.util.function.Predicate;
 import java.util.function.Supplier;

+/**
+ * Plugin for node shutdown management in Elasticsearch.
+ * <p>
+ * This plugin provides APIs for gracefully shutting down nodes in a cluster. It allows
+ * administrators to mark nodes for shutdown, which triggers the cluster to prepare for the
+ * node's removal by relocating shards, stopping allocations, and ensuring data safety.
+ *
+ * Usage Example:
+ * {@code
+ * // Mark a node for shutdown
+ * PUT /_nodes/<node-id>/shutdown
+ * {
+ * "type": "restart",
+ * "reason": "Planned maintenance",
+ * "allocation_delay": "10m"
+ * }
+ *
+ * // Get shutdown status
+ * GET /_nodes/<node-id>/shutdown
+ *
+ * // Cancel shutdown
+ * DELETE /_nodes/<node-id>/shutdown
+ * }
+ */
public class ShutdownPlugin extends Plugin implements ActionPlugin {
+ /**
+ * Creates the plugin components.
+ * + * Initializes the {@link NodeSeenService} which tracks when nodes are last seen + * in the cluster, helping coordinate graceful shutdowns. + *
+ * <p>
+ * Initializes the {@link NodeSeenService} which tracks when nodes are last seen
+ * in the cluster, helping coordinate graceful shutdowns.
+ *
+ * @param services the plugin services providing access to cluster resources
+ * @return a collection containing the node seen service
+ */
 @Override
 public Collection<?> createComponents(PluginServices services) {
@@ -35,6 +69,15 @@ public Collection<?> createComponents(PluginServices services) {
     return Collections.singletonList(nodeSeenService);
 }

+ /**
+ * Returns the list of action handlers provided by this plugin.
+ * <p>
+ * Registers transport actions for putting, deleting, and getting shutdown status
+ * for nodes in the cluster.
+ *
+ * @return a list of action handlers for shutdown operations
+ */
 @Override
 public List<ActionHandler> getActions() {

+ /**
+ * Returns the REST handlers provided by this plugin.
+ * <p>
+ * Registers REST endpoints for node shutdown management at
+ * {@code /_nodes/<node-id>/shutdown}.
+ *
+ * @return a list of REST handlers for shutdown operations
+ */
 @Override
 public List<RestHandler> getRestHandlers(

diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java
+/**
+ * Plugin for Elastic Stack index template management in Elasticsearch.
+ * <p>
+ * This plugin manages the index templates used by various Elastic Stack components
+ * for monitoring, logging, and observability data. It maintains both current and
+ * legacy template registries to ensure compatibility across versions.
+ */
 public class StackPlugin extends Plugin implements ActionPlugin {

     private final Settings settings;

+ /**
+ * Constructs a new StackPlugin with the specified settings.
+ *
+ * @param settings the node settings used to configure the template registries
+ */
 public StackPlugin(Settings settings) {
     this.settings = settings;
 }

+ /**
+ * Returns the list of settings provided by this plugin.
+ *
+ * @return a list containing the stack templates enabled setting
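+ *
+ * <p>Illustrative toggle; the {@code stack.templates.enabled} key is assumed from this
+ * plugin's setting definition:</p>
+ * {@code
+ * PUT /_cluster/settings
+ * {
+ *   "persistent": { "stack.templates.enabled": false }
+ * }
+ * }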
+ */
 @Override
 public List<Setting<?>> getSettings() {

+ /**
+ * Creates the plugin components.
+ * <p>
+ * This method creates both legacy and current template registries for Elastic Stack
+ * components. The legacy registry maintains backward compatibility with older versions,
+ * while the current registry provides the latest template definitions.
+ *
+ * @param services the plugin services providing access to cluster resources
+ * @return a list containing both the legacy and current stack template registries
+ */
 @Override
 public Collection<?> createComponents(PluginServices services) {
     LegacyStackTemplateRegistry legacyStackTemplateRegistry = new LegacyStackTemplateRegistry(

diff --git a/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/VectorTilePlugin.java b/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/VectorTilePlugin.java
index 5ca9b489431e4..306920aef7ef8 100644
--- a/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/VectorTilePlugin.java
+++ b/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/VectorTilePlugin.java
@@ -26,13 +26,58 @@
 import java.util.function.Predicate;
 import java.util.function.Supplier;

+/**
+ * Plugin for vector tile search functionality in Elasticsearch.
+ * <p>
+ * This plugin provides the ability to return search results as Mapbox Vector Tiles (MVT),
+ * which is a compact binary format for efficiently transmitting geographic data for rendering
+ * in maps. The plugin aggregates geo_point and geo_shape data into vector tiles at specified
+ * zoom levels.
+ *
+ * Usage Example:
+ * {@code
+ * GET /my-index/_mvt/geo_field/15/5242/12661
+ * {
+ * "grid_precision": 2,
+ * "fields": ["field1", "field2"],
+ * "query": {
+ * "match_all": {}
+ * }
+ * }
+ * }
+ */
public class VectorTilePlugin extends Plugin implements ActionPlugin {
- // to be overriden by tests
+ /**
+ * Returns the X-Pack license state.
+ * <p>
+ * This method can be overridden by tests to provide a different license state.
+ *
+ * @return the shared X-Pack license state
+ */
 protected XPackLicenseState getLicenseState() {
     return XPackPlugin.getSharedLicenseState();
 }

+ /**
+ * Returns the REST handlers provided by this plugin.
+ * <p>
+ * Registers the REST endpoint for vector tile search at
+ * {@code /<index>/_mvt/<field>/<zoom>/<x>/<y>}.
+ *
+ * @return a list containing the vector tile search REST handler
+ */
 @Override
 public List<RestHandler> getRestHandlers(

diff --git a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java
+/**
+ * Plugin for voting-only master-eligible nodes in Elasticsearch.
+ * <p>
+ * A voting-only node is one with the 'master' and 'voting-only' roles, which means
+ * the node may participate in voting for master elections but is ineligible to become
+ * the elected master itself. This allows for increased cluster resilience while
+ * minimizing resource requirements for master-eligible nodes.
+ * <p>
+ * The plugin implements a custom election strategy that ensures full master nodes
+ * are preferred over voting-only nodes during elections, and that voting-only nodes
+ * only broadcast cluster state to full master nodes for efficiency.
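+ *
+ * <p>Illustrative node configuration (elasticsearch.yml):</p>
+ * {@code
+ * node.roles: [ master, voting_only ]
+ * }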
+ */
 public class VotingOnlyNodePlugin extends Plugin implements ClusterCoordinationPlugin, NetworkPlugin, ActionPlugin {
@@ -59,26 +69,64 @@ public class VotingOnlyNodePlugin extends Plugin implements ClusterCoordinationP

     private final boolean isVotingOnlyNode;

+ /**
+ * Constructs a new VotingOnlyNodePlugin with the specified settings.
+ *
+ * @param settings the node settings used to determine if this node has the voting-only role
+ */
 public VotingOnlyNodePlugin(Settings settings) {
     this.settings = settings;
     threadPool = new SetOnce<>();
     isVotingOnlyNode = DiscoveryNode.hasRole(settings, DiscoveryNodeRole.VOTING_ONLY_NODE_ROLE);
 }

+ /**
+ * Checks if the given discovery node is a voting-only node.
+ *
+ * @param discoveryNode the node to check
+ * @return {@code true} if the node has the voting-only role, {@code false} otherwise
+ */
 public static boolean isVotingOnlyNode(DiscoveryNode discoveryNode) {
     return discoveryNode.getRoles().contains(DiscoveryNodeRole.VOTING_ONLY_NODE_ROLE);
 }

+ /**
+ * Checks if the given discovery node is a full master node.
+ * <p>
+ * A full master node is one that has master capabilities but is NOT voting-only,
+ * meaning it can be elected as the cluster master.
+ *
+ * @param discoveryNode the node to check
+ * @return {@code true} if the node is master-eligible but not voting-only, {@code false} otherwise
+ */
 public static boolean isFullMasterNode(DiscoveryNode discoveryNode) {
     return discoveryNode.isMasterNode() && discoveryNode.getRoles().contains(DiscoveryNodeRole.VOTING_ONLY_NODE_ROLE) == false;
 }

+ /**
+ * Creates the plugin components.
+ * <p>
+ * This method initializes the thread pool reference for use by transport interceptors.
+ *
+ * @param services the plugin services providing access to cluster resources
+ * @return an empty collection as this plugin does not export any components
+ */
 @Override
 public Collection<?> createComponents(PluginServices services) {
     this.threadPool.set(services.threadPool());
     return Collections.emptyList();
 }

+ /**
+ * Returns the list of action handlers provided by this plugin.
+ * <p>
+ * Registers transport actions for tracking voting-only node usage and information.
+ *
+ * @return a list of action handlers for voting-only node operations
+ */
 @Override
 public List<ActionHandler> getActions() {

+ /**
+ * Returns the election strategies provided by this plugin.
+ * <p>
+ * Provides a custom election strategy that ensures full master nodes are preferred
+ * over voting-only nodes during master elections.
+ *
+ * @return a map containing the voting-only election strategy
+ */
 @Override
 public Map<String, ElectionStrategy> getElectionStrategies() {

+ /**
+ * Returns the transport interceptors provided by this plugin.
+ * <p>
+ * On voting-only nodes, installs an interceptor that modifies cluster state publication
+ * behavior to only broadcast state to full master nodes for efficiency.
+ *
+ * @param namedWriteableRegistry the named writeable registry
+ * @param threadContext the thread context
+ * @return a list containing the transport interceptor if this is a voting-only node, empty otherwise
+ */
 @Override
 public List<TransportInterceptor> getTransportInterceptors(

+ /**
+ * Returns additional settings contributed by this plugin.
+ * <p>
+ * Configures the cluster to use the voting-only election strategy.
+ *
+ * @return settings that enable the voting-only election strategy
+ */
 @Override
 public Settings additionalSettings() {
     return Settings.builder().put(DiscoveryModule.ELECTION_STRATEGY_SETTING.getKey(), VOTING_ONLY_ELECTION_STRATEGY).build();

diff --git a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/Wildcard.java b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/Wildcard.java
index 8562548a43e33..91747f3069a78 100644
--- a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/Wildcard.java
+++ b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/Wildcard.java
@@ -16,8 +16,44 @@
 import java.util.LinkedHashMap;
 import java.util.Map;

+/**
+ * Plugin for wildcard field mapping in Elasticsearch.
+ * <p>
+ * This plugin provides the {@code wildcard} field type, which is optimized for fields
+ * that will be queried using wildcard and regexp patterns. Unlike the {@code keyword}
+ * field type, which is optimized for exact matches, the wildcard field uses an n-gram
+ * index structure that provides efficient wildcard searches even on large text values.
+ *
+ * Usage Example:
+ * {@code
+ * {
+ * "mappings": {
+ * "properties": {
+ * "file_path": {
+ * "type": "wildcard"
+ * }
+ * }
+ * }
+ * }
+ *
+ * GET /my-index/_search
+ * {
+ * "query": {
+ * "wildcard": {
+ * "file_path": "**/config/*.yaml"
+ * }
+ * }
+ * }
+ * }
+ */
public class Wildcard extends Plugin implements MapperPlugin {
+ /**
+ * Returns the field mappers provided by this plugin.
+ *
+ * @return a map containing the wildcard field type parser
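+ *
+ * <p>Illustrative regexp query, reusing the {@code file_path} field from the class
+ * example; regexp patterns also benefit from the n-gram index:</p>
+ * {@code
+ * GET /my-index/_search
+ * {
+ *   "query": { "regexp": { "file_path": ".*/config/[^/]+\\.yaml" } }
+ * }
+ * }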
+ */
@Override
    public Map<String, Mapper.TypeParser> getMappers() {

diff --git a/x-pack/plugin/write-load-forecaster/src/main/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterPlugin.java b/x-pack/plugin/write-load-forecaster/src/main/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterPlugin.java
+/**
+ * Plugin for write load forecasting in Elasticsearch.
+ * <p>
+ * This plugin provides predictive analytics for write load distribution across shards,
+ * helping optimize shard allocation and cluster resource management. The forecasting
+ * functionality requires an Enterprise license.
+ */
 public class WriteLoadForecasterPlugin extends Plugin implements ClusterPlugin {

+ /**
+ * Licensed feature definition for write load forecasting.
+ * <p>
+ * This feature requires an Enterprise license to operate.
+ */
 public static final LicensedFeature.Momentary WRITE_LOAD_FORECAST_FEATURE = LicensedFeature.momentary(
     null,
     "write-load-forecast",
     License.OperationMode.ENTERPRISE
 );

+ /**
+ * Setting to manually override the write load forecast for an index.
+ * <p>
+ * When set to a value greater than 0, this setting overrides the automatically
+ * calculated write load forecast. This is useful for testing or manual optimization.
+ * The value must be non-negative.
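+ *
+ * <p>Illustrative override; the {@code index.override_write_load_forecast} key is
+ * assumed from this setting's definition, and the index name is invented:</p>
+ * {@code
+ * PUT /my-index/_settings
+ * {
+ *   "index.override_write_load_forecast": 2.5
+ * }
+ * }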
+ */
 public static final Setting<Double> OVERRIDE_WRITE_LOAD_FORECAST_SETTING =

+ /**
+ * Creates the write load forecaster component.
+ * <p>
+ * This method instantiates a {@link LicensedWriteLoadForecaster} that uses historical
+ * write patterns to predict future write load distribution. The forecaster is only
+ * active when a valid Enterprise license is present.
+ *
+ * @param threadPool the thread pool for executing forecasting operations
+ * @param settings the cluster settings
+ * @param clusterSettings the dynamic cluster settings manager
+ * @return a collection containing the licensed write load forecaster
+ */
 @Override
 public Collection<?> createComponents(