diff --git a/spring-ai-docs/src/main/antora/modules/ROOT/nav.adoc b/spring-ai-docs/src/main/antora/modules/ROOT/nav.adoc index c8c37e28f6a..32172638d6a 100644 --- a/spring-ai-docs/src/main/antora/modules/ROOT/nav.adoc +++ b/spring-ai-docs/src/main/antora/modules/ROOT/nav.adoc @@ -1,117 +1,124 @@ * xref:index.adoc[Overview] ** xref:concepts.adoc[AI Concepts] * xref:getting-started.adoc[Getting Started] -* xref:api/chatclient.adoc[] -** xref:api/advisors.adoc[Advisors] -* xref:api/index.adoc[AI Models] -** xref:api/chatmodel.adoc[Chat Models] -*** xref:api/chat/comparison.adoc[Chat Models Comparison] -*** xref:api/chat/bedrock-converse.adoc[Amazon Bedrock Converse] -*** xref:api/chat/anthropic-chat.adoc[Anthropic 3] -**** xref:api/chat/functions/anthropic-chat-functions.adoc[Anthropic Function Calling (Deprecated)] -*** xref:api/chat/azure-openai-chat.adoc[Azure OpenAI] -**** xref:api/chat/functions/azure-open-ai-chat-functions.adoc[Azure OpenAI Function Calling] -*** xref:api/chat/deepseek-chat.adoc[DeepSeek] -*** xref:api/chat/dmr-chat.adoc[Docker Model Runner] -*** xref:api/chat/google-vertexai.adoc[Google VertexAI] -**** xref:api/chat/vertexai-gemini-chat.adoc[VertexAI Gemini] -*** xref:api/chat/groq-chat.adoc[Groq] -*** xref:api/chat/huggingface.adoc[Hugging Face] -*** xref:api/chat/mistralai-chat.adoc[Mistral AI] -**** xref:api/chat/functions/mistralai-chat-functions.adoc[Mistral Function Calling (Deprecated)] -*** xref:api/chat/minimax-chat.adoc[MiniMax] -**** xref:api/chat/functions/minimax-chat-functions.adoc[MinmaxFunction Calling] -*** xref:api/chat/moonshot-chat.adoc[Moonshot AI] -//// **** xref:api/chat/functions/moonshot-chat-functions.adoc[Moonshot Function Calling] -*** xref:api/chat/nvidia-chat.adoc[NVIDIA] -*** xref:api/chat/ollama-chat.adoc[Ollama] -**** xref:api/chat/functions/ollama-chat-functions.adoc[Ollama Function Calling (Deprecated)] -*** xref:api/chat/perplexity-chat.adoc[Perplexity AI] -*** OCI Generative AI -**** 
xref:api/chat/oci-genai/cohere-chat.adoc[Cohere] -*** xref:api/chat/openai-chat.adoc[OpenAI] -**** xref:api/chat/functions/openai-chat-functions.adoc[OpenAI Function Calling (Deprecated)] -*** xref:api/chat/qianfan-chat.adoc[QianFan] -*** xref:api/chat/zhipuai-chat.adoc[ZhiPu AI] -** xref:api/embeddings.adoc[Embedding Models] -*** xref:api/bedrock.adoc[Amazon Bedrock] -**** xref:api/embeddings/bedrock-cohere-embedding.adoc[Cohere] -**** xref:api/embeddings/bedrock-titan-embedding.adoc[Titan] -*** xref:api/embeddings/azure-openai-embeddings.adoc[Azure OpenAI] -*** xref:api/embeddings/mistralai-embeddings.adoc[Mistral AI] -*** xref:api/embeddings/minimax-embeddings.adoc[MiniMax] -*** xref:api/embeddings/oci-genai-embeddings.adoc[OCI GenAI] -*** xref:api/embeddings/ollama-embeddings.adoc[Ollama] -*** xref:api/embeddings/onnx.adoc[(ONNX) Transformers] -*** xref:api/embeddings/openai-embeddings.adoc[OpenAI] -*** xref:api/embeddings/postgresml-embeddings.adoc[PostgresML] -*** xref:api/embeddings/qianfan-embeddings.adoc[QianFan] -*** VertexAI -**** xref:api/embeddings/vertexai-embeddings-text.adoc[Text Embedding] -**** xref:api/embeddings/vertexai-embeddings-multimodal.adoc[Multimodal Embedding] -*** xref:api/embeddings/zhipuai-embeddings.adoc[ZhiPu AI] -** xref:api/imageclient.adoc[Image Models] -*** xref:api/image/azure-openai-image.adoc[Azure OpenAI] -*** xref:api/image/openai-image.adoc[OpenAI] -*** xref:api/image/stabilityai-image.adoc[Stability] -*** xref:api/image/zhipuai-image.adoc[ZhiPuAI] -*** xref:api/image/qianfan-image.adoc[QianFan] -** xref:api/audio[Audio Models] -*** xref:api/audio/transcriptions.adoc[] -**** xref:api/audio/transcriptions/azure-openai-transcriptions.adoc[Azure OpenAI] -**** xref:api/audio/transcriptions/openai-transcriptions.adoc[OpenAI] -*** xref:api/audio/speech.adoc[] -**** xref:api/audio/speech/openai-speech.adoc[OpenAI] -** xref:api/moderation[Moderation Models] -*** xref:api/moderation/openai-moderation.adoc[OpenAI] -*** 
xref:api/moderation/mistral-ai-moderation.adoc[Mistral AI] -// ** xref:api/generic-model.adoc[] -* xref:api/vectordbs.adoc[] -** xref:api/vectordbs/azure.adoc[] -** xref:api/vectordbs/azure-cosmos-db.adoc[] -** xref:api/vectordbs/apache-cassandra.adoc[] -** xref:api/vectordbs/chroma.adoc[] -** xref:api/vectordbs/couchbase.adoc[] -** xref:api/vectordbs/elasticsearch.adoc[] -** xref:api/vectordbs/gemfire.adoc[GemFire] -** xref:api/vectordbs/mariadb.adoc[] -** xref:api/vectordbs/milvus.adoc[] -** xref:api/vectordbs/mongodb.adoc[] -** xref:api/vectordbs/neo4j.adoc[] -** xref:api/vectordbs/opensearch.adoc[] -** xref:api/vectordbs/oracle.adoc[Oracle] -** xref:api/vectordbs/pgvector.adoc[] -** xref:api/vectordbs/pinecone.adoc[] -** xref:api/vectordbs/qdrant.adoc[] -** xref:api/vectordbs/redis.adoc[] -** xref:api/vectordbs/hana.adoc[SAP Hana] -** xref:api/vectordbs/typesense.adoc[] -** xref:api/vectordbs/weaviate.adoc[] - -* xref:api/retrieval-augmented-generation.adoc[Retrieval Augmented Generation (RAG)] -** xref:api/etl-pipeline.adoc[] -* xref:api/structured-output-converter.adoc[Structured Output] -* xref:api/chat-memory.adoc[Chat Memory] -* xref:api/tools.adoc[Tool Calling] -** xref:api/tools-migration.adoc[Migrating to ToolCallback API] -* xref:api/mcp/mcp-overview.adoc[Model Context Protocol (MCP)] -** xref:api/mcp/mcp-client-boot-starter-docs.adoc[MCP Client Boot Starters] -** xref:api/mcp/mcp-server-boot-starter-docs.adoc[MCP Server Boot Starters] -** xref:api/mcp/mcp-helpers.adoc[MCP Utilities] -* xref:api/multimodality.adoc[Multimodality] -* xref:observability/index.adoc[] -* Prompt Engineering +* Reference +** xref:api/chatclient.adoc[] +*** xref:api/advisors.adoc[Advisors] + ** xref:api/prompt.adoc[] -** xref:api/chat/prompt-engineering-patterns.adoc[] -* xref:api/testing.adoc[AI Model Evaluation] +** xref:api/structured-output-converter.adoc[Structured Output] +** xref:api/multimodality.adoc[Multimodality] +** xref:api/index.adoc[Models] + +*** 
xref:api/chatmodel.adoc[Chat Models] +**** xref:api/chat/comparison.adoc[Chat Models Comparison] +**** xref:api/chat/bedrock-converse.adoc[Amazon Bedrock Converse] +**** xref:api/chat/anthropic-chat.adoc[Anthropic 3] +**** xref:api/chat/azure-openai-chat.adoc[Azure OpenAI] +**** xref:api/chat/deepseek-chat.adoc[DeepSeek] +**** xref:api/chat/dmr-chat.adoc[Docker Model Runner] +**** xref:api/chat/google-vertexai.adoc[Google VertexAI] +***** xref:api/chat/vertexai-gemini-chat.adoc[VertexAI Gemini] +**** xref:api/chat/groq-chat.adoc[Groq] +**** xref:api/chat/huggingface.adoc[Hugging Face] +**** xref:api/chat/mistralai-chat.adoc[Mistral AI] +**** xref:api/chat/minimax-chat.adoc[MiniMax] +**** xref:api/chat/moonshot-chat.adoc[Moonshot AI] +**** xref:api/chat/nvidia-chat.adoc[NVIDIA] +**** xref:api/chat/ollama-chat.adoc[Ollama] +**** xref:api/chat/perplexity-chat.adoc[Perplexity AI] +**** OCI Generative AI +***** xref:api/chat/oci-genai/cohere-chat.adoc[Cohere] +**** xref:api/chat/openai-chat.adoc[OpenAI] +**** xref:api/chat/qianfan-chat.adoc[QianFan] +**** xref:api/chat/zhipuai-chat.adoc[ZhiPu AI] + +*** xref:api/embeddings.adoc[Embedding Models] +**** xref:api/bedrock.adoc[Amazon Bedrock] +***** xref:api/embeddings/bedrock-cohere-embedding.adoc[Cohere] +***** xref:api/embeddings/bedrock-titan-embedding.adoc[Titan] +**** xref:api/embeddings/azure-openai-embeddings.adoc[Azure OpenAI] +**** xref:api/embeddings/mistralai-embeddings.adoc[Mistral AI] +**** xref:api/embeddings/minimax-embeddings.adoc[MiniMax] +**** xref:api/embeddings/oci-genai-embeddings.adoc[OCI GenAI] +**** xref:api/embeddings/ollama-embeddings.adoc[Ollama] +**** xref:api/embeddings/onnx.adoc[(ONNX) Transformers] +**** xref:api/embeddings/openai-embeddings.adoc[OpenAI] +**** xref:api/embeddings/postgresml-embeddings.adoc[PostgresML] +**** xref:api/embeddings/qianfan-embeddings.adoc[QianFan] +**** VertexAI +***** xref:api/embeddings/vertexai-embeddings-text.adoc[Text Embedding] +***** 
xref:api/embeddings/vertexai-embeddings-multimodal.adoc[Multimodal Embedding] +**** xref:api/embeddings/zhipuai-embeddings.adoc[ZhiPu AI] +*** xref:api/imageclient.adoc[Image Models] +**** xref:api/image/azure-openai-image.adoc[Azure OpenAI] +**** xref:api/image/openai-image.adoc[OpenAI] +**** xref:api/image/stabilityai-image.adoc[Stability] +**** xref:api/image/zhipuai-image.adoc[ZhiPuAI] +**** xref:api/image/qianfan-image.adoc[QianFan] + +*** xref:api/audio[Audio Models] +**** xref:api/audio/transcriptions.adoc[] +***** xref:api/audio/transcriptions/azure-openai-transcriptions.adoc[Azure OpenAI] +***** xref:api/audio/transcriptions/openai-transcriptions.adoc[OpenAI] +**** xref:api/audio/speech.adoc[] +***** xref:api/audio/speech/openai-speech.adoc[OpenAI] + +*** xref:api/moderation[Moderation Models] +**** xref:api/moderation/openai-moderation.adoc[OpenAI] +**** xref:api/moderation/mistral-ai-moderation.adoc[Mistral AI] +// ** xref:api/generic-model.adoc[] + +** xref:api/chat-memory.adoc[Chat Memory] +** xref:api/tools.adoc[Tool Calling] +*** xref:api/tools-migration.adoc[Migrating to ToolCallback API] -* Service Connections -** xref:api/docker-compose.adoc[Docker Compose] -** xref:api/testcontainers.adoc[Testcontainers] -** xref:api/cloud-bindings.adoc[Cloud Bindings] +** xref:api/mcp/mcp-overview.adoc[Model Context Protocol (MCP)] +*** xref:api/mcp/mcp-client-boot-starter-docs.adoc[MCP Client Boot Starters] +*** xref:api/mcp/mcp-server-boot-starter-docs.adoc[MCP Server Boot Starters] +*** xref:api/mcp/mcp-helpers.adoc[MCP Utilities] + +** xref:api/retrieval-augmented-generation.adoc[Retrieval Augmented Generation (RAG)] +*** xref:api/etl-pipeline.adoc[] + +** xref:api/testing.adoc[Model Evaluation] + +** xref:api/vectordbs.adoc[] +*** xref:api/vectordbs/azure.adoc[] +*** xref:api/vectordbs/azure-cosmos-db.adoc[] +*** xref:api/vectordbs/apache-cassandra.adoc[] +*** xref:api/vectordbs/chroma.adoc[] +*** xref:api/vectordbs/couchbase.adoc[] +*** 
xref:api/vectordbs/elasticsearch.adoc[] +*** xref:api/vectordbs/gemfire.adoc[GemFire] +*** xref:api/vectordbs/mariadb.adoc[] +*** xref:api/vectordbs/milvus.adoc[] +*** xref:api/vectordbs/mongodb.adoc[] +*** xref:api/vectordbs/neo4j.adoc[] +*** xref:api/vectordbs/opensearch.adoc[] +*** xref:api/vectordbs/oracle.adoc[Oracle] +*** xref:api/vectordbs/pgvector.adoc[] +*** xref:api/vectordbs/pinecone.adoc[] +*** xref:api/vectordbs/qdrant.adoc[] +*** xref:api/vectordbs/redis.adoc[] +*** xref:api/vectordbs/hana.adoc[SAP Hana] +*** xref:api/vectordbs/typesense.adoc[] +*** xref:api/vectordbs/weaviate.adoc[] + +** xref:observability/index.adoc[] + +** xref:api/docker-compose.adoc[Development-time Services] + +** Testing +*** xref:api/testcontainers.adoc[Testcontainers] + +* Guides +** https://github.com/spring-ai-community/awesome-spring-ai[Awesome Spring AI] +** xref:api/chat/prompt-engineering-patterns.adoc[] +** xref:api/cloud-bindings.adoc[Deploying to the Cloud] -* xref:contribution-guidelines.adoc[Contribution Guidelines] +// * xref:contribution-guidelines.adoc[Contribution Guidelines] * xref:upgrade-notes.adoc[] diff --git a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/anthropic-chat.adoc b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/anthropic-chat.adoc index 262eac0ce60..66024221d3e 100644 --- a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/anthropic-chat.adoc +++ b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/anthropic-chat.adoc @@ -165,7 +165,7 @@ TIP: In addition to the model specific https://github.com/spring-projects/spring You can register custom Java Tools with the `AnthropicChatModel` and have the Anthropic Claude model intelligently choose to output a JSON object containing arguments to call one or many of the registered functions. This is a powerful technique to connect the LLM capabilities with external tools and APIs. 
-Read more about xref:api/chat/functions/anthropic-chat-functions.adoc[Anthropic Function Calling]. +Read more about xref:api/tools.adoc[Tool Calling]. == Multimodal diff --git a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/azure-openai-chat.adoc b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/azure-openai-chat.adoc index a41539ab193..84202df53e0 100644 --- a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/azure-openai-chat.adoc +++ b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/azure-openai-chat.adoc @@ -226,7 +226,7 @@ TIP: In addition to the model specific link:https://github.com/spring-projects/s You can register custom Java functions with the AzureOpenAiChatModel and have the model intelligently choose to output a JSON object containing arguments to call one or many of the registered functions. This is a powerful technique to connect the LLM capabilities with external tools and APIs. -Read more about xref:api/chat/functions/azure-open-ai-chat-functions.adoc[Azure OpenAI Function Calling]. +Read more about xref:api/tools.adoc[Tool Calling]. == Multimodal diff --git a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/anthropic-chat-functions.adoc b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/anthropic-chat-functions.adoc deleted file mode 100644 index 8fdeecedcd3..00000000000 --- a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/anthropic-chat-functions.adoc +++ /dev/null @@ -1,192 +0,0 @@ -= Anthropic Function Calling (Deprecated) - -WARNING: This page describes the previous version of the Function Calling API, which has been deprecated and marked for remove in the next release. The current version is available at xref:api/tools.adoc[Tool Calling]. See the xref:api/tools-migration.adoc[Migration Guide] for more information. - -TIP: Starting of Jul 1st, 2024, streaming function calling and Tool use is supported. 
- -You can register custom Java functions with the `AnthropicChatModel` and have the Anthropic models intelligently choose to output a JSON object containing arguments to call one or many of the registered functions. -This allows you to connect the LLM capabilities with external tools and APIs. -The `claude-3-5-sonnet-20241022`, `claude-3-opus`, `claude-3-sonnet` and `claude-3-haiku` link:https://docs.anthropic.com/claude/docs/tool-use#tool-use-best-practices-and-limitations[models are trained to detect when a function should be called] and to respond with JSON that adheres to the function signature. - -The Anthropic API does not call the function directly; instead, the model generates JSON that you can use to call the function in your code and return the result back to the model to complete the conversation. - -Spring AI provides flexible and user-friendly ways to register and call custom functions. -In general, the custom functions need to provide a function `name`, `description`, and the function call `signature` (as JSON schema) to let the model know what arguments the function expects. -The `description` helps the model to understand when to call the function. - -As a developer, you need to implement a function that takes the function call arguments sent from the AI model, and responds with the result back to the model. -Your function can in turn invoke other 3rd party services to provide the results. - -Spring AI makes this as easy as defining a `@Bean` definition that returns a `java.util.Function` and supplying the bean name as an option when invoking the `ChatModel`. - -Under the hood, Spring wraps your POJO (the function) with the appropriate adapter code that enables interaction with the AI Model, saving you from writing tedious boilerplate code. 
-The basis of the underlying infrastructure is the link:https://github.com/spring-projects/spring-ai/blob/main/spring-ai-model/src/main/java/org/springframework/ai/tool/ToolCallback.java[ToolCallback.java] interface and the companion Builder utility class to simplify the implementation and registration of Java callback functions. - -== How it works - -Suppose we want the AI model to respond with information that it does not have, for example the current temperature at a given location. - -We can provide the AI model with metadata about our own functions that it can use to retrieve that information as it processes your prompt. - -For example, if during the processing of a prompt, the AI Model determines that it needs additional information about the temperature in a given location, it will start a server side generated request/response interaction. The AI Model invokes a client side function. -The AI Model provides method invocation details as JSON and it is the responsibility of the client to execute that function and return the response. - -Spring AI greatly simplifies the code you need to write to support function invocation. -It brokers the function invocation conversation for you. -You can simply provide your function definition as a `@Bean` and then provide the bean name of the function in your prompt options. -You can also reference multiple function bean names in your prompt. - -== Quick Start - -Let's create a chatbot that answer questions by calling our own function. -To support the response of the chatbot, we will register our own function that takes a location and returns the current weather in that location. - -When the response to the prompt to the model needs to answer a question such as `"What’s the weather like in Boston?"` the AI model will invoke the client providing the location value as an argument to be passed to the function. This RPC-like data is passed as JSON. 
- -Our function can some SaaS based weather service API and returns the weather response back to the model to complete the conversation. -In this example we will use a simple implementation named `MockWeatherService` that hard codes the temperature for various locations. - -The following `MockWeatherService.java` represents the weather service API: - -[source,java] ----- -public class MockWeatherService implements Function { - - public enum Unit { C, F } - public record Request(String location, Unit unit) {} - public record Response(double temp, Unit unit) {} - - public Response apply(Request request) { - return new Response(30.0, Unit.C); - } -} ----- - -=== Registering Functions as Beans - -With the link:../anthropic-chat.html#_auto_configuration[AnthropicChatModel Auto-Configuration] you have multiple ways to register custom functions as beans in the Spring context. - -We start with describing the most POJO friendly options. - -==== Plain Java Functions - -In this approach you define `@Beans` in your application context as you would any other Spring managed object. - -Internally, Spring AI `ChatModel` will create an instance of a `ToolCallback` that adds the logic for it being invoked via the AI model. -The name of the `@Bean` is passed as a `ChatOption`. - - -[source,java] ----- -@Configuration -static class Config { - - @Bean - @Description("Get the weather in location") // function description - public Function weatherFunction1() { - return new MockWeatherService(); - } - ... -} ----- - -The `@Description` annotation is optional and provides a function description (2) that helps the model understand when to call the function. -It is an important property to set to help the AI model determine what client side function to invoke. 
- -Another option to provide the description of the function is to use the `@JsonClassDescription` annotation on the `MockWeatherService.Request` to provide the function description: - -[source,java] ----- - -@Configuration -static class Config { - - @Bean - public Function currentWeather3() { // (1) bean name as function name. - return new MockWeatherService(); - } - ... -} - -@JsonClassDescription("Get the weather in location") // (2) function description -public record Request(String location, Unit unit) {} ----- - -It is a best practice to annotate the request object with information such that the generated JSON schema of that function is as descriptive as possible to help the AI model pick the correct function to invoke. - -The link:https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-anthropic/src/test/java/org/springframework/ai/model/anthropic/autoconfigure/tool/FunctionCallWithFunctionBeanIT.java[FunctionCallWithFunctionBeanIT.java] demonstrates this approach. - - -==== ToolCallback - -Another way to register a function is to create a `ToolCallback` instance like this: - -[source,java] ----- -@Configuration -static class Config { - - @Bean - public FunctionToolCallback weatherFunctionInfo() { - - return FunctionToolCallback.builder("CurrentWeather", new MockWeatherService()) // (1) function name and instance - .description("Get the weather in location") // (2) function description - .inputType(MockWeatherService.Request.class) // (3) function signature - .build(); - } - ... -} ----- - -It wraps the 3rd party `MockWeatherService` function and registers it as a `CurrentWeather` function with the `AnthropicChatModel`. -It also provides a description (2) and input type (3) used to generate the JSON schema for the function call. - -NOTE: By default, the response converter does a JSON serialization of the Response object. 
- -NOTE: The `FunctionToolCallback` internally resolves the function call signature based on the `MockWeatherService.Request` class. - -=== Specifying functions in Chat Options - -To let the model know and call your `CurrentWeather` function you need to enable it in your prompt requests: - -[source,java] ----- -AnthropicChatModel chatModel = ... - -UserMessage userMessage = new UserMessage("What's the weather like in Paris?"); - -ChatResponse response = this.chatModel.call(new Prompt(List.of(this.userMessage), - AnthropicChatOptions.builder().toolNames("CurrentWeather").build())); // (1) Enable the function - -logger.info("Response: {}", response); ----- - -// NOTE: You can can have multiple functions registered in your `ChatModel` but only those enabled in the prompt request will be considered for the function calling. - -Above user question will trigger 3 calls to `CurrentWeather` function (one for each city) and produce the final response. - -=== Register/Call Functions with Prompt Options - -In addition to the auto-configuration you can register callback functions, dynamically, with your Prompt requests: - -[source,java] ----- -AnthropicChatModel chatModel = ... - -UserMessage userMessage = new UserMessage("What's the weather like in Paris?"); - -var promptOptions = AnthropicChatOptions.builder() - .toolCallbacks(List.of(FunctionToolCallback.builder("CurrentWeather", new MockWeatherService()) // (1) function name and instance - .description("Get the weather in location") // (2) function description - .inputType(MockWeatherService.Request.class) // (3) function signature - .build())) // function code - .build(); - -ChatResponse response = this.chatModel.call(new Prompt(List.of(this.userMessage), this.promptOptions)); ----- - -NOTE: The in-prompt registered functions are enabled by default for the duration of this request. - -This approach allows to dynamically chose different functions to be called based on the user input. 
- -The https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-anthropic/src/test/java/org/springframework/ai/model/anthropic/autoconfigure/tool/FunctionCallWithPromptFunctionIT.java[FunctionCallWithPromptFunctionIT.java] integration test provides a complete example of how to register a function with the `AnthropicChatModel` and use it in a prompt request. diff --git a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/azure-open-ai-chat-functions.adoc b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/azure-open-ai-chat-functions.adoc deleted file mode 100644 index 9f99e2f8b17..00000000000 --- a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/azure-open-ai-chat-functions.adoc +++ /dev/null @@ -1,198 +0,0 @@ -= Azure OpenAI Function Calling (Deprecated) - -WARNING: This page describes the previous version of the Function Calling API, which has been deprecated and marked for remove in the next release. The current version is available at xref:api/tools.adoc[Tool Calling]. See the xref:api/tools-migration.adoc[Migration Guide] for more information. - -Function calling lets developers create a description of a function in their code, then pass that description to a language model in a request. The response from the model includes the name of a function that matches the description and the arguments to call it with. - -You can register custom Java functions with the `AzureOpenAiChatModel` and have the model intelligently choose to output a JSON object containing arguments to call one or many of the registered functions. -This allows you to connect the LLM capabilities with external tools and APIs. -The Azure models are trained to detect when a function should be called and to respond with JSON that adheres to the function signature. 
- -The Azure OpenAI API does not call the function directly; instead, the model generates JSON that you can use to call the function in your code and return the result back to the model to complete the conversation. - -Spring AI provides flexible and user-friendly ways to register and call custom functions. -In general, the custom functions need to provide a function `name`, `description`, and the function call `signature` (as JSON schema) to let the model know what arguments the function expects. The `description` helps the model to understand when to call the function. - -As a developer, you need to implement a function that takes the function call arguments sent from the AI model, and responds with the result back to the model. -Your function can in turn invoke other 3rd party services to provide the results. - -Spring AI makes this as easy as defining a `@Bean` definition that returns a `java.util.Function` and supplying the bean name as an option when invoking the `ChatModel`. - -Under the hood, Spring wraps your POJO (the function) with the appropriate adapter code that enables interaction with the AI Model, saving you from writing tedious boilerplate code. -The basis of the underlying infrastructure is the link:https://github.com/spring-projects/spring-ai/blob/main/spring-ai-model/src/main/java/org/springframework/ai/tool/ToolCallback.java[ToolCallback.java] interface and the companion Builder utility class to simplify the implementation and registration of Java callback functions. - -== How it works - -Suppose we want the AI model to respond with information that it does not have, for example the current temperature at a given location. - -We can provide the AI model with metadata about our own functions that it can use to retrieve that information as it processes your prompt. 
- -For example, if during the processing of a prompt, the AI Model determines that it needs additional information about the temperature in a given location, it will start a server side generated request/response interaction. The AI Model invokes a client side function. -The AI Model provides method invocation details as JSON and it is the responsibility of the client to execute that function and return the response. - -Spring AI greatly simplifies the code you need to write to support function invocation. -It brokers the function invocation conversation for you. -You can simply provide your function definition as a `@Bean` and then provide the bean name of the function in your prompt options. -You can also reference multiple function bean names in your prompt. - -== Quick Start - -Let's create a chatbot that answer questions by calling our own function. -To support the response of the chatbot, we will register our own function that takes a location and returns the current weather in that location. - -When the response to the prompt to the model needs to answer a question such as `"What’s the weather like in Boston?"` the AI model will invoke the client providing the location value as an argument to be passed to the function. This RPC-like data is passed as JSON. - -Our function can have some SaaS based weather service API and returns the weather response back to the model to complete the conversation. In this example we will use a simple implementation named `MockWeatherService` that hard codes the temperature for various locations. 
- -The following `MockWeatherService.java` represents the weather service API: - -[source,java] ----- -public class MockWeatherService implements Function { - - public enum Unit { C, F } - public record Request(String location, Unit unit) {} - public record Response(double temp, Unit unit) {} - - public Response apply(Request request) { - return new Response(30.0, Unit.C); - } -} ----- - -=== Registering Functions as Beans - -With the link:../azure-openai-chat.html#_auto_configuration[AzureOpenAiChatModelAuto-Configuration] you have multiple ways to register custom functions as beans in the Spring context. - -We start with describing the most POJO friendly options. - -==== Plain Java Functions - -In this approach you define `@Beans` in your application context as you would any other Spring managed object. - -Internally, Spring AI `ChatModel` will create an instance of a `ToolCallback` instance that adds the logic for it being invoked via the AI model. -The name of the `@Bean` is passed as a `ChatOption`. - - -[source,java] ----- -@Configuration -static class Config { - - @Bean - @Description("Get the weather in location") // function description - public Function weatherFunction1() { - return new MockWeatherService(); - } - ... -} ----- - -The `@Description` annotation is optional and provides a function description (2) that helps the model understand when to call the function. It is an important property to set to help the AI model determine what client side function to invoke. - -Another option to provide the description of the function is to use the `@JsonClassDescription` annotation on the `MockWeatherService.Request` to provide the function description: - -[source,java] ----- - -@Configuration -static class Config { - - @Bean - public Function currentWeather3() { // (1) bean name as function name. - return new MockWeatherService(); - } - ... 
-} - -@JsonClassDescription("Get the weather in location") // (2) function description -public record Request(String location, Unit unit) {} ----- - -It is a best practice to annotate the request object with information such that the generated JSON schema of that function is as descriptive as possible to help the AI model pick the correct function to invoke. - -The link:https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-anthropic/src/test/java/org/springframework/ai/model/azure/openai/autoconfigure/tool/FunctionCallWithFunctionBeanIT.java[FunctionCallWithFunctionBeanIT.java] demonstrates this approach. - -==== ToolCallback Wrapper - -Another way to register a function is to create a `ToolCallback` instance like this: - -[source,java] ----- -@Configuration -static class Config { - - @Bean - public FunctionToolCallback weatherFunctionInfo() { - - return FunctionToolCallback.builder("CurrentWeather", new MockWeatherService()) // (1) function name - .description("Get the current weather in a given location") // (2) function description - .inputType(MockWeatherService.Request.class) // (3) function input type - .build(); - } - ... -} ----- - -It wraps the 3rd party `MockWeatherService` function and registers it as a `CurrentWeather` function with the `AzureAiChatModel` and provides a description (2). - -NOTE: The default response converter does a JSON serialization of the Response object. - -NOTE: The `FunctionToolCallback` internally resolves the function call signature based on the `MockWeatherService.Request` class and internally generates an JSON schema for the function call. - -=== Specifying functions in Chat Options - -To let the model know and call your `CurrentWeather` function you need to enable it in your prompt requests: - -[source,java] ----- -AzureOpenAiChatModel chatModel = ... 
- -UserMessage userMessage = new UserMessage("What's the weather like in San Francisco, Tokyo, and Paris?"); - -ChatResponse response = this.chatModel.call(new Prompt(List.of(this.userMessage), - AzureOpenAiChatOptions.builder().tools("CurrentWeather").build())); // (1) Enable the function - -logger.info("Response: {}", response); ----- - -// NOTE: You can have multiple functions registered in your `ChatModel` but only those enabled in the prompt request will be considered for the function calling. - -Above user question will trigger 3 calls to `CurrentWeather` function (one for each city) and the final response will be something like this: - ----- -Here is the current weather for the requested cities: -- San Francisco, CA: 30.0°C -- Tokyo, Japan: 10.0°C -- Paris, France: 15.0°C ----- - -The link:https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-azure-openai/src/test/java/org/springframework/ai/model/azure/openai/autoconfigure/tool/FunctionCallWithFunctionWrapperIT.java[FunctionCallWithFunctionWrapperIT.java] test demo this approach. - - -=== Register/Call Functions with Prompt Options - -In addition to the auto-configuration you can register callback functions, dynamically, with your Prompt requests: - -[source,java] ----- -AzureOpenAiChatModel chatModel = ... - -UserMessage userMessage = new UserMessage("What's the weather like in San Francisco, Tokyo, and Paris? 
Use Multi-turn function calling."); - -var promptOptions = AzureOpenAiChatOptions.builder() - .toolCallbacks(List.of(FunctionToolCallback.builder("CurrentWeather", new MockWeatherService()) // (1) function name and instance - .description("Get the current weather in a given location") // (2) function description - .inputType(MockWeatherService.Request.class) // (3) function input type - .build())) - .build(); - -ChatResponse response = this.chatModel.call(new Prompt(List.of(this.userMessage), this.promptOptions)); ----- - -NOTE: The in-prompt registered functions are enabled by default for the duration of this request. - -This approach allows to dynamically chose different functions to be called based on the user input. - -The https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-azure-openai/src/test/java/org/springframework/ai/model/azure/openai/autoconfigure/tool/FunctionCallWithPromptFunctionIT.java[FunctionCallWithPromptFunctionIT.java] integration test provides a complete example of how to register a function with the `AzureOpenAiChatModel` and use it in a prompt request. - diff --git a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/minimax-chat-functions.adoc b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/minimax-chat-functions.adoc deleted file mode 100644 index 39020c09536..00000000000 --- a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/minimax-chat-functions.adoc +++ /dev/null @@ -1,201 +0,0 @@ -= Function Calling - -You can register custom Java functions with the `MiniMaxChatModel` and have the MiniMax model intelligently choose to output a JSON object containing arguments to call one or many of the registered functions. -This allows you to connect the LLM capabilities with external tools and APIs. 
-The MiniMax models are trained to detect when a function should be called and to respond with JSON that adheres to the function signature. - -The MiniMax API does not call the function directly; instead, the model generates JSON that you can use to call the function in your code and return the result back to the model to complete the conversation. - -Spring AI provides flexible and user-friendly ways to register and call custom functions. -In general, the custom functions need to provide a function `name`, `description`, and the function call `signature` (as JSON schema) to let the model know what arguments the function expects. The `description` helps the model to understand when to call the function. - -As a developer, you need to implement a function that takes the function call arguments sent from the AI model, and responds with the result back to the model. Your function can in turn invoke other 3rd party services to provide the results. - -Spring AI makes this as easy as defining a `@Bean` definition that returns a `java.util.Function` and supplying the bean name as an option when invoking the `ChatModel`. - -Under the hood, Spring wraps your POJO (the function) with the appropriate adapter code that enables interaction with the AI Model, saving you from writing tedious boilerplate code. -The basis of the underlying infrastructure is the link:https://github.com/spring-projects/spring-ai/blob/main/spring-ai-model/src/main/java/org/springframework/ai/tool/ToolCallback.java[ToolCallback.java] interface and the companion Builder utility class to simplify the implementation and registration of Java callback functions. - -// Additionally, the Auto-Configuration provides a way to auto-register any Function beans definition as function calling candidates in the `ChatModel`. - - -== How it works - -Suppose we want the AI model to respond with information that it does not have, for example the current temperature at a given location. 
- -We can provide the AI model with metadata about our own functions that it can use to retrieve that information as it processes your prompt. - -For example, if during the processing of a prompt, the AI Model determines that it needs additional information about the temperature in a given location, it will start a server side generated request/response interaction. The AI Model invokes a client side function. -The AI Model provides method invocation details as JSON and it is the responsibility of the client to execute that function and return the response. - -The model-client interaction is illustrated in the <> diagram. - -Spring AI greatly simplifies code you need to write to support function invocation. -It brokers the function invocation conversation for you. -You can simply provide your function definition as a `@Bean` and then provide the bean name of the function in your prompt options. -You can also reference multiple function bean names in your prompt. - -== Quick Start - -Let's create a chatbot that answer questions by calling our own function. -To support the response of the chatbot, we will register our own function that takes a location and returns the current weather in that location. - -When the response to the prompt to the model needs to answer a question such as `"What’s the weather like in Boston?"` the AI model will invoke the client providing the location value as an argument to be passed to the function. This RPC-like data is passed as JSON. - -Our function calls some SaaS based weather service API and returns the weather response back to the model to complete the conversation. In this example we will use a simple implementation named `MockWeatherService` that hard codes the temperature for various locations. 
- -The following `MockWeatherService.java` represents the weather service API: - -[source,java] ----- -public class MockWeatherService implements Function { - - public enum Unit { C, F } - public record Request(String location, Unit unit) {} - public record Response(double temp, Unit unit) {} - - public Response apply(Request request) { - return new Response(30.0, Unit.C); - } -} ----- - -=== Registering Functions as Beans - -With the link:../minimax-chat.html#_auto_configuration[MiniMaxChatModel Auto-Configuration] you have multiple ways to register custom functions as beans in the Spring context. - -We start with describing the most POJO friendly options. - - -==== Plain Java Functions - -In this approach you define `@Beans` in your application context as you would any other Spring managed object. - -Internally, Spring AI `ChatModel` will create an instance of a `ToolCallback` instance that adds the logic for it being invoked via the AI model. -The name of the `@Bean` is passed as a `ChatOption`. - - -[source,java] ----- -@Configuration -static class Config { - - @Bean - @Description("Get the weather in location") // function description - public Function weatherFunction1() { - return new MockWeatherService(); - } - ... -} ----- - -The `@Description` annotation is optional and provides a function description (2) that helps the model to understand when to call the function. It is an important property to set to help the AI model determine what client side function to invoke. - -Another option to provide the description of the function is to use the `@JsonClassDescription` annotation on the `MockWeatherService.Request` to provide the function description: - -[source,java] ----- - -@Configuration -static class Config { - - @Bean - public Function currentWeather3() { // (1) bean name as function name. - return new MockWeatherService(); - } - ... 
-} - -@JsonClassDescription("Get the weather in location") // (2) function description -public record Request(String location, Unit unit) {} ----- - -It is a best practice to annotate the request object with information such that the generated JSON schema of that function is as descriptive as possible to help the AI model pick the correct function to invoke. - -The link:https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-minimax/src/test/java/org/springframework/ai/model/minimax/autoconfigure/FunctionCallbackWithPlainFunctionBeanIT.java[FunctionCallbackWithPlainFunctionBeanIT.java] demonstrates this approach. - - -==== ToolCallback Wrapper - -Another way to register a function is to create a `ToolCallback` instance like this: - -[source,java] ----- -@Configuration -static class Config { - - @Bean - public FunctionToolCallback weatherFunctionInfo() { - - return FunctionToolCallback.builder() - .function("CurrentWeather", new MockWeatherService()) // (1) function name and instance - .description("Get the weather in location") // (2) function description - .inputType(MockWeatherService.Request.class) // (3) function signature - .build(); - } - ... -} ----- - -It wraps the 3rd party `MockWeatherService` function and registers it as a `CurrentWeather` function with the `MiniMaxChatModel`. -It also provides a description (2) and the function signature (3) to let the model know what arguments the function expects. - -NOTE: By default, the response converter does a JSON serialization of the Response object. - -NOTE: The `FunctionToolCallback` internally resolves the function call signature based on the `MockWeatherService.Request` class. - -=== Specifying functions in Chat Options - -To let the model know and call your `CurrentWeather` function you need to enable it in your prompt requests: - -[source,java] ----- -MiniMaxChatModel chatModel = ... 
- -UserMessage userMessage = new UserMessage("What's the weather like in San Francisco, Tokyo, and Paris?"); - -ChatResponse response = this.chatModel.call(new Prompt(List.of(this.userMessage), - MiniMaxChatOptions.builder().function("CurrentWeather").build())); // (1) Enable the function - -logger.info("Response: {}", response); ----- - -// NOTE: You can have multiple functions registered in your `ChatModel` but only those enabled in the prompt request will be considered for the function calling. - -The above user question will trigger 3 calls to the `CurrentWeather` function (one for each city) and the final response will be something like this: - ----- -Here is the current weather for the requested cities: -- San Francisco, CA: 30.0°C -- Tokyo, Japan: 10.0°C -- Paris, France: 15.0°C ----- - -The link:https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-minimax/src/test/java/org/springframework/ai/model/minimax/autoconfigure/tool/MiniMaxFunctionCallbackIT.java[MiniMaxFunctionCallbackIT.java] test demonstrates this approach. - - -=== Register/Call Functions with Prompt Options - -In addition to the auto-configuration you can register callback functions, dynamically, with your Prompt requests: - -[source,java] ----- -MiniMaxChatModel chatModel = ... 
- -UserMessage userMessage = new UserMessage("What's the weather like in San Francisco, Tokyo, and Paris?"); - -var promptOptions = MiniMaxChatOptions.builder() - .toolCallbacks(List.of(FunctionToolCallback.builder("CurrentWeather", new MockWeatherService()) // (1) function name and instance - .description("Get the weather in location") // (2) function description - .inputType(MockWeatherService.Request.class) // (3) function signature - .build())) // function code - .build(); - -ChatResponse response = this.chatModel.call(new Prompt(List.of(this.userMessage), this.promptOptions)); ----- - -NOTE: The in-prompt registered functions are enabled by default for the duration of this request. - -This approach allows to dynamically chose different functions to be called based on the user input. - -The https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-minimax/src/test/java/org/springframework/ai/model/minimax/autoconfigure/FunctionCallbackInPromptIT.java[FunctionCallbackInPromptIT.java] integration test provides a complete example of how to register a function with the `MiniMaxChatModel` and use it in a prompt request. diff --git a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/mistralai-chat-functions.adoc b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/mistralai-chat-functions.adoc deleted file mode 100644 index d51566f5192..00000000000 --- a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/mistralai-chat-functions.adoc +++ /dev/null @@ -1,203 +0,0 @@ -= Mistral AI Function Calling (Deprecated) - -WARNING: This page describes the previous version of the Function Calling API, which has been deprecated and marked for remove in the next release. The current version is available at xref:api/tools.adoc[Tool Calling]. See the xref:api/tools-migration.adoc[Migration Guide] for more information. 
- -You can register custom Java functions with the `MistralAiChatModel` and have the Mistral AI models intelligently choose to output a JSON object containing arguments to call one or many of the registered functions. -This allows you to connect the LLM capabilities with external tools and APIs. -The `open-mixtral-8x22b`, `mistral-small-latest`, and `mistral-large-latest` models are trained to detect when a function should be called and to respond with JSON that adheres to the function signature. - -The Mistral AI API does not call the function directly; instead, the model generates JSON that you can use to call the function in your code and return the result back to the model to complete the conversation. - -Spring AI provides flexible and user-friendly ways to register and call custom functions. -In general, the custom functions need to provide a function `name`, `description`, and the function call `signature` (as JSON schema) to let the model know what arguments the function expects. -The `description` helps the model to understand when to call the function. - -As a developer, you need to implement a function that takes the function call arguments sent from the AI model, and responds with the result back to the model. -Your function can in turn invoke other 3rd party services to provide the results. - -Spring AI makes this as easy as defining a `@Bean` definition that returns a `java.util.Function` and supplying the bean name as an option when invoking the `ChatModel`. - -Under the hood, Spring wraps your POJO (the function) with the appropriate adapter code that enables interaction with the AI Model, saving you from writing tedious boilerplate code. 
-The basis of the underlying infrastructure is the link:https://github.com/spring-projects/spring-ai/blob/main/spring-ai-model/src/main/java/org/springframework/ai/tool/ToolCallback.java[ToolCallback.java] interface and the companion Builder utility class to simplify the implementation and registration of Java callback functions. - -== How it works - -Suppose we want the AI model to respond with information that it does not have, for example, the current temperature at a given location. - -We can provide the AI model with metadata about our own functions that it can use to retrieve that information as it processes your prompt. - -For example, if during the processing of a prompt, the AI Model determines that it needs additional information about the temperature in a given location, it will start a server-side generated request/response interaction. The AI Model invokes a client side function. -The AI Model provides method invocation details as JSON, and it is the responsibility of the client to execute that function and return the response. - -Spring AI greatly simplifies the code you need to write to support function invocation. -It brokers the function invocation conversation for you. -You can simply provide your function definition as a `@Bean` and then provide the bean name of the function in your prompt options. -You can also reference multiple function bean names in your prompt. - -== Quick Start - -Let's create a chatbot that answer questions by calling our own function. -To support the response of the chatbot, we will register our own function that takes a location and returns the current weather in that location. - -When the model needs to answer a question such as `"What’s the weather like in Boston?"` the AI model will invoke the client providing the location value as an argument to be passed to the function. This RPC-like data is passed as JSON. 
- -Our function calls some SaaS-based weather service API and returns the weather response back to the model to complete the conversation. -In this example, we will use a simple implementation named `MockWeatherService` that hard-codes the temperature for various locations. - -The following `MockWeatherService.java` represents the weather service API: - -[source,java] ----- -public class MockWeatherService implements Function { - - public enum Unit { C, F } - public record Request(String location, Unit unit) {} - public record Response(double temp, Unit unit) {} - - public Response apply(Request request) { - return new Response(30.0, Unit.C); - } -} ----- - -=== Registering Functions as Beans - -With the link:../mistralai-chat.html#_auto_configuration[MistralAiChatModel Auto-Configuration] you have multiple ways to register custom functions as beans in the Spring context. - -We start by describing the most POJO-friendly options. - -==== Plain Java Functions - -In this approach, you define a `@Bean` in your application context as you would any other Spring managed object. - -Internally, Spring AI `ChatModel` will create an instance of a `ToolCallback` that adds the logic for it being invoked via the AI model. -The name of the `@Bean` is passed as a `ChatOption`. - -[source,java] ----- -@Configuration -static class Config { - - @Bean - @Description("Get the weather in location") // function description - public Function currentWeather() { - return new MockWeatherService(); - } - -} ----- - -The `@Description` annotation is optional and provides a function description that helps the model understand when to call the function. -It is an important property to set to help the AI model determine what client side function to invoke. 
- -Another option for providing the description of the function is to use the `@JsonClassDescription` annotation on the `MockWeatherService.Request`: - -[source,java] ----- -@Configuration -static class Config { - - @Bean - public Function currentWeather() { // bean name as function name - return new MockWeatherService(); - } - -} - -@JsonClassDescription("Get the weather in location") // // function description -public record Request(String location, Unit unit) {} ----- - -It is a best practice to annotate the request object with information such that the generated JSON schema of that function is as descriptive as possible to help the AI model pick the correct function to invoke. - -The link:https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-mistral-ai/src/test/java/org/springframework/ai/model/mistralai/autoconfigure/tool/PaymentStatusBeanIT.java[PaymentStatusBeanIT.java] demonstrates this approach. - -TIP: The Mistral AI link:https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-mistral-ai/src/test/java/org/springframework/ai/model/mistralai/autoconfigure/tool/PaymentStatusBeanOpenAiIT.java[PaymentStatusBeanOpenAiIT] implements the same function using the OpenAI API. -Mistral AI is almost identical to OpenAI in this regard. 
- -==== ToolCallback Wrapper - -Another way to register a function is to create a `FunctionToolCallback` like this: - -[source,java] ----- -@Configuration -static class Config { - - @Bean - public FunctionToolCallback weatherFunctionInfo() { - - return FunctionToolCallback.builder("CurrentWeather", new MockWeatherService()) // (1) function name and instance - .description("Get the weather in location") // (2) function description - .inputType(MockWeatherService.Request.class) // (3) function signature - .build(); - } - -} ----- - -It wraps the 3rd party `MockWeatherService` function and registers it as a `CurrentWeather` function with the `MistralAiChatModel`. -It also provides a description (2) and the function signature (3) to let the model know what arguments the function expects. - -NOTE: By default, the response converter performs a JSON serialization of the Response object. - -NOTE: The `FunctionToolCallback` internally resolves the function call signature based on the `MockWeatherService.Request` class. - -=== Specifying functions in Chat Options - -To let the model know and call your `CurrentWeather` function you need to enable it in your prompt requests: - -[source,java] ----- -MistralAiChatModel chatModel = ... - -UserMessage userMessage = new UserMessage("What's the weather like in Paris?"); - -ChatResponse response = this.chatModel.call(new Prompt(this.userMessage, - MistralAiChatOptions.builder().tools("CurrentWeather").build())); // Enable the function - -logger.info("Response: {}", response); ----- - -// NOTE: You can have multiple functions registered in your `ChatModel` but only those enabled in the prompt request will be considered for the function calling. 
- -The above user question will trigger a call to the `CurrentWeather` function, and the model will use the returned weather data to compose its final response. - -=== Register/Call Functions with Prompt Options - -In addition to the auto-configuration, you can register callback functions, dynamically, with your `Prompt` requests: - -[source,java] ----- -MistralAiChatModel chatModel = ... - -UserMessage userMessage = new UserMessage("What's the weather like in Paris?"); - -var promptOptions = MistralAiChatOptions.builder() - .toolCallbacks(List.of(FunctionToolCallback.builder("CurrentWeather", new MockWeatherService()) // (1) function name and instance - .description("Get the weather in location") // (2) function description - .inputType(MockWeatherService.Request.class) // (3) function signature - .build())) // function code - .build(); - -ChatResponse response = this.chatModel.call(new Prompt(this.userMessage, this.promptOptions)); ----- - -NOTE: The in-prompt registered functions are enabled by default for the duration of this request. - -This approach allows you to dynamically choose different functions to be called based on the user input. - -The https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-mistral-ai/src/test/java/org/springframework/ai/model/mistralai/autoconfigure/tool/PaymentStatusPromptIT.java[PaymentStatusPromptIT.java] integration test provides a complete example of how to register a function with the `MistralAiChatModel` and use it in a prompt request. 
- -== Appendices - -=== https://spring.io/blog/2024/03/06/function-calling-in-java-and-spring-ai-using-the-latest-mistral-ai-api[(Blog) Function Calling in Java and Spring AI using the latest Mistral AI API] - -=== Mistral AI API Function Calling Flow - -The following diagram illustrates the flow of the Mistral AI low-level API for link:https://docs.mistral.ai/guides/function-calling[Function Calling]: - -image:mistral-ai-function-calling-flow.jpg[title="Mistral AI API Function Calling Flow", width=800, link=https://docs.mistral.ai/guides/function-calling] - -The link:https://github.com/spring-projects/spring-ai/blob/main/models/spring-ai-mistral-ai/src/test/java/org/springframework/ai/mistralai/api/tool/PaymentStatusFunctionCallingIT.java[PaymentStatusFunctionCallingIT.java] provides a complete example on how to use the Mistral AI API function calling. -It is based on the https://docs.mistral.ai/guides/function-calling[Mistral AI Function Calling tutorial]. diff --git a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/moonshot-chat-functions.adoc b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/moonshot-chat-functions.adoc deleted file mode 100644 index 5aaa94ca6cf..00000000000 --- a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/moonshot-chat-functions.adoc +++ /dev/null @@ -1,5 +0,0 @@ -= Function Calling - -This functionality has been moved to the Spring AI Community repository. - -Please visit https://github.com/spring-ai-community/moonshot for the latest version. 
\ No newline at end of file diff --git a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/ollama-chat-functions.adoc b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/ollama-chat-functions.adoc deleted file mode 100644 index 8f11cf7a1fb..00000000000 --- a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/ollama-chat-functions.adoc +++ /dev/null @@ -1,217 +0,0 @@ -= Ollama Function Calling (Deprecated) - -WARNING: This page describes the previous version of the Function Calling API, which has been deprecated and marked for remove in the next release. The current version is available at xref:api/tools.adoc[Tool Calling]. See the xref:api/tools-migration.adoc[Migration Guide] for more information. - -TIP: You need Ollama 0.2.8 or newer to use the functional calling capabilities and Ollama 0.4.6 or newer to use them in streaming mode. - -TIP: You need https://ollama.com/search?c=tools[Models] pre-trained for Tools support. -Usually, such models are tagged with a `Tools` tag. - - -You can register custom Java functions with the `OllamaChatModel` and have the Ollama deployed model intelligently choose to output a JSON object containing arguments to call one or many of the registered functions. -This allows you to connect the LLM capabilities with external tools and APIs. -The Ollama models tagged with the `Tools` label (see https://ollama.com/search?c=tools[full list]) are trained to detect when a function should be called and to respond with JSON that adheres to the function signature. - -The Ollama API does not call the function directly; instead, the model generates JSON that you can use to call the function in your code and return the result back to the model to complete the conversation. -Spring AI provides flexible and user-friendly ways to register and call custom functions. 
-In general, the custom functions need to provide a function `name`, `description`, and the function call `signature` (as JSON schema) to let the model know what arguments the function expects. -The `description` helps the model to understand when to call the function. - -As a developer, you need to implement a function that takes the function call arguments sent from the AI model, and responds with the result back to the model. -Your function can in turn invoke other 3rd party services to provide the results. - -Spring AI makes this as easy as defining a `@Bean` definition that returns a `java.util.Function` and supplying the bean name as an option when invoking the `ChatModel`. - -Under the hood, Spring wraps your POJO (the function) with the appropriate adapter code that enables interaction with the AI Model, saving you from writing tedious boilerplate code. -The basis of the underlying infrastructure is the link:https://github.com/spring-projects/spring-ai/blob/main/spring-ai-model/src/main/java/org/springframework/ai/tool/ToolCallback.java[ToolCallback.java] interface and the companion Builder utility class to simplify the implementation and registration of Java callback functions. - -== How it works - -Suppose we want the AI model to respond with information that it does not have, for example, the current temperature at a given location. - -We can provide the AI model with metadata about our own functions that it can use to retrieve that information as it processes your prompt. - -For example, if during the processing of a prompt, the AI Model determines that it needs additional information about the temperature in a given location, it will start a server-side generated request/response interaction. The AI Model invokes a client side function. -The AI Model provides method invocation details as JSON, and it is the responsibility of the client to execute that function and return the response. - -The model-client interaction is illustrated in the <> diagram. 
- -Spring AI greatly simplifies the code you need to write to support function invocation. -It brokers the function invocation conversation for you. -You can simply provide your function definition as a `@Bean` and then provide the bean name of the function in your prompt options. -You can also reference multiple function bean names in your prompt. - -== Quick Start - -Let's create a chatbot that answer questions by calling our own function. -To support the response of the chatbot, we will register our own function that takes a location and returns the current weather in that location. - -When the model needs to answer a question such as `"What’s the weather like in Boston?"` the AI model will invoke the client providing -the location value as an argument to be passed to the function. This RPC-like data is passed as JSON. - -Our function calls some SaaS based weather service API and returns the weather response back to the model to complete the conversation. -In this example, we will use a simple implementation named `MockWeatherService` that hard-codes the temperature for various locations. - -The following `MockWeatherService.java` represents the weather service API: - -[source,java] ----- -public class MockWeatherService implements Function { - - public enum Unit { C, F } - public record Request(String location, Unit unit) {} - public record Response(double temp, Unit unit) {} - - public Response apply(Request request) { - return new Response(30.0, Unit.C); - } -} ----- - -=== Registering Functions as Beans - -With the link:../ollama-chat.html#_auto_configuration[OllamaChatModel Auto-Configuration] you have multiple ways to register custom functions as beans in the Spring context. - -We start by describing the most POJO-friendly options. - -==== Plain Java Functions - -In this approach, you define a `@Bean` in your application context as you would any other Spring managed object. 
- -Internally, Spring AI `ChatModel` will create an instance of a `ToolCallback` that adds the logic for it being invoked via the AI model. -The name of the `@Bean` is passed as a `ChatOption`. - -[source,java] ----- -@Configuration -static class Config { - - @Bean - @Description("Get the weather in location") // function description - public Function currentWeather() { - return new MockWeatherService(); - } - -} ----- - -The `@Description` annotation is optional and provides a function description that helps the model understand when to call the function. It is an important property to set to help the AI model determine what client side function to invoke. - -Another option for providing the description of the function is to use the `@JsonClassDescription` annotation on the `MockWeatherService.Request`: - -[source,java] ----- -@Configuration -static class Config { - - @Bean - public Function currentWeather() { // bean name as function name - return new MockWeatherService(); - } - -} - -@JsonClassDescription("Get the weather in location") // // function description -public record Request(String location, Unit unit) {} ----- - -It is a best practice to annotate the request object with information such that the generated JSON schema of that function is as descriptive as possible to help the AI model pick the correct function to invoke. - -==== ToolCallback - -Another way to register a function is to create a `ToolCallback` like this: - -[source,java] ----- -@Configuration -static class Config { - - @Bean - public FunctionToolCallback weatherFunctionInfo() { - - return FunctionToolCallback.builder("CurrentWeather", new MockWeatherService()) // (1) function name - .description("Get the weather in location") // (2) function description - .inputType(MockWeatherService.Request.class) // (3) function signature - .build(); - } - -} ----- - -It wraps the 3rd party `MockWeatherService` function and registers it as a `CurrentWeather` function with the `OllamaChatModel`. 
-It also provides a description (2) and the function signature (3) to let the model know what arguments the function expects. - -NOTE: By default, the response converter performs a JSON serialization of the Response object. - -NOTE: The `FunctionToolCallback` internally resolves the function call signature based on the `MockWeatherService.Request` class. - -=== Specifying functions in Chat Options - -To let the model know and call your `CurrentWeather` function you need to enable it in your prompt requests: - -[source,java] ----- -OllamaChatModel chatModel = ... - -UserMessage userMessage = new UserMessage("What's the weather like in San Francisco, Tokyo, and Paris?"); - -ChatResponse response = this.chatModel.call(new Prompt(this.userMessage, - OllamaOptions.builder().tools("CurrentWeather").build())); // Enable the function - -logger.info("Response: {}", response); ----- - -// NOTE: You can have multiple functions registered in your `ChatModel` but only those enabled in the prompt request will be considered for the function calling. - -The above user question will trigger 3 calls to the `CurrentWeather` function (one for each city) and the final response will be something like this: - ----- -Here is the current weather for the requested cities: -- San Francisco, CA: 30.0°C -- Tokyo, Japan: 10.0°C -- Paris, France: 15.0°C ----- - -The link:https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-ollama/src/test/java/org/springframework/ai/model/ollama/autoconfigure/tool/OllamaFunctionCallbackIT.java[OllamaFunctionCallbackIT.java] test demo this approach. - -=== Register/Call Functions with Prompt Options - -In addition to the auto-configuration, you can register callback functions, dynamically, with your `Prompt` requests: - -[source,java] ----- -OllamaChatModel chatModel = ... 
- -UserMessage userMessage = new UserMessage("What's the weather like in San Francisco, Tokyo, and Paris?"); - -var promptOptions = OllamaOptions.builder() - .toolCallbacks(List.of(FunctionToolCallback.builder("CurrentWeather", new MockWeatherService()) // (1) function name and instance - .description("Get the weather in location") // (2) function description - .inputType(MockWeatherService.Request.class) // (3) function signature - .build())) // function code - .build(); - -ChatResponse response = this.chatModel.call(new Prompt(this.userMessage, this.promptOptions)); ----- - -NOTE: The in-prompt registered functions are enabled by default for the duration of this request. - -This approach allows you to dynamically choose different functions to be called based on the user input. - -The link:https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-ollama/src/test/java/org/springframework/ai/model/ollama/autoconfigure/tool/auto-configurations/models/spring-ai-autoconfigure-model-ollama/src/test/java/org/springframework/ai/model/ollama/autoconfigure/tool/FunctionCallbackInPromptIT.java[FunctionCallbackInPromptIT.java] integration test provides a complete example of how to register a function with the `OllamaChatModel` and use it in a prompt request. 
- -== Appendices: - -=== Spring AI Function Calling Flow [[spring-ai-function-calling-flow]] - -The following diagram illustrates the flow of the `OllamaChatModel` Function Calling: - -image:ollama-chatmodel-function-call.jpg[width=800, title="OllamaChatModel Function Calling Flow"] - -=== OllamaAPI Function Calling Flow - -The following diagram illustrates the flow of the Ollama API: - -image:ollama-function-calling-flow.jpg[title="Ollama API Function Calling Flow", width=800] - -The link:https://github.com/spring-projects/spring-ai/blob/main/models/spring-ai-ollama/src/test/java/org/springframework/ai/ollama/api/tool/OllamaApiToolFunctionCallIT.java[OllamaApiToolFunctionCallIT.java] provides a complete example on how to use the Ollama API function calling. diff --git a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/openai-chat-functions.adoc b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/openai-chat-functions.adoc deleted file mode 100644 index 7beee879308..00000000000 --- a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/openai-chat-functions.adoc +++ /dev/null @@ -1,268 +0,0 @@ -= Function Calling - -WARNING: This page describes the previous version of the Function Calling API, which has been deprecated and marked for remove in the next release. The current version is available at xref:api/tools.adoc[Tool Calling]. See the xref:api/tools-migration.adoc[Migration Guide] for more information. - -You can register custom Java functions with the `OpenAiChatModel` and have the OpenAI model intelligently choose to output a JSON object containing arguments to call one or many of the registered functions. -This allows you to connect the LLM capabilities with external tools and APIs. -The OpenAI models are trained to detect when a function should be called and to respond with JSON that adheres to the function signature. 
- -The OpenAI API does not call the function directly; instead, the model generates JSON that you can use to call the function in your code and return the result back to the model to complete the conversation. - -Spring AI provides flexible and user-friendly ways to register and call custom functions. -In general, the custom functions need to provide a function `name`, `description`, and the function call `signature` (as JSON schema) to let the model know what arguments the function expects. The `description` helps the model to understand when to call the function. - -As a developer, you need to implement a function that takes the function call arguments sent from the AI model, and responds with the result back to the model. Your function can in turn invoke other 3rd party services to provide the results. - -Spring AI makes this as easy as defining a `@Bean` definition that returns a `java.util.Function` and supplying the bean name as an option when invoking the `ChatModel`. - -Under the hood, Spring wraps your POJO (the function) with the appropriate adapter code that enables interaction with the AI Model, saving you from writing tedious boilerplate code. -The basis of the underlying infrastructure is the link:https://github.com/spring-projects/spring-ai/blob/main/spring-ai-model/src/main/java/org/springframework/ai/tool/ToolCallback.java[ToolCallback.java] interface and the companion Builder utility class to simplify the implementation and registration of Java callback functions. - -// Additionally, the Auto-Configuration provides a way to auto-register any Function beans definition as function calling candidates in the `ChatModel`. - -== How it works - -Suppose we want the AI model to respond with information that it does not have, for example, the current temperature at a given location. - -We can provide the AI model with metadata about our own functions that it can use to retrieve that information as it processes your prompt. 
- -For example, if during the processing of a prompt, the AI Model determines that it needs additional information about the temperature in a given location, it will start a server-side generated request/response interaction. The AI Model invokes a client side function. -The AI Model provides method invocation details as JSON, and it is the responsibility of the client to execute that function and return the response. - -The model-client interaction is illustrated in the <> diagram. - -Spring AI greatly simplifies the code you need to write to support function invocation. -It brokers the function invocation conversation for you. -You can simply provide your function definition as a `@Bean` and then provide the bean name of the function in your prompt options. -You can also reference multiple function bean names in your prompt. - -== Quick Start - -Let's create a chatbot that answer questions by calling our own function. -To support the response of the chatbot, we will register our own function that takes a location and returns the current weather in that location. - -When the model needs to answer a question such as `"What’s the weather like in Boston?"` the AI model will invoke the client providing the location value as an argument to be passed to the function. This RPC-like data is passed as JSON. - -Our function calls some SaaS-based weather service API and returns the weather response back to the model to complete the conversation. In this example, we will use a simple implementation named `MockWeatherService` that hard-codes the temperature for various locations. 
- -The following `MockWeatherService.java` represents the weather service API: - -[source,java] ----- -public class MockWeatherService implements Function { - - public enum Unit { C, F } - public record Request(String location, Unit unit) {} - public record Response(double temp, Unit unit) {} - - public Response apply(Request request) { - return new Response(30.0, Unit.C); - } -} ----- - -=== Registering Functions as Beans - -With the link:../openai-chat.html#_auto_configuration[OpenAiChatModel Auto-Configuration] you have multiple ways to register custom functions as beans in the Spring context. - -We start by describing the most POJO-friendly options. - -==== Plain Java Functions - -In this approach, you define a `@Bean` in your application context as you would any other Spring managed object. - -Internally, Spring AI `ChatModel` will create an instance of a `ToolCallback` that adds the logic for it being invoked via the AI model. -The name of the `@Bean` is passed as a `ChatOption`. - -[source,java] ----- -@Configuration -static class Config { - - @Bean - @Description("Get the weather in location") // function description - public Function currentWeather() { - return new MockWeatherService(); - } - -} ----- - -The `@Description` annotation is optional and provides a function description that helps the model understand when to call the function. It is an important property to set to help the AI model determine what client side function to invoke. 
- -Another option for providing the description of the function is to use the `@JsonClassDescription` annotation on the `MockWeatherService.Request`: - -[source,java] ----- -@Configuration -static class Config { - - @Bean - public Function currentWeather() { // bean name as function name - return new MockWeatherService(); - } - -} - -@JsonClassDescription("Get the weather in location") // // function description -public record Request(String location, Unit unit) {} ----- - -It is a best practice to annotate the request object with information such that the generated JSON schema of that function is as descriptive as possible to help the AI model pick the correct function to invoke. - -The link:https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-openai/src/test/java/org/springframework/ai/model/openai/autoconfigure/tool/FunctionCallbackWithPlainFunctionBeanIT.java[FunctionCallbackWithPlainFunctionBeanIT.java] demonstrates this approach. - -==== FunctionToolCallback Wrapper - -Another way to register a function is to create a `toolCallback` like this: - -[source,java] ----- -@Configuration -static class Config { - - @Bean - public FunctionToolCallback weatherFunctionInfo() { - - return FunctionToolCallback.builder("CurrentWeather", new MockWeatherService()) // (1) function name and instance - .description("Get the weather in location") // (2) function description - .inputType(MockWeatherService.Request.class) // (3) function input type - .build(); - } - -} ----- - -It wraps the 3rd party `MockWeatherService` function and registers it as a `CurrentWeather` function with the `OpenAiChatModel`. -It also provides a description (2) and an input type (3) used to generate the JSON schema for the function call. - -NOTE: By default, the response converter performs a JSON serialization of the Response object. 
- -NOTE: The `FunctionToolCallback` internally resolves the function call signature based on the `MockWeatherService.Request` class. - -=== Specifying functions in Chat Options - -To let the model know and call your `CurrentWeather` function you need to enable it in your prompt requests: - -[source,java] ----- -OpenAiChatModel chatModel = ... - -UserMessage userMessage = new UserMessage("What's the weather like in San Francisco, Tokyo, and Paris?"); - -ChatResponse response = this.chatModel.call(new Prompt(this.userMessage, - OpenAiChatOptions.builder().tools("CurrentWeather").build())); // Enable the function - -logger.info("Response: {}", response); ----- - -// NOTE: You can have multiple functions registered in your `ChatModel` but only those enabled in the prompt request will be considered for the function calling. - -The above user question will trigger 3 calls to the `CurrentWeather` function (one for each city) and the final response will be something like this: - ----- -Here is the current weather for the requested cities: -- San Francisco, CA: 30.0°C -- Tokyo, Japan: 10.0°C -- Paris, France: 15.0°C ----- - -The link:https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-openai/src/test/java/org/springframework/ai/model/openai/autoconfigure/tool/OpenAiFunctionCallbackIT.java[OpenAiFunctionCallbackIT.java] test demo this approach. - -=== Register/Call Functions with Prompt Options - -In addition to the auto-configuration, you can register callback functions, dynamically, with your `Prompt` requests: - -[source,java] ----- -OpenAiChatModel chatModel = ... 
- -UserMessage userMessage = new UserMessage("What's the weather like in San Francisco, Tokyo, and Paris?"); - -var promptOptions = OpenAiChatOptions.builder() - .toolCallbacks(List.of(FunctionToolCallback.builder("CurrentWeather", new MockWeatherService()) // (1) function name and instance - .description("Get the weather in location") // (2) function description - .inputType(MockWeatherService.Request.class) // (3) function input type - .build())) // function code - .build(); - -ChatResponse response = this.chatModel.call(new Prompt(this.userMessage, this.promptOptions)); ----- - -NOTE: The in-prompt registered functions are enabled by default for the duration of this request. - -This approach allows to choose dynamically different functions to be called based on the user input. - -The https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-openai/src/test/java/org/springframework/ai/model/openai/autoconfigure/tool/FunctionCallbackInPromptIT.java[FunctionCallbackInPromptIT.java] integration test provides a complete example of how to register a function with the `OpenAiChatModel` and use it in a prompt request. - - -=== Tool Context Support - -Spring AI now supports passing additional contextual information to function callbacks through a tool context. This feature allows you to provide extra data that can be used within the function execution, enhancing the flexibility and power of function calling. - -The context information that is passed in as the second argument of a `java.util.BiFunction`. The `ToolContext` contains as an immutable `Map` allowing you to access key-value pairs. 
- -==== How to Use Tool Context - -You can set the tool context when building your chat options and use a BiFunction for your callback: - -[source,java] ----- -BiFunction weatherFunction = - (request, toolContext) -> { - String sessionId = (String) toolContext.getContext().get("sessionId"); - String userId = (String) toolContext.getContext().get("userId"); - - // Use sessionId and userId in your function logic - double temperature = 0; - if (request.location().contains("Paris")) { - temperature = 15; - } - else if (request.location().contains("Tokyo")) { - temperature = 10; - } - else if (request.location().contains("San Francisco")) { - temperature = 30; - } - - return new MockWeatherService.Response(temperature, 15, 20, 2, 53, 45, MockWeatherService.Unit.C); - }; - -OpenAiChatOptions options = OpenAiChatOptions.builder() - .model(OpenAiApi.ChatModel.GPT_4_O.getValue()) - .toolCallbacks(List.of(FunctionToolCallback.builder("getCurrentWeather", this.weatherFunction) - .description("Get the weather in location") - .inputType(MockWeatherService.Request.class) - .build())) - .toolContext(Map.of("sessionId", "123", "userId", "user456")) - .build(); ----- - -In this example, the `weatherFunction` is defined as a BiFunction that takes both the request and the tool context as parameters. This allows you to access the context directly within the function logic. - -You can then use these options when making a call to the chat model: - -[source,java] ----- -UserMessage userMessage = new UserMessage("What's the weather like in San Francisco, Tokyo, and Paris?"); -ChatResponse response = chatModel.call(new Prompt(List.of(this.userMessage), options)); ----- - -This approach allows you to pass session-specific or user-specific information to your functions, enabling more contextual and personalized responses. 
- -== Appendices: - -=== Spring AI Function Calling Flow [[spring-ai-function-calling-flow]] - -The following diagram illustrates the flow of the `OpenAiChatModel` Function Calling: - -image:openai-chatclient-function-call.jpg[width=800, title="OpenAiChatModel Function Calling Flow"] - -=== OpenAI API Function Calling Flow - -The following diagram illustrates the flow of the OpenAI API https://platform.openai.com/docs/guides/function-calling[Function Calling]: - -image:openai-function-calling-flow.jpg[title="OpenAI API Function Calling Flow", width=800, link=https://platform.openai.com/docs/guides/function-calling] - -The link:https://github.com/spring-projects/spring-ai/blob/main/models/spring-ai-openai/src/test/java/org/springframework/ai/openai/api/tool/OpenAiApiToolFunctionCallIT.java[OpenAiApiToolFunctionCallIT.java] provides a complete example on how to use the OpenAI API function calling. -It is based on the https://platform.openai.com/docs/guides/function-calling/parallel-function-calling[OpenAI Function Calling tutorial]. diff --git a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/zhipuai-chat-functions.adoc b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/zhipuai-chat-functions.adoc deleted file mode 100644 index 61dec956903..00000000000 --- a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/functions/zhipuai-chat-functions.adoc +++ /dev/null @@ -1,200 +0,0 @@ -= Function Calling - -You can register custom Java functions with the `ZhiPuAiChatModel` and have the ZhiPuAI model intelligently choose to output a JSON object containing arguments to call one or many of the registered functions. -This allows you to connect the LLM capabilities with external tools and APIs. -The ZhiPuAI models are trained to detect when a function should be called and to respond with JSON that adheres to the function signature. 
- -The ZhiPuAI API does not call the function directly; instead, the model generates JSON that you can use to call the function in your code and return the result back to the model to complete the conversation. - -Spring AI provides flexible and user-friendly ways to register and call custom functions. -In general, the custom functions need to provide a function `name`, `description`, and the function call `signature` (as JSON schema) to let the model know what arguments the function expects. The `description` helps the model to understand when to call the function. - -As a developer, you need to implement a function that takes the function call arguments sent from the AI model, and responds with the result back to the model. Your function can in turn invoke other 3rd party services to provide the results. - -Spring AI makes this as easy as defining a `@Bean` definition that returns a `java.util.Function` and supplying the bean name as an option when invoking the `ChatModel`. - -Under the hood, Spring wraps your POJO (the function) with the appropriate adapter code that enables interaction with the AI Model, saving you from writing tedious boilerplate code. -The basis of the underlying infrastructure is the link:https://github.com/spring-projects/spring-ai/blob/main/spring-ai-model/src/main/java/org/springframework/ai/tool/ToolCallback.java[ToolCallback.java] interface and the companion Builder utility class to simplify the implementation and registration of Java callback functions. - -// Additionally, the Auto-Configuration provides a way to auto-register any Function beans definition as function calling candidates in the `ChatModel`. - - -== How it works - -Suppose we want the AI model to respond with information that it does not have, for example the current temperature at a given location. - -We can provide the AI model with metadata about our own functions that it can use to retrieve that information as it processes your prompt. 
- -For example, if during the processing of a prompt, the AI Model determines that it needs additional information about the temperature in a given location, it will start a server side generated request/response interaction. The AI Model invokes a client side function. -The AI Model provides method invocation details as JSON and it is the responsibility of the client to execute that function and return the response. - -The model-client interaction is illustrated in the <> diagram. - -Spring AI greatly simplifies code you need to write to support function invocation. -It brokers the function invocation conversation for you. -You can simply provide your function definition as a `@Bean` and then provide the bean name of the function in your prompt options. -You can also reference multiple function bean names in your prompt. - -== Quick Start - -Let's create a chatbot that answer questions by calling our own function. -To support the response of the chatbot, we will register our own function that takes a location and returns the current weather in that location. - -When the response to the prompt to the model needs to answer a question such as `"What’s the weather like in Boston?"` the AI model will invoke the client providing the location value as an argument to be passed to the function. This RPC-like data is passed as JSON. - -Our function calls some SaaS based weather service API and returns the weather response back to the model to complete the conversation. In this example we will use a simple implementation named `MockWeatherService` that hard codes the temperature for various locations. 
- -The following `MockWeatherService.java` represents the weather service API: - -[source,java] ----- -public class MockWeatherService implements Function { - - public enum Unit { C, F } - public record Request(String location, Unit unit) {} - public record Response(double temp, Unit unit) {} - - public Response apply(Request request) { - return new Response(30.0, Unit.C); - } -} ----- - -=== Registering Functions as Beans - -With the link:../zhipuai-chat.html#_auto_configuration[ZhiPuAiChatModel Auto-Configuration] you have multiple ways to register custom functions as beans in the Spring context. - -We start with describing the most POJO friendly options. - - -==== Plain Java Functions - -In this approach you define `@Beans` in your application context as you would any other Spring managed object. - -Internally, Spring AI `ChatModel` will create an instance of a `ToolCallback` instance that adds the logic for it being invoked via the AI model. -The name of the `@Bean` is passed as a `ChatOption`. - - -[source,java] ----- -@Configuration -static class Config { - - @Bean - @Description("Get the weather in location") // function description - public Function weatherFunction1() { - return new MockWeatherService(); - } - ... -} ----- - -The `@Description` annotation is optional and provides a function description (2) that helps the model to understand when to call the function. It is an important property to set to help the AI model determine what client side function to invoke. - -Another option to provide the description of the function is to the `@JacksonDescription` annotation on the `MockWeatherService.Request` to provide the function description: - -[source,java] ----- - -@Configuration -static class Config { - - @Bean - public Function currentWeather3() { // (1) bean name as function name. - return new MockWeatherService(); - } - ... 
-} - -@JsonClassDescription("Get the weather in location") // (2) function description -public record Request(String location, Unit unit) {} ----- - -It is a best practice to annotate the request object with information such that the generates JSON schema of that function is as descriptive as possible to help the AI model pick the correct function to invoke. - -The link:https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-zhipuai/src/test/java/org/springframework/ai/model/zhipuai/autoconfigure/tool/FunctionCallbackWithPlainFunctionBeanIT.java[FunctionCallbackWithPlainFunctionBeanIT.java] demonstrates this approach. - - -==== FunctionToolCallback Wrapper - -Another way register a function is to create `ToolCallback` instance like this: - -[source,java] ----- -@Configuration -static class Config { - - @Bean - public FunctionToolCallback weatherFunctionInfo() { - - return FunctionToolCallback.builder("CurrentWeather", new MockWeatherService()) // (1) function name and instance - .description("Get the weather in location") // (2) function description - .inputType(MockWeatherService.Request.class) // (3) function signature - .build(); - } - ... -} ----- - -It wraps the 3rd party, `MockWeatherService` function and registers it as a `CurrentWeather` function with the `ZhiPuAiChatModel`. -It also provides a description (2) and the input type (3) used to generate the JSON schema for the function call. - -NOTE: By default, the response converter does a JSON serialization of the Response object. - -NOTE: The `FunctionToolCallback` internally resolves the function call signature based on the `MockWeatherService.Request` class. - -=== Specifying functions in Chat Options - -To let the model know and call your `CurrentWeather` function you need to enable it in your prompt requests: - -[source,java] ----- -ZhiPuAiChatModel chatModel = ... 
- -UserMessage userMessage = new UserMessage("What's the weather like in San Francisco, Tokyo, and Paris?"); - -ChatResponse response = this.chatModel.call(new Prompt(List.of(this.userMessage), - ZhiPuAiChatOptions.builder().function("CurrentWeather").build())); // (1) Enable the function - -logger.info("Response: {}", response); ----- - -// NOTE: You can can have multiple functions registered in your `ChatModel` but only those enabled in the prompt request will be considered for the function calling. - -Above user question will trigger 3 calls to `CurrentWeather` function (one for each city) and the final response will be something like this: - ----- -Here is the current weather for the requested cities: -- San Francisco, CA: 30.0°C -- Tokyo, Japan: 10.0°C -- Paris, France: 15.0°C ----- - -The link:https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-zhipuai/src/test/java/org/springframework/ai/model/zhipuai/autoconfigure/tool/ZhipuAiFunctionCallbackIT.java[ZhipuAiFunctionCallbackIT.java] test demo this approach. - - -=== Register/Call Functions with Prompt Options - -In addition to the auto-configuration you can register callback functions, dynamically, with your Prompt requests: - -[source,java] ----- -ZhiPuAiChatModel chatModel = ... 
- -UserMessage userMessage = new UserMessage("What's the weather like in San Francisco, Tokyo, and Paris?"); - -var promptOptions = ZhiPuAiChatOptions.builder() - .toolCallbacks(List.of(FunctionToolCallback.builder("CurrentWeather", new MockWeatherService()) // (1) function name and instance - .description("Get the weather in location") // (2) function description - .inputType(MockWeatherService.Request.class) // (3) function signature - .build())) // function code - .build(); - -ChatResponse response = this.chatModel.call(new Prompt(List.of(this.userMessage), this.promptOptions)); ----- - -NOTE: The in-prompt registered functions are enabled by default for the duration of this request. - -This approach allows to dynamically chose different functions to be called based on the user input. - -The https://github.com/spring-projects/spring-ai/blob/main/auto-configurations/models/spring-ai-autoconfigure-model-zhipuai/src/test/java/org/springframework/ai/model/zhipuai/autoconfigure/tool/FunctionCallbackInPromptIT.java[FunctionCallbackInPromptIT.java] integration test provides a complete example of how to register a function with the `ZhiPuAiChatModel` and use it in a prompt request. diff --git a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/mistralai-chat.adoc b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/mistralai-chat.adoc index 1e4f7794321..8d589e220e1 100644 --- a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/mistralai-chat.adoc +++ b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/mistralai-chat.adoc @@ -156,7 +156,7 @@ TIP: In addition to the model specific link:https://github.com/spring-projects/s You can register custom Java functions with the `MistralAiChatModel` and have the Mistral AI model intelligently choose to output a JSON object containing arguments to call one or many of the registered functions. This is a powerful technique to connect the LLM capabilities with external tools and APIs. 
-Read more about xref:api/chat/functions/mistralai-chat-functions.adoc[Mistral AI Function Calling]. +Read more about xref:api/tools.adoc[Tool Calling]. == Multimodal diff --git a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/ollama-chat.adoc b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/ollama-chat.adoc index 44ac458bda3..2fa76b994c9 100644 --- a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/ollama-chat.adoc +++ b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/ollama-chat.adoc @@ -245,7 +245,7 @@ This configuration will apply the pulling strategy to all models except chat mod You can register custom Java functions with the `OllamaChatModel` and have the Ollama model intelligently choose to output a JSON object containing arguments to call one or many of the registered functions. This is a powerful technique to connect the LLM capabilities with external tools and APIs. -Read more about xref:api/chat/functions/ollama-chat-functions.adoc[Ollama Function Calling]. +Read more about xref:api/tools.adoc[Tool Calling]. TIP: You need Ollama 0.2.8 or newer to use the functional calling capabilities and Ollama 0.4.6 or newer to use them in streaming mode. diff --git a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/openai-chat.adoc b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/openai-chat.adoc index edfb9a1d1c8..4496b697f18 100644 --- a/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/openai-chat.adoc +++ b/spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/openai-chat.adoc @@ -184,7 +184,7 @@ TIP: In addition to the model specific https://github.com/spring-projects/spring You can register custom Java functions with the `OpenAiChatModel` and have the OpenAI model intelligently choose to output a JSON object containing arguments to call one or many of the registered functions. This is a powerful technique to connect the LLM capabilities with external tools and APIs. 
-Read more about xref:api/chat/functions/openai-chat-functions.adoc[OpenAI Function Calling]. +Read more about xref:api/tools.adoc[Tool Calling]. == Multimodal