diff --git a/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/java/CommonSnippets.java b/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/java/CommonSnippets.java
new file mode 100644
index 00000000..d05954c6
--- /dev/null
+++ b/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/java/CommonSnippets.java
@@ -0,0 +1,269 @@
+package com.google.firebase.example.ailogic.java;
+
+import androidx.lifecycle.ViewModel;
+
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.firebase.ai.FirebaseAI;
+import com.google.firebase.ai.GenerativeModel;
+import com.google.firebase.ai.java.ChatFutures;
+import com.google.firebase.ai.java.GenerativeModelFutures;
+import com.google.firebase.ai.type.Content;
+import com.google.firebase.ai.type.FunctionCallPart;
+import com.google.firebase.ai.type.FunctionResponsePart;
+import com.google.firebase.ai.type.GenerateContentResponse;
+import com.google.firebase.ai.type.Schema;
+import com.google.firebase.ai.type.TextPart;
+import com.google.firebase.ai.type.Tool;
+import com.google.firebase.ai.type.FunctionDeclaration;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Executor;
+import java.util.concurrent.Executors;
+
+import kotlinx.serialization.json.JsonElement;
+import kotlinx.serialization.json.JsonElementKt;
+import kotlinx.serialization.json.JsonObject;
+import kotlinx.serialization.json.JsonPrimitive;
+
+import org.reactivestreams.Publisher;
+import org.reactivestreams.Subscriber;
+import org.reactivestreams.Subscription;
+
+public class CommonSnippets extends ViewModel {
+  private Executor executor;
+  private GenerativeModel ai = FirebaseAI.getInstance().generativeModel("gemini-2.5-flash");
+  private GenerativeModelFutures model = GenerativeModelFutures.from(ai);
+
+  public void chat() {
+    chatNonStreaming();
+    chatStreaming();
+  }
+
+  void chatNonStreaming() {
+    // [START chat_non_streaming]
+    // (optional) Create previous chat history for context
+    Content.Builder userContentBuilder = new Content.Builder();
+    userContentBuilder.setRole("user");
+    userContentBuilder.addText("Hello, I have 2 dogs in my house.");
+    Content userContent = userContentBuilder.build();
+
+    Content.Builder modelContentBuilder = new Content.Builder();
+    modelContentBuilder.setRole("model");
+    modelContentBuilder.addText("Great to meet you. What would you like to know?");
+    Content modelContent = modelContentBuilder.build();
+
+    List<Content> history = Arrays.asList(userContent, modelContent);
+
+    // Initialize the chat
+    ChatFutures chat = model.startChat(history);
+
+    // Create a new user message
+    Content.Builder messageBuilder = new Content.Builder();
+    messageBuilder.setRole("user");
+    messageBuilder.addText("How many paws are in my house?");
+
+    Content message = messageBuilder.build();
+
+    // Send the message
+    ListenableFuture<GenerateContentResponse> response = chat.sendMessage(message);
+    Futures.addCallback(
+        response,
+        new FutureCallback<GenerateContentResponse>() {
+          @Override
+          public void onSuccess(GenerateContentResponse result) {
+            String resultText = result.getText();
+            System.out.println(resultText);
+          }
+
+          @Override
+          public void onFailure(Throwable t) {
+            t.printStackTrace();
+          }
+        },
+        executor);
+    // [END chat_non_streaming]
+  }
+
+  void chatStreaming() {
+    // [START chat_streaming]
+    // (optional) Create previous chat history for context
+    Content.Builder userContentBuilder = new Content.Builder();
+    userContentBuilder.setRole("user");
+    userContentBuilder.addText("Hello, I have 2 dogs in my house.");
+    Content userContent = userContentBuilder.build();
+
+    Content.Builder modelContentBuilder = new Content.Builder();
+    modelContentBuilder.setRole("model");
+    modelContentBuilder.addText("Great to meet you. What would you like to know?");
+    Content modelContent = modelContentBuilder.build();
+
+    List<Content> history = Arrays.asList(userContent, modelContent);
+
+    // Initialize the chat
+    ChatFutures chat = model.startChat(history);
+
+    // Create a new user message
+    Content.Builder messageBuilder = new Content.Builder();
+    messageBuilder.setRole("user");
+    messageBuilder.addText("How many paws are in my house?");
+
+    Content message = messageBuilder.build();
+
+    // Send the message
+    Publisher<GenerateContentResponse> streamingResponse = chat.sendMessageStream(message);
+
+    final String[] fullResponse = {""};
+
+    streamingResponse.subscribe(
+        new Subscriber<GenerateContentResponse>() {
+
+          @Override
+          public void onNext(GenerateContentResponse generateContentResponse) {
+            String chunk = generateContentResponse.getText();
+            fullResponse[0] += chunk;
+          }
+
+          @Override
+          public void onComplete() {
+            System.out.println(fullResponse[0]);
+          }
+
+          // ... other methods omitted for brevity
+
+          // [START_EXCLUDE]
+          @Override
+          public void onSubscribe(Subscription s) {
+          }
+
+          @Override
+          public void onError(Throwable t) {
+          }
+
+          // [END_EXCLUDE]
+        });
+    // [END chat_streaming]
+  }
+
+  public void functionCalling() {
+    // [START function_calling_create_function_declaration]
+    FunctionDeclaration fetchWeatherTool =
+        new FunctionDeclaration(
+            "fetchWeather",
+            "Get the weather conditions for a specific city on a specific date.",
+            Map.of(
+                "location",
+                Schema.obj(
+                    Map.of(
+                        "city", Schema.str("The city of the location."),
+                        "state", Schema.str("The US state of the location."))),
+                "date",
+                Schema.str(
+                    "The date for which to get the weather. "
+                        + "Date must be in the format: YYYY-MM-DD.")),
+            Collections.emptyList());
+    // [END function_calling_create_function_declaration]
+
+    // [START function_calling_generate_function_call]
+    String prompt = "What was the weather in Boston on October 17, 2024?";
+    ChatFutures chatFutures = model.startChat();
+    // Send the user's question (the prompt) to the model using multi-turn chat.
+    ListenableFuture<GenerateContentResponse> response =
+        chatFutures.sendMessage(new Content("user", List.of(new TextPart(prompt))));
+
+    ListenableFuture<JsonObject> handleFunctionCallFuture =
+        Futures.transform(
+            response,
+            result -> {
+              for (FunctionCallPart functionCall : result.getFunctionCalls()) {
+                if (functionCall.getName().equals("fetchWeather")) {
+                  Map<String, JsonElement> args = functionCall.getArgs();
+                  JsonObject locationJsonObject = JsonElementKt.getJsonObject(args.get("location"));
+                  String city =
+                      JsonElementKt.getContentOrNull(
+                          JsonElementKt.getJsonPrimitive(locationJsonObject.get("city")));
+                  String state =
+                      JsonElementKt.getContentOrNull(
+                          JsonElementKt.getJsonPrimitive(locationJsonObject.get("state")));
+                  Location location = new Location(city, state);
+
+                  String date =
+                      JsonElementKt.getContentOrNull(
+                          JsonElementKt.getJsonPrimitive(args.get("date")));
+                  return fetchWeather(location, date);
+                }
+              }
+              return null;
+            },
+            Executors.newSingleThreadExecutor());
+    // [END function_calling_generate_function_call]
+
+    // [START function_calling_pass_back_function_response]
+    ListenableFuture<GenerateContentResponse> modelResponseFuture =
+        Futures.transformAsync(
+            handleFunctionCallFuture,
+            // Send the response(s) from the function back to the model
+            // so that the model can use it to generate its final response.
+            functionCallResult ->
+                chatFutures.sendMessage(
+                    new Content(
+                        "function",
+                        List.of(new FunctionResponsePart("fetchWeather", functionCallResult)))),
+            Executors.newSingleThreadExecutor());
+
+    Futures.addCallback(
+        modelResponseFuture,
+        new FutureCallback<GenerateContentResponse>() {
+          @Override
+          public void onSuccess(GenerateContentResponse result) {
+            if (result.getText() != null) {
+              // Log the text response.
+              System.out.println(result.getText());
+            }
+          }
+
+          @Override
+          public void onFailure(Throwable t) {
+            // handle error
+          }
+        },
+        Executors.newSingleThreadExecutor());
+    // [END function_calling_pass_back_function_response]
+  }
+
+  // [START function_calling_write_function]
+  // This function calls a hypothetical external API that returns
+  // a collection of weather information for a given location on a given date.
+  // `location` is an object of the form { city: string, state: string }
+  public JsonObject fetchWeather(Location location, String date) {
+
+    // TODO(developer): Write a standard function that would call to an external weather API.
+
+    // For demo purposes, this hypothetical response is hardcoded here in the expected format.
+ return new JsonObject( + Map.of( + "temperature", + JsonElementKt.JsonPrimitive(38), + "chancePrecipitation", + JsonElementKt.JsonPrimitive("56%"), + "cloudConditions", + JsonElementKt.JsonPrimitive("partlyCloudy") + ) + ); + } + // [END function_calling_write_function] +} + +class Location { + public String city; + public String state; + + public Location(String city, String state) { + this.city = city; + this.state = state; + } +} \ No newline at end of file diff --git a/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/java/GeneralViewModel.java b/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/java/GeneralViewModel.java deleted file mode 100644 index b5f47b85..00000000 --- a/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/java/GeneralViewModel.java +++ /dev/null @@ -1,4 +0,0 @@ -package com.google.firebase.example.ailogic.java; - -public class GeneralViewModel { -} diff --git a/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/java/GoogleAISnippets.java b/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/java/GoogleAISnippets.java new file mode 100644 index 00000000..eae4b311 --- /dev/null +++ b/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/java/GoogleAISnippets.java @@ -0,0 +1,209 @@ +package com.google.firebase.example.ailogic.java; + +import androidx.annotation.OptIn; +import androidx.lifecycle.ViewModel; + +import com.google.firebase.ai.FirebaseAI; +import com.google.firebase.ai.GenerativeModel; +import com.google.firebase.ai.LiveGenerativeModel; +import com.google.firebase.ai.java.GenerativeModelFutures; +import com.google.firebase.ai.java.ImagenModelFutures; +import com.google.firebase.ai.java.LiveModelFutures; +import com.google.firebase.ai.type.Content; +import com.google.firebase.ai.type.FunctionDeclaration; +import com.google.firebase.ai.type.GenerationConfig; +import com.google.firebase.ai.type.GenerativeBackend; +import com.google.firebase.ai.type.HarmBlockThreshold; +import com.google.firebase.ai.type.HarmCategory; +import com.google.firebase.ai.type.ImagenAspectRatio; +import com.google.firebase.ai.type.ImagenGenerationConfig; +import com.google.firebase.ai.type.ImagenImageFormat; +import com.google.firebase.ai.type.LiveGenerationConfig; +import com.google.firebase.ai.type.PublicPreviewAPI; +import com.google.firebase.ai.type.ResponseModality; +import com.google.firebase.ai.type.SafetySetting; +import com.google.firebase.ai.type.SpeechConfig; +import com.google.firebase.ai.type.Tool; +import com.google.firebase.ai.type.Voice; + +import java.util.Collections; +import java.util.List; + +public class GoogleAISnippets extends ViewModel { + void functionCalling(FunctionDeclaration fetchWeatherTool) { + // [START function_calling_specify_declaration_during_init] + // Initialize the Gemini Developer API backend service + // Create a `GenerativeModel` instance with a model that supports your use case + GenerativeModelFutures model = + GenerativeModelFutures.from( + FirebaseAI.getInstance(GenerativeBackend.googleAI()) + .generativeModel( + "gemini-2.5-flash", + null, + null, + // Provide the function declaration to the model. + List.of(Tool.functionDeclarations(List.of(fetchWeatherTool))))); + // [END function_calling_specify_declaration_during_init] + } + + public void modelConfiguration_model_parameters_general() { + // [START model_parameters_general] + // ... 
+ + // Set parameter values in a `GenerationConfig` (example values shown here) + GenerationConfig.Builder configBuilder = new GenerationConfig.Builder(); + configBuilder.maxOutputTokens = 200; + configBuilder.stopSequences = List.of("red"); + configBuilder.temperature = 0.9f; + configBuilder.topK = 16; + configBuilder.topP = 0.1f; + + GenerationConfig config = configBuilder.build(); + + // Specify the config as part of creating the `GenerativeModel` instance + GenerativeModelFutures model = + GenerativeModelFutures.from( + FirebaseAI.getInstance(GenerativeBackend.googleAI()) + .generativeModel("gemini-2.5-flash", config)); + + // ... + // [END model_parameters_general] + } + + @OptIn(markerClass = PublicPreviewAPI.class) + public void modelConfiguration_model_parameters_imagen() { + // [START model_parameters_imagen] + // ... + + // Set parameter values in a `ImagenGenerationConfig` (example values shown here) + ImagenGenerationConfig config = + new ImagenGenerationConfig.Builder() + .setNegativePrompt("frogs") + .setNumberOfImages(2) + .setAspectRatio(ImagenAspectRatio.LANDSCAPE_16x9) + .setImageFormat(ImagenImageFormat.jpeg(100)) + .setAddWatermark(false) + .build(); + + // Specify the config as part of creating the `ImagenModel` instance + ImagenModelFutures model = + ImagenModelFutures.from( + FirebaseAI.getInstance(GenerativeBackend.googleAI()).imagenModel("imagen-4.0-generate-001", config)); + + // ... + // [END model_parameters_imagen] + } + + @OptIn(markerClass = PublicPreviewAPI.class) + public void modelConfiguration_model_parameters_live() { + // [START model_parameters_live] + // ... + + // Set parameter values in a `LiveGenerationConfig` (example values shown here) + LiveGenerationConfig.Builder configBuilder = new LiveGenerationConfig.Builder(); + configBuilder.setMaxOutputTokens(200); + configBuilder.setResponseModality(ResponseModality.AUDIO); + + configBuilder.setSpeechConfig(new SpeechConfig(new Voice("FENRIR"))); + configBuilder.setTemperature(0.9f); + configBuilder.topK = 16; + configBuilder.topP = 0.1f; + + LiveGenerationConfig config = configBuilder.build(); + + // Initialize the Gemini Developer API backend service + // Specify the config as part of creating the `LiveModel` instance + LiveModelFutures model = + LiveModelFutures.from( + FirebaseAI.getInstance(GenerativeBackend.googleAI()) + .liveModel("gemini-2.5-flash", config)); + + // ... + // [END model_parameters_live] + } + + @OptIn(markerClass = PublicPreviewAPI.class) + public void modelConfiguration_safety_settings_imagen() { + // [START safety_settings_imagen] + // Specify the safety settings as part of creating the `ImagenModel` instance + ImagenModelFutures model = + ImagenModelFutures.from( + FirebaseAI.getInstance(GenerativeBackend.googleAI()) + .imagenModel(/* modelName */ "imagen-4.0-generate-001", /* imageGenerationConfig */ null)); + + // ... 
+ // [END safety_settings_imagen] + } + + public void modelConfiguration_safety_settings_multiple() { + // [START safety_settings_multiple] + SafetySetting harassmentSafety = + new SafetySetting(HarmCategory.HARASSMENT, HarmBlockThreshold.ONLY_HIGH, null); + + SafetySetting hateSpeechSafety = + new SafetySetting(HarmCategory.HATE_SPEECH, HarmBlockThreshold.MEDIUM_AND_ABOVE, null); + + // Specify the safety settings as part of creating the `GenerativeModel` instance + GenerativeModelFutures model = + GenerativeModelFutures.from( + FirebaseAI.getInstance(GenerativeBackend.googleAI()) + .generativeModel( + /* modelName */ "gemini-2.5-flash", + /* generationConfig is optional */ null, + List.of(harassmentSafety, hateSpeechSafety))); + + // ... + // [END safety_settings_multiple] + } + + public void modelConfiguration_safety_settings_single() { + // [START safety_settings_single] + SafetySetting harassmentSafety = + new SafetySetting(HarmCategory.HARASSMENT, HarmBlockThreshold.ONLY_HIGH, null); + + // Specify the safety settings as part of creating the `GenerativeModel` instance + GenerativeModelFutures model = + GenerativeModelFutures.from( + FirebaseAI.getInstance(GenerativeBackend.googleAI()) + .generativeModel( + /* modelName */ "gemini-2.5-flash", + /* generationConfig is optional */ null, + Collections.singletonList(harassmentSafety))); + + // ... + // [END safety_settings_single] + } + + public void systemInstructions_general() { + // [START system_instructions_general] + // Specify the system instructions as part of creating the `GenerativeModel` instance + GenerativeModel ai = FirebaseAI.getInstance(GenerativeBackend.googleAI()) + .generativeModel( + /* modelName */ "gemini-2.5-flash", + /* generationConfig (optional) */ null, + /* safetySettings (optional) */ null, + /* tools (optional) */ null, + /* toolConfig (optional) */ null, + /* systemInstructions (optional) */ new Content.Builder().addText("You are a cat. Your name is Neko.").build() + ); + + GenerativeModelFutures model = GenerativeModelFutures.from(ai); + // [END system_instructions_general] + } + + @OptIn(markerClass = PublicPreviewAPI.class) + public void systemInstructions_live() { + // [START system_instructions_live] + // Specify the system instructions as part of creating the `LiveModel` instance + LiveGenerativeModel ai = FirebaseAI.getInstance(GenerativeBackend.googleAI()) + .liveModel( + /* modelName */ "gemini-2.5-flash", + /* generationConfig (optional) */ null, + /* tools (optional) */ null, + /* systemInstructions (optional) */ new Content.Builder().addText("You are a cat. 
Your name is Neko.").build() + ); + + LiveModelFutures model = LiveModelFutures.from(ai); + // [END system_instructions_live] + } +} \ No newline at end of file diff --git a/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/java/VertexAISnippets.java b/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/java/VertexAISnippets.java new file mode 100644 index 00000000..0f199d10 --- /dev/null +++ b/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/java/VertexAISnippets.java @@ -0,0 +1,210 @@ +package com.google.firebase.example.ailogic.java; + +import androidx.annotation.OptIn; +import androidx.lifecycle.ViewModel; + +import com.google.firebase.ai.FirebaseAI; +import com.google.firebase.ai.GenerativeModel; +import com.google.firebase.ai.LiveGenerativeModel; +import com.google.firebase.ai.java.GenerativeModelFutures; +import com.google.firebase.ai.java.ImagenModelFutures; +import com.google.firebase.ai.java.LiveModelFutures; +import com.google.firebase.ai.type.Content; +import com.google.firebase.ai.type.FunctionDeclaration; +import com.google.firebase.ai.type.GenerationConfig; +import com.google.firebase.ai.type.GenerativeBackend; +import com.google.firebase.ai.type.HarmBlockThreshold; +import com.google.firebase.ai.type.HarmCategory; +import com.google.firebase.ai.type.ImagenAspectRatio; +import com.google.firebase.ai.type.ImagenGenerationConfig; +import com.google.firebase.ai.type.ImagenImageFormat; +import com.google.firebase.ai.type.LiveGenerationConfig; +import com.google.firebase.ai.type.PublicPreviewAPI; +import com.google.firebase.ai.type.ResponseModality; +import com.google.firebase.ai.type.SafetySetting; +import com.google.firebase.ai.type.SpeechConfig; +import com.google.firebase.ai.type.Tool; +import com.google.firebase.ai.type.Voice; + +import java.util.Collections; +import java.util.List; + +public class VertexAISnippets extends ViewModel { + void functionCalling(FunctionDeclaration fetchWeatherTool) { + // [START function_calling_specify_declaration_during_init] + // Initialize the Vertex AI Gemini API backend service + // Optionally specify the location to access the model (`global` is recommended) + // Create a `GenerativeModel` instance with a model that supports your use case + GenerativeModelFutures model = + GenerativeModelFutures.from( + FirebaseAI.getInstance(GenerativeBackend.vertexAI("global")) + .generativeModel( + "gemini-2.5-flash", + null, + null, + // Provide the function declaration to the model. + List.of(Tool.functionDeclarations(List.of(fetchWeatherTool))))); + // [END function_calling_specify_declaration_during_init] + } + + public void modelConfiguration_model_parameters_general() { + // [START model_parameters_general] + // ... + + // Set parameter values in a `GenerationConfig` (example values shown here) + GenerationConfig.Builder configBuilder = new GenerationConfig.Builder(); + configBuilder.maxOutputTokens = 200; + configBuilder.stopSequences = List.of("red"); + configBuilder.temperature = 0.9f; + configBuilder.topK = 16; + configBuilder.topP = 0.1f; + + GenerationConfig config = configBuilder.build(); + + // Specify the config as part of creating the `GenerativeModel` instance + GenerativeModelFutures model = + GenerativeModelFutures.from( + FirebaseAI.getInstance(GenerativeBackend.vertexAI()) + .generativeModel("gemini-2.5-flash", config)); + + // ... 
+ // [END model_parameters_general] + } + + @OptIn(markerClass = PublicPreviewAPI.class) + public void modelConfiguration_model_parameters_imagen() { + // [START model_parameters_imagen] + // ... + + // Set parameter values in a `ImagenGenerationConfig` (example values shown here) + ImagenGenerationConfig config = + new ImagenGenerationConfig.Builder() + .setNegativePrompt("frogs") + .setNumberOfImages(2) + .setAspectRatio(ImagenAspectRatio.LANDSCAPE_16x9) + .setImageFormat(ImagenImageFormat.jpeg(100)) + .setAddWatermark(false) + .build(); + + // Specify the config as part of creating the `ImagenModel` instance + ImagenModelFutures model = + ImagenModelFutures.from( + FirebaseAI.getInstance(GenerativeBackend.vertexAI()).imagenModel("imagen-4.0-generate-001", config)); + + // ... + // [END model_parameters_imagen] + } + + @OptIn(markerClass = PublicPreviewAPI.class) + public void modelConfiguration_model_parameters_live() { + // [START model_parameters_live] + // ... + + // Set parameter values in a `LiveGenerationConfig` (example values shown here) + LiveGenerationConfig.Builder configBuilder = new LiveGenerationConfig.Builder(); + configBuilder.setMaxOutputTokens(200); + configBuilder.setResponseModality(ResponseModality.AUDIO); + + configBuilder.setSpeechConfig(new SpeechConfig(new Voice("FENRIR"))); + configBuilder.setTemperature(0.9f); + configBuilder.topK = 16; + configBuilder.topP = 0.1f; + + LiveGenerationConfig config = configBuilder.build(); + + // Initialize the Vertex AI Gemini API backend service + // Specify the config as part of creating the `LiveModel` instance + LiveModelFutures model = + LiveModelFutures.from( + FirebaseAI.getInstance(GenerativeBackend.vertexAI()) + .liveModel("gemini-2.5-flash", config)); + + // ... + // [END model_parameters_live] + } + + @OptIn(markerClass = PublicPreviewAPI.class) + public void modelConfiguration_safety_settings_imagen() { + // [START safety_settings_imagen] + // Specify the safety settings as part of creating the `ImagenModel` instance + ImagenModelFutures model = + ImagenModelFutures.from( + FirebaseAI.getInstance(GenerativeBackend.vertexAI()) + .imagenModel(/* modelName */ "imagen-4.0-generate-001", /* imageGenerationConfig */ null)); + + // ... + // [END safety_settings_imagen] + } + + public void modelConfiguration_safety_settings_multiple() { + // [START safety_settings_multiple] + SafetySetting harassmentSafety = + new SafetySetting(HarmCategory.HARASSMENT, HarmBlockThreshold.ONLY_HIGH, null); + + SafetySetting hateSpeechSafety = + new SafetySetting(HarmCategory.HATE_SPEECH, HarmBlockThreshold.MEDIUM_AND_ABOVE, null); + + // Specify the safety settings as part of creating the `GenerativeModel` instance + GenerativeModelFutures model = + GenerativeModelFutures.from( + FirebaseAI.getInstance(GenerativeBackend.vertexAI()) + .generativeModel( + /* modelName */ "gemini-2.5-flash", + /* generationConfig is optional */ null, + List.of(harassmentSafety, hateSpeechSafety))); + + // ... 
+ // [END safety_settings_multiple] + } + + public void modelConfiguration_safety_settings_single() { + // [START safety_settings_single] + SafetySetting harassmentSafety = + new SafetySetting(HarmCategory.HARASSMENT, HarmBlockThreshold.ONLY_HIGH, null); + + // Specify the safety settings as part of creating the `GenerativeModel` instance + GenerativeModelFutures model = + GenerativeModelFutures.from( + FirebaseAI.getInstance(GenerativeBackend.vertexAI()) + .generativeModel( + /* modelName */ "gemini-2.5-flash", + /* generationConfig is optional */ null, + Collections.singletonList(harassmentSafety))); + + // ... + // [END safety_settings_single] + } + + public void systemInstructions_general() { + // [START system_instructions_general] + // Specify the system instructions as part of creating the `GenerativeModel` instance + GenerativeModel ai = FirebaseAI.getInstance(GenerativeBackend.vertexAI()) + .generativeModel( + /* modelName */ "gemini-2.5-flash", + /* generationConfig (optional) */ null, + /* safetySettings (optional) */ null, + /* tools (optional) */ null, + /* toolsConfig (optional) */ null, + /* systemInstruction (optional) */ new Content.Builder().addText("You are a cat. Your name is Neko.").build() + ); + + GenerativeModelFutures model = GenerativeModelFutures.from(ai); + // [END system_instructions_general] + } + + @OptIn(markerClass = PublicPreviewAPI.class) + public void systemInstructions_live() { + // [START system_instructions_live] + // Specify the system instructions as part of creating the `LiveModel` instance + LiveGenerativeModel ai = FirebaseAI.getInstance(GenerativeBackend.vertexAI()) + .liveModel( + /* modelName */ "gemini-2.5-flash", + /* generationConfig (optional) */ null, + /* tools (optional) */ null, + /* systemInstruction (optional) */ new Content.Builder().addText("You are a cat. Your name is Neko.").build() + ); + + LiveModelFutures model = LiveModelFutures.from(ai); + // [END system_instructions_live] + } +} \ No newline at end of file diff --git a/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/kotlin/CommonSnippets.kt b/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/kotlin/CommonSnippets.kt new file mode 100644 index 00000000..baa41c9d --- /dev/null +++ b/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/kotlin/CommonSnippets.kt @@ -0,0 +1,150 @@ +package com.google.firebase.example.ailogic.kotlin + +import androidx.lifecycle.ViewModel +import androidx.lifecycle.viewModelScope +import com.google.firebase.ai.GenerativeModel +import com.google.firebase.ai.type.FunctionResponsePart +import com.google.firebase.ai.type.Schema +import com.google.firebase.ai.type.content +import com.google.firebase.ai.type.FunctionDeclaration +import kotlinx.coroutines.launch +import kotlinx.serialization.json.JsonObject +import kotlinx.serialization.json.JsonPrimitive +import kotlinx.serialization.json.jsonObject +import kotlinx.serialization.json.jsonPrimitive + +class CommonSnippets( + private val generativeModel: GenerativeModel +) : ViewModel() { + + fun chat() { + viewModelScope.launch { + chatNonStreaming() + chatStreaming() + } + } + + suspend fun chatNonStreaming() { + // [START chat_non_streaming] + // Initialize the chat + val chat = + generativeModel.startChat( + history = + listOf( + content(role = "user") { text("Hello, I have 2 dogs in my house.") }, + content(role = "model") { + text("Great to meet you. 
What would you like to know?") + } + ) + ) + + val response = chat.sendMessage("How many paws are in my house?") + print(response.text) + // [END chat_non_streaming] + } + + suspend fun chatStreaming() { + // [START chat_streaming] + // Initialize the chat + val chat = + generativeModel.startChat( + history = + listOf( + content(role = "user") { text("Hello, I have 2 dogs in my house.") }, + content(role = "model") { + text("Great to meet you. What would you like to know?") + } + ) + ) + + chat.sendMessageStream("How many paws are in my house?").collect { chunk -> print(chunk.text) } + // [END chat_streaming] + } + + suspend fun functionCalling() { + // [START function_calling_create_function_declaration] + val fetchWeatherTool = + FunctionDeclaration( + "fetchWeather", + "Get the weather conditions for a specific city on a specific date.", + mapOf( + "location" to + Schema.obj( + mapOf( + "city" to Schema.string("The city of the location."), + "state" to Schema.string("The US state of the location."), + ), + description = + "The name of the city and its state for which " + + "to get the weather. Only cities in the " + + "USA are supported." + ), + "date" to + Schema.string( + "The date for which to get the weather." + + " Date must be in the format: YYYY-MM-DD." + ), + ), + ) + // [END function_calling_create_function_declaration] + + // [START function_calling_generate_function_call] + val prompt = "What was the weather in Boston on October 17, 2024?" + val chat = generativeModel.startChat() + // Send the user's question (the prompt) to the model using multi-turn chat. + val result = chat.sendMessage(prompt) + + val functionCalls = result.functionCalls + // When the model responds with one or more function calls, invoke the function(s). + val fetchWeatherCall = functionCalls.find { it.name == "fetchWeather" } + + // Forward the structured input data prepared by the model + // to the hypothetical external API. + val functionResponse = + fetchWeatherCall?.let { + // Alternatively, if your `Location` class is marked as @Serializable, you can use + // val location = Json.decodeFromJsonElement(it.args["location"]!!) + val location = + Location( + it.args["location"]!!.jsonObject["city"]!!.jsonPrimitive.content, + it.args["location"]!!.jsonObject["state"]!!.jsonPrimitive.content + ) + val date = it.args["date"]!!.jsonPrimitive.content + fetchWeather(location, date) + } + // [END function_calling_generate_function_call] + + // [START function_calling_pass_back_function_response] + // Send the response(s) from the function back to the model + // so that the model can use it to generate its final response. + val finalResponse = + chat.sendMessage( + content("function") { part(FunctionResponsePart("fetchWeather", functionResponse!!)) } + ) + + // Log the text response. + println(finalResponse.text ?: "No text in response") + // [END function_calling_pass_back_function_response] + } + + // [START function_calling_write_function] + // This function calls a hypothetical external API that returns + // a collection of weather information for a given location on a given date. + // `location` is an object of the form { city: string, state: string } + data class Location(val city: String, val state: String) + + suspend fun fetchWeather(location: Location, date: String): JsonObject { + + // TODO(developer): Write a standard function that would call to an external weather API. + + // For demo purposes, this hypothetical response is hardcoded here in the expected format. 
+ return JsonObject( + mapOf( + "temperature" to JsonPrimitive(38), + "chancePrecipitation" to JsonPrimitive("56%"), + "cloudConditions" to JsonPrimitive("partlyCloudy") + ) + ) + } + // [END function_calling_write_function] +} diff --git a/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/kotlin/GeneralViewModel.kt b/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/kotlin/GeneralViewModel.kt deleted file mode 100644 index fb26b802..00000000 --- a/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/kotlin/GeneralViewModel.kt +++ /dev/null @@ -1,4 +0,0 @@ -package com.google.firebase.example.ailogic.kotlin - -class GeneralViewModel { -} \ No newline at end of file diff --git a/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/kotlin/GoogleAISnippets.kt b/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/kotlin/GoogleAISnippets.kt new file mode 100644 index 00000000..bbff4aa6 --- /dev/null +++ b/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/kotlin/GoogleAISnippets.kt @@ -0,0 +1,185 @@ +package com.google.firebase.example.ailogic.kotlin + +import androidx.lifecycle.ViewModel +import com.google.firebase.Firebase +import com.google.firebase.ai.ai +import com.google.firebase.ai.type.FunctionDeclaration +import com.google.firebase.ai.type.GenerativeBackend +import com.google.firebase.ai.type.HarmBlockThreshold +import com.google.firebase.ai.type.HarmCategory +import com.google.firebase.ai.type.ImagenAspectRatio +import com.google.firebase.ai.type.ImagenImageFormat +import com.google.firebase.ai.type.ImagenPersonFilterLevel +import com.google.firebase.ai.type.ImagenSafetyFilterLevel +import com.google.firebase.ai.type.ImagenSafetySettings +import com.google.firebase.ai.type.PublicPreviewAPI +import com.google.firebase.ai.type.ResponseModality +import com.google.firebase.ai.type.SafetySetting +import com.google.firebase.ai.type.SpeechConfig +import com.google.firebase.ai.type.Tool +import com.google.firebase.ai.type.Voice +import com.google.firebase.ai.type.content +import com.google.firebase.ai.type.generationConfig +import com.google.firebase.ai.type.imagenGenerationConfig +import com.google.firebase.ai.type.liveGenerationConfig + +class GoogleAISnippets : ViewModel() { + fun functionCalling(fetchWeatherTool: FunctionDeclaration) { + // [START function_calling_specify_declaration_during_init] + // Initialize the Gemini Developer API backend service + // Create a `GenerativeModel` instance with a model that supports your use case + val model = + Firebase.ai(backend = GenerativeBackend.googleAI()) + .generativeModel( + modelName = "gemini-2.5-flash", + // Provide the function declaration to the model. + tools = listOf(Tool.functionDeclarations(listOf(fetchWeatherTool))), + ) + // [END function_calling_specify_declaration_during_init] + } + + fun modelConfiguration_model_parameters_general() { + // [START model_parameters_general] + // ... + + // Set parameter values in a `GenerationConfig` (example values shown here) + val config = generationConfig { + maxOutputTokens = 200 + stopSequences = listOf("red") + temperature = 0.9f + topK = 16 + topP = 0.1f + } + + // Initialize the Gemini Developer API backend service + // Specify the config as part of creating the `GenerativeModel` instance + val model = + Firebase.ai(backend = GenerativeBackend.googleAI()) + .generativeModel(modelName = "gemini-2.5-flash", generationConfig = config) + + // ... 
+    // [END model_parameters_general]
+  }
+
+  @OptIn(PublicPreviewAPI::class)
+  fun modelConfiguration_model_parameters_imagen() {
+    // [START model_parameters_imagen]
+    // ...
+
+    // Set parameter values in a `ImagenGenerationConfig` (example values shown here)
+    val config = imagenGenerationConfig {
+      negativePrompt = "frogs"
+      numberOfImages = 2
+      aspectRatio = ImagenAspectRatio.LANDSCAPE_16x9
+      imageFormat = ImagenImageFormat.jpeg(compressionQuality = 100)
+      addWatermark = false
+    }
+
+    // Initialize the Gemini Developer API backend service
+    // Specify the config as part of creating the `ImagenModel` instance
+    val model =
+      Firebase.ai(backend = GenerativeBackend.googleAI())
+        .imagenModel(modelName = "imagen-4.0-generate-001", generationConfig = config)
+
+    // ...
+    // [END model_parameters_imagen]
+  }
+
+  @OptIn(PublicPreviewAPI::class)
+  fun modelConfiguration_model_parameters_live() {
+    // [START model_parameters_live]
+    // ...
+
+    // Set parameter values in a `LiveGenerationConfig` (example values shown here)
+    val config = liveGenerationConfig {
+      maxOutputTokens = 200
+      responseModality = ResponseModality.AUDIO
+      speechConfig = SpeechConfig(Voice("FENRIR"))
+      temperature = 0.9f
+      topK = 16
+      topP = 0.1f
+    }
+
+    // Initialize the Gemini Developer API backend service
+    // Specify the config as part of creating the `LiveModel` instance
+    val model =
+      Firebase.ai(backend = GenerativeBackend.googleAI())
+        .liveModel(modelName = "gemini-2.5-flash", generationConfig = config)
+
+    // ...
+    // [END model_parameters_live]
+  }
+
+  @OptIn(PublicPreviewAPI::class)
+  fun modelConfiguration_safety_settings_imagen() {
+    // [START safety_settings_imagen]
+    // Specify the safety settings as part of creating the `ImagenModel` instance
+    val model =
+      Firebase.ai(backend = GenerativeBackend.googleAI())
+        .imagenModel(
+          modelName = "imagen-4.0-generate-001",
+          safetySettings =
+            ImagenSafetySettings(
+              safetyFilterLevel = ImagenSafetyFilterLevel.BLOCK_LOW_AND_ABOVE,
+              personFilterLevel = ImagenPersonFilterLevel.BLOCK_ALL,
+            ),
+        )
+
+    // ...
+    // [END safety_settings_imagen]
+  }
+
+  fun modelConfiguration_safety_settings_multiple() {
+    // [START safety_settings_multiple]
+    val harassmentSafety = SafetySetting(HarmCategory.HARASSMENT, HarmBlockThreshold.ONLY_HIGH)
+    val hateSpeechSafety =
+      SafetySetting(HarmCategory.HATE_SPEECH, HarmBlockThreshold.MEDIUM_AND_ABOVE)
+
+    // Specify the safety settings as part of creating the `GenerativeModel` instance
+    val model =
+      Firebase.ai(backend = GenerativeBackend.googleAI())
+        .generativeModel(
+          modelName = "gemini-2.5-flash",
+          safetySettings = listOf(harassmentSafety, hateSpeechSafety),
+        )
+
+    // ...
+    // [END safety_settings_multiple]
+  }
+
+  fun modelConfiguration_safety_settings_single() {
+    // [START safety_settings_single]
+    // Specify the safety settings as part of creating the `GenerativeModel` instance
+    val model =
+      Firebase.ai(backend = GenerativeBackend.googleAI())
+        .generativeModel(
+          modelName = "gemini-2.5-flash",
+          safetySettings =
+            listOf(SafetySetting(HarmCategory.HARASSMENT, HarmBlockThreshold.ONLY_HIGH)),
+        )
+
+    // ...
+    // [END safety_settings_single]
+  }
+
+  fun systemInstructions_general() {
+    // [START system_instructions_general]
+    // Specify the system instructions as part of creating the `GenerativeModel` instance
+    val model = Firebase.ai(backend = GenerativeBackend.googleAI()).generativeModel(
+      modelName = "gemini-2.5-flash",
+      systemInstruction = content { text("You are a cat. 
Your name is Neko.") } + ) + // [END system_instructions_general] + } + + @OptIn(PublicPreviewAPI::class) + fun systemInstructions_live() { + // [START system_instructions_live] + // Specify the system instructions as part of creating the `LiveModel` instance + val model = Firebase.ai(backend = GenerativeBackend.googleAI()).liveModel( + modelName = "gemini-2.5-flash", + systemInstruction = content { text("You are a cat. Your name is Neko.") } + ) + // [END system_instructions_live] + } +} \ No newline at end of file diff --git a/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/kotlin/VertexAISnippets.kt b/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/kotlin/VertexAISnippets.kt new file mode 100644 index 00000000..069bed49 --- /dev/null +++ b/firebase-ai/app/src/main/java/com/google/firebase/example/ailogic/kotlin/VertexAISnippets.kt @@ -0,0 +1,186 @@ +package com.google.firebase.example.ailogic.kotlin + +import androidx.lifecycle.ViewModel +import com.google.firebase.Firebase +import com.google.firebase.ai.ai +import com.google.firebase.ai.type.FunctionDeclaration +import com.google.firebase.ai.type.GenerativeBackend +import com.google.firebase.ai.type.HarmBlockThreshold +import com.google.firebase.ai.type.HarmCategory +import com.google.firebase.ai.type.ImagenAspectRatio +import com.google.firebase.ai.type.ImagenImageFormat +import com.google.firebase.ai.type.ImagenPersonFilterLevel +import com.google.firebase.ai.type.ImagenSafetyFilterLevel +import com.google.firebase.ai.type.ImagenSafetySettings +import com.google.firebase.ai.type.PublicPreviewAPI +import com.google.firebase.ai.type.ResponseModality +import com.google.firebase.ai.type.SafetySetting +import com.google.firebase.ai.type.SpeechConfig +import com.google.firebase.ai.type.Tool +import com.google.firebase.ai.type.Voice +import com.google.firebase.ai.type.content +import com.google.firebase.ai.type.generationConfig +import com.google.firebase.ai.type.imagenGenerationConfig +import com.google.firebase.ai.type.liveGenerationConfig + +class VertexAISnippets : ViewModel() { + fun functionCalling(fetchWeatherTool: FunctionDeclaration) { + // [START function_calling_specify_declaration_during_init] + // Initialize the Vertex AI Gemini API backend service + // Optionally specify the location to access the model (`global` is recommended) + // Create a `GenerativeModel` instance with a model that supports your use case + val model = + Firebase.ai(backend = GenerativeBackend.vertexAI(location = "global")) + .generativeModel( + modelName = "gemini-2.5-flash", + // Provide the function declaration to the model. + tools = listOf(Tool.functionDeclarations(listOf(fetchWeatherTool))), + ) + // [END function_calling_specify_declaration_during_init] + } + + fun modelConfiguration_model_parameters_general() { + // [START model_parameters_general] + // ... + + // Set parameter values in a `GenerationConfig` (example values shown here) + val config = generationConfig { + maxOutputTokens = 200 + stopSequences = listOf("red") + temperature = 0.9f + topK = 16 + topP = 0.1f + } + + // Initialize the Vertex AI Gemini API backend service + // Specify the config as part of creating the `GenerativeModel` instance + val model = + Firebase.ai(backend = GenerativeBackend.vertexAI()) + .generativeModel(modelName = "gemini-2.5-flash", generationConfig = config) + + // ... 
+ // [END model_parameters_general] + } + + @OptIn(PublicPreviewAPI::class) + fun modelConfiguration_model_parameters_imagen() { + // [START model_parameters_imagen] + // ... + + // Set parameter values in a `ImagenGenerationConfig` (example values shown here) + val config = imagenGenerationConfig { + negativePrompt = "frogs" + numberOfImages = 2 + aspectRatio = ImagenAspectRatio.LANDSCAPE_16x9 + imageFormat = ImagenImageFormat.jpeg(compressionQuality = 100) + addWatermark = false + } + + // Initialize the Vertex AI Gemini API backend service + // Specify the config as part of creating the `GenerativeModel` instance + val model = + Firebase.ai(backend = GenerativeBackend.vertexAI()) + .imagenModel(modelName = "imagen-4.0-generate-001", generationConfig = config) + + // ... + // [END model_parameters_imagen] + } + + @OptIn(PublicPreviewAPI::class) + fun modelConfiguration_model_parameters_live() { + // [START model_parameters_live] + // ... + + // Set parameter values in a `LiveGenerationConfig` (example values shown here) + val config = liveGenerationConfig { + maxOutputTokens = 200 + responseModality = ResponseModality.AUDIO + speechConfig = SpeechConfig(Voice("FENRIR")) + temperature = 0.9f + topK = 16 + topP = 0.1f + } + + // Initialize the Vertex AI Gemini API backend service + // Specify the config as part of creating the `LiveModel` instance + val model = + Firebase.ai(backend = GenerativeBackend.vertexAI()) + .liveModel(modelName = "gemini-2.5-flash", generationConfig = config) + + // ... + // [END model_parameters_live] + } + + @OptIn(PublicPreviewAPI::class) + fun modelConfiguration_safety_settings_imagen() { + // [START safety_settings_imagen] + // Specify the safety settings as part of creating the `ImagenModel` instance + val model = + Firebase.ai(backend = GenerativeBackend.vertexAI()) + .imagenModel( + modelName = "imagen-4.0-generate-001", + safetySettings = + ImagenSafetySettings( + safetyFilterLevel = ImagenSafetyFilterLevel.BLOCK_LOW_AND_ABOVE, + personFilterLevel = ImagenPersonFilterLevel.BLOCK_ALL, + ), + ) + + // ... + // [END safety_settings_imagen] + } + + fun modelConfiguration_safety_settings_multiple() { + // [START safety_settings_multiple] + val harassmentSafety = SafetySetting(HarmCategory.HARASSMENT, HarmBlockThreshold.ONLY_HIGH) + val hateSpeechSafety = + SafetySetting(HarmCategory.HATE_SPEECH, HarmBlockThreshold.MEDIUM_AND_ABOVE) + + // Specify the safety settings as part of creating the `GenerativeModel` instance + val model = + Firebase.ai(backend = GenerativeBackend.vertexAI()) + .generativeModel( + modelName = "gemini-2.5-flash", + safetySettings = listOf(harassmentSafety, hateSpeechSafety), + ) + + // ... + // [END safety_settings_multiple] + } + + fun modelConfiguration_safety_settings_single() { + // [START safety_settings_single] + // Specify the safety settings as part of creating the `GenerativeModel` instance + val model = + Firebase.ai(backend = GenerativeBackend.vertexAI()) + .generativeModel( + modelName = "gemini-2.5-flash", + safetySettings = + listOf(SafetySetting(HarmCategory.HARASSMENT, HarmBlockThreshold.ONLY_HIGH)), + ) + + // ... + // [END safety_settings_single] + } + + fun systemInstructions_general() { + // [START system_instructions_general] + // Specify the system instructions as part of creating the `GenerativeModel` instance + val model = Firebase.ai(backend = GenerativeBackend.vertexAI()).generativeModel( + modelName = "gemini-2.5-flash", + systemInstruction = content { text("You are a cat. 
Your name is Neko.") } + ) + // [END system_instructions_general] + } + + @OptIn(PublicPreviewAPI::class) + fun systemInstructions_live() { + // [START system_instructions_live] + // Specify the system instructions as part of creating the `LiveModel` instance + val model = Firebase.ai(backend = GenerativeBackend.vertexAI()).liveModel( + modelName = "gemini-2.5-flash", + systemInstruction = content { text("You are a cat. Your name is Neko.") } + ) + // [END system_instructions_live] + } +}