diff --git a/README.md b/README.md index 3da8563..c59cd55 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ def test_text_gen_text_only_prompt(self): client = genai.Client() response = client.models.generate_content( - model="gemini-2.0-flash", contents="Write a story about a magic backpack." + model="gemini-2.5-flash", contents="Write a story about a magic backpack." ) print(response.text) # [END text_gen_text_only_prompt] diff --git a/go/cache.go b/go/cache.go index f6a2d0b..eb2607e 100644 --- a/go/cache.go +++ b/go/cache.go @@ -22,7 +22,7 @@ func CacheCreate() (*genai.GenerateContentResponse, error) { log.Fatal(err) } - modelName := "gemini-1.5-flash-001" + modelName := "gemini-2.5-flash" document, err := client.Files.UploadFromPath( ctx, filepath.Join(getMedia(), "a11.txt"), @@ -82,7 +82,7 @@ func CacheCreateFromName() (*genai.GenerateContentResponse, error) { log.Fatal(err) } - modelName := "gemini-1.5-flash-001" + modelName := "gemini-2.5-flash" document, err := client.Files.UploadFromPath( ctx, filepath.Join(getMedia(), "a11.txt"), @@ -146,7 +146,7 @@ func CacheCreateFromChat() (*genai.GenerateContentResponse, error) { log.Fatal(err) } - modelName := "gemini-1.5-flash-001" + modelName := "gemini-2.5-flash" systemInstruction := "You are an expert analyzing transcripts." // Create initial chat with a system instruction. @@ -243,7 +243,7 @@ func CacheDelete() error { log.Fatal(err) } - modelName := "gemini-1.5-flash-001" + modelName := "gemini-2.5-flash" document, err := client.Files.UploadFromPath( ctx, filepath.Join(getMedia(), "a11.txt"), @@ -291,7 +291,7 @@ func CacheGet() error { log.Fatal(err) } - modelName := "gemini-1.5-flash-001" + modelName := "gemini-2.5-flash" document, err := client.Files.UploadFromPath( ctx, filepath.Join(getMedia(), "a11.txt"), @@ -343,7 +343,7 @@ func CacheList() error { } // For demonstration, create a cache first. - modelName := "gemini-1.5-flash-001" + modelName := "gemini-2.5-flash" document, err := client.Files.UploadFromPath( ctx, filepath.Join(getMedia(), "a11.txt"), @@ -410,7 +410,7 @@ func CacheUpdate() error { log.Fatal(err) } - modelName := "gemini-1.5-flash-001" + modelName := "gemini-2.5-flash" document, err := client.Files.UploadFromPath( ctx, filepath.Join(getMedia(), "a11.txt"), diff --git a/go/chat.go b/go/chat.go index 8ff8fc9..bfa6376 100644 --- a/go/chat.go +++ b/go/chat.go @@ -28,7 +28,7 @@ func Chat() error { genai.NewContentFromText("Great to meet you. What would you like to know?", genai.RoleModel), } - chat, err := client.Chats.Create(ctx, "gemini-2.0-flash", nil, history) + chat, err := client.Chats.Create(ctx, "gemini-2.5-flash", nil, history) if err != nil { log.Fatal(err) } @@ -64,7 +64,7 @@ func ChatStreaming() error { genai.NewContentFromText("Hello", genai.RoleUser), genai.NewContentFromText("Great to meet you. 
What would you like to know?", genai.RoleModel), } - chat, err := client.Chats.Create(ctx, "gemini-2.0-flash", nil, history) + chat, err := client.Chats.Create(ctx, "gemini-2.5-flash", nil, history) if err != nil { log.Fatal(err) } @@ -102,7 +102,7 @@ func ChatStreamingWithImages() error { log.Fatal(err) } - chat, err := client.Chats.Create(ctx, "gemini-2.0-flash", nil, nil) + chat, err := client.Chats.Create(ctx, "gemini-2.5-flash", nil, nil) if err != nil { log.Fatal(err) } diff --git a/go/code_execution.go b/go/code_execution.go index 56455f8..b753fe2 100644 --- a/go/code_execution.go +++ b/go/code_execution.go @@ -22,7 +22,7 @@ func CodeExecutionBasic() (*genai.GenerateContentResponse, error) { response, err := client.Models.GenerateContent( ctx, - "gemini-2.0-pro-exp-02-05", + "gemini-2.5-pro", genai.Text( `Write and execute code that calculates the sum of the first 50 prime numbers. Ensure that only the executable code and its resulting output are generated.`, @@ -86,7 +86,7 @@ func CodeExecutionRequestOverride() (*genai.GenerateContentResponse, error) { response, err := client.Models.GenerateContent( ctx, - "gemini-2.0-pro-exp-02-05", + "gemini-2.5-pro", genai.Text( `What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50.`, diff --git a/go/configure_model_parameters.go b/go/configure_model_parameters.go index e2fe0c9..bdfdf6f 100644 --- a/go/configure_model_parameters.go +++ b/go/configure_model_parameters.go @@ -26,7 +26,7 @@ func ConfigureModelParameters() (*genai.GenerateContentResponse, error) { response, err := client.Models.GenerateContent( ctx, - "gemini-2.0-flash", + "gemini-2.5-flash", genai.Text("Tell me a story about a magic backpack."), &genai.GenerateContentConfig{ CandidateCount: candidateCount, diff --git a/go/controlled_generation.go b/go/controlled_generation.go index 2016cb6..1cc92d3 100644 --- a/go/controlled_generation.go +++ b/go/controlled_generation.go @@ -42,7 +42,7 @@ func JsonControlledGeneration() (*genai.GenerateContentResponse, error) { response, err := client.Models.GenerateContent( ctx, - "gemini-2.0-flash", + "gemini-2.5-flash", genai.Text("List a few popular cookie recipes."), config, ) @@ -68,7 +68,7 @@ func JsonNoSchema() (*genai.GenerateContentResponse, error) { "Use this JSON schema:\n\n" + "Recipe = {'recipe_name': str, 'ingredients': list[str]}\n" + "Return: list[Recipe]" - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", genai.Text(prompt), nil) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", genai.Text(prompt), nil) if err != nil { log.Fatal(err) } @@ -134,7 +134,7 @@ func JsonEnum() (*genai.GenerateContentResponse, error) { genai.NewContentFromParts(parts, genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, config, ) @@ -173,7 +173,7 @@ func EnumInJson() (*genai.GenerateContentResponse, error) { ResponseMIMEType: "application/json", ResponseSchema: schema, } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", genai.Text("List about 10 cookie recipes, grade them based on popularity"), config, ) @@ -223,7 +223,7 @@ func JsonEnumRaw() (*genai.GenerateContentResponse, error) { genai.NewContentFromParts(parts, genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", + response, err 
:= client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, config, ) @@ -292,7 +292,7 @@ func XEnum() (*genai.GenerateContentResponse, error) { contents := []*genai.Content{ genai.NewContentFromParts(parts, genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, config, ) @@ -341,7 +341,7 @@ func XEnumRaw() (*genai.GenerateContentResponse, error) { contents := []*genai.Content{ genai.NewContentFromParts(parts, genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, config, ) diff --git a/go/count_tokens.go b/go/count_tokens.go index dd0a11e..448a5fd 100644 --- a/go/count_tokens.go +++ b/go/count_tokens.go @@ -23,7 +23,7 @@ func TokensContextWindow() error { log.Fatal(err) } - modelInfo, err := client.Models.Get(ctx, "gemini-2.0-flash", &genai.GetModelConfig{}) + modelInfo, err := client.Models.Get(ctx, "gemini-2.5-flash", &genai.GetModelConfig{}) if err != nil { log.Fatal(err) } @@ -49,13 +49,13 @@ func TokensTextOnly() error { contents := []*genai.Content{ genai.NewContentFromText(prompt, genai.RoleUser), } - countResp, err := client.Models.CountTokens(ctx, "gemini-2.0-flash", contents, nil) + countResp, err := client.Models.CountTokens(ctx, "gemini-2.5-flash", contents, nil) if err != nil { return err } fmt.Println("total_tokens:", countResp.TotalTokens) - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } @@ -84,12 +84,12 @@ func TokensChat() error { {Role: genai.RoleUser, Parts: []*genai.Part{{Text: "Hi my name is Bob"}}}, {Role: genai.RoleModel, Parts: []*genai.Part{{Text: "Hi Bob!"}}}, } - chat, err := client.Chats.Create(ctx, "gemini-2.0-flash", nil, history) + chat, err := client.Chats.Create(ctx, "gemini-2.5-flash", nil, history) if err != nil { log.Fatal(err) } - firstTokenResp, err := client.Models.CountTokens(ctx, "gemini-2.0-flash", chat.History(false), nil) + firstTokenResp, err := client.Models.CountTokens(ctx, "gemini-2.5-flash", chat.History(false), nil) if err != nil { log.Fatal(err) } @@ -108,7 +108,7 @@ func TokensChat() error { hist := chat.History(false) hist = append(hist, extra) - secondTokenResp, err := client.Models.CountTokens(ctx, "gemini-2.0-flash", hist, nil) + secondTokenResp, err := client.Models.CountTokens(ctx, "gemini-2.5-flash", hist, nil) if err != nil { log.Fatal(err) } @@ -147,13 +147,13 @@ func TokensMultimodalImageFileApi() error { genai.NewContentFromParts(parts, genai.RoleUser), } - tokenResp, err := client.Models.CountTokens(ctx, "gemini-2.0-flash", contents, nil) + tokenResp, err := client.Models.CountTokens(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } fmt.Println("Multimodal image token count:", tokenResp.TotalTokens) - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } @@ -208,12 +208,12 @@ func TokensMultimodalVideoAudioFileApi() error { genai.NewContentFromParts(parts, genai.RoleUser), } - tokenResp, err := client.Models.CountTokens(ctx, "gemini-2.0-flash", contents, nil) + tokenResp, err := client.Models.CountTokens(ctx, "gemini-2.5-flash", contents, nil) 
if err != nil { log.Fatal(err) } fmt.Println("Multimodal video/audio token count:", tokenResp.TotalTokens) - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } @@ -255,12 +255,12 @@ func TokensMultimodalPdfFileApi() error { genai.NewContentFromParts(parts, genai.RoleUser), } - tokenResp, err := client.Models.CountTokens(ctx, "gemini-2.0-flash", contents, nil) + tokenResp, err := client.Models.CountTokens(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } fmt.Printf("Multimodal PDF token count: %d\n", tokenResp.TotalTokens) - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } @@ -303,7 +303,7 @@ func TokensCachedContent() error { } // Create cached content using a simple slice with text and a file. - cache, err := client.Caches.Create(ctx, "gemini-1.5-flash-001", &genai.CreateCachedContentConfig{ + cache, err := client.Caches.Create(ctx, "gemini-2.5-flash", &genai.CreateCachedContentConfig{ Contents: contents, }) if err != nil { @@ -311,14 +311,14 @@ func TokensCachedContent() error { } prompt := "Please give a short summary of this file." - countResp, err := client.Models.CountTokens(ctx, "gemini-2.0-flash", []*genai.Content{ + countResp, err := client.Models.CountTokens(ctx, "gemini-2.5-flash", []*genai.Content{ genai.NewContentFromText(prompt, genai.RoleUser), }, nil) if err != nil { log.Fatal(err) } fmt.Printf("%d", countResp.TotalTokens) - response, err := client.Models.GenerateContent(ctx, "gemini-1.5-flash-001", []*genai.Content{ + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", []*genai.Content{ genai.NewContentFromText(prompt, genai.RoleUser), }, &genai.GenerateContentConfig{ CachedContent: cache.Name, diff --git a/go/files.go b/go/files.go index 7b15709..d2d9ab0 100644 --- a/go/files.go +++ b/go/files.go @@ -44,7 +44,7 @@ func FilesCreateText() (*genai.GenerateContentResponse, error) { genai.NewContentFromParts(parts, genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } @@ -86,7 +86,7 @@ func FilesCreateImage() (*genai.GenerateContentResponse, error) { genai.NewContentFromParts(parts, genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } @@ -127,7 +127,7 @@ func FilesCreateAudio() (*genai.GenerateContentResponse, error) { genai.NewContentFromParts(parts, genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } @@ -180,7 +180,7 @@ func FilesCreateVideo() (*genai.GenerateContentResponse, error) { genai.NewContentFromParts(parts, genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } @@ -220,7 +220,7 @@ func FilesCreatePdf() 
(*genai.GenerateContentResponse, error) { genai.NewContentFromParts(parts, genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } @@ -261,7 +261,7 @@ func FilesCreateFromIO() (*genai.GenerateContentResponse, error) { genai.NewContentFromParts(parts, genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } @@ -359,7 +359,7 @@ func FilesDelete() error { genai.NewContentFromParts(parts, genai.RoleUser), } - _, err = client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil) + _, err = client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, nil) // Expect an error when using a deleted file. if err != nil { return nil diff --git a/go/function_calling.go b/go/function_calling.go index 6137c6f..9830f7b 100644 --- a/go/function_calling.go +++ b/go/function_calling.go @@ -68,7 +68,7 @@ func FunctionCalling() error { if err != nil { log.Fatal(err) } - modelName := "gemini-2.0-flash" + modelName := "gemini-2.5-flash" // Create the function declarations for arithmetic operations. addDeclaration := createArithmeticToolDeclaration("addNumbers", "Return the result of adding two numbers.") diff --git a/go/models.go b/go/models.go index 819b6f1..f555044 100644 --- a/go/models.go +++ b/go/models.go @@ -61,7 +61,7 @@ func ModelsGet() error { log.Fatal(err) } - modelInfo, err := client.Models.Get(ctx, "gemini-2.0-flash", nil) + modelInfo, err := client.Models.Get(ctx, "gemini-2.5-flash", nil) if err != nil { log.Fatal(err) } diff --git a/go/safety_settings.go b/go/safety_settings.go index c802424..849c5a1 100644 --- a/go/safety_settings.go +++ b/go/safety_settings.go @@ -35,7 +35,7 @@ func SafetySettings() error { contents := []*genai.Content{ genai.NewContentFromText(unsafePrompt, genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, config) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, config) if err != nil { log.Fatal(err) } @@ -84,7 +84,7 @@ func SafetySettingsMulti() error { contents := []*genai.Content{ genai.NewContentFromText(unsafePrompt, genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, config) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, config) if err != nil { log.Fatal(err) } diff --git a/go/system_instruction.go b/go/system_instruction.go index ae88756..0278029 100644 --- a/go/system_instruction.go +++ b/go/system_instruction.go @@ -29,7 +29,7 @@ func SystemInstruction() error { SystemInstruction: genai.NewContentFromText("You are a cat. 
Your name is Neko.", genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, config) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, config) if err != nil { log.Fatal(err) } diff --git a/go/text_generation.go b/go/text_generation.go index cdd5607..00e5192 100644 --- a/go/text_generation.go +++ b/go/text_generation.go @@ -24,7 +24,7 @@ func TextGenTextOnlyPrompt() (*genai.GenerateContentResponse, error) { contents := []*genai.Content{ genai.NewContentFromText("Write a story about a magic backpack.", genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } @@ -48,7 +48,7 @@ func TextGenTextOnlyPromptStreaming() error { } for response, err := range client.Models.GenerateContentStream( ctx, - "gemini-2.0-flash", + "gemini-2.5-flash", contents, nil, ) { @@ -90,7 +90,7 @@ func TextGenMultimodalOneImagePrompt() (*genai.GenerateContentResponse, error) { genai.NewContentFromParts(parts, genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } @@ -128,7 +128,7 @@ func TextGenMultimodalOneImagePromptStreaming() error { } for response, err := range client.Models.GenerateContentStream( ctx, - "gemini-2.0-flash", + "gemini-2.5-flash", contents, nil, ) { @@ -184,7 +184,7 @@ func TextGenMultimodalMultiImagePrompt() (*genai.GenerateContentResponse, error) genai.NewContentFromParts(parts, genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } @@ -238,7 +238,7 @@ func TextGenMultimodalMultiImagePromptStreaming() error { for result, err := range client.Models.GenerateContentStream( ctx, - "gemini-2.0-flash", + "gemini-2.5-flash", contents, nil, ) { @@ -282,7 +282,7 @@ func TextGenMultimodalAudio() (*genai.GenerateContentResponse, error) { genai.NewContentFromParts(parts, genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } @@ -324,7 +324,7 @@ func TextGenMultimodalAudioStreaming() error { for result, err := range client.Models.GenerateContentStream( ctx, - "gemini-2.0-flash", + "gemini-2.5-flash", contents, nil, ) { @@ -380,7 +380,7 @@ func TextGenMultimodalVideoPrompt() (*genai.GenerateContentResponse, error) { genai.NewContentFromParts(parts, genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil) + response, err := client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } @@ -434,7 +434,7 @@ func TextGenMultimodalVideoPromptStreaming() error { for result, err := range client.Models.GenerateContentStream( ctx, - "gemini-2.0-flash", + "gemini-2.5-flash", contents, nil, ) { @@ -478,7 +478,7 @@ func TextGenMultimodalPdf() (*genai.GenerateContentResponse, error) { genai.NewContentFromParts(parts, genai.RoleUser), } - response, err := client.Models.GenerateContent(ctx, "gemini-2.0-flash", contents, nil) + response, err := 
client.Models.GenerateContent(ctx, "gemini-2.5-flash", contents, nil) if err != nil { log.Fatal(err) } @@ -520,7 +520,7 @@ func TextGenMultimodalPdfStreaming() error { for result, err := range client.Models.GenerateContentStream( ctx, - "gemini-2.0-flash", + "gemini-2.5-flash", contents, nil, ) { diff --git a/go/thinking_generation.go b/go/thinking_generation.go index 5639a99..b4b476a 100644 --- a/go/thinking_generation.go +++ b/go/thinking_generation.go @@ -11,7 +11,7 @@ import ( ) // Define the thinking model centrally -const modelID = "gemini-2.5-pro-exp-03-25" +const modelID = "gemini-2.5-pro" // Helper function to initialize the client func newGenAIClient(ctx context.Context) (*genai.Client, error) { diff --git a/java/src/main/java/com/example/gemini/ChatSession.java b/java/src/main/java/com/example/gemini/ChatSession.java index 986363f..51f14ac 100644 --- a/java/src/main/java/com/example/gemini/ChatSession.java +++ b/java/src/main/java/com/example/gemini/ChatSession.java @@ -42,7 +42,7 @@ public static List chat() { ).build(); Chat chat = client.chats.create( - "gemini-2.0-flash", + "gemini-2.5-flash", GenerateContentConfig.builder() .systemInstruction(userContent) .systemInstruction(modelContent) diff --git a/java/src/main/java/com/example/gemini/CodeExecution.java b/java/src/main/java/com/example/gemini/CodeExecution.java index 7d8e953..d11f14a 100644 --- a/java/src/main/java/com/example/gemini/CodeExecution.java +++ b/java/src/main/java/com/example/gemini/CodeExecution.java @@ -38,7 +38,7 @@ public static GenerateContentResponse codeExecutionBasic() { GenerateContentResponse response = client.models.generateContent( - "gemini-2.0-pro-exp-02-05", + "gemini-2.5-pro", prompt, null); @@ -114,7 +114,7 @@ public static GenerateContentResponse codeExecutionRequestOverride() { GenerateContentResponse response = client.models.generateContent( - "gemini-2.0-flash", + "gemini-2.5-flash", prompt, config); diff --git a/java/src/main/java/com/example/gemini/ConfigureModelParameters.java b/java/src/main/java/com/example/gemini/ConfigureModelParameters.java index 0826aca..dbcd38e 100644 --- a/java/src/main/java/com/example/gemini/ConfigureModelParameters.java +++ b/java/src/main/java/com/example/gemini/ConfigureModelParameters.java @@ -38,7 +38,7 @@ public class ConfigureModelParameters { GenerateContentResponse response = client.models.generateContent( - "gemini-2.0-flash", + "gemini-2.5-flash", "Tell me a story about a magic backpack.", config); diff --git a/java/src/main/java/com/example/gemini/ControlledGeneration.java b/java/src/main/java/com/example/gemini/ControlledGeneration.java index 58ec338..374700c 100644 --- a/java/src/main/java/com/example/gemini/ControlledGeneration.java +++ b/java/src/main/java/com/example/gemini/ControlledGeneration.java @@ -65,7 +65,7 @@ public class ControlledGeneration { GenerateContentResponse response = client.models.generateContent( - "gemini-2.0-flash", + "gemini-2.5-flash", "List a few popular cookie recipes.", config); @@ -92,7 +92,7 @@ public class ControlledGeneration { GenerateContentResponse response = client.models.generateContent( - "gemini-2.0-flash", + "gemini-2.5-flash", prompt, null); @@ -128,7 +128,7 @@ public class ControlledGeneration { GenerateContentResponse response = client.models.generateContent( - "gemini-2.0-flash", + "gemini-2.5-flash", content, config); @@ -166,7 +166,7 @@ public class ControlledGeneration { GenerateContentResponse response = client.models.generateContent( - "gemini-2.0-flash", + "gemini-2.5-flash", "List about 10 
cookie recipes, grade them based on popularity", config); @@ -202,7 +202,7 @@ public class ControlledGeneration { GenerateContentResponse response = client.models.generateContent( - "gemini-2.0-flash", + "gemini-2.5-flash", content, config); diff --git a/java/src/main/java/com/example/gemini/FunctionCalling.java b/java/src/main/java/com/example/gemini/FunctionCalling.java index b2045a8..7c9f804 100644 --- a/java/src/main/java/com/example/gemini/FunctionCalling.java +++ b/java/src/main/java/com/example/gemini/FunctionCalling.java @@ -110,7 +110,7 @@ public static Double functionCalling() { GenerateContentResponse response = client.models.generateContent( - "gemini-2.0-flash", + "gemini-2.5-flash", "I have 57 cats, each owns 44 mittens, how many mittens is that in total?", config); diff --git a/java/src/main/java/com/example/gemini/SafetySettings.java b/java/src/main/java/com/example/gemini/SafetySettings.java index 3846094..0159277 100644 --- a/java/src/main/java/com/example/gemini/SafetySettings.java +++ b/java/src/main/java/com/example/gemini/SafetySettings.java @@ -45,7 +45,7 @@ public static GenerateContentResponse safetySettings() { GenerateContentResponse response = client.models.generateContent( - "gemini-2.0-flash", + "gemini-2.5-flash", unsafePrompt, config); @@ -79,7 +79,7 @@ public static GenerateContentResponse safetySettingsMulti() throws Exception { GenerateContentResponse response = client.models.generateContent( - "gemini-2.0-flash", + "gemini-2.5-flash", unsafePrompt, config); diff --git a/java/src/main/java/com/example/gemini/SystemInstruction.java b/java/src/main/java/com/example/gemini/SystemInstruction.java index c52b269..cb081c4 100644 --- a/java/src/main/java/com/example/gemini/SystemInstruction.java +++ b/java/src/main/java/com/example/gemini/SystemInstruction.java @@ -39,7 +39,7 @@ public class SystemInstruction { GenerateContentResponse response = client.models.generateContent( - "gemini-2.0-flash", + "gemini-2.5-flash", "Good morning! 
How are you?", config); diff --git a/java/src/main/java/com/example/gemini/TextGeneration.java b/java/src/main/java/com/example/gemini/TextGeneration.java index 1a1e252..fe81378 100644 --- a/java/src/main/java/com/example/gemini/TextGeneration.java +++ b/java/src/main/java/com/example/gemini/TextGeneration.java @@ -35,7 +35,7 @@ public class TextGeneration { GenerateContentResponse response = client.models.generateContent( - "gemini-2.0-flash", + "gemini-2.5-flash", "Write a story about a magic backpack.", null); @@ -50,7 +50,7 @@ public static String textGenTextOnlyPromptStreaming() { ResponseStream responseStream = client.models.generateContentStream( - "gemini-2.0-flash", + "gemini-2.5-flash", "Write a story about a magic backpack.", null); @@ -77,7 +77,7 @@ public static String textGenTextOnlyPromptStreaming() { Part.fromText("Tell me about this instrument."), Part.fromBytes(imageData, "image/jpeg")); - GenerateContentResponse response = client.models.generateContent("gemini-2.0-flash", content, null); + GenerateContentResponse response = client.models.generateContent("gemini-2.5-flash", content, null); System.out.println(response.text()); // [END text_gen_multimodal_one_image_prompt] @@ -99,7 +99,7 @@ public static String textGenMultimodalOneImagePromptStreaming() throws Exception ResponseStream responseStream = client.models.generateContentStream( - "gemini-2.0-flash", + "gemini-2.5-flash", content, null); @@ -131,7 +131,7 @@ public static String textGenMultimodalOneImagePromptStreaming() throws Exception Part.fromBytes(cajunImageData, "image/jpeg")); - GenerateContentResponse response = client.models.generateContent("gemini-2.0-flash", content, null); + GenerateContentResponse response = client.models.generateContent("gemini-2.5-flash", content, null); System.out.println(response.text()); // [END text_gen_multimodal_multi_image_prompt] @@ -155,7 +155,7 @@ public static String textGenMultimodalMultiImagePromptStreaming() throws Excepti Part.fromBytes(cajunImageData, "image/jpeg")); ResponseStream responseStream = - client.models.generateContentStream("gemini-2.0-flash", content, null); + client.models.generateContentStream("gemini-2.5-flash", content, null); StringBuilder response = new StringBuilder(); for (GenerateContentResponse res : responseStream) { @@ -179,7 +179,7 @@ public static String textGenMultimodalMultiImagePromptStreaming() throws Excepti Content.fromParts(Part.fromText("Give me a summary of this audio file."), Part.fromBytes(audioData, "audio/mpeg")); - GenerateContentResponse response = client.models.generateContent("gemini-2.0-flash", content, null); + GenerateContentResponse response = client.models.generateContent("gemini-2.5-flash", content, null); System.out.println(response.text()); // [END text_gen_multimodal_audio] @@ -198,7 +198,7 @@ public static String textGenMultimodalAudioStreaming() throws Exception { Part.fromBytes(audioData, "audio/mpeg")); ResponseStream responseStream = - client.models.generateContentStream("gemini-2.0-flash", content, null); + client.models.generateContentStream("gemini-2.5-flash", content, null); StringBuilder response = new StringBuilder(); for (GenerateContentResponse res : responseStream) { @@ -222,7 +222,7 @@ public static String textGenMultimodalAudioStreaming() throws Exception { Content.fromParts(Part.fromText("Describe this video clip."), Part.fromBytes(videoData, "video/mp4")); - GenerateContentResponse response = client.models.generateContent("gemini-2.0-flash", content, null); + GenerateContentResponse response = 
client.models.generateContent("gemini-2.5-flash", content, null); System.out.println(response.text()); // [END text_gen_multimodal_video_prompt] @@ -241,7 +241,7 @@ public static String textGenMultimodalVideoPromptStreaming() throws Exception { Part.fromBytes(videoData, "video/mp4")); ResponseStream responseStream = - client.models.generateContentStream("gemini-2.0-flash", content, null); + client.models.generateContentStream("gemini-2.5-flash", content, null); StringBuilder response = new StringBuilder(); for (GenerateContentResponse res : responseStream) { @@ -265,7 +265,7 @@ public static String textGenMultimodalVideoPromptStreaming() throws Exception { Content.fromParts(Part.fromText("Give me a summary of this document."), Part.fromBytes(pdfData, "application/pdf")); - GenerateContentResponse response = client.models.generateContent("gemini-2.0-flash", content, null); + GenerateContentResponse response = client.models.generateContent("gemini-2.5-flash", content, null); System.out.println(response.text()); // [END text_gen_multimodal_pdf] @@ -284,7 +284,7 @@ public static String textGenMultimodalPdfStreaming() throws Exception { Part.fromBytes(pdfData, "application/pdf")); ResponseStream responseStream = - client.models.generateContentStream("gemini-2.0-flash", content, null); + client.models.generateContentStream("gemini-2.5-flash", content, null); StringBuilder response = new StringBuilder(); for (GenerateContentResponse res : responseStream) { diff --git a/javascript/cache.js b/javascript/cache.js index 6fd67e4..7cbecb4 100644 --- a/javascript/cache.js +++ b/javascript/cache.js @@ -39,7 +39,7 @@ export async function cacheCreate() { config: { mimeType: "text/plain" }, }); console.log("Uploaded file name:", document.name); - const modelName = "gemini-1.5-flash-001"; + const modelName = "gemini-2.5-flash"; const contents = [ createUserContent(createPartFromUri(document.uri, document.mimeType)), @@ -77,7 +77,7 @@ export async function cacheCreateFromName() { config: { mimeType: "text/plain" }, }); console.log("Uploaded file name:", document.name); - const modelName = "gemini-1.5-flash-001"; + const modelName = "gemini-2.5-flash"; const contents = [ createUserContent(createPartFromUri(document.uri, document.mimeType)), @@ -111,7 +111,7 @@ export async function cacheCreateFromChat() { // Make sure to include the following import: // import {GoogleGenAI} from '@google/genai'; const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY }); - const modelName = "gemini-1.5-flash-001"; + const modelName = "gemini-2.5-flash"; const systemInstruction = "You are an expert analyzing transcripts."; // Create a chat session with the system instruction. 
@@ -176,7 +176,7 @@ export async function cacheDelete() { config: { mimeType: "text/plain" }, }); console.log("Uploaded file name:", document.name); - const modelName = "gemini-1.5-flash-001"; + const modelName = "gemini-2.5-flash"; const contents = [ createUserContent(createPartFromUri(document.uri, document.mimeType)), @@ -205,7 +205,7 @@ export async function cacheGet() { config: { mimeType: "text/plain" }, }); console.log("Uploaded file name:", document.name); - const modelName = "gemini-1.5-flash-001"; + const modelName = "gemini-2.5-flash"; const contents = [ createUserContent(createPartFromUri(document.uri, document.mimeType)), @@ -237,7 +237,7 @@ export async function cacheList() { config: { mimeType: "text/plain" }, }); console.log("Uploaded file name:", document.name); - const modelName = "gemini-1.5-flash-001"; + const modelName = "gemini-2.5-flash"; const contents = [ createUserContent(createPartFromUri(document.uri, document.mimeType)), @@ -278,7 +278,7 @@ export async function cacheUpdate() { config: { mimeType: "text/plain" }, }); console.log("Uploaded file name:", document.name); - const modelName = "gemini-1.5-flash-001"; + const modelName = "gemini-2.5-flash"; const contents = [ createUserContent(createPartFromUri(document.uri, document.mimeType)), diff --git a/javascript/chat.js b/javascript/chat.js index 3823517..70a016d 100644 --- a/javascript/chat.js +++ b/javascript/chat.js @@ -34,7 +34,7 @@ export async function chat() { // import {GoogleGenAI} from '@google/genai'; const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY }); const chat = ai.chats.create({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", history: [ { role: "user", @@ -67,7 +67,7 @@ export async function chatStreaming() { // import {GoogleGenAI} from '@google/genai'; const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY }); const chat = ai.chats.create({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", history: [ { role: "user", @@ -108,7 +108,7 @@ export async function chatStreamingWithImages() { // Make sure to include the following import: // import {GoogleGenAI} from '@google/genai'; const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY }); - const chat = ai.chats.create({ model: "gemini-2.0-flash" }); + const chat = ai.chats.create({ model: "gemini-2.5-flash" }); console.log("Streaming response for initial text message:"); const stream1 = await chat.sendMessageStream({ diff --git a/javascript/code_execution.js b/javascript/code_execution.js index ecb2685..5aaf538 100644 --- a/javascript/code_execution.js +++ b/javascript/code_execution.js @@ -24,7 +24,7 @@ export async function codeExecutionBasic() { const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY }); const response = await ai.models.generateContent({ - model: "gemini-2.0-pro-exp-02-05", + model: "gemini-2.5-pro", contents: `Write and execute code that calculates the sum of the first 50 prime numbers. Ensure that only the executable code and its resulting output are generated.`, }); @@ -109,7 +109,7 @@ export async function codeExecutionRequestOverride() { const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY }); const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: "What is the sum of the first 50 prime numbers? 
Generate and run code for the calculation, and make sure you get all 50.", config: { @@ -164,7 +164,7 @@ export async function codeExecutionChat() { const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY }); const chat = ai.chats.create({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", }); const response = await chat.sendMessage({ diff --git a/javascript/configure_model_parameters.js b/javascript/configure_model_parameters.js index f199352..41b50b6 100644 --- a/javascript/configure_model_parameters.js +++ b/javascript/configure_model_parameters.js @@ -24,7 +24,7 @@ export async function configureModelParameters() { const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY }); const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: "Tell me a story about a magic backpack.", config: { candidateCount: 1, diff --git a/javascript/controlled_generation.js b/javascript/controlled_generation.js index 11a9387..8a1c453 100644 --- a/javascript/controlled_generation.js +++ b/javascript/controlled_generation.js @@ -34,7 +34,7 @@ export async function jsonControlledGeneration() { // import {GoogleGenAI} from '@google/genai'; const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY }); const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: "List a few popular cookie recipes.", config: { responseMimeType: "application/json", @@ -67,7 +67,7 @@ export async function jsonNoSchema() { "Recipe = {'recipeName': str, 'ingredients': list[str]}\n" + "Return: list[Recipe]"; const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: prompt, }); console.log(response.text); @@ -86,7 +86,7 @@ export async function jsonEnum() { config: { mimeType: "image/jpeg" }, }); const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: createUserContent([ "What kind of instrument is this?", createPartFromUri(organ.uri, organ.mimeType), @@ -110,7 +110,7 @@ export async function enumInJson() { // import {GoogleGenAI} from '@google/genai'; const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY }); const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: "List about 10 cookie recipes, grade them based on popularity", config: { responseMimeType: "application/json", @@ -143,7 +143,7 @@ export async function jsonEnumRaw() { config: { mimeType: "image/jpeg" }, }); const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: createUserContent([ "What kind of instrument is this?", createPartFromUri(organ.uri, organ.mimeType), @@ -172,7 +172,7 @@ export async function xEnum() { config: { mimeType: "image/jpeg" }, }); const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: createUserContent([ "What kind of instrument is this?", createPartFromUri(organ.uri, organ.mimeType), @@ -201,7 +201,7 @@ export async function xEnumRaw() { config: { mimeType: "image/jpeg" }, }); const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: createUserContent([ "What kind of instrument is this?", createPartFromUri(organ.uri, organ.mimeType), diff --git a/javascript/count_tokens.js b/javascript/count_tokens.js index 29b01ce..52a7179 100644 
--- a/javascript/count_tokens.js +++ b/javascript/count_tokens.js @@ -40,13 +40,13 @@ export async function tokensTextOnly() { const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY }); const prompt = "The quick brown fox jumps over the lazy dog."; const countTokensResponse = await ai.models.countTokens({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: prompt, }); console.log(countTokensResponse.totalTokens); const generateResponse = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: prompt, }); console.log(generateResponse.usageMetadata); @@ -68,13 +68,13 @@ export async function tokensChat() { { role: "model", parts: [{ text: "Hi Bob!" }] }, ]; const chat = ai.chats.create({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", history: history, }); // Count tokens for the current chat history. const countTokensResponse = await ai.models.countTokens({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: chat.getHistory(), }); console.log(countTokensResponse.totalTokens); @@ -92,7 +92,7 @@ export async function tokensChat() { const combinedHistory = chat.getHistory(); combinedHistory.push(extraMessage); const combinedCountTokensResponse = await ai.models.countTokens({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: combinedHistory, }); console.log( @@ -125,13 +125,13 @@ export async function tokensMultimodalImageInline() { ]); const countTokensResponse = await ai.models.countTokens({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: contents, }); console.log(countTokensResponse.totalTokens); const generateResponse = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: contents, }); console.log(generateResponse.usageMetadata); @@ -155,7 +155,7 @@ export async function tokensMultimodalImageFileApi() { }); const countTokensResponse = await ai.models.countTokens({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: createUserContent([ prompt, createPartFromUri(organ.uri, organ.mimeType), @@ -164,7 +164,7 @@ export async function tokensMultimodalImageFileApi() { console.log(countTokensResponse.totalTokens); const generateResponse = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: createUserContent([ prompt, createPartFromUri(organ.uri, organ.mimeType), @@ -198,7 +198,7 @@ export async function tokensMultimodalVideoAudioFileApi() { } const countTokensResponse = await ai.models.countTokens({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: createUserContent([ prompt, createPartFromUri(videoFile.uri, videoFile.mimeType), @@ -207,7 +207,7 @@ export async function tokensMultimodalVideoAudioFileApi() { console.log(countTokensResponse.totalTokens); const generateResponse = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: createUserContent([ prompt, createPartFromUri(videoFile.uri, videoFile.mimeType), @@ -233,7 +233,7 @@ export async function tokensMultimodalPdfFileApi() { }); const prompt = "Give me a summary of this document."; const countTokensResponse = await ai.models.countTokens({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: createUserContent([ prompt, createPartFromUri(samplePdf.uri, samplePdf.mimeType), @@ -242,7 +242,7 @@ export async function tokensMultimodalPdfFileApi() { console.log(countTokensResponse.totalTokens); const generateResponse = 
await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: createUserContent([ prompt, createPartFromUri(samplePdf.uri, samplePdf.mimeType), @@ -267,7 +267,7 @@ export async function tokensCachedContent() { }); const cache = await ai.caches.create({ - model: "gemini-1.5-flash-001", + model: "gemini-2.5-flash", config: { contents: createUserContent([ "Here the Apollo 11 transcript:", @@ -280,13 +280,13 @@ export async function tokensCachedContent() { const prompt = "Please give a short summary of this file."; const countTokensResponse = await ai.models.countTokens({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: prompt, }); console.log(countTokensResponse.totalTokens); const generateResponse = await ai.models.generateContent({ - model: "gemini-1.5-flash-001", + model: "gemini-2.5-flash", contents: prompt, config: { cachedContent: cache.name }, }); diff --git a/javascript/files.js b/javascript/files.js index 16a1ba1..3ff152b 100644 --- a/javascript/files.js +++ b/javascript/files.js @@ -42,7 +42,7 @@ export async function filesCreateText() { console.log("Uploaded file:", myfile); const result = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: createUserContent([ createPartFromUri(myfile.uri, myfile.mimeType), "\n\n", @@ -66,7 +66,7 @@ export async function filesCreateImage() { console.log("Uploaded file:", myfile); const result = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: createUserContent([ createPartFromUri(myfile.uri, myfile.mimeType), "\n\n", @@ -90,7 +90,7 @@ export async function filesCreateAudio() { console.log("Uploaded file:", myfile); const result = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: createUserContent([ createPartFromUri(myfile.uri, myfile.mimeType), "Describe this audio clip", @@ -121,7 +121,7 @@ export async function filesCreateVideo() { } const result = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: createUserContent([ createPartFromUri(myfile.uri, myfile.mimeType), "Describe this video clip", @@ -142,7 +142,7 @@ export async function filesCreatePdf() { config: { mimeType: "application/pdf" }, }); const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: createUserContent([ "Give me a summary of this pdf file.", createPartFromUri(samplePdf.uri, samplePdf.mimeType), diff --git a/javascript/function_calling.js b/javascript/function_calling.js index 350907e..47ecafd 100644 --- a/javascript/function_calling.js +++ b/javascript/function_calling.js @@ -145,7 +145,7 @@ export async function functionCalling() { // Step 1: Call generateContent with function calling enabled. const generateContentResponse = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: "I have 57 cats, each owns 44 mittens, how many mittens is that in total?", config: { @@ -194,7 +194,7 @@ export async function functionCalling() { console.log("Function result:", resultValue); // Step 4: Use the chat API to send the result as the final answer. 
- const chat = ai.chats.create({ model: "gemini-2.0-flash" }); + const chat = ai.chats.create({ model: "gemini-2.5-flash" }); const chatResponse = await chat.sendMessage({ message: "The final result is " + resultValue, }); diff --git a/javascript/safety_settings.js b/javascript/safety_settings.js index 0d6807a..ade29d1 100644 --- a/javascript/safety_settings.js +++ b/javascript/safety_settings.js @@ -26,7 +26,7 @@ export async function safetySettings() { "I support Martians Soccer Club and I think Jupiterians Football Club sucks! Write a ironic phrase about them including expletives."; const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: unsafePrompt, config: { safetySettings: [ @@ -53,7 +53,7 @@ export async function safetySettingsMulti() { "I support Martians Soccer Club and I think Jupiterians Football Club sucks! Write a ironic phrase about them including expletives."; const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: unsafePrompt, config: { safetySettings: [ diff --git a/javascript/system_instruction.js b/javascript/system_instruction.js index ba50de5..bc526ef 100644 --- a/javascript/system_instruction.js +++ b/javascript/system_instruction.js @@ -23,7 +23,7 @@ export async function systemInstruction() { // import {GoogleGenAI} from '@google/genai'; const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY }); const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: "Good morning! How are you?", config: { systemInstruction: "You are a cat. Your name is Neko.", diff --git a/javascript/text_generation.js b/javascript/text_generation.js index a2abd38..24d1e9b 100644 --- a/javascript/text_generation.js +++ b/javascript/text_generation.js @@ -38,7 +38,7 @@ export async function textGenTextOnlyPrompt() { const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY }); const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: "Write a story about a magic backpack.", }); console.log(response.text); @@ -53,7 +53,7 @@ export async function textGenTextOnlyPromptStreaming() { const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY }); const response = await ai.models.generateContentStream({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: "Write a story about a magic backpack.", }); let text = ""; @@ -76,7 +76,7 @@ export async function textGenMultimodalOneImagePrompt() { }); const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: [ createUserContent([ "Tell me about this instrument", @@ -100,7 +100,7 @@ export async function textGenMultimodalOneImagePromptStreaming() { }); const response = await ai.models.generateContentStream({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: [ createUserContent([ "Tell me about this instrument", @@ -133,7 +133,7 @@ export async function textGenMultimodalMultiImagePrompt() { }); const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: [ createUserContent([ "What is the difference between both of these instruments?", @@ -162,7 +162,7 @@ export async function textGenMultimodalMultiImagePromptStreaming() { }); const response = await ai.models.generateContentStream({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: [ 
createUserContent([ "What is the difference between both of these instruments?", @@ -191,7 +191,7 @@ export async function textGenMultimodalAudio() { }); const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: [ createUserContent([ "Give me a summary of this audio file.", @@ -215,7 +215,7 @@ export async function textGenMultimodalAudioStreaming() { }); const response = await ai.models.generateContentStream({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: [ createUserContent([ "Give me a summary of this audio file.", @@ -251,7 +251,7 @@ export async function textGenMultimodalVideoPrompt() { } const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: [ createUserContent([ "Describe this video clip", @@ -283,7 +283,7 @@ export async function textGenMultimodalVideoPromptStreaming() { } const response = await ai.models.generateContentStream({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: [ createUserContent([ "Describe this video clip", @@ -311,7 +311,7 @@ export async function textGenMultimodalPdf() { }); const response = await ai.models.generateContent({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: [ createUserContent([ "Give me a summary of this document:", @@ -335,7 +335,7 @@ export async function textGenMultimodalPdfStreaming() { }); const response = await ai.models.generateContentStream({ - model: "gemini-2.0-flash", + model: "gemini-2.5-flash", contents: [ createUserContent([ "Give me a summary of this document:", diff --git a/javascript/thinking_generation.js b/javascript/thinking_generation.js index da05f5d..ab73c42 100644 --- a/javascript/thinking_generation.js +++ b/javascript/thinking_generation.js @@ -23,7 +23,7 @@ if (!process.env.GEMINI_API_KEY) { } // Define the thinking model centrally -const MODEL_ID = "gemini-2.5-pro-exp-03-25"; +const MODEL_ID = "gemini-2.5-pro"; const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY }); diff --git a/python/cache.py b/python/cache.py index 38098bb..6139185 100644 --- a/python/cache.py +++ b/python/cache.py @@ -27,7 +27,7 @@ def test_cache_create(self): client = genai.Client() document = client.files.upload(file=media / "a11.txt") - model_name = "gemini-1.5-flash-001" + model_name = "gemini-2.5-flash" cache = client.caches.create( model=model_name, @@ -54,7 +54,7 @@ def test_cache_create_from_name(self): client = genai.Client() document = client.files.upload(file=media / "a11.txt") - model_name = "gemini-1.5-flash-001" + model_name = "gemini-2.5-flash" cache = client.caches.create( model=model_name, @@ -82,7 +82,7 @@ def test_cache_create_from_chat(self): from google.genai import types client = genai.Client() - model_name = "gemini-1.5-flash-001" + model_name = "gemini-2.5-flash" system_instruction = "You are an expert analyzing transcripts." # Create a chat session with the given system instruction. 
@@ -127,7 +127,7 @@ def test_cache_delete(self): client = genai.Client() document = client.files.upload(file=media / "a11.txt") - model_name = "gemini-1.5-flash-001" + model_name = "gemini-2.5-flash" cache = client.caches.create( model=model_name, @@ -145,7 +145,7 @@ def test_cache_get(self): client = genai.Client() document = client.files.upload(file=media / "a11.txt") - model_name = "gemini-1.5-flash-001" + model_name = "gemini-2.5-flash" cache = client.caches.create( model=model_name, @@ -164,7 +164,7 @@ def test_cache_list(self): client = genai.Client() document = client.files.upload(file=media / "a11.txt") - model_name = "gemini-1.5-flash-001" + model_name = "gemini-2.5-flash" cache = client.caches.create( model=model_name, @@ -187,7 +187,7 @@ def test_cache_update(self): client = genai.Client() document = client.files.upload(file=media / "a11.txt") - model_name = "gemini-1.5-flash-001" + model_name = "gemini-2.5-flash" cache = client.caches.create( model=model_name, diff --git a/python/chat.py b/python/chat.py index 90b3a8c..7e0ae8a 100644 --- a/python/chat.py +++ b/python/chat.py @@ -28,7 +28,7 @@ def test_chat(self): client = genai.Client() # Pass initial history using the "history" argument chat = client.chats.create( - model="gemini-2.0-flash", + model="gemini-2.5-flash", history=[ types.Content(role="user", parts=[types.Part(text="Hello")]), types.Content( @@ -54,7 +54,7 @@ def test_chat_streaming(self): client = genai.Client() chat = client.chats.create( - model="gemini-2.0-flash", + model="gemini-2.5-flash", history=[ types.Content(role="user", parts=[types.Part(text="Hello")]), types.Content( @@ -84,7 +84,7 @@ def test_chat_streaming_with_images(self): from google import genai client = genai.Client() - chat = client.chats.create(model="gemini-2.0-flash") + chat = client.chats.create(model="gemini-2.5-flash") response = chat.send_message_stream( message="Hello, I'm interested in learning about musical instruments. Can I show you one?" diff --git a/python/code_execution.py b/python/code_execution.py index fdad6d6..2a4b86f 100644 --- a/python/code_execution.py +++ b/python/code_execution.py @@ -24,7 +24,7 @@ def test_code_execution_basic(self): client = genai.Client() response = client.models.generate_content( - model="gemini-2.0-pro-exp-02-05", + model="gemini-2.5-pro", contents=( "Write and execute code that calculates the sum of the first 50 prime numbers. " "Ensure that only the executable code and its resulting output are generated." @@ -80,7 +80,7 @@ def test_code_execution_request_override(self): client = genai.Client() response = client.models.generate_content( - model="gemini-2.0-flash", + model="gemini-2.5-flash", contents=( "What is the sum of the first 50 prime numbers? " "Generate and run code for the calculation, and make sure you get all 50." 
@@ -140,7 +140,7 @@ def test_code_execution_chat(self):
         client = genai.Client()
         chat = client.chats.create(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             config=types.GenerateContentConfig(
                 tools=[types.Tool(code_execution=types.ToolCodeExecution())],
             ),
diff --git a/python/configure_model_parameters.py b/python/configure_model_parameters.py
index 6dc7ea1..48726ee 100644
--- a/python/configure_model_parameters.py
+++ b/python/configure_model_parameters.py
@@ -24,7 +24,7 @@ def test_configure_model_parameters(self):
         client = genai.Client()
         response = client.models.generate_content(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             contents="Tell me a story about a magic backpack.",
             config=types.GenerateContentConfig(
                 candidate_count=1,
diff --git a/python/controlled_generation.py b/python/controlled_generation.py
index 468b28b..b3e2b32 100644
--- a/python/controlled_generation.py
+++ b/python/controlled_generation.py
@@ -32,7 +32,7 @@ class Recipe(TypedDict):
         client = genai.Client()
         result = client.models.generate_content(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             contents="List a few popular cookie recipes.",
             config=types.GenerateContentConfig(
                 response_mime_type="application/json", response_schema=list[Recipe]
@@ -53,7 +53,7 @@ def test_json_no_schema(self):
             "Return: list[Recipe]"
         )
         result = client.models.generate_content(
-            model="gemini-2.0-flash", contents=prompt
+            model="gemini-2.5-flash", contents=prompt
         )
         print(result)
         # [END json_no_schema]
@@ -74,7 +74,7 @@ class Choice(enum.Enum):
         client = genai.Client()
         organ = client.files.upload(file=media / "organ.jpg")
         result = client.models.generate_content(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             contents=["What kind of instrument is this:", organ],
             config=types.GenerateContentConfig(
                 response_mime_type="application/json", response_schema=Choice
@@ -104,7 +104,7 @@ class Recipe(TypedDict):
         client = genai.Client()
         result = client.models.generate_content(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             contents="List about 10 cookie recipes, grade them based on popularity",
             config=types.GenerateContentConfig(
                 response_mime_type="application/json", response_schema=list[Recipe]
@@ -122,7 +122,7 @@ def test_json_enum_raw(self):
         client = genai.Client()
         organ = client.files.upload(file=media / "organ.jpg")
         result = client.models.generate_content(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             contents=["What kind of instrument is this:", organ],
             config=types.GenerateContentConfig(
                 response_mime_type="application/json",
@@ -151,7 +151,7 @@ class Choice(enum.Enum):
         client = genai.Client()
         organ = client.files.upload(file=media / "organ.jpg")
         result = client.models.generate_content(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             contents=["What kind of instrument is this:", organ],
             config=types.GenerateContentConfig(
                 response_mime_type="text/x.enum", response_schema=Choice
@@ -168,7 +168,7 @@ def test_x_enum_raw(self):
         client = genai.Client()
         organ = client.files.upload(file=media / "organ.jpg")
         result = client.models.generate_content(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             contents=["What kind of instrument is this:", organ],
             config=types.GenerateContentConfig(
                 response_mime_type="text/x.enum",
diff --git a/python/count_tokens.py b/python/count_tokens.py
index 484a63d..9dc8a30 100644
--- a/python/count_tokens.py
+++ b/python/count_tokens.py
@@ -25,7 +25,7 @@ def test_tokens_context_window(self):
         from google import genai
         client = genai.Client()
-        model_info = client.models.get(model="gemini-2.0-flash")
+        model_info = client.models.get(model="gemini-2.5-flash")
         print(f"{model_info.input_token_limit=}")
         print(f"{model_info.output_token_limit=}")
         # ( e.g., input_token_limit=30720, output_token_limit=2048 )
@@ -40,13 +40,13 @@ def test_tokens_text_only(self):
         # Count tokens using the new client method.
         total_tokens = client.models.count_tokens(
-            model="gemini-2.0-flash", contents=prompt
+            model="gemini-2.5-flash", contents=prompt
         )
         print("total_tokens: ", total_tokens)
         # ( e.g., total_tokens: 10 )
         response = client.models.generate_content(
-            model="gemini-2.0-flash", contents=prompt
+            model="gemini-2.5-flash", contents=prompt
         )
         # The usage_metadata provides detailed token counts.
@@ -62,7 +62,7 @@ def test_tokens_chat(self):
         client = genai.Client()
         chat = client.chats.create(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             history=[
                 types.Content(
                     role="user", parts=[types.Part(text="Hi my name is Bob")]
@@ -73,7 +73,7 @@ def test_tokens_chat(self):
         # Count tokens for the chat history.
         print(
             client.models.count_tokens(
-                model="gemini-2.0-flash", contents=chat.get_history()
+                model="gemini-2.5-flash", contents=chat.get_history()
             )
         )
         # ( e.g., total_tokens: 10 )
@@ -94,7 +94,7 @@ def test_tokens_chat(self):
         )
         history = chat.get_history()
         history.append(extra)
-        print(client.models.count_tokens(model="gemini-2.0-flash", contents=history))
+        print(client.models.count_tokens(model="gemini-2.5-flash", contents=history))
         # ( e.g., total_tokens: 56 )
         # [END tokens_chat]
@@ -110,13 +110,13 @@ def test_tokens_multimodal_image_inline(self):
         # Count tokens for combined text and inline image.
         print(
             client.models.count_tokens(
-                model="gemini-2.0-flash", contents=[prompt, your_image_file]
+                model="gemini-2.5-flash", contents=[prompt, your_image_file]
             )
         )
         # ( e.g., total_tokens: 263 )
         response = client.models.generate_content(
-            model="gemini-2.0-flash", contents=[prompt, your_image_file]
+            model="gemini-2.5-flash", contents=[prompt, your_image_file]
         )
         print(response.usage_metadata)
         # ( e.g., prompt_token_count: 264, candidates_token_count: 80, total_token_count: 345 )
@@ -132,13 +132,13 @@ def test_tokens_multimodal_image_file_api(self):
         print(
             client.models.count_tokens(
-                model="gemini-2.0-flash", contents=[prompt, your_image_file]
+                model="gemini-2.5-flash", contents=[prompt, your_image_file]
             )
         )
         # ( e.g., total_tokens: 263 )
         response = client.models.generate_content(
-            model="gemini-2.0-flash", contents=[prompt, your_image_file]
+            model="gemini-2.5-flash", contents=[prompt, your_image_file]
         )
         print(response.usage_metadata)
         # ( e.g., prompt_token_count: 264, candidates_token_count: 80, total_token_count: 345 )
@@ -162,13 +162,13 @@ def test_tokens_multimodal_video_audio_file_api(self):
         print(
             client.models.count_tokens(
-                model="gemini-2.0-flash", contents=[prompt, your_file]
+                model="gemini-2.5-flash", contents=[prompt, your_file]
             )
         )
         # ( e.g., total_tokens: 300 )
         response = client.models.generate_content(
-            model="gemini-2.0-flash", contents=[prompt, your_file]
+            model="gemini-2.5-flash", contents=[prompt, your_file]
         )
         print(response.usage_metadata)
         # ( e.g., prompt_token_count: 301, candidates_token_count: 60, total_token_count: 361 )
@@ -181,13 +181,13 @@ def test_tokens_multimodal_pdf_file_api(self):
         client = genai.Client()
         sample_pdf = client.files.upload(file=media / "test.pdf")
         token_count = client.models.count_tokens(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             contents=["Give me a summary of this document.", sample_pdf],
         )
print(f"{token_count=}") response = client.models.generate_content( - model="gemini-2.0-flash", + model="gemini-2.5-flash", contents=["Give me a summary of this document.", sample_pdf], ) print(response.usage_metadata) @@ -203,7 +203,7 @@ def test_tokens_cached_content(self): your_file = client.files.upload(file=media / "a11.txt") cache = client.caches.create( - model="gemini-1.5-flash-001", + model="gemini-2.5-flash", config={ "contents": ["Here the Apollo 11 transcript:", your_file], "system_instruction": None, @@ -215,11 +215,11 @@ def test_tokens_cached_content(self): prompt = "Please give a short summary of this file." # Count tokens for the prompt (the cached content is not passed here). - print(client.models.count_tokens(model="gemini-2.0-flash", contents=prompt)) + print(client.models.count_tokens(model="gemini-2.5-flash", contents=prompt)) # ( e.g., total_tokens: 9 ) response = client.models.generate_content( - model="gemini-1.5-flash-001", + model="gemini-2.5-flash", contents=prompt, config=types.GenerateContentConfig( cached_content=cache.name, diff --git a/python/files.py b/python/files.py index 3c44083..ab3a554 100644 --- a/python/files.py +++ b/python/files.py @@ -29,7 +29,7 @@ def test_files_create_text(self): print(f"{myfile=}") result = client.models.generate_content( - model="gemini-2.0-flash", + model="gemini-2.5-flash", contents=[myfile, "\n\n", "Can you add a few more lines to this poem?"], ) print(f"{result.text=}") @@ -44,7 +44,7 @@ def test_files_create_image(self): print(f"{myfile=}") result = client.models.generate_content( - model="gemini-2.0-flash", + model="gemini-2.5-flash", contents=[ myfile, "\n\n", @@ -63,7 +63,7 @@ def test_files_create_audio(self): print(f"{myfile=}") result = client.models.generate_content( - model="gemini-2.0-flash", contents=[myfile, "Describe this audio clip"] + model="gemini-2.5-flash", contents=[myfile, "Describe this audio clip"] ) print(f"{result.text=}") # [END files_create_audio] @@ -86,7 +86,7 @@ def test_files_create_video(self): myfile = client.files.get(name=myfile.name) result = client.models.generate_content( - model="gemini-2.0-flash", contents=[myfile, "Describe this video clip"] + model="gemini-2.5-flash", contents=[myfile, "Describe this video clip"] ) print(f"{result.text=}") # [END files_create_video] @@ -98,7 +98,7 @@ def test_files_create_pdf(self): client = genai.Client() sample_pdf = client.files.upload(file=media / "test.pdf") response = client.models.generate_content( - model="gemini-2.0-flash", + model="gemini-2.5-flash", contents=["Give me a summary of this pdf file.", sample_pdf], ) print(response.text) @@ -116,7 +116,7 @@ def test_files_create_from_IO(self): file=f, config=types.UploadFileConfig(mime_type="application/pdf") ) response = client.models.generate_content( - model="gemini-2.0-flash", + model="gemini-2.5-flash", contents=["Give me a summary of this pdf file.", sample_pdf], ) print(response.text) @@ -156,7 +156,7 @@ def test_files_delete(self): try: result = client.models.generate_content( - model="gemini-2.0-flash", contents=[myfile, "Describe this file."] + model="gemini-2.5-flash", contents=[myfile, "Describe this file."] ) print(result) except genai.errors.ClientError: diff --git a/python/function_calling.py b/python/function_calling.py index 87f3bd1..411c1e4 100644 --- a/python/function_calling.py +++ b/python/function_calling.py @@ -42,7 +42,7 @@ def divide(a: float, b: float) -> float: # Create a chat session; function calling (via tools) is enabled in the config. 
         chat = client.chats.create(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             config=types.GenerateContentConfig(tools=[add, subtract, multiply, divide]),
         )
         response = chat.send_message(
diff --git a/python/models.py b/python/models.py
index c6cdf4d..71019ec 100644
--- a/python/models.py
+++ b/python/models.py
@@ -41,7 +41,7 @@ def test_models_get(self):
         from google import genai
         client = genai.Client()
-        model_info = client.models.get(model="gemini-2.0-flash")
+        model_info = client.models.get(model="gemini-2.5-flash")
         print(model_info)
         # [END models_get]
diff --git a/python/safety_settings.py b/python/safety_settings.py
index ef088f5..841a270 100644
--- a/python/safety_settings.py
+++ b/python/safety_settings.py
@@ -29,7 +29,7 @@ def test_safety_settings(self):
         )
         # Set safety_settings for a single category using a list of SafetySetting.
         response = client.models.generate_content(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             contents=unsafe_prompt,
             config=types.GenerateContentConfig(
                 safety_settings=[
@@ -54,7 +54,7 @@ def test_safety_settings_multi(self):
             "Write a ironic phrase about them including expletives."
         )
         response = client.models.generate_content(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             contents=unsafe_prompt,
             config=types.GenerateContentConfig(
                 safety_settings=[
diff --git a/python/system_instruction.py b/python/system_instruction.py
index 2047e40..545a68a 100644
--- a/python/system_instruction.py
+++ b/python/system_instruction.py
@@ -24,7 +24,7 @@ def test_system_instruction(self):
         client = genai.Client()
         response = client.models.generate_content(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             contents="Good morning! How are you?",
             config=types.GenerateContentConfig(
                 system_instruction="You are a cat. Your name is Neko."
diff --git a/python/text_generation.py b/python/text_generation.py
index f9da772..a9ced90 100644
--- a/python/text_generation.py
+++ b/python/text_generation.py
@@ -27,7 +27,7 @@ def test_text_gen_text_only_prompt(self):
         client = genai.Client()
         response = client.models.generate_content(
-            model="gemini-2.0-flash", contents="Write a story about a magic backpack."
+            model="gemini-2.5-flash", contents="Write a story about a magic backpack."
         )
         print(response.text)
         # [END text_gen_text_only_prompt]
@@ -38,7 +38,7 @@ def test_text_gen_text_only_prompt_streaming(self):
         client = genai.Client()
         response = client.models.generate_content_stream(
-            model="gemini-2.0-flash", contents="Write a story about a magic backpack."
+            model="gemini-2.5-flash", contents="Write a story about a magic backpack."
         )
         for chunk in response:
             print(chunk.text)
@@ -53,7 +53,7 @@ def test_text_gen_multimodal_one_image_prompt(self):
         client = genai.Client()
         organ = PIL.Image.open(media / "organ.jpg")
         response = client.models.generate_content(
-            model="gemini-2.0-flash", contents=["Tell me about this instrument", organ]
+            model="gemini-2.5-flash", contents=["Tell me about this instrument", organ]
         )
         print(response.text)
         # [END text_gen_multimodal_one_image_prompt]
@@ -66,7 +66,7 @@ def test_text_gen_multimodal_one_image_prompt_streaming(self):
         client = genai.Client()
         organ = PIL.Image.open(media / "organ.jpg")
         response = client.models.generate_content_stream(
-            model="gemini-2.0-flash", contents=["Tell me about this instrument", organ]
+            model="gemini-2.5-flash", contents=["Tell me about this instrument", organ]
         )
         for chunk in response:
             print(chunk.text)
@@ -82,7 +82,7 @@ def test_text_gen_multimodal_multi_image_prompt(self):
         organ = PIL.Image.open(media / "organ.jpg")
         cajun_instrument = PIL.Image.open(media / "Cajun_instruments.jpg")
         response = client.models.generate_content(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             contents=[
                 "What is the difference between both of these instruments?",
                 organ,
@@ -101,7 +101,7 @@ def test_text_gen_multimodal_multi_image_prompt_streaming(self):
         organ = PIL.Image.open(media / "organ.jpg")
         cajun_instrument = PIL.Image.open(media / "Cajun_instruments.jpg")
         response = client.models.generate_content_stream(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             contents=[
                 "What is the difference between both of these instruments?",
                 organ,
@@ -120,7 +120,7 @@ def test_text_gen_multimodal_audio(self):
         client = genai.Client()
         sample_audio = client.files.upload(file=media / "sample.mp3")
         response = client.models.generate_content(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             contents=["Give me a summary of this audio file.", sample_audio],
         )
         print(response.text)
@@ -133,7 +133,7 @@ def test_text_gen_multimodal_audio_streaming(self):
         client = genai.Client()
         sample_audio = client.files.upload(file=media / "sample.mp3")
         response = client.models.generate_content_stream(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             contents=["Give me a summary of this audio file.", sample_audio],
         )
         for chunk in response:
@@ -159,7 +159,7 @@ def test_text_gen_multimodal_video_prompt(self):
         myfile = client.files.get(name=myfile.name)
         response = client.models.generate_content(
-            model="gemini-2.0-flash", contents=[myfile, "Describe this video clip"]
+            model="gemini-2.5-flash", contents=[myfile, "Describe this video clip"]
         )
         print(f"{response.text=}")
         # [END text_gen_multimodal_video_prompt]
@@ -182,7 +182,7 @@ def test_text_gen_multimodal_video_prompt_streaming(self):
         myfile = client.files.get(name=myfile.name)
         response = client.models.generate_content_stream(
-            model="gemini-2.0-flash", contents=[myfile, "Describe this video clip"]
+            model="gemini-2.5-flash", contents=[myfile, "Describe this video clip"]
         )
         for chunk in response:
             print(chunk.text)
@@ -196,7 +196,7 @@ def test_text_gen_multimodal_pdf(self):
         client = genai.Client()
         sample_pdf = client.files.upload(file=media / "test.pdf")
         response = client.models.generate_content(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             contents=["Give me a summary of this document:", sample_pdf],
         )
         print(f"{response.text=}")
@@ -209,7 +209,7 @@ def test_text_gen_multimodal_pdf_streaming(self):
         client = genai.Client()
         sample_pdf = client.files.upload(file=media / "test.pdf")
         response = client.models.generate_content_stream(
-            model="gemini-2.0-flash",
+            model="gemini-2.5-flash",
             contents=["Give me a summary of this document:", sample_pdf],
         )
diff --git a/python/thinking_generation.py b/python/thinking_generation.py
index 920586c..4626aa7 100644
--- a/python/thinking_generation.py
+++ b/python/thinking_generation.py
@@ -16,7 +16,7 @@
 from absl.testing import absltest
 # Define the thinking model centrally
-MODEL_ID = "gemini-2.5-pro-exp-03-25"
+MODEL_ID = "gemini-2.5-pro"
 class ThinkingUnitTests(absltest.TestCase):