# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

from vertexai.generative_models import GenerationResponse

PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT")


def advanced_example() -> GenerationResponse:
    # TODO: <ADD-START-REGION-TAG-HERE>
    import vertexai
    from vertexai.generative_models import GenerativeModel, Part

    # TODO(developer): Update and un-comment below line
    # PROJECT_ID = "your-project-id"
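    # vertexai.init authenticates with Application Default Credentials
    # (for example, via `gcloud auth application-default login`).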
    vertexai.init(project=PROJECT_ID, location="us-central1")

    model = GenerativeModel("gemini-1.5-flash-002")

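    # Build a multimodal prompt: a video from a public Cloud Storage bucket
    # plus a text instruction. Part.from_uri passes the gs:// reference, so
    # the video bytes are not uploaded with the request.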
    contents = [
        Part.from_uri(
            "gs://cloud-samples-data/generative-ai/video/pixel8.mp4",
            mime_type="video/mp4",
        ),
        "Provide a description of the video.",
    ]

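    # count_tokens calls the CountTokens API; it does not generate content,
    # so it is an inexpensive way to check prompt size before a request.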
    # Token counts for the user prompt.
    response = model.count_tokens(contents)
    print(f"Prompt Token Count: {response.total_tokens}")
    print(f"Prompt Character Count: {response.total_billable_characters}")
    # Example response:
    # Prompt Token Count: 16822
    # Prompt Character Count: 30

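    # Note: total_billable_characters covers only the text in the prompt;
    # media inputs such as the video contribute tokens, not characters,
    # which is why the token count here dwarfs the character count.
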
    # Send the multimodal prompt to Gemini.
    response = model.generate_content(contents)
    usage_metadata = response.usage_metadata

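    # usage_metadata is attached to every generate_content response and
    # reports server-side token accounting, so no separate count_tokens
    # call is needed here.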
    # Token counts for the request and the model response.
    print(f"Prompt Token Count: {usage_metadata.prompt_token_count}")
    print(f"Candidates Token Count: {usage_metadata.candidates_token_count}")
    print(f"Total Token Count: {usage_metadata.total_token_count}")
    # Example response:
    # Prompt Token Count: 16822
    # Candidates Token Count: 71
    # Total Token Count: 16893

    # TODO: <ADD-END-REGION-TAG-HERE>
    return response


if __name__ == "__main__":
    advanced_example()