Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 33 additions & 0 deletions api_names_out.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7702,6 +7702,19 @@
"/aiplatform:v1/GoogleCloudAiplatformV1DynamicRetrievalConfig": google_cloud_aiplatform_v1_dynamic_retrieval_config
"/aiplatform:v1/GoogleCloudAiplatformV1DynamicRetrievalConfig/dynamicThreshold": dynamic_threshold
"/aiplatform:v1/GoogleCloudAiplatformV1DynamicRetrievalConfig/mode": mode
"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentRequest": google_cloud_aiplatform_v1_embed_content_request
"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentRequest/autoTruncate": auto_truncate
"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentRequest/content": content
"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentRequest/outputDimensionality": output_dimensionality
"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentRequest/taskType": task_type
"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentRequest/title": title
"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentResponse": google_cloud_aiplatform_v1_embed_content_response
"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentResponse/embedding": embedding
"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentResponse/truncated": truncated
"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentResponse/usageMetadata": usage_metadata
"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentResponseEmbedding": google_cloud_aiplatform_v1_embed_content_response_embedding
"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentResponseEmbedding/values": values
"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentResponseEmbedding/values/value": value
"/aiplatform:v1/GoogleCloudAiplatformV1EncryptionSpec": google_cloud_aiplatform_v1_encryption_spec
"/aiplatform:v1/GoogleCloudAiplatformV1EncryptionSpec/kmsKeyName": kms_key_name
"/aiplatform:v1/GoogleCloudAiplatformV1Endpoint": google_cloud_aiplatform_v1_endpoint
Expand Down Expand Up @@ -10055,6 +10068,8 @@
"/aiplatform:v1/GoogleCloudAiplatformV1PredictRequest": google_cloud_aiplatform_v1_predict_request
"/aiplatform:v1/GoogleCloudAiplatformV1PredictRequest/instances": instances
"/aiplatform:v1/GoogleCloudAiplatformV1PredictRequest/instances/instance": instance
"/aiplatform:v1/GoogleCloudAiplatformV1PredictRequest/labels": labels
"/aiplatform:v1/GoogleCloudAiplatformV1PredictRequest/labels/label": label
"/aiplatform:v1/GoogleCloudAiplatformV1PredictRequest/parameters": parameters
"/aiplatform:v1/GoogleCloudAiplatformV1PredictRequestResponseLoggingConfig": google_cloud_aiplatform_v1_predict_request_response_logging_config
"/aiplatform:v1/GoogleCloudAiplatformV1PredictRequestResponseLoggingConfig/bigqueryDestination": bigquery_destination
Expand Down Expand Up @@ -12444,6 +12459,22 @@
"/aiplatform:v1/GoogleCloudAiplatformV1UrlMetadata": google_cloud_aiplatform_v1_url_metadata
"/aiplatform:v1/GoogleCloudAiplatformV1UrlMetadata/retrievedUrl": retrieved_url
"/aiplatform:v1/GoogleCloudAiplatformV1UrlMetadata/urlRetrievalStatus": url_retrieval_status
"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata": google_cloud_aiplatform_v1_usage_metadata
"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/cacheTokensDetails": cache_tokens_details
"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/cacheTokensDetails/cache_tokens_detail": cache_tokens_detail
"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/cachedContentTokenCount": cached_content_token_count
"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/candidatesTokenCount": candidates_token_count
"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/candidatesTokensDetails": candidates_tokens_details
"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/candidatesTokensDetails/candidates_tokens_detail": candidates_tokens_detail
"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/promptTokenCount": prompt_token_count
"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/promptTokensDetails": prompt_tokens_details
"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/promptTokensDetails/prompt_tokens_detail": prompt_tokens_detail
"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/thoughtsTokenCount": thoughts_token_count
"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/toolUsePromptTokenCount": tool_use_prompt_token_count
"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/toolUsePromptTokensDetails": tool_use_prompt_tokens_details
"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/toolUsePromptTokensDetails/tool_use_prompt_tokens_detail": tool_use_prompt_tokens_detail
"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/totalTokenCount": total_token_count
"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/trafficType": traffic_type
"/aiplatform:v1/GoogleCloudAiplatformV1UserActionReference": google_cloud_aiplatform_v1_user_action_reference
"/aiplatform:v1/GoogleCloudAiplatformV1UserActionReference/dataLabelingJob": data_labeling_job
"/aiplatform:v1/GoogleCloudAiplatformV1UserActionReference/method": method_prop
Expand Down Expand Up @@ -14007,6 +14038,8 @@
"/aiplatform:v1/aiplatform.projects.locations.publishers.models.computeTokens/endpoint": endpoint
"/aiplatform:v1/aiplatform.projects.locations.publishers.models.countTokens": count_project_location_publisher_model_tokens
"/aiplatform:v1/aiplatform.projects.locations.publishers.models.countTokens/endpoint": endpoint
"/aiplatform:v1/aiplatform.projects.locations.publishers.models.embedContent": embed_project_location_publisher_model_content
"/aiplatform:v1/aiplatform.projects.locations.publishers.models.embedContent/model": model
"/aiplatform:v1/aiplatform.projects.locations.publishers.models.fetchPredictOperation": fetch_project_location_publisher_model_predict_operation
"/aiplatform:v1/aiplatform.projects.locations.publishers.models.fetchPredictOperation/endpoint": endpoint
"/aiplatform:v1/aiplatform.projects.locations.publishers.models.generateContent": generate_project_location_publisher_model_content
Expand Down
4 changes: 4 additions & 0 deletions generated/google-apis-aiplatform_v1/CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,9 @@
# Release history for google-apis-aiplatform_v1

### v0.70.0 (2025-10-26)

* Regenerated from discovery document revision 20251010

### v0.69.0 (2025-10-19)

* Regenerated from discovery document revision 20251003
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7448,6 +7448,107 @@ def update!(**args)
end
end

# Request message for PredictionService.EmbedContent.
class GoogleCloudAiplatformV1EmbedContentRequest
  include Google::Apis::Core::Hashable

  # All JSON-mapped fields of this request, used to drive update!.
  FIELDS = %i[auto_truncate content output_dimensionality task_type title].freeze

  # Optional. When true, input longer than the model's maximum sequence
  # length is silently truncated rather than rejected.
  # Corresponds to the JSON property `autoTruncate`
  # @return [Boolean]
  attr_accessor :auto_truncate
  alias_method :auto_truncate?, :auto_truncate

  # The base structured datatype containing multi-part content of a message.
  # A `Content` carries a `role` field designating the producer and a `parts`
  # field holding the multi-part data of the message turn.
  # Corresponds to the JSON property `content`
  # @return [Google::Apis::AiplatformV1::GoogleCloudAiplatformV1Content]
  attr_accessor :content

  # Optional. Reduced dimension for the output embedding; when set, excess
  # values at the end of the embedding are truncated.
  # Corresponds to the JSON property `outputDimensionality`
  # @return [Fixnum]
  attr_accessor :output_dimensionality

  # Optional. The task type of the embedding.
  # Corresponds to the JSON property `taskType`
  # @return [String]
  attr_accessor :task_type

  # Optional. An optional title for the text.
  # Corresponds to the JSON property `title`
  # @return [String]
  attr_accessor :title

  def initialize(**args)
    update!(**args)
  end

  # Copy each supplied keyword argument into its matching instance variable,
  # leaving fields whose keys are absent untouched.
  def update!(**args)
    FIELDS.each do |field|
      instance_variable_set(:"@#{field}", args[field]) if args.key?(field)
    end
  end
end

# Response message for PredictionService.EmbedContent.
class GoogleCloudAiplatformV1EmbedContentResponse
  include Google::Apis::Core::Hashable

  # A list of floats representing an embedding.
  # Corresponds to the JSON property `embedding`
  # @return [Google::Apis::AiplatformV1::GoogleCloudAiplatformV1EmbedContentResponseEmbedding]
  attr_accessor :embedding

  # Whether the input content was truncated before generating the embedding.
  # Corresponds to the JSON property `truncated`
  # @return [Boolean]
  attr_accessor :truncated
  alias_method :truncated?, :truncated

  # Usage metadata about the content generation request and response,
  # giving a detailed breakdown of token usage and related metrics.
  # Corresponds to the JSON property `usageMetadata`
  # @return [Google::Apis::AiplatformV1::GoogleCloudAiplatformV1UsageMetadata]
  attr_accessor :usage_metadata

  def initialize(**args)
    update!(**args)
  end

  # Copy each supplied keyword argument into its matching instance variable,
  # leaving fields whose keys are absent untouched.
  def update!(**args)
    %i[embedding truncated usage_metadata].each do |field|
      instance_variable_set(:"@#{field}", args[field]) if args.key?(field)
    end
  end
end

# A list of floats representing an embedding.
class GoogleCloudAiplatformV1EmbedContentResponseEmbedding
  include Google::Apis::Core::Hashable

  # Embedding vector values.
  # Corresponds to the JSON property `values`
  # @return [Array<Float>]
  attr_accessor :values

  def initialize(**args)
    update!(**args)
  end

  # Set @values only when the :values key was explicitly supplied,
  # leaving the field untouched otherwise.
  def update!(**args)
    @values = args[:values] if args.key?(:values)
  end
end

# Represents a customer-managed encryption key spec that can be applied to a top-
# level resource.
class GoogleCloudAiplatformV1EncryptionSpec
Expand Down Expand Up @@ -24753,6 +24854,12 @@ class GoogleCloudAiplatformV1PredictRequest
# @return [Array<Object>]
attr_accessor :instances

# Optional. The user labels for Imagen billing usage only. Only Imagen supports
# labels. For other use cases, it will be ignored.
# Corresponds to the JSON property `labels`
# @return [Hash<String,String>]
attr_accessor :labels

# The parameters that govern the prediction. The schema of the parameters may be
# specified via Endpoint's DeployedModels' Model's PredictSchemata's
# parameters_schema_uri.
Expand All @@ -24767,6 +24874,7 @@ def initialize(**args)
# Update properties of this object
def update!(**args)
@instances = args[:instances] if args.key?(:instances)
@labels = args[:labels] if args.key?(:labels)
@parameters = args[:parameters] if args.key?(:parameters)
end
end
Expand Down Expand Up @@ -42827,6 +42935,97 @@ def update!(**args)
end
end

# Usage metadata about the content generation request and response. This
# message provides a detailed breakdown of token usage and other relevant
# metrics.
class GoogleCloudAiplatformV1UsageMetadata
  include Google::Apis::Core::Hashable

  # All JSON-mapped fields of this message, used to drive update!.
  FIELDS = %i[
    cache_tokens_details cached_content_token_count candidates_token_count
    candidates_tokens_details prompt_token_count prompt_tokens_details
    thoughts_token_count tool_use_prompt_token_count
    tool_use_prompt_tokens_details total_token_count traffic_type
  ].freeze

  # Output only. A detailed breakdown of the token count for each modality
  # in the cached content.
  # Corresponds to the JSON property `cacheTokensDetails`
  # @return [Array<Google::Apis::AiplatformV1::GoogleCloudAiplatformV1ModalityTokenCount>]
  attr_accessor :cache_tokens_details

  # Output only. The number of tokens in the cached content that was used
  # for this request.
  # Corresponds to the JSON property `cachedContentTokenCount`
  # @return [Fixnum]
  attr_accessor :cached_content_token_count

  # The total number of tokens in the generated candidates.
  # Corresponds to the JSON property `candidatesTokenCount`
  # @return [Fixnum]
  attr_accessor :candidates_token_count

  # Output only. A detailed breakdown of the token count for each modality
  # in the generated candidates.
  # Corresponds to the JSON property `candidatesTokensDetails`
  # @return [Array<Google::Apis::AiplatformV1::GoogleCloudAiplatformV1ModalityTokenCount>]
  attr_accessor :candidates_tokens_details

  # The total number of tokens in the prompt, including any text, images, or
  # other media provided in the request. When `cached_content` is set, this
  # also includes the tokens in the cached content.
  # Corresponds to the JSON property `promptTokenCount`
  # @return [Fixnum]
  attr_accessor :prompt_token_count

  # Output only. A detailed breakdown of the token count for each modality
  # in the prompt.
  # Corresponds to the JSON property `promptTokensDetails`
  # @return [Array<Google::Apis::AiplatformV1::GoogleCloudAiplatformV1ModalityTokenCount>]
  attr_accessor :prompt_tokens_details

  # Output only. The number of tokens that were part of the model's
  # generated "thoughts" output, if applicable.
  # Corresponds to the JSON property `thoughtsTokenCount`
  # @return [Fixnum]
  attr_accessor :thoughts_token_count

  # Output only. The number of tokens in the results from tool executions,
  # which are provided back to the model as input, if applicable.
  # Corresponds to the JSON property `toolUsePromptTokenCount`
  # @return [Fixnum]
  attr_accessor :tool_use_prompt_token_count

  # Output only. A detailed breakdown by modality of the token counts from
  # the results of tool executions, which are provided back to the model as
  # input.
  # Corresponds to the JSON property `toolUsePromptTokensDetails`
  # @return [Array<Google::Apis::AiplatformV1::GoogleCloudAiplatformV1ModalityTokenCount>]
  attr_accessor :tool_use_prompt_tokens_details

  # The total number of tokens for the entire request: the sum of
  # `prompt_token_count`, `candidates_token_count`,
  # `tool_use_prompt_token_count`, and `thoughts_token_count`.
  # Corresponds to the JSON property `totalTokenCount`
  # @return [Fixnum]
  attr_accessor :total_token_count

  # Output only. The traffic type for this request.
  # Corresponds to the JSON property `trafficType`
  # @return [String]
  attr_accessor :traffic_type

  def initialize(**args)
    update!(**args)
  end

  # Copy each supplied keyword argument into its matching instance variable,
  # leaving fields whose keys are absent untouched.
  def update!(**args)
    FIELDS.each do |field|
      instance_variable_set(:"@#{field}", args[field]) if args.key?(field)
    end
  end
end

# References an API call. It contains more information about long running
# operation and Jobs that are triggered by the API call.
class GoogleCloudAiplatformV1UserActionReference
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,13 +16,13 @@ module Google
module Apis
module AiplatformV1
# Version of the google-apis-aiplatform_v1 gem
GEM_VERSION = "0.69.0"
GEM_VERSION = "0.70.0"

# Version of the code generator used to generate this client
GENERATOR_VERSION = "0.18.0"

# Revision of the discovery document this client was generated from
REVISION = "20251003"
REVISION = "20251010"
end
end
end
Loading