
Commit b1ed69d

feat: Automated regeneration of aiplatform v1 client (#24705)
Auto-created at 2025-10-26 09:26:18 +0000 using the toys pull request generator.
1 parent 8be9740 commit b1ed69d

File tree

6 files changed: +348, -2 lines


api_names_out.yaml

Lines changed: 33 additions & 0 deletions
@@ -7702,6 +7702,19 @@
 "/aiplatform:v1/GoogleCloudAiplatformV1DynamicRetrievalConfig": google_cloud_aiplatform_v1_dynamic_retrieval_config
 "/aiplatform:v1/GoogleCloudAiplatformV1DynamicRetrievalConfig/dynamicThreshold": dynamic_threshold
 "/aiplatform:v1/GoogleCloudAiplatformV1DynamicRetrievalConfig/mode": mode
+"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentRequest": google_cloud_aiplatform_v1_embed_content_request
+"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentRequest/autoTruncate": auto_truncate
+"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentRequest/content": content
+"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentRequest/outputDimensionality": output_dimensionality
+"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentRequest/taskType": task_type
+"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentRequest/title": title
+"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentResponse": google_cloud_aiplatform_v1_embed_content_response
+"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentResponse/embedding": embedding
+"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentResponse/truncated": truncated
+"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentResponse/usageMetadata": usage_metadata
+"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentResponseEmbedding": google_cloud_aiplatform_v1_embed_content_response_embedding
+"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentResponseEmbedding/values": values
+"/aiplatform:v1/GoogleCloudAiplatformV1EmbedContentResponseEmbedding/values/value": value
 "/aiplatform:v1/GoogleCloudAiplatformV1EncryptionSpec": google_cloud_aiplatform_v1_encryption_spec
 "/aiplatform:v1/GoogleCloudAiplatformV1EncryptionSpec/kmsKeyName": kms_key_name
 "/aiplatform:v1/GoogleCloudAiplatformV1Endpoint": google_cloud_aiplatform_v1_endpoint
@@ -10055,6 +10068,8 @@
 "/aiplatform:v1/GoogleCloudAiplatformV1PredictRequest": google_cloud_aiplatform_v1_predict_request
 "/aiplatform:v1/GoogleCloudAiplatformV1PredictRequest/instances": instances
 "/aiplatform:v1/GoogleCloudAiplatformV1PredictRequest/instances/instance": instance
+"/aiplatform:v1/GoogleCloudAiplatformV1PredictRequest/labels": labels
+"/aiplatform:v1/GoogleCloudAiplatformV1PredictRequest/labels/label": label
 "/aiplatform:v1/GoogleCloudAiplatformV1PredictRequest/parameters": parameters
 "/aiplatform:v1/GoogleCloudAiplatformV1PredictRequestResponseLoggingConfig": google_cloud_aiplatform_v1_predict_request_response_logging_config
 "/aiplatform:v1/GoogleCloudAiplatformV1PredictRequestResponseLoggingConfig/bigqueryDestination": bigquery_destination
@@ -12444,6 +12459,22 @@
 "/aiplatform:v1/GoogleCloudAiplatformV1UrlMetadata": google_cloud_aiplatform_v1_url_metadata
 "/aiplatform:v1/GoogleCloudAiplatformV1UrlMetadata/retrievedUrl": retrieved_url
 "/aiplatform:v1/GoogleCloudAiplatformV1UrlMetadata/urlRetrievalStatus": url_retrieval_status
+"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata": google_cloud_aiplatform_v1_usage_metadata
+"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/cacheTokensDetails": cache_tokens_details
+"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/cacheTokensDetails/cache_tokens_detail": cache_tokens_detail
+"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/cachedContentTokenCount": cached_content_token_count
+"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/candidatesTokenCount": candidates_token_count
+"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/candidatesTokensDetails": candidates_tokens_details
+"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/candidatesTokensDetails/candidates_tokens_detail": candidates_tokens_detail
+"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/promptTokenCount": prompt_token_count
+"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/promptTokensDetails": prompt_tokens_details
+"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/promptTokensDetails/prompt_tokens_detail": prompt_tokens_detail
+"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/thoughtsTokenCount": thoughts_token_count
+"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/toolUsePromptTokenCount": tool_use_prompt_token_count
+"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/toolUsePromptTokensDetails": tool_use_prompt_tokens_details
+"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/toolUsePromptTokensDetails/tool_use_prompt_tokens_detail": tool_use_prompt_tokens_detail
+"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/totalTokenCount": total_token_count
+"/aiplatform:v1/GoogleCloudAiplatformV1UsageMetadata/trafficType": traffic_type
 "/aiplatform:v1/GoogleCloudAiplatformV1UserActionReference": google_cloud_aiplatform_v1_user_action_reference
 "/aiplatform:v1/GoogleCloudAiplatformV1UserActionReference/dataLabelingJob": data_labeling_job
 "/aiplatform:v1/GoogleCloudAiplatformV1UserActionReference/method": method_prop
@@ -14007,6 +14038,8 @@
 "/aiplatform:v1/aiplatform.projects.locations.publishers.models.computeTokens/endpoint": endpoint
 "/aiplatform:v1/aiplatform.projects.locations.publishers.models.countTokens": count_project_location_publisher_model_tokens
 "/aiplatform:v1/aiplatform.projects.locations.publishers.models.countTokens/endpoint": endpoint
+"/aiplatform:v1/aiplatform.projects.locations.publishers.models.embedContent": embed_project_location_publisher_model_content
+"/aiplatform:v1/aiplatform.projects.locations.publishers.models.embedContent/model": model
 "/aiplatform:v1/aiplatform.projects.locations.publishers.models.fetchPredictOperation": fetch_project_location_publisher_model_predict_operation
 "/aiplatform:v1/aiplatform.projects.locations.publishers.models.fetchPredictOperation/endpoint": endpoint
 "/aiplatform:v1/aiplatform.projects.locations.publishers.models.generateContent": generate_project_location_publisher_model_content

generated/google-apis-aiplatform_v1/CHANGELOG.md

Lines changed: 4 additions & 0 deletions
@@ -1,5 +1,9 @@
 # Release history for google-apis-aiplatform_v1
 
+### v0.70.0 (2025-10-26)
+
+* Regenerated from discovery document revision 20251010
+
 ### v0.69.0 (2025-10-19)
 
 * Regenerated from discovery document revision 20251003

generated/google-apis-aiplatform_v1/lib/google/apis/aiplatform_v1/classes.rb

Lines changed: 199 additions & 0 deletions
@@ -7448,6 +7448,107 @@ def update!(**args)
         end
       end
 
+      # Request message for PredictionService.EmbedContent.
+      class GoogleCloudAiplatformV1EmbedContentRequest
+        include Google::Apis::Core::Hashable
+
+        # Optional. Whether to silently truncate the input content if it's longer than
+        # the maximum sequence length.
+        # Corresponds to the JSON property `autoTruncate`
+        # @return [Boolean]
+        attr_accessor :auto_truncate
+        alias_method :auto_truncate?, :auto_truncate
+
+        # The base structured datatype containing multi-part content of a message. A `
+        # Content` includes a `role` field designating the producer of the `Content` and
+        # a `parts` field containing multi-part data that contains the content of the
+        # message turn.
+        # Corresponds to the JSON property `content`
+        # @return [Google::Apis::AiplatformV1::GoogleCloudAiplatformV1Content]
+        attr_accessor :content
+
+        # Optional. Optional reduced dimension for the output embedding. If set,
+        # excessive values in the output embedding are truncated from the end.
+        # Corresponds to the JSON property `outputDimensionality`
+        # @return [Fixnum]
+        attr_accessor :output_dimensionality
+
+        # Optional. The task type of the embedding.
+        # Corresponds to the JSON property `taskType`
+        # @return [String]
+        attr_accessor :task_type
+
+        # Optional. An optional title for the text.
+        # Corresponds to the JSON property `title`
+        # @return [String]
+        attr_accessor :title
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @auto_truncate = args[:auto_truncate] if args.key?(:auto_truncate)
+          @content = args[:content] if args.key?(:content)
+          @output_dimensionality = args[:output_dimensionality] if args.key?(:output_dimensionality)
+          @task_type = args[:task_type] if args.key?(:task_type)
+          @title = args[:title] if args.key?(:title)
+        end
+      end
+
+      # Response message for PredictionService.EmbedContent.
+      class GoogleCloudAiplatformV1EmbedContentResponse
+        include Google::Apis::Core::Hashable
+
+        # A list of floats representing an embedding.
+        # Corresponds to the JSON property `embedding`
+        # @return [Google::Apis::AiplatformV1::GoogleCloudAiplatformV1EmbedContentResponseEmbedding]
+        attr_accessor :embedding
+
+        # Whether the input content was truncated before generating the embedding.
+        # Corresponds to the JSON property `truncated`
+        # @return [Boolean]
+        attr_accessor :truncated
+        alias_method :truncated?, :truncated
+
+        # Usage metadata about the content generation request and response. This message
+        # provides a detailed breakdown of token usage and other relevant metrics.
+        # Corresponds to the JSON property `usageMetadata`
+        # @return [Google::Apis::AiplatformV1::GoogleCloudAiplatformV1UsageMetadata]
+        attr_accessor :usage_metadata
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @embedding = args[:embedding] if args.key?(:embedding)
+          @truncated = args[:truncated] if args.key?(:truncated)
+          @usage_metadata = args[:usage_metadata] if args.key?(:usage_metadata)
+        end
+      end
+
+      # A list of floats representing an embedding.
+      class GoogleCloudAiplatformV1EmbedContentResponseEmbedding
+        include Google::Apis::Core::Hashable
+
+        # Embedding vector values.
+        # Corresponds to the JSON property `values`
+        # @return [Array<Float>]
+        attr_accessor :values
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @values = args[:values] if args.key?(:values)
+        end
+      end
+
       # Represents a customer-managed encryption key spec that can be applied to a top-
       # level resource.
       class GoogleCloudAiplatformV1EncryptionSpec
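
For orientation, here is a minimal sketch of how the new request and response classes in the hunk above might be used. The accessor names come straight from the generated code; the Content/Part construction, the task type value, and the sample numbers are illustrative assumptions, and in practice the response object would come back from the embedContent call rather than being built by hand.

require "google/apis/aiplatform_v1"

# Build an embedding request; the snake_case accessors mirror the camelCase
# JSON properties documented in the generated comments above.
request = Google::Apis::AiplatformV1::GoogleCloudAiplatformV1EmbedContentRequest.new(
  content: Google::Apis::AiplatformV1::GoogleCloudAiplatformV1Content.new(
    parts: [Google::Apis::AiplatformV1::GoogleCloudAiplatformV1Part.new(text: "What is Vertex AI?")]
  ),
  auto_truncate: true,           # silently truncate over-long input
  output_dimensionality: 256,    # optionally shorten the returned vector
  task_type: "RETRIEVAL_QUERY",  # illustrative value; see the API docs for allowed task types
  title: "FAQ entry"
)

# A response constructed locally, purely to show the field shapes.
response = Google::Apis::AiplatformV1::GoogleCloudAiplatformV1EmbedContentResponse.new(
  embedding: Google::Apis::AiplatformV1::GoogleCloudAiplatformV1EmbedContentResponseEmbedding.new(
    values: [0.12, -0.04, 0.33]
  ),
  truncated: false
)

vector = response.embedding.values               # Array<Float>
warn "input was truncated" if response.truncated?
puts "requested #{request.output_dimensionality} dimensions, got #{vector.size} values"
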
@@ -24753,6 +24854,12 @@ class GoogleCloudAiplatformV1PredictRequest
         # @return [Array<Object>]
         attr_accessor :instances
 
+        # Optional. The user labels for Imagen billing usage only. Only Imagen supports
+        # labels. For other use cases, it will be ignored.
+        # Corresponds to the JSON property `labels`
+        # @return [Hash<String,String>]
+        attr_accessor :labels
+
         # The parameters that govern the prediction. The schema of the parameters may be
         # specified via Endpoint's DeployedModels' Model's PredictSchemata's
         # parameters_schema_uri.
@@ -24767,6 +24874,7 @@ def initialize(**args)
         # Update properties of this object
         def update!(**args)
           @instances = args[:instances] if args.key?(:instances)
+          @labels = args[:labels] if args.key?(:labels)
           @parameters = args[:parameters] if args.key?(:parameters)
         end
       end
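
A short sketch of the new `labels` field on PredictRequest added in the two hunks above. Per the generated comment, the labels are used for Imagen billing only and are ignored elsewhere; the instance payload and label values here are placeholders.

require "google/apis/aiplatform_v1"

predict_request = Google::Apis::AiplatformV1::GoogleCloudAiplatformV1PredictRequest.new(
  # Instances are free-form JSON objects (Array<Object>), shaped by the target model.
  instances: [{ "prompt" => "a watercolor painting of a lighthouse" }],
  # New in this revision: user labels, for Imagen billing attribution only.
  labels: { "team" => "marketing", "campaign" => "spring-launch" }
)

puts predict_request.labels.inspect
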
@@ -42827,6 +42935,97 @@ def update!(**args)
         end
       end
 
+      # Usage metadata about the content generation request and response. This message
+      # provides a detailed breakdown of token usage and other relevant metrics.
+      class GoogleCloudAiplatformV1UsageMetadata
+        include Google::Apis::Core::Hashable
+
+        # Output only. A detailed breakdown of the token count for each modality in the
+        # cached content.
+        # Corresponds to the JSON property `cacheTokensDetails`
+        # @return [Array<Google::Apis::AiplatformV1::GoogleCloudAiplatformV1ModalityTokenCount>]
+        attr_accessor :cache_tokens_details
+
+        # Output only. The number of tokens in the cached content that was used for this
+        # request.
+        # Corresponds to the JSON property `cachedContentTokenCount`
+        # @return [Fixnum]
+        attr_accessor :cached_content_token_count
+
+        # The total number of tokens in the generated candidates.
+        # Corresponds to the JSON property `candidatesTokenCount`
+        # @return [Fixnum]
+        attr_accessor :candidates_token_count
+
+        # Output only. A detailed breakdown of the token count for each modality in the
+        # generated candidates.
+        # Corresponds to the JSON property `candidatesTokensDetails`
+        # @return [Array<Google::Apis::AiplatformV1::GoogleCloudAiplatformV1ModalityTokenCount>]
+        attr_accessor :candidates_tokens_details
+
+        # The total number of tokens in the prompt. This includes any text, images, or
+        # other media provided in the request. When `cached_content` is set, this also
+        # includes the number of tokens in the cached content.
+        # Corresponds to the JSON property `promptTokenCount`
+        # @return [Fixnum]
+        attr_accessor :prompt_token_count
+
+        # Output only. A detailed breakdown of the token count for each modality in the
+        # prompt.
+        # Corresponds to the JSON property `promptTokensDetails`
+        # @return [Array<Google::Apis::AiplatformV1::GoogleCloudAiplatformV1ModalityTokenCount>]
+        attr_accessor :prompt_tokens_details
+
+        # Output only. The number of tokens that were part of the model's generated "
+        # thoughts" output, if applicable.
+        # Corresponds to the JSON property `thoughtsTokenCount`
+        # @return [Fixnum]
+        attr_accessor :thoughts_token_count
+
+        # Output only. The number of tokens in the results from tool executions, which
+        # are provided back to the model as input, if applicable.
+        # Corresponds to the JSON property `toolUsePromptTokenCount`
+        # @return [Fixnum]
+        attr_accessor :tool_use_prompt_token_count
+
+        # Output only. A detailed breakdown by modality of the token counts from the
+        # results of tool executions, which are provided back to the model as input.
+        # Corresponds to the JSON property `toolUsePromptTokensDetails`
+        # @return [Array<Google::Apis::AiplatformV1::GoogleCloudAiplatformV1ModalityTokenCount>]
+        attr_accessor :tool_use_prompt_tokens_details
+
+        # The total number of tokens for the entire request. This is the sum of `
+        # prompt_token_count`, `candidates_token_count`, `tool_use_prompt_token_count`,
+        # and `thoughts_token_count`.
+        # Corresponds to the JSON property `totalTokenCount`
+        # @return [Fixnum]
+        attr_accessor :total_token_count
+
+        # Output only. The traffic type for this request.
+        # Corresponds to the JSON property `trafficType`
+        # @return [String]
+        attr_accessor :traffic_type
+
+        def initialize(**args)
+          update!(**args)
+        end
+
+        # Update properties of this object
+        def update!(**args)
+          @cache_tokens_details = args[:cache_tokens_details] if args.key?(:cache_tokens_details)
+          @cached_content_token_count = args[:cached_content_token_count] if args.key?(:cached_content_token_count)
+          @candidates_token_count = args[:candidates_token_count] if args.key?(:candidates_token_count)
+          @candidates_tokens_details = args[:candidates_tokens_details] if args.key?(:candidates_tokens_details)
+          @prompt_token_count = args[:prompt_token_count] if args.key?(:prompt_token_count)
+          @prompt_tokens_details = args[:prompt_tokens_details] if args.key?(:prompt_tokens_details)
+          @thoughts_token_count = args[:thoughts_token_count] if args.key?(:thoughts_token_count)
+          @tool_use_prompt_token_count = args[:tool_use_prompt_token_count] if args.key?(:tool_use_prompt_token_count)
+          @tool_use_prompt_tokens_details = args[:tool_use_prompt_tokens_details] if args.key?(:tool_use_prompt_tokens_details)
+          @total_token_count = args[:total_token_count] if args.key?(:total_token_count)
+          @traffic_type = args[:traffic_type] if args.key?(:traffic_type)
+        end
+      end
+
       # References an API call. It contains more information about long running
       # operation and Jobs that are triggered by the API call.
       class GoogleCloudAiplatformV1UserActionReference
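
The new UsageMetadata class above makes the token accounting explicit: per the field comments, `total_token_count` is the sum of the prompt, candidate, tool-use prompt, and thoughts token counts. A small sketch with made-up numbers:

require "google/apis/aiplatform_v1"

usage = Google::Apis::AiplatformV1::GoogleCloudAiplatformV1UsageMetadata.new(
  prompt_token_count: 42,
  candidates_token_count: 128,
  tool_use_prompt_token_count: 0,
  thoughts_token_count: 16,
  total_token_count: 186
)

# 42 + 128 + 0 + 16 == 186, matching the documented relationship.
parts = [
  usage.prompt_token_count,
  usage.candidates_token_count,
  usage.tool_use_prompt_token_count,
  usage.thoughts_token_count
]
puts parts.sum == usage.total_token_count   # => true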

generated/google-apis-aiplatform_v1/lib/google/apis/aiplatform_v1/gem_version.rb

Lines changed: 2 additions & 2 deletions
@@ -16,13 +16,13 @@ module Google
   module Apis
     module AiplatformV1
       # Version of the google-apis-aiplatform_v1 gem
-      GEM_VERSION = "0.69.0"
+      GEM_VERSION = "0.70.0"
 
       # Version of the code generator used to generate this client
       GENERATOR_VERSION = "0.18.0"
 
       # Revision of the discovery document this client was generated from
-      REVISION = "20251003"
+      REVISION = "20251010"
     end
   end
 end
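
Putting the pieces of this regeneration together: the api_names_out.yaml mapping above suggests the regenerated service exposes the new call as `embed_project_location_publisher_model_content`, taking the publisher `model` resource name as its path parameter. The concrete signature lives in service.rb, which is not part of the excerpt shown here, so treat the following as a sketch under that assumption; the service class name, application-default-credentials auth, and the project/model names follow the usual generated-client conventions and placeholders, not something confirmed by this diff.

require "googleauth"
require "google/apis/aiplatform_v1"

# Usual generated-client setup (assumed, not shown in this commit).
aiplatform = Google::Apis::AiplatformV1::AiplatformService.new
aiplatform.authorization = Google::Auth.get_application_default(
  ["https://www.googleapis.com/auth/cloud-platform"]
)

request = Google::Apis::AiplatformV1::GoogleCloudAiplatformV1EmbedContentRequest.new(
  content: Google::Apis::AiplatformV1::GoogleCloudAiplatformV1Content.new(
    parts: [Google::Apis::AiplatformV1::GoogleCloudAiplatformV1Part.new(text: "hello world")]
  )
)

# Method name and `model` parameter inferred from the api_names_out.yaml hunk above.
response = aiplatform.embed_project_location_publisher_model_content(
  "projects/my-project/locations/us-central1/publishers/google/models/my-embedding-model",
  request
)

puts response.embedding.values.size
puts response.usage_metadata&.total_token_count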
