Commit c999629

1 parent 43cec79 commit c999629

4 files changed: +12 −9 lines changed

clients/google-api-services-firebaseml/v2beta/2.0.0/README.md

Lines changed: 2 additions & 2 deletions
@@ -22,7 +22,7 @@ Add the following lines to your `pom.xml` file:
     <dependency>
       <groupId>com.google.apis</groupId>
       <artifactId>google-api-services-firebaseml</artifactId>
-      <version>v2beta-rev20240730-2.0.0</version>
+      <version>v2beta-rev20240802-2.0.0</version>
     </dependency>
   </dependencies>
 </project>
@@ -35,7 +35,7 @@ repositories {
   mavenCentral()
 }
 dependencies {
-  implementation 'com.google.apis:google-api-services-firebaseml:v2beta-rev20240730-2.0.0'
+  implementation 'com.google.apis:google-api-services-firebaseml:v2beta-rev20240802-2.0.0'
 }
 ```

clients/google-api-services-firebaseml/v2beta/2.0.0/com/google/api/services/firebaseml/v2beta/model/GoogleCloudAiplatformV1beta1GenerateContentResponseUsageMetadata.java

Lines changed: 6 additions & 3 deletions
@@ -37,7 +37,8 @@ public final class GoogleCloudAiplatformV1beta1GenerateContentResponseUsageMetad
   private java.lang.Integer candidatesTokenCount;
 
   /**
-   * Number of tokens in the request.
+   * Number of tokens in the request. When `cached_content` is set, this is still the total
+   * effective prompt size meaning this includes the number of tokens in the cached content.
    * The value may be {@code null}.
    */
   @com.google.api.client.util.Key
@@ -67,15 +68,17 @@ public GoogleCloudAiplatformV1beta1GenerateContentResponseUsageMetadata setCandi
   }
 
   /**
-   * Number of tokens in the request.
+   * Number of tokens in the request. When `cached_content` is set, this is still the total
+   * effective prompt size meaning this includes the number of tokens in the cached content.
    * @return value or {@code null} for none
    */
   public java.lang.Integer getPromptTokenCount() {
     return promptTokenCount;
   }
 
   /**
-   * Number of tokens in the request.
+   * Number of tokens in the request. When `cached_content` is set, this is still the total
+   * effective prompt size meaning this includes the number of tokens in the cached content.
    * @param promptTokenCount promptTokenCount or {@code null} for none
    */
   public GoogleCloudAiplatformV1beta1GenerateContentResponseUsageMetadata setPromptTokenCount(java.lang.Integer promptTokenCount) {
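The Javadoc change above clarifies that `promptTokenCount` already reflects the full effective prompt, including tokens contributed by `cached_content`, so token accounting based on it needs no extra adjustment for the cached portion. Below is a minimal sketch of reading these fields from the generated model; the fluent setters and `getPromptTokenCount()` appear in this diff, while `getCandidatesTokenCount()` is assumed from the usual generated-model getter pattern, and the numeric values are made up for illustration.

```java
import com.google.api.services.firebaseml.v2beta.model.GoogleCloudAiplatformV1beta1GenerateContentResponseUsageMetadata;

public class UsageMetadataExample {
  public static void main(String[] args) {
    // Hypothetical values standing in for the usageMetadata of a real generateContent response.
    GoogleCloudAiplatformV1beta1GenerateContentResponseUsageMetadata usage =
        new GoogleCloudAiplatformV1beta1GenerateContentResponseUsageMetadata()
            .setPromptTokenCount(1200)     // total effective prompt size, cached-content tokens included
            .setCandidatesTokenCount(85);  // tokens generated across candidates

    // Per the updated Javadoc, promptTokenCount is the full prompt size even when
    // cached_content is set, so it already covers the cached portion of the prompt.
    System.out.println("Prompt tokens (incl. cached content): " + usage.getPromptTokenCount());
    System.out.println("Candidates tokens: " + usage.getCandidatesTokenCount());
  }
}
```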

clients/google-api-services-firebaseml/v2beta/2.0.0/pom.xml

Lines changed: 2 additions & 2 deletions
@@ -8,8 +8,8 @@
 
   <groupId>com.google.apis</groupId>
   <artifactId>google-api-services-firebaseml</artifactId>
-  <version>v2beta-rev20240730-2.0.0</version>
-  <name>Firebase ML API v2beta-rev20240730-2.0.0</name>
+  <version>v2beta-rev20240802-2.0.0</version>
+  <name>Firebase ML API v2beta-rev20240802-2.0.0</name>
   <packaging>jar</packaging>
 
   <inceptionYear>2011</inceptionYear>

clients/google-api-services-firebaseml/v2beta/README.md

Lines changed: 2 additions & 2 deletions
@@ -22,7 +22,7 @@ Add the following lines to your `pom.xml` file:
     <dependency>
       <groupId>com.google.apis</groupId>
       <artifactId>google-api-services-firebaseml</artifactId>
-      <version>v2beta-rev20240730-2.0.0</version>
+      <version>v2beta-rev20240802-2.0.0</version>
     </dependency>
   </dependencies>
 </project>
@@ -35,7 +35,7 @@ repositories {
   mavenCentral()
 }
 dependencies {
-  implementation 'com.google.apis:google-api-services-firebaseml:v2beta-rev20240730-2.0.0'
+  implementation 'com.google.apis:google-api-services-firebaseml:v2beta-rev20240802-2.0.0'
 }
 ```
