diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/internal/util/conversions.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/internal/util/conversions.kt
index 3adf069a0ed..506570d96e1 100644
--- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/internal/util/conversions.kt
+++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/internal/util/conversions.kt
@@ -26,10 +26,8 @@ import com.google.firebase.vertexai.common.shared.FunctionCall
 import com.google.firebase.vertexai.common.shared.FunctionCallPart
 import com.google.firebase.vertexai.common.shared.FunctionResponse
 import com.google.firebase.vertexai.common.shared.FunctionResponsePart
-import com.google.firebase.vertexai.common.shared.HarmBlockThreshold
 import com.google.firebase.vertexai.type.BlobPart
 import com.google.firebase.vertexai.type.BlockReason
-import com.google.firebase.vertexai.type.BlockThreshold
 import com.google.firebase.vertexai.type.Candidate
 import com.google.firebase.vertexai.type.Citation
 import com.google.firebase.vertexai.type.CitationMetadata
@@ -41,6 +39,7 @@ import com.google.firebase.vertexai.type.FunctionCallingConfig
 import com.google.firebase.vertexai.type.FunctionDeclaration
 import com.google.firebase.vertexai.type.GenerateContentResponse
 import com.google.firebase.vertexai.type.GenerationConfig
+import com.google.firebase.vertexai.type.HarmBlockThreshold
 import com.google.firebase.vertexai.type.HarmCategory
 import com.google.firebase.vertexai.type.HarmProbability
 import com.google.firebase.vertexai.type.HarmSeverity
@@ -138,13 +137,16 @@ internal fun ToolConfig.toInternal() =
     )
   )
 
-internal fun BlockThreshold.toInternal() =
+internal fun HarmBlockThreshold.toInternal() =
   when (this) {
-    BlockThreshold.NONE -> HarmBlockThreshold.BLOCK_NONE
-    BlockThreshold.ONLY_HIGH -> HarmBlockThreshold.BLOCK_ONLY_HIGH
-    BlockThreshold.MEDIUM_AND_ABOVE -> HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE
-    BlockThreshold.LOW_AND_ABOVE -> HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
-    BlockThreshold.UNSPECIFIED -> HarmBlockThreshold.UNSPECIFIED
+    HarmBlockThreshold.NONE ->
+      com.google.firebase.vertexai.common.shared.HarmBlockThreshold.BLOCK_NONE
+    HarmBlockThreshold.ONLY_HIGH ->
+      com.google.firebase.vertexai.common.shared.HarmBlockThreshold.BLOCK_ONLY_HIGH
+    HarmBlockThreshold.MEDIUM_AND_ABOVE ->
+      com.google.firebase.vertexai.common.shared.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE
+    HarmBlockThreshold.LOW_AND_ABOVE ->
+      com.google.firebase.vertexai.common.shared.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
   }
 
 internal fun Tool.toInternal() =
@@ -258,8 +260,7 @@ internal fun com.google.firebase.vertexai.common.server.FinishReason?.toPublic()
     com.google.firebase.vertexai.common.server.FinishReason.SAFETY -> FinishReason.SAFETY
     com.google.firebase.vertexai.common.server.FinishReason.STOP -> FinishReason.STOP
     com.google.firebase.vertexai.common.server.FinishReason.OTHER -> FinishReason.OTHER
-    com.google.firebase.vertexai.common.server.FinishReason.UNSPECIFIED -> FinishReason.UNSPECIFIED
-    com.google.firebase.vertexai.common.server.FinishReason.UNKNOWN -> FinishReason.UNKNOWN
+    else -> FinishReason.UNKNOWN
   }
 
 internal fun com.google.firebase.vertexai.common.shared.HarmCategory.toPublic() =
@@ -270,7 +271,7 @@ internal fun com.google.firebase.vertexai.common.shared.HarmCategory.toPublic()
       HarmCategory.SEXUALLY_EXPLICIT
     com.google.firebase.vertexai.common.shared.HarmCategory.DANGEROUS_CONTENT ->
       HarmCategory.DANGEROUS_CONTENT
-    com.google.firebase.vertexai.common.shared.HarmCategory.UNKNOWN -> HarmCategory.UNKNOWN
+    else -> HarmCategory.UNKNOWN
   }
 
 internal fun com.google.firebase.vertexai.common.server.HarmProbability.toPublic() =
@@ -280,9 +281,7 @@ internal fun com.google.firebase.vertexai.common.server.HarmProbability.toPublic
     com.google.firebase.vertexai.common.server.HarmProbability.LOW -> HarmProbability.LOW
     com.google.firebase.vertexai.common.server.HarmProbability.NEGLIGIBLE ->
      HarmProbability.NEGLIGIBLE
-    com.google.firebase.vertexai.common.server.HarmProbability.UNSPECIFIED ->
-      HarmProbability.UNSPECIFIED
-    com.google.firebase.vertexai.common.server.HarmProbability.UNKNOWN -> HarmProbability.UNKNOWN
+    else -> HarmProbability.UNKNOWN
   }
 
 internal fun com.google.firebase.vertexai.common.server.HarmSeverity.toPublic() =
@@ -291,16 +290,14 @@
     com.google.firebase.vertexai.common.server.HarmSeverity.MEDIUM -> HarmSeverity.MEDIUM
     com.google.firebase.vertexai.common.server.HarmSeverity.LOW -> HarmSeverity.LOW
     com.google.firebase.vertexai.common.server.HarmSeverity.NEGLIGIBLE -> HarmSeverity.NEGLIGIBLE
-    com.google.firebase.vertexai.common.server.HarmSeverity.UNSPECIFIED -> HarmSeverity.UNSPECIFIED
-    com.google.firebase.vertexai.common.server.HarmSeverity.UNKNOWN -> HarmSeverity.UNKNOWN
+    else -> HarmSeverity.UNKNOWN
   }
 
 internal fun com.google.firebase.vertexai.common.server.BlockReason.toPublic() =
   when (this) {
-    com.google.firebase.vertexai.common.server.BlockReason.UNSPECIFIED -> BlockReason.UNSPECIFIED
     com.google.firebase.vertexai.common.server.BlockReason.SAFETY -> BlockReason.SAFETY
     com.google.firebase.vertexai.common.server.BlockReason.OTHER -> BlockReason.OTHER
-    com.google.firebase.vertexai.common.server.BlockReason.UNKNOWN -> BlockReason.UNKNOWN
+    else -> BlockReason.UNKNOWN
   }
 
 internal fun com.google.firebase.vertexai.common.GenerateContentResponse.toPublic():
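The conversion hunks above all apply the same fallback pattern: any internal value without a public counterpart (the removed UNSPECIFIED entries, as well as anything added to the wire format later) now falls through an else branch to the public UNKNOWN entry. A minimal, self-contained sketch of that pattern, using hypothetical enums rather than the SDK's internal types:

```kotlin
// Hypothetical stand-ins for an internal (wire) enum and its public counterpart.
enum class InternalReason { UNSPECIFIED, SAFETY, OTHER, UNKNOWN }

enum class PublicReason { UNKNOWN, SAFETY, OTHER }

// Values the public API does not model (UNSPECIFIED here, or any future
// addition to InternalReason) collapse into PublicReason.UNKNOWN.
fun InternalReason.toPublic(): PublicReason =
  when (this) {
    InternalReason.SAFETY -> PublicReason.SAFETY
    InternalReason.OTHER -> PublicReason.OTHER
    else -> PublicReason.UNKNOWN // UNSPECIFIED and unrecognized values land here
  }
```

The else branch keeps the mapping total even if the internal enum grows, so new wire values degrade to UNKNOWN instead of breaking the conversion.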
diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt
index 1a23885cbe0..68eba03f4c0 100644
--- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt
+++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/Candidate.kt
@@ -68,9 +68,6 @@ enum class FinishReason {
   /** A new and not yet supported value. */
   UNKNOWN,
 
-  /** Reason is unspecified. */
-  UNSPECIFIED,
-
   /** Model finished successfully and stopped. */
   STOP,
 
diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/CountTokensResponse.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/CountTokensResponse.kt
index 95eac64958f..ac4ee804715 100644
--- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/CountTokensResponse.kt
+++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/CountTokensResponse.kt
@@ -20,9 +20,10 @@ package com.google.firebase.vertexai.type
  * Represents a response measuring model input.
  *
  * @property totalTokens A count of the tokens in the input
- * @property totalBillableCharacters A count of the characters that are billable in the input
+ * @property totalBillableCharacters A count of the characters that are billable in the input, if
+ *   available.
  */
-class CountTokensResponse(val totalTokens: Int, val totalBillableCharacters: Int) {
+class CountTokensResponse(val totalTokens: Int, val totalBillableCharacters: Int? = null) {
   operator fun component1() = totalTokens
 
   operator fun component2() = totalBillableCharacters
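Because totalBillableCharacters is now nullable and defaults to null, callers that previously read it unconditionally need a null check. A short sketch of consuming the updated type; the direct constructor call is purely illustrative, in practice the value would come from a count-tokens request:

```kotlin
import com.google.firebase.vertexai.type.CountTokensResponse

fun describe(response: CountTokensResponse): String {
  // component1()/component2() still allow destructuring, but the second value may be null.
  val (totalTokens, totalBillableCharacters) = response
  return if (totalBillableCharacters != null) {
    "tokens=$totalTokens, billableCharacters=$totalBillableCharacters"
  } else {
    "tokens=$totalTokens, billableCharacters unavailable"
  }
}

fun main() {
  // Illustrative only: constructed directly instead of being returned by the backend.
  println(describe(CountTokensResponse(totalTokens = 42)))
}
```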
diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/BlockThreshold.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/HarmBlockThreshold.kt
similarity index 92%
rename from firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/BlockThreshold.kt
rename to firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/HarmBlockThreshold.kt
index 4d22d03981d..ade2d1a9513 100644
--- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/BlockThreshold.kt
+++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/HarmBlockThreshold.kt
@@ -19,10 +19,7 @@ package com.google.firebase.vertexai.type
 /**
  * Represents the threshold for some [HarmCategory] that is allowed and blocked by [SafetySetting].
  */
-enum class BlockThreshold {
-  /** The threshold was not specified. */
-  UNSPECIFIED,
-
+enum class HarmBlockThreshold {
   /** Content with negligible harm is allowed. */
   LOW_AND_ABOVE,
 
diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/HarmProbability.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/HarmProbability.kt
index 1a08c2ec39e..56ab86e4707 100644
--- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/HarmProbability.kt
+++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/HarmProbability.kt
@@ -21,9 +21,6 @@ enum class HarmProbability {
   /** A new and not yet supported value. */
   UNKNOWN,
 
-  /** Probability for harm is unspecified. */
-  UNSPECIFIED,
-
   /** Probability for harm is negligible. */
   NEGLIGIBLE,
 
diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/HarmSeverity.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/HarmSeverity.kt
index a5ae583b357..8e3f0d37c9a 100644
--- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/HarmSeverity.kt
+++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/HarmSeverity.kt
@@ -21,9 +21,6 @@ enum class HarmSeverity {
   /** A new and not yet supported value. */
   UNKNOWN,
 
-  /** Severity for harm is unspecified. */
-  UNSPECIFIED,
-
   /** Severity for harm is negligible. */
   NEGLIGIBLE,
 
diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/PromptFeedback.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/PromptFeedback.kt
index f9d2a134628..3043dd01632 100644
--- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/PromptFeedback.kt
+++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/PromptFeedback.kt
@@ -34,9 +34,6 @@ enum class BlockReason {
   /** A new and not yet supported value. */
   UNKNOWN,
 
-  /** Content was blocked for an unspecified reason. */
-  UNSPECIFIED,
-
   /** Content was blocked for violating provided [SafetySetting]. */
   SAFETY,
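With UNSPECIFIED removed from the public enums above, UNKNOWN is the single catch-all callers handle for values the SDK does not recognize. A hypothetical helper (not part of the SDK) showing that an exhaustive when over the trimmed BlockReason now needs only three branches:

```kotlin
import com.google.firebase.vertexai.type.BlockReason

// Hypothetical helper: exhaustive over the trimmed enum, with UNKNOWN as the only fallback.
fun describeBlockReason(reason: BlockReason): String =
  when (reason) {
    BlockReason.SAFETY -> "Blocked by a configured SafetySetting"
    BlockReason.OTHER -> "Blocked for another reason"
    BlockReason.UNKNOWN -> "Blocked for a reason this SDK version does not recognize"
  }
```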
diff --git a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/SafetySetting.kt b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/SafetySetting.kt
index 2ca039c46bf..afc881cd033 100644
--- a/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/SafetySetting.kt
+++ b/firebase-vertexai/src/main/kotlin/com/google/firebase/vertexai/type/SafetySetting.kt
@@ -17,9 +17,10 @@ package com.google.firebase.vertexai.type
 
 /**
- * A configuration for a [BlockThreshold] of some [HarmCategory] allowed and blocked in responses.
+ * A configuration for a [HarmBlockThreshold] of some [HarmCategory] allowed and blocked in
+ * responses.
  *
  * @param harmCategory The relevant [HarmCategory].
 * @param threshold The threshold form harm allowable.
 */
-class SafetySetting(val harmCategory: HarmCategory, val threshold: BlockThreshold) {}
+class SafetySetting(val harmCategory: HarmCategory, val threshold: HarmBlockThreshold) {}
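For callers, the rename means a SafetySetting is now constructed with HarmBlockThreshold, and since UNSPECIFIED no longer exists an explicit threshold must be chosen. A sketch of the updated usage, with the surrounding model setup assumed rather than shown:

```kotlin
import com.google.firebase.vertexai.type.HarmBlockThreshold
import com.google.firebase.vertexai.type.HarmCategory
import com.google.firebase.vertexai.type.SafetySetting

// Before: SafetySetting(HarmCategory.HARASSMENT, BlockThreshold.LOW_AND_ABOVE)
// After the rename, with an explicit threshold now required:
val harassmentSetting =
  SafetySetting(
    harmCategory = HarmCategory.HARASSMENT,
    threshold = HarmBlockThreshold.LOW_AND_ABOVE,
  )
```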