From 55c05a03925a219a2c91f67b8c371605c556e139 Mon Sep 17 00:00:00 2001
From: Daniel La Rocque

Doc comments added to packages/ai/src/methods/chrome-adapter.ts in this patch:

+ * Pros: callers needn't be concerned with details of on-device availability.
+ * Cons: this method spans a few concerns and splits request validation from usage.
+ * If instance variables weren't already part of the API, we could consider a better
+ * separation of concerns.

+ * This is comparable to {@link GenerativeModel.generateContent} for generating content in
+ * Cloud.

+ * This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
+ * Cloud.

+ * Chrome only downloads models as needed. Chrome knows a model is needed when code calls
+ * LanguageModel.create. Since Chrome manages the download, the SDK can only avoid redundant
+ * download requests by tracking if a download has previously been requested.

+ * Chrome uses a multi-turn session for all inference. Vertex uses single-turn for all
+ * inference. To map the Vertex API to Chrome's API, the SDK creates a new session for all
+ * inference. Chrome will remove a model from memory if it's no longer in use, so this method
+ * ensures a new session is created before an old session is destroyed.

+ * Encapsulates a few concerns:

diff --git a/docs-devsite/ai.chatsession.md b/docs-devsite/ai.chatsession.md

-| [(constructor)(apiSettings, model, params, requestOptions)](./ai.chatsession.md#chatsessionconstructor) | | Constructs a new instance of the ChatSession class |
+| [(constructor)(apiSettings, model, chromeAdapter, params, requestOptions)](./ai.chatsession.md#chatsessionconstructor) | | Constructs a new instance of the ChatSession class |
## Properties
@@ -47,7 +47,7 @@ Constructs a new instance of the `ChatSession` class
Signature:
```typescript
-constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
+constructor(apiSettings: ApiSettings, model: string, chromeAdapter: ChromeAdapter, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
```
#### Parameters
@@ -56,6 +56,7 @@ constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams |
| --- | --- | --- |
| apiSettings | ApiSettings | |
| model | string | |
+| chromeAdapter | ChromeAdapter | |
| params | [StartChatParams](./ai.startchatparams.md#startchatparams_interface) \| undefined | |
| requestOptions | [RequestOptions](./ai.requestoptions.md#requestoptions_interface) \| undefined | |
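The session-management comments in this patch describe a create-before-destroy rotation. A minimal sketch of that pattern, assuming sessions expose a `destroy()` method per the Prompt API (the holder and helper names here are illustrative, not the shipped `chrome-adapter.ts`):

```typescript
import {
  LanguageModel,
  LanguageModelCreateOptions
} from '../types/language-model';

// Illustrative holder for the previous session; the real adapter would keep
// this as an instance variable.
let oldSession: LanguageModel | undefined;

async function createSession(
  provider: LanguageModel,
  options?: LanguageModelCreateOptions
): Promise<LanguageModel> {
  // Chrome starts downloading the model (if needed) when create() is called.
  const newSession = await provider.create(options);
  // Destroy the old session only after the new one exists, so the model is
  // never "unused" and Chrome does not evict it from memory.
  oldSession?.destroy();
  oldSession = newSession;
  return newSession;
}
```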
diff --git a/docs-devsite/ai.generativemodel.md b/docs-devsite/ai.generativemodel.md
index d91cf80e881..17c9d3c0863 100644
--- a/docs-devsite/ai.generativemodel.md
+++ b/docs-devsite/ai.generativemodel.md
@@ -23,12 +23,13 @@ export declare class GenerativeModel extends AIModel
| Constructor | Modifiers | Description |
| --- | --- | --- |
-| [(constructor)(ai, modelParams, requestOptions)](./ai.generativemodel.md#generativemodelconstructor) | | Constructs a new instance of the GenerativeModel class |
+| [(constructor)(ai, modelParams, chromeAdapter, requestOptions)](./ai.generativemodel.md#generativemodelconstructor) | | Constructs a new instance of the GenerativeModel class |
## Properties
| Property | Modifiers | Type | Description |
| --- | --- | --- | --- |
+| [DEFAULT\_HYBRID\_IN\_CLOUD\_MODEL](./ai.generativemodel.md#generativemodeldefault_hybrid_in_cloud_model) | static | string | Defines the name of the default in-cloud model to use for hybrid inference. |
| [generationConfig](./ai.generativemodel.md#generativemodelgenerationconfig) | | [GenerationConfig](./ai.generationconfig.md#generationconfig_interface) | |
| [requestOptions](./ai.generativemodel.md#generativemodelrequestoptions) | | [RequestOptions](./ai.requestoptions.md#requestoptions_interface) | |
| [safetySettings](./ai.generativemodel.md#generativemodelsafetysettings) | | [SafetySetting](./ai.safetysetting.md#safetysetting_interface)\[\] | |
@@ -52,7 +53,7 @@ Constructs a new instance of the `GenerativeModel` class
Signature:
```typescript
-constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions);
+constructor(ai: AI, modelParams: ModelParams, chromeAdapter: ChromeAdapter, requestOptions?: RequestOptions);
```
#### Parameters
@@ -61,8 +62,19 @@ constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions);
| --- | --- | --- |
| ai | [AI](./ai.ai.md#ai_interface) | |
| modelParams | [ModelParams](./ai.modelparams.md#modelparams_interface) | |
+| chromeAdapter | ChromeAdapter | |
| requestOptions | [RequestOptions](./ai.requestoptions.md#requestoptions_interface) | |
+## GenerativeModel.DEFAULT\_HYBRID\_IN\_CLOUD\_MODEL
+
+Defines the name of the default in-cloud model to use for hybrid inference.
+
+Signature:
+
+```typescript
+static DEFAULT_HYBRID_IN_CLOUD_MODEL: string;
+```
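
For illustration (the fallback behavior is inferred from this field's description, not shown in this hunk):

```typescript
// Hybrid call with no explicit in-cloud model name:
const model = getGenerativeModel(ai, { mode: 'prefer_on_device' });
// Assumption: the in-cloud portion then defaults to
// GenerativeModel.DEFAULT_HYBRID_IN_CLOUD_MODEL.
```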
+
## GenerativeModel.generationConfig
Signature:
diff --git a/docs-devsite/ai.hybridparams.md b/docs-devsite/ai.hybridparams.md
new file mode 100644
index 00000000000..b2b3b1030fe
--- /dev/null
+++ b/docs-devsite/ai.hybridparams.md
@@ -0,0 +1,57 @@
+Project: /docs/reference/js/_project.yaml
+Book: /docs/reference/_book.yaml
+page_type: reference
+
+{% comment %}
+DO NOT EDIT THIS FILE!
+This is generated by the JS SDK team, and any local changes will be
+overwritten. Changes should be made in the source code at
+https://github.com/firebase/firebase-js-sdk
+{% endcomment %}
+
+# HybridParams interface
+Toggles hybrid inference.
+
+Signature:
+
+```typescript
+export interface HybridParams
+```
+
+## Properties
+
+| Property | Type | Description |
+| --- | --- | --- |
+| [inCloudParams](./ai.hybridparams.md#hybridparamsincloudparams) | [ModelParams](./ai.modelparams.md#modelparams_interface) | Optional. Specifies advanced params for in-cloud inference. |
+| [mode](./ai.hybridparams.md#hybridparamsmode) | [InferenceMode](./ai.md#inferencemode) | Specifies on-device or in-cloud inference. Defaults to prefer on-device. |
+| [onDeviceParams](./ai.hybridparams.md#hybridparamsondeviceparams) | [OnDeviceParams](./ai.ondeviceparams.md#ondeviceparams_interface) | Optional. Specifies advanced params for on-device inference. |
+
+## HybridParams.inCloudParams
+
+Optional. Specifies advanced params for in-cloud inference.
+
+Signature:
+
+```typescript
+inCloudParams?: ModelParams;
+```
+
+## HybridParams.mode
+
+Specifies on-device or in-cloud inference. Defaults to prefer on-device.
+
+Signature:
+
+```typescript
+mode: InferenceMode;
+```
+
+## HybridParams.onDeviceParams
+
+Optional. Specifies advanced params for on-device inference.
+
+Signature:
+
+```typescript
+onDeviceParams?: OnDeviceParams;
+```
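
A usage sketch for this interface, assuming the public `firebase/ai` entry point (the model name and create options are illustrative values, not defaults from this patch):

```typescript
import { initializeApp } from 'firebase/app';
import { getAI, getGenerativeModel } from 'firebase/ai';

const app = initializeApp({ /* your Firebase config */ });
const ai = getAI(app);
const model = getGenerativeModel(ai, {
  mode: 'prefer_on_device', // fall back to the cloud when on-device is unavailable
  inCloudParams: { model: 'gemini-2.0-flash' }, // illustrative model name
  onDeviceParams: {
    createOptions: { expectedInputs: [{ type: 'text' }] } // shape used in this patch's tests
  }
});
```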
diff --git a/docs-devsite/ai.md b/docs-devsite/ai.md
index c43c0391ba4..01b3a455682 100644
--- a/docs-devsite/ai.md
+++ b/docs-devsite/ai.md
@@ -20,7 +20,7 @@ The Firebase AI Web SDK.
| [getAI(app, options)](./ai.md#getai_a94a413) | Returns the default [AI](./ai.ai.md#ai_interface) instance that is associated with the provided [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface). If no instance exists, initializes a new instance with the default settings. |
| [getVertexAI(app, options)](./ai.md#getvertexai_04094cf) | |
| function(ai, ...) |
-| [getGenerativeModel(ai, modelParams, requestOptions)](./ai.md#getgenerativemodel_80bd839) | Returns a [GenerativeModel](./ai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. |
+| [getGenerativeModel(ai, modelParams, requestOptions)](./ai.md#getgenerativemodel_c63f46a) | Returns a [GenerativeModel](./ai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. |
| [getImagenModel(ai, modelParams, requestOptions)](./ai.md#getimagenmodel_e1f6645) | (Public Preview) Returns an [ImagenModel](./ai.imagenmodel.md#imagenmodel_class) class with methods for using Imagen.Only Imagen 3 models (named imagen-3.0-*) are supported. |
## Classes
@@ -97,6 +97,7 @@ The Firebase AI Web SDK.
| [GenerativeContentBlob](./ai.generativecontentblob.md#generativecontentblob_interface) | Interface for sending an image. |
| [GroundingAttribution](./ai.groundingattribution.md#groundingattribution_interface) | |
| [GroundingMetadata](./ai.groundingmetadata.md#groundingmetadata_interface) | Metadata returned to client when grounding is enabled. |
+| [HybridParams](./ai.hybridparams.md#hybridparams_interface) | Toggles hybrid inference. |
| [ImagenGCSImage](./ai.imagengcsimage.md#imagengcsimage_interface) | An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.This feature is not available yet. |
| [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface) | (Public Preview) Configuration options for generating images with Imagen.See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images-imagen) for more details. |
| [ImagenGenerationResponse](./ai.imagengenerationresponse.md#imagengenerationresponse_interface) | (Public Preview) The response from a request to generate images with Imagen. |
@@ -105,10 +106,11 @@ The Firebase AI Web SDK.
| [ImagenSafetySettings](./ai.imagensafetysettings.md#imagensafetysettings_interface) | (Public Preview) Settings for controlling the aggressiveness of filtering out sensitive content.See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details. |
| [InlineDataPart](./ai.inlinedatapart.md#inlinedatapart_interface) | Content part interface if the part represents an image. |
| [ModalityTokenCount](./ai.modalitytokencount.md#modalitytokencount_interface) | Represents token counting info for a single modality. |
-| [ModelParams](./ai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_80bd839). |
+| [ModelParams](./ai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a). |
| [ObjectSchemaInterface](./ai.objectschemainterface.md#objectschemainterface_interface) | Interface for [ObjectSchema](./ai.objectschema.md#objectschema_class) class. |
+| [OnDeviceParams](./ai.ondeviceparams.md#ondeviceparams_interface) | Encapsulates configuration for on-device inference. |
| [PromptFeedback](./ai.promptfeedback.md#promptfeedback_interface) | If the prompt was blocked, this will be populated with blockReason and the relevant safetyRatings. |
-| [RequestOptions](./ai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_80bd839). |
+| [RequestOptions](./ai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a). |
| [RetrievedContextAttribution](./ai.retrievedcontextattribution.md#retrievedcontextattribution_interface) | |
| [SafetyRating](./ai.safetyrating.md#safetyrating_interface) | A safety rating associated with a [GenerateContentCandidate](./ai.generatecontentcandidate.md#generatecontentcandidate_interface) |
| [SafetySetting](./ai.safetysetting.md#safetysetting_interface) | Safety setting that can be sent as part of request parameters. |
@@ -140,6 +142,7 @@ The Firebase AI Web SDK.
| Type Alias | Description |
| --- | --- |
| [BackendType](./ai.md#backendtype) | Type alias representing valid backend types. It can be either 'VERTEX_AI' or 'GOOGLE_AI'. |
+| [InferenceMode](./ai.md#inferencemode) | Determines whether inference happens on-device or in-cloud. |
| [Part](./ai.md#part) | Content part - includes text, image/video, or function call/response part types. |
| [ResponseModality](./ai.md#responsemodality) | (Public Preview) Generation modalities to be returned in generation responses. |
| [Role](./ai.md#role) | Role is the producer of the content. |
@@ -226,14 +229,14 @@ export declare function getVertexAI(app?: FirebaseApp, options?: VertexAIOptions
## function(ai, ...)
-### getGenerativeModel(ai, modelParams, requestOptions) {:#getgenerativemodel_80bd839}
+### getGenerativeModel(ai, modelParams, requestOptions) {:#getgenerativemodel_c63f46a}
Returns a [GenerativeModel](./ai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality.
Signature:
```typescript
-export declare function getGenerativeModel(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel;
+export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel;
```
#### Parameters
@@ -241,7 +244,7 @@ export declare function getGenerativeModel(ai: AI, modelParams: ModelParams, req
| Parameter | Type | Description |
| --- | --- | --- |
| ai | [AI](./ai.ai.md#ai_interface) | |
-| modelParams | [ModelParams](./ai.modelparams.md#modelparams_interface) | |
+| modelParams | [ModelParams](./ai.modelparams.md#modelparams_interface) \| [HybridParams](./ai.hybridparams.md#hybridparams_interface) | |
| requestOptions | [RequestOptions](./ai.requestoptions.md#requestoptions_interface) | |
Returns:
@@ -360,6 +363,16 @@ Type alias representing valid backend types. It can be either `'VERTEX_AI'` or `
export type BackendType = (typeof BackendType)[keyof typeof BackendType];
```
+## InferenceMode
+
+Determines whether inference happens on-device or in-cloud.
+
+Signature:
+
+```typescript
+export type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud';
+```
+
## Part
Content part - includes text, image/video, or function call/response part types.
diff --git a/docs-devsite/ai.modelparams.md b/docs-devsite/ai.modelparams.md
index a92b2e9035d..a5722e7d69d 100644
--- a/docs-devsite/ai.modelparams.md
+++ b/docs-devsite/ai.modelparams.md
@@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# ModelParams interface
-Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_80bd839).
+Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a).
Signature:
diff --git a/docs-devsite/ai.ondeviceparams.md b/docs-devsite/ai.ondeviceparams.md
new file mode 100644
index 00000000000..f4bfcbb5cff
--- /dev/null
+++ b/docs-devsite/ai.ondeviceparams.md
@@ -0,0 +1,42 @@
+Project: /docs/reference/js/_project.yaml
+Book: /docs/reference/_book.yaml
+page_type: reference
+
+{% comment %}
+DO NOT EDIT THIS FILE!
+This is generated by the JS SDK team, and any local changes will be
+overwritten. Changes should be made in the source code at
+https://github.com/firebase/firebase-js-sdk
+{% endcomment %}
+
+# OnDeviceParams interface
+Encapsulates configuration for on-device inference.
+
+Signature:
+
+```typescript
+export interface OnDeviceParams
+```
+
+## Properties
+
+| Property | Type | Description |
+| --- | --- | --- |
+| [createOptions](./ai.ondeviceparams.md#ondeviceparamscreateoptions) | LanguageModelCreateOptions | |
+| [promptOptions](./ai.ondeviceparams.md#ondeviceparamspromptoptions) | LanguageModelPromptOptions | |
+
+## OnDeviceParams.createOptions
+
+Signature:
+
+```typescript
+createOptions?: LanguageModelCreateOptions;
+```
+
+## OnDeviceParams.promptOptions
+
+Signature:
+
+```typescript
+promptOptions?: LanguageModelPromptOptions;
+```
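
For orientation, a populated `OnDeviceParams` sketch (the values are illustrative; the option shapes follow the Prompt API types referenced above and the unit tests later in this series):

```typescript
const onDeviceParams: OnDeviceParams = {
  // Forwarded to LanguageModel.create(); declares the input types to expect.
  createOptions: { expectedInputs: [{ type: 'image' }] },
  // Applied when prompting; constrains responses to a JSON-schema-like object.
  promptOptions: { responseConstraint: { type: 'object', properties: {} } }
};
```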
diff --git a/docs-devsite/ai.requestoptions.md b/docs-devsite/ai.requestoptions.md
index 73aa03c1d25..8178ef5b696 100644
--- a/docs-devsite/ai.requestoptions.md
+++ b/docs-devsite/ai.requestoptions.md
@@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# RequestOptions interface
-Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_80bd839).
+Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a).
Signature:
From 50f142a9805d316c9606f0520351e910aadefd83 Mon Sep 17 00:00:00 2001
From: Erik Eldridge

| [BackendType](./ai.md#backendtype) | Type alias representing valid backend types. It can be either 'VERTEX_AI' or 'GOOGLE_AI'. |
| [InferenceMode](./ai.md#inferencemode) | Determines whether inference happens on-device or in-cloud. |
+| [LanguageModelInitialPrompts](./ai.md#languagemodelinitialprompts) | |
+| [LanguageModelMessageContentValue](./ai.md#languagemodelmessagecontentvalue) | |
+| [LanguageModelMessageRole](./ai.md#languagemodelmessagerole) | |
+| [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | |
| [Part](./ai.md#part) | Content part - includes text, image/video, or function call/response part types. |
| [ResponseModality](./ai.md#responsemodality) | (Public Preview) Generation modalities to be returned in generation responses. |
| [Role](./ai.md#role) | Role is the producer of the content. |
@@ -373,6 +383,38 @@ Determines whether inference happens on-device or in-cloud.
export type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud';
```
+## LanguageModelInitialPrompts
+
+Signature:
+
+```typescript
+export type LanguageModelInitialPrompts = LanguageModelMessage[] | LanguageModelMessageShorthand[];
+```
+
+## LanguageModelMessageContentValue
+
+Signature:
+
+```typescript
+export type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string;
+```
+
+## LanguageModelMessageRole
+
+Signature:
+
+```typescript
+export type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
+```
+
+## LanguageModelMessageType
+
+Signature:
+
+```typescript
+export type LanguageModelMessageType = 'text' | 'image' | 'audio';
+```
+
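Taken together, these aliases describe message values like the sketch below; the `content` entries pairing a `type` with a `value` follow [LanguageModelMessageContent](./ai.languagemodelmessagecontent.md#languagemodelmessagecontent_interface) (the concrete shape shown is an assumption based on those types):

```typescript
declare const imageBitmap: ImageBitmap; // any ImageBitmapSource works as a value

const message: LanguageModelMessage = {
  role: 'user', // LanguageModelMessageRole
  content: [
    { type: 'text', value: 'Describe this image.' },
    { type: 'image', value: imageBitmap }
  ]
};
```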
## Part
Content part - includes text, image/video, or function call/response part types.
diff --git a/docs-devsite/ai.ondeviceparams.md b/docs-devsite/ai.ondeviceparams.md
index f4bfcbb5cff..16fed65560d 100644
--- a/docs-devsite/ai.ondeviceparams.md
+++ b/docs-devsite/ai.ondeviceparams.md
@@ -22,7 +22,7 @@ export interface OnDeviceParams
| Property | Type | Description |
| --- | --- | --- |
-| [createOptions](./ai.ondeviceparams.md#ondeviceparamscreateoptions) | LanguageModelCreateOptions | |
+| [createOptions](./ai.ondeviceparams.md#ondeviceparamscreateoptions) | [LanguageModelCreateOptions](./ai.languagemodelcreateoptions.md#languagemodelcreateoptions_interface) | |
| [promptOptions](./ai.ondeviceparams.md#ondeviceparamspromptoptions) | LanguageModelPromptOptions | |
## OnDeviceParams.createOptions
diff --git a/packages/ai/src/methods/chrome-adapter.ts b/packages/ai/src/methods/chrome-adapter.ts
index aa3709048a2..d6de108668d 100644
--- a/packages/ai/src/methods/chrome-adapter.ts
+++ b/packages/ai/src/methods/chrome-adapter.ts
@@ -104,7 +104,7 @@ export class ChromeAdapter {
*
*
| [BackendType](./ai.md#backendtype) | Type alias representing valid backend types. It can be either 'VERTEX_AI' or 'GOOGLE_AI'. |
| [InferenceMode](./ai.md#inferencemode) | Determines whether inference happens on-device or in-cloud. |
-| [LanguageModelInitialPrompts](./ai.md#languagemodelinitialprompts) | |
| [LanguageModelMessageContentValue](./ai.md#languagemodelmessagecontentvalue) | |
| [LanguageModelMessageRole](./ai.md#languagemodelmessagerole) | |
| [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | |
@@ -383,14 +381,6 @@ Determines whether inference happens on-device or in-cloud.
export type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud';
```
-## LanguageModelInitialPrompts
-
-Signature:
-
-```typescript
-export type LanguageModelInitialPrompts = LanguageModelMessage[] | LanguageModelMessageShorthand[];
-```
-
## LanguageModelMessageContentValue
Signature:
diff --git a/packages/ai/src/types/index.ts b/packages/ai/src/types/index.ts
index 698f15b8aea..bd13140566f 100644
--- a/packages/ai/src/types/index.ts
+++ b/packages/ai/src/types/index.ts
@@ -26,12 +26,10 @@ export * from './googleai';
export {
LanguageModelCreateOptions,
LanguageModelCreateCoreOptions,
- LanguageModelExpectedInput,
- LanguageModelInitialPrompts,
+ LanguageModelExpected,
LanguageModelMessage,
LanguageModelMessageContent,
LanguageModelMessageContentValue,
LanguageModelMessageRole,
- LanguageModelMessageShorthand,
LanguageModelMessageType
} from './language-model';
diff --git a/packages/ai/src/types/language-model.ts b/packages/ai/src/types/language-model.ts
index 503f3d49d05..83a728dc3be 100644
--- a/packages/ai/src/types/language-model.ts
+++ b/packages/ai/src/types/language-model.ts
@@ -15,7 +15,9 @@
* limitations under the License.
*/
/**
- * {@see https://github.com/webmachinelearning/prompt-api#full-api-surface-in-web-idl}
+ * The subset of the Prompt API
+ * ({@see https://github.com/webmachinelearning/prompt-api#full-api-surface-in-web-idl})
+ * required for hybrid functionality.
*/
export interface LanguageModel extends EventTarget {
  create(options?: LanguageModelCreateOptions): Promise<LanguageModel>;

| [GenerateContentStreamResult](./ai.generatecontentstreamresult.md#generatecontentstreamresult_interface) | Result object returned from [GenerativeModel.generateContentStream()](./ai.generativemodel.md#generativemodelgeneratecontentstream) call. Iterate over stream to get chunks as they come in and/or use the response promise to get the aggregated response when the stream is done. |
| [GenerationConfig](./ai.generationconfig.md#generationconfig_interface) | Config options for content-related requests |
| [GenerativeContentBlob](./ai.generativecontentblob.md#generativecontentblob_interface) | Interface for sending an image. |
-| [HybridParams](./ai.hybridparams.md#hybridparams_interface) | Toggles hybrid inference. |
| [GoogleSearch](./ai.googlesearch.md#googlesearch_interface) | Specifies the Google Search configuration. |
| [GoogleSearchTool](./ai.googlesearchtool.md#googlesearchtool_interface) | A tool that allows a Gemini model to connect to Google Search to access and incorporate up-to-date information from the web into its responses.Important: If using Grounding with Google Search, you are required to comply with the "Grounding with Google Search" usage requirements for your chosen API provider: [Gemini Developer API](https://ai.google.dev/gemini-api/terms#grounding-with-google-search) or Vertex AI Gemini API (see [Service Terms](https://cloud.google.com/terms/service-terms) section within the Service Specific Terms). |
| [GroundingChunk](./ai.groundingchunk.md#groundingchunk_interface) | Represents a chunk of retrieved data that supports a claim in the model's response. This is part of the grounding information provided when grounding is enabled. |
| [GroundingMetadata](./ai.groundingmetadata.md#groundingmetadata_interface) | Metadata returned when grounding is enabled.Currently, only Grounding with Google Search is supported (see [GoogleSearchTool](./ai.googlesearchtool.md#googlesearchtool_interface)).Important: If using Grounding with Google Search, you are required to comply with the "Grounding with Google Search" usage requirements for your chosen API provider: [Gemini Developer API](https://ai.google.dev/gemini-api/terms#grounding-with-google-search) or Vertex AI Gemini API (see [Service Terms](https://cloud.google.com/terms/service-terms) section within the Service Specific Terms). |
| [GroundingSupport](./ai.groundingsupport.md#groundingsupport_interface) | Provides information about how a specific segment of the model's response is supported by the retrieved grounding chunks. |
+| [HybridParams](./ai.hybridparams.md#hybridparams_interface) | Toggles hybrid inference. |
| [ImagenGCSImage](./ai.imagengcsimage.md#imagengcsimage_interface) | An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.This feature is not available yet. |
| [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface) | (Public Preview) Configuration options for generating images with Imagen.See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images-imagen) for more details. |
| [ImagenGenerationResponse](./ai.imagengenerationresponse.md#imagengenerationresponse_interface) | (Public Preview) The response from a request to generate images with Imagen. |
@@ -95,7 +95,7 @@ The Firebase AI Web SDK.
| [LanguageModelMessage](./ai.languagemodelmessage.md#languagemodelmessage_interface) | |
| [LanguageModelMessageContent](./ai.languagemodelmessagecontent.md#languagemodelmessagecontent_interface) | |
| [ModalityTokenCount](./ai.modalitytokencount.md#modalitytokencount_interface) | Represents token counting info for a single modality. |
-| [ModelParams](./ai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_80bd839). |
+| [ModelParams](./ai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a). |
| [ObjectSchemaRequest](./ai.objectschemarequest.md#objectschemarequest_interface) | Interface for JSON parameters in a schema of "object" when not using the Schema.object() helper. |
| [OnDeviceParams](./ai.ondeviceparams.md#ondeviceparams_interface) | Encapsulates configuration for on-device inference. |
| [PromptFeedback](./ai.promptfeedback.md#promptfeedback_interface) | If the prompt was blocked, this will be populated with blockReason and the relevant safetyRatings. |
@@ -111,7 +111,7 @@ The Firebase AI Web SDK.
| [Segment](./ai.segment.md#segment_interface) | Represents a specific segment within a [Content](./ai.content.md#content_interface) object, often used to pinpoint the exact location of text or data that grounding information refers to. |
| [StartChatParams](./ai.startchatparams.md#startchatparams_interface) | Params for [GenerativeModel.startChat()](./ai.generativemodel.md#generativemodelstartchat). |
| [TextPart](./ai.textpart.md#textpart_interface) | Content part interface if the part represents a text string. |
-| [ThinkingConfig](./ai.thinkingconfig.md#thinkingconfig_interface) | Configuration for "thinking" behavior of compatible Gemini models.Certain models utilize a thinking process before generating a response. This allows them to reason through complex problems and plan a more coherent and accurate answer. |
+| [ThinkingConfig](./ai.thinkingconfig.md#thinkingconfig_interface) | |
| [ToolConfig](./ai.toolconfig.md#toolconfig_interface) | Tool config. This config is shared for all tools provided in the request. |
| [UsageMetadata](./ai.usagemetadata.md#usagemetadata_interface) | Usage metadata about a [GenerateContentResponse](./ai.generatecontentresponse.md#generatecontentresponse_interface). |
| [VideoMetadata](./ai.videometadata.md#videometadata_interface) | Describes the input video content. |
@@ -157,6 +157,10 @@ The Firebase AI Web SDK.
| [ImagenAspectRatio](./ai.md#imagenaspectratio) | (Public Preview) Aspect ratios for Imagen images.To specify an aspect ratio for generated images, set the aspectRatio property in your [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface).See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details and examples of the supported aspect ratios. |
| [ImagenPersonFilterLevel](./ai.md#imagenpersonfilterlevel) | (Public Preview) A filter level controlling whether generation of images containing people or faces is allowed.See the personGeneration documentation for more details. |
| [ImagenSafetyFilterLevel](./ai.md#imagensafetyfilterlevel) | (Public Preview) A filter level controlling how aggressively to filter sensitive content.Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI are assessed against a list of safety filters, which include 'harmful categories' (for example, violence, sexual, derogatory, and toxic). This filter level controls how aggressively to filter out potentially harmful content from responses. See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) and the [Responsible AI and usage guidelines](https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters) for more details. |
+| [InferenceMode](./ai.md#inferencemode) | Determines whether inference happens on-device or in-cloud. |
+| [LanguageModelMessageContentValue](./ai.md#languagemodelmessagecontentvalue) | |
+| [LanguageModelMessageRole](./ai.md#languagemodelmessagerole) | |
+| [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | |
| [Modality](./ai.md#modality) | Content part modality. |
| [Part](./ai.md#part) | Content part - includes text, image/video, or function call/response part types. |
| [ResponseModality](./ai.md#responsemodality) | (Public Preview) Generation modalities to be returned in generation responses. |
@@ -700,6 +704,40 @@ Text prompts provided as inputs and images (generated or uploaded) through Image
export type ImagenSafetyFilterLevel = (typeof ImagenSafetyFilterLevel)[keyof typeof ImagenSafetyFilterLevel];
```
+## InferenceMode
+
+Determines whether inference happens on-device or in-cloud.
+
+Signature:
+
+```typescript
+export type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud';
+```
+
+## LanguageModelMessageContentValue
+
+Signature:
+
+```typescript
+export type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string;
+```
+
+## LanguageModelMessageRole
+
+Signature:
+
+```typescript
+export type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
+```
+
+## LanguageModelMessageType
+
+Signature:
+
+```typescript
+export type LanguageModelMessageType = 'text' | 'image' | 'audio';
+```
+
## Modality
Content part modality.
diff --git a/docs-devsite/ai.thinkingconfig.md b/docs-devsite/ai.thinkingconfig.md
index ec348a20487..92e58e56c4c 100644
--- a/docs-devsite/ai.thinkingconfig.md
+++ b/docs-devsite/ai.thinkingconfig.md
@@ -10,10 +10,6 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# ThinkingConfig interface
-Configuration for "thinking" behavior of compatible Gemini models.
-
-Certain models utilize a thinking process before generating a response. This allows them to reason through complex problems and plan a more coherent and accurate answer.
-
Signature:
```typescript
From e9dc43e89efcfb470181713304995746755288c7 Mon Sep 17 00:00:00 2001
From: Christina Holland

-| [(constructor)(ai, modelParams, chromeAdapter, requestOptions)](./ai.generativemodel.md#generativemodelconstructor) | | Constructs a new instance of the GenerativeModel class |
+| [(constructor)(ai, modelParams, requestOptions, chromeAdapter)](./ai.generativemodel.md#generativemodelconstructor) | | Constructs a new instance of the GenerativeModel class |
## Properties
@@ -53,7 +53,7 @@ Constructs a new instance of the `GenerativeModel` class
Signature:
```typescript
-constructor(ai: AI, modelParams: ModelParams, chromeAdapter: ChromeAdapter, requestOptions?: RequestOptions);
+constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions, chromeAdapter?: ChromeAdapter | undefined);
```
#### Parameters
@@ -62,8 +62,8 @@ constructor(ai: AI, modelParams: ModelParams, chromeAdapter: ChromeAdapter, requ
| --- | --- | --- |
| ai | [AI](./ai.ai.md#ai_interface) | |
| modelParams | [ModelParams](./ai.modelparams.md#modelparams_interface) | |
-| chromeAdapter | ChromeAdapter | |
| requestOptions | [RequestOptions](./ai.requestoptions.md#requestoptions_interface) | |
+| chromeAdapter | [ChromeAdapter](./ai.chromeadapter.md#chromeadapter_interface) \| undefined | |
## GenerativeModel.DEFAULT\_HYBRID\_IN\_CLOUD\_MODEL
diff --git a/docs-devsite/ai.hybridparams.md b/docs-devsite/ai.hybridparams.md
index b2b3b1030fe..1934b68597f 100644
--- a/docs-devsite/ai.hybridparams.md
+++ b/docs-devsite/ai.hybridparams.md
@@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# HybridParams interface
-Toggles hybrid inference.
+Configures hybrid inference.
Signature:
diff --git a/docs-devsite/ai.imagengcsimage.md b/docs-devsite/ai.imagengcsimage.md
index cd11d8ee354..ec51c714e0f 100644
--- a/docs-devsite/ai.imagengcsimage.md
+++ b/docs-devsite/ai.imagengcsimage.md
@@ -10,6 +10,9 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# ImagenGCSImage interface
+> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment.
+>
+
An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.
This feature is not available yet.
@@ -24,11 +27,14 @@ export interface ImagenGCSImage
| Property | Type | Description |
| --- | --- | --- |
-| [gcsURI](./ai.imagengcsimage.md#imagengcsimagegcsuri) | string | The URI of the file stored in a Cloud Storage for Firebase bucket. |
-| [mimeType](./ai.imagengcsimage.md#imagengcsimagemimetype) | string | The MIME type of the image; either "image/png" or "image/jpeg".To request a different format, set the imageFormat property in your [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface). |
+| [gcsURI](./ai.imagengcsimage.md#imagengcsimagegcsuri) | string | (Public Preview) The URI of the file stored in a Cloud Storage for Firebase bucket. |
+| [mimeType](./ai.imagengcsimage.md#imagengcsimagemimetype) | string | (Public Preview) The MIME type of the image; either "image/png" or "image/jpeg".To request a different format, set the imageFormat property in your [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface). |
## ImagenGCSImage.gcsURI
+> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment.
+>
+
The URI of the file stored in a Cloud Storage for Firebase bucket.
Signature:
@@ -43,6 +49,9 @@ gcsURI: string;
## ImagenGCSImage.mimeType
+> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment.
+>
+
The MIME type of the image; either `"image/png"` or `"image/jpeg"`.
To request a different format, set the `imageFormat` property in your [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface).
diff --git a/docs-devsite/ai.languagemodelcreatecoreoptions.md b/docs-devsite/ai.languagemodelcreatecoreoptions.md
index 45c2e7f5db4..832ea3b8ca8 100644
--- a/docs-devsite/ai.languagemodelcreatecoreoptions.md
+++ b/docs-devsite/ai.languagemodelcreatecoreoptions.md
@@ -10,6 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# LanguageModelCreateCoreOptions interface
+
Signature:
```typescript
diff --git a/docs-devsite/ai.languagemodelcreateoptions.md b/docs-devsite/ai.languagemodelcreateoptions.md
index 417519a54b6..54d1ecaa803 100644
--- a/docs-devsite/ai.languagemodelcreateoptions.md
+++ b/docs-devsite/ai.languagemodelcreateoptions.md
@@ -10,6 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# LanguageModelCreateOptions interface
+
Signature:
```typescript
diff --git a/docs-devsite/ai.languagemodelexpected.md b/docs-devsite/ai.languagemodelexpected.md
index 26ed28b741e..e33d922007c 100644
--- a/docs-devsite/ai.languagemodelexpected.md
+++ b/docs-devsite/ai.languagemodelexpected.md
@@ -10,6 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# LanguageModelExpected interface
+
Signature:
```typescript
diff --git a/docs-devsite/ai.languagemodelmessage.md b/docs-devsite/ai.languagemodelmessage.md
index 420059e4892..efedf369945 100644
--- a/docs-devsite/ai.languagemodelmessage.md
+++ b/docs-devsite/ai.languagemodelmessage.md
@@ -10,6 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# LanguageModelMessage interface
+
Signature:
```typescript
diff --git a/docs-devsite/ai.languagemodelmessagecontent.md b/docs-devsite/ai.languagemodelmessagecontent.md
index 40b4cc16bce..b87f8a28b3a 100644
--- a/docs-devsite/ai.languagemodelmessagecontent.md
+++ b/docs-devsite/ai.languagemodelmessagecontent.md
@@ -10,6 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# LanguageModelMessageContent interface
+
Signature:
```typescript
diff --git a/docs-devsite/ai.languagemodelpromptoptions.md b/docs-devsite/ai.languagemodelpromptoptions.md
new file mode 100644
index 00000000000..cde9b9af3be
--- /dev/null
+++ b/docs-devsite/ai.languagemodelpromptoptions.md
@@ -0,0 +1,32 @@
+Project: /docs/reference/js/_project.yaml
+Book: /docs/reference/_book.yaml
+page_type: reference
+
+{% comment %}
+DO NOT EDIT THIS FILE!
+This is generated by the JS SDK team, and any local changes will be
+overwritten. Changes should be made in the source code at
+https://github.com/firebase/firebase-js-sdk
+{% endcomment %}
+
+# LanguageModelPromptOptions interface
+
+Signature:
+
+```typescript
+export interface LanguageModelPromptOptions
+```
+
+## Properties
+
+| Property | Type | Description |
+| --- | --- | --- |
+| [responseConstraint](./ai.languagemodelpromptoptions.md#languagemodelpromptoptionsresponseconstraint) | object | |
+
+## LanguageModelPromptOptions.responseConstraint
+
+Signature:
+
+```typescript
+responseConstraint?: object;
+```
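
A usage sketch, assuming the public `firebase/ai` entry point: the unit tests later in this series pass this SDK's `Schema.object()` as the constraint, so on-device responses must match that schema:

```typescript
import { Schema } from 'firebase/ai';

const promptOptions: LanguageModelPromptOptions = {
  // Constrain the on-device model's output to a JSON object schema.
  responseConstraint: Schema.object({ properties: {} })
};
```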
diff --git a/docs-devsite/ai.md b/docs-devsite/ai.md
index 63f01b6c6b6..464c14bf1be 100644
--- a/docs-devsite/ai.md
+++ b/docs-devsite/ai.md
@@ -51,6 +51,7 @@ The Firebase AI Web SDK.
| [AI](./ai.ai.md#ai_interface) | An instance of the Firebase AI SDK.Do not create this instance directly. Instead, use [getAI()](./ai.md#getai_a94a413). |
| [AIOptions](./ai.aioptions.md#aioptions_interface) | Options for initializing the AI service using [getAI()](./ai.md#getai_a94a413). This allows specifying which backend to use (Vertex AI Gemini API or Gemini Developer API) and configuring its specific options (like location for Vertex AI). |
| [BaseParams](./ai.baseparams.md#baseparams_interface) | Base parameters for a number of methods. |
+| [ChromeAdapter](./ai.chromeadapter.md#chromeadapter_interface) | Defines an inference "backend" that uses Chrome's on-device model, and encapsulates logic for detecting when on-device is possible. |
| [Citation](./ai.citation.md#citation_interface) | A single citation. |
| [CitationMetadata](./ai.citationmetadata.md#citationmetadata_interface) | Citation metadata that may be found on a [GenerateContentCandidate](./ai.generatecontentcandidate.md#generatecontentcandidate_interface). |
| [Content](./ai.content.md#content_interface) | Content type for both prompts and response candidates. |
@@ -81,8 +82,8 @@ The Firebase AI Web SDK.
| [GroundingChunk](./ai.groundingchunk.md#groundingchunk_interface) | Represents a chunk of retrieved data that supports a claim in the model's response. This is part of the grounding information provided when grounding is enabled. |
| [GroundingMetadata](./ai.groundingmetadata.md#groundingmetadata_interface) | Metadata returned when grounding is enabled.Currently, only Grounding with Google Search is supported (see [GoogleSearchTool](./ai.googlesearchtool.md#googlesearchtool_interface)).Important: If using Grounding with Google Search, you are required to comply with the "Grounding with Google Search" usage requirements for your chosen API provider: [Gemini Developer API](https://ai.google.dev/gemini-api/terms#grounding-with-google-search) or Vertex AI Gemini API (see [Service Terms](https://cloud.google.com/terms/service-terms) section within the Service Specific Terms). |
| [GroundingSupport](./ai.groundingsupport.md#groundingsupport_interface) | Provides information about how a specific segment of the model's response is supported by the retrieved grounding chunks. |
-| [HybridParams](./ai.hybridparams.md#hybridparams_interface) | Toggles hybrid inference. |
-| [ImagenGCSImage](./ai.imagengcsimage.md#imagengcsimage_interface) | An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.This feature is not available yet. |
+| [HybridParams](./ai.hybridparams.md#hybridparams_interface) | Configures hybrid inference. |
+| [ImagenGCSImage](./ai.imagengcsimage.md#imagengcsimage_interface) | (Public Preview) An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.This feature is not available yet. |
| [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface) | (Public Preview) Configuration options for generating images with Imagen.See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images-imagen) for more details. |
| [ImagenGenerationResponse](./ai.imagengenerationresponse.md#imagengenerationresponse_interface) | (Public Preview) The response from a request to generate images with Imagen. |
| [ImagenInlineImage](./ai.imageninlineimage.md#imageninlineimage_interface) | (Public Preview) An image generated by Imagen, represented as inline data. |
@@ -94,9 +95,10 @@ The Firebase AI Web SDK.
| [LanguageModelExpected](./ai.languagemodelexpected.md#languagemodelexpected_interface) | |
| [LanguageModelMessage](./ai.languagemodelmessage.md#languagemodelmessage_interface) | |
| [LanguageModelMessageContent](./ai.languagemodelmessagecontent.md#languagemodelmessagecontent_interface) | |
+| [LanguageModelPromptOptions](./ai.languagemodelpromptoptions.md#languagemodelpromptoptions_interface) | |
| [ModalityTokenCount](./ai.modalitytokencount.md#modalitytokencount_interface) | Represents token counting info for a single modality. |
| [ModelParams](./ai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a). |
-| [ObjectSchemaRequest](./ai.objectschemarequest.md#objectschemarequest_interface) | Interface for JSON parameters in a schema of "object" when not using the Schema.object() helper. |
+| [ObjectSchemaRequest](./ai.objectschemarequest.md#objectschemarequest_interface) | Interface for JSON parameters in a schema of [SchemaType](./ai.md#schematype) "object" when not using the Schema.object() helper. |
| [OnDeviceParams](./ai.ondeviceparams.md#ondeviceparams_interface) | Encapsulates configuration for on-device inference. |
| [PromptFeedback](./ai.promptfeedback.md#promptfeedback_interface) | If the prompt was blocked, this will be populated with blockReason and the relevant safetyRatings. |
| [RequestOptions](./ai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a). |
@@ -111,7 +113,7 @@ The Firebase AI Web SDK.
| [Segment](./ai.segment.md#segment_interface) | Represents a specific segment within a [Content](./ai.content.md#content_interface) object, often used to pinpoint the exact location of text or data that grounding information refers to. |
| [StartChatParams](./ai.startchatparams.md#startchatparams_interface) | Params for [GenerativeModel.startChat()](./ai.generativemodel.md#generativemodelstartchat). |
| [TextPart](./ai.textpart.md#textpart_interface) | Content part interface if the part represents a text string. |
-| [ThinkingConfig](./ai.thinkingconfig.md#thinkingconfig_interface) | |
+| [ThinkingConfig](./ai.thinkingconfig.md#thinkingconfig_interface) | Configuration for "thinking" behavior of compatible Gemini models.Certain models utilize a thinking process before generating a response. This allows them to reason through complex problems and plan a more coherent and accurate answer. |
| [ToolConfig](./ai.toolconfig.md#toolconfig_interface) | Tool config. This config is shared for all tools provided in the request. |
| [UsageMetadata](./ai.usagemetadata.md#usagemetadata_interface) | Usage metadata about a [GenerateContentResponse](./ai.generatecontentresponse.md#generatecontentresponse_interface). |
| [VideoMetadata](./ai.videometadata.md#videometadata_interface) | Describes the input video content. |
@@ -135,6 +137,7 @@ The Firebase AI Web SDK.
| [ImagenAspectRatio](./ai.md#imagenaspectratio) | (Public Preview) Aspect ratios for Imagen images.To specify an aspect ratio for generated images, set the aspectRatio property in your [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface).See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details and examples of the supported aspect ratios. |
| [ImagenPersonFilterLevel](./ai.md#imagenpersonfilterlevel) | (Public Preview) A filter level controlling whether generation of images containing people or faces is allowed.See the personGeneration documentation for more details. |
| [ImagenSafetyFilterLevel](./ai.md#imagensafetyfilterlevel) | (Public Preview) A filter level controlling how aggressively to filter sensitive content.Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI are assessed against a list of safety filters, which include 'harmful categories' (for example, violence, sexual, derogatory, and toxic). This filter level controls how aggressively to filter out potentially harmful content from responses. See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) and the [Responsible AI and usage guidelines](https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters) for more details. |
+| [InferenceMode](./ai.md#inferencemode) | EXPERIMENTAL FEATURE Determines whether inference happens on-device or in-cloud. |
| [Modality](./ai.md#modality) | Content part modality. |
| [POSSIBLE\_ROLES](./ai.md#possible_roles) | Possible roles. |
| [ResponseModality](./ai.md#responsemodality) | (Public Preview) Generation modalities to be returned in generation responses. |
@@ -157,7 +160,7 @@ The Firebase AI Web SDK.
| [ImagenAspectRatio](./ai.md#imagenaspectratio) | (Public Preview) Aspect ratios for Imagen images.To specify an aspect ratio for generated images, set the aspectRatio property in your [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface).See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details and examples of the supported aspect ratios. |
| [ImagenPersonFilterLevel](./ai.md#imagenpersonfilterlevel) | (Public Preview) A filter level controlling whether generation of images containing people or faces is allowed.See the personGeneration documentation for more details. |
| [ImagenSafetyFilterLevel](./ai.md#imagensafetyfilterlevel) | (Public Preview) A filter level controlling how aggressively to filter sensitive content.Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI are assessed against a list of safety filters, which include 'harmful categories' (for example, violence, sexual, derogatory, and toxic). This filter level controls how aggressively to filter out potentially harmful content from responses. See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) and the [Responsible AI and usage guidelines](https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters) for more details. |
-| [InferenceMode](./ai.md#inferencemode) | Determines whether inference happens on-device or in-cloud. |
+| [InferenceMode](./ai.md#inferencemode) | EXPERIMENTAL FEATURE Determines whether inference happens on-device or in-cloud. |
| [LanguageModelMessageContentValue](./ai.md#languagemodelmessagecontentvalue) | |
| [LanguageModelMessageRole](./ai.md#languagemodelmessagerole) | |
| [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | |
@@ -499,6 +502,20 @@ ImagenSafetyFilterLevel: {
}
```
+## InferenceMode
+
+EXPERIMENTAL FEATURE Determines whether inference happens on-device or in-cloud.
+
+Signature:
+
+```typescript
+InferenceMode: {
+ readonly PREFER_ON_DEVICE: "prefer_on_device";
+ readonly ONLY_ON_DEVICE: "only_on_device";
+ readonly ONLY_IN_CLOUD: "only_in_cloud";
+}
+```
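
This mirrors the SDK's enum-like pattern (compare `BackendType`): a constant map plus the derived type alias updated below, so both the named constant and the raw string literal type-check:

```typescript
const a: InferenceMode = InferenceMode.PREFER_ON_DEVICE;
const b: InferenceMode = 'only_in_cloud'; // literal form remains valid
```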
+
## Modality
Content part modality.
@@ -601,6 +618,7 @@ export type FinishReason = (typeof FinishReason)[keyof typeof FinishReason];
## FunctionCallingMode
+
Signature:
```typescript
@@ -706,16 +724,17 @@ export type ImagenSafetyFilterLevel = (typeof ImagenSafetyFilterLevel)[keyof typ
## InferenceMode
-Determines whether inference happens on-device or in-cloud.
+EXPERIMENTAL FEATURE Determines whether inference happens on-device or in-cloud.
Signature:
```typescript
-export type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud';
+export type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode];
```
## LanguageModelMessageContentValue
+
Signature:
```typescript
@@ -724,6 +743,7 @@ export type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer |
## LanguageModelMessageRole
+
Signature:
```typescript
@@ -732,6 +752,7 @@ export type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
## LanguageModelMessageType
+
Signature:
```typescript
diff --git a/docs-devsite/ai.objectschemarequest.md b/docs-devsite/ai.objectschemarequest.md
index bde646e0ac0..267e2d43345 100644
--- a/docs-devsite/ai.objectschemarequest.md
+++ b/docs-devsite/ai.objectschemarequest.md
@@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# ObjectSchemaRequest interface
-Interface for JSON parameters in a schema of "object" when not using the `Schema.object()` helper.
+Interface for JSON parameters in a schema of [SchemaType](./ai.md#schematype) "object" when not using the `Schema.object()` helper.
Signature:
diff --git a/docs-devsite/ai.ondeviceparams.md b/docs-devsite/ai.ondeviceparams.md
index 16fed65560d..0e23d1fda98 100644
--- a/docs-devsite/ai.ondeviceparams.md
+++ b/docs-devsite/ai.ondeviceparams.md
@@ -23,7 +23,7 @@ export interface OnDeviceParams
| Property | Type | Description |
| --- | --- | --- |
| [createOptions](./ai.ondeviceparams.md#ondeviceparamscreateoptions) | [LanguageModelCreateOptions](./ai.languagemodelcreateoptions.md#languagemodelcreateoptions_interface) | |
-| [promptOptions](./ai.ondeviceparams.md#ondeviceparamspromptoptions) | LanguageModelPromptOptions | |
+| [promptOptions](./ai.ondeviceparams.md#ondeviceparamspromptoptions) | [LanguageModelPromptOptions](./ai.languagemodelpromptoptions.md#languagemodelpromptoptions_interface) | |
## OnDeviceParams.createOptions
diff --git a/docs-devsite/ai.thinkingconfig.md b/docs-devsite/ai.thinkingconfig.md
index 92e58e56c4c..ec348a20487 100644
--- a/docs-devsite/ai.thinkingconfig.md
+++ b/docs-devsite/ai.thinkingconfig.md
@@ -10,6 +10,10 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# ThinkingConfig interface
+Configuration for "thinking" behavior of compatible Gemini models.
+
+Certain models utilize a thinking process before generating a response. This allows them to reason through complex problems and plan a more coherent and accurate answer.
+
Signature:
```typescript
diff --git a/packages/ai/src/api.ts b/packages/ai/src/api.ts
index 6f97933efa1..6ae0acadf20 100644
--- a/packages/ai/src/api.ts
+++ b/packages/ai/src/api.ts
@@ -32,7 +32,7 @@ import { AIError } from './errors';
import { AIModel, GenerativeModel, ImagenModel } from './models';
import { encodeInstanceIdentifier } from './helpers';
import { GoogleAIBackend } from './backend';
-import { ChromeAdapter } from './methods/chrome-adapter';
+import { ChromeAdapterImpl } from './methods/chrome-adapter';
import { LanguageModel } from './types/language-model';
export { ChatSession } from './methods/chat-session';
@@ -117,16 +117,15 @@ export function getGenerativeModel(
`Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })`
);
}
- return new GenerativeModel(
- ai,
- inCloudParams,
- new ChromeAdapter(
+ let chromeAdapter: ChromeAdapterImpl | undefined;
+ if (typeof window !== 'undefined' && hybridParams.mode) {
+ chromeAdapter = new ChromeAdapterImpl(
window.LanguageModel as LanguageModel,
hybridParams.mode,
hybridParams.onDeviceParams
- ),
- requestOptions
- );
+ );
+ }
+ return new GenerativeModel(ai, inCloudParams, requestOptions, chromeAdapter);
}
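
The practical effect of the `typeof window !== 'undefined'` guard above, sketched: the same hybrid call works in any environment, but only browsers get an adapter:

```typescript
// Browser: chromeAdapter wraps window.LanguageModel, enabling on-device inference.
// Node/SSR: `window` is undefined, so chromeAdapter stays undefined and the
// returned GenerativeModel serves every request from the cloud.
const model = getGenerativeModel(ai, { mode: 'prefer_on_device' });
```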
/**
diff --git a/packages/ai/src/methods/chat-session.test.ts b/packages/ai/src/methods/chat-session.test.ts
index ed0b4d4877f..0883920847f 100644
--- a/packages/ai/src/methods/chat-session.test.ts
+++ b/packages/ai/src/methods/chat-session.test.ts
@@ -24,7 +24,7 @@ import { GenerateContentStreamResult } from '../types';
import { ChatSession } from './chat-session';
import { ApiSettings } from '../types/internal';
import { VertexAIBackend } from '../backend';
-import { ChromeAdapter } from './chrome-adapter';
+import { ChromeAdapterImpl } from './chrome-adapter';
use(sinonChai);
use(chaiAsPromised);
@@ -50,7 +50,7 @@ describe('ChatSession', () => {
const chatSession = new ChatSession(
fakeApiSettings,
'a-model',
- new ChromeAdapter()
+ new ChromeAdapterImpl()
);
await expect(chatSession.sendMessage('hello')).to.be.rejected;
expect(generateContentStub).to.be.calledWith(
@@ -71,7 +71,7 @@ describe('ChatSession', () => {
const chatSession = new ChatSession(
fakeApiSettings,
'a-model',
- new ChromeAdapter()
+ new ChromeAdapterImpl()
);
await expect(chatSession.sendMessageStream('hello')).to.be.rejected;
expect(generateContentStreamStub).to.be.calledWith(
@@ -94,7 +94,7 @@ describe('ChatSession', () => {
const chatSession = new ChatSession(
fakeApiSettings,
'a-model',
- new ChromeAdapter()
+ new ChromeAdapterImpl()
);
await chatSession.sendMessageStream('hello');
expect(generateContentStreamStub).to.be.calledWith(
diff --git a/packages/ai/src/methods/chat-session.ts b/packages/ai/src/methods/chat-session.ts
index 112ddf5857e..dac16430b7a 100644
--- a/packages/ai/src/methods/chat-session.ts
+++ b/packages/ai/src/methods/chat-session.ts
@@ -30,7 +30,7 @@ import { validateChatHistory } from './chat-session-helpers';
import { generateContent, generateContentStream } from './generate-content';
import { ApiSettings } from '../types/internal';
import { logger } from '../logger';
-import { ChromeAdapter } from './chrome-adapter';
+import { ChromeAdapter } from '../types/chrome-adapter';
/**
* Do not log a message for this error.
@@ -51,7 +51,7 @@ export class ChatSession {
constructor(
apiSettings: ApiSettings,
public model: string,
- private chromeAdapter: ChromeAdapter,
+ private chromeAdapter?: ChromeAdapter,
public params?: StartChatParams,
public requestOptions?: RequestOptions
) {
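
With `chromeAdapter` now optional, the send paths presumably guard on it before preferring on-device. A minimal sketch of that dispatch (a hypothetical helper, not the literal `generate-content.ts` change; `generateContentInCloud` stands in for the SDK's existing cloud path):

```typescript
import { ChromeAdapter } from '../types/chrome-adapter';
import { GenerateContentRequest } from '../types';

// Hypothetical stand-in for the existing cloud REST call.
declare function generateContentInCloud(
  request: GenerateContentRequest
): Promise<Response>;

async function routeRequest(
  chromeAdapter: ChromeAdapter | undefined,
  request: GenerateContentRequest
): Promise<Response> {
  // Prefer on-device only when an adapter exists and reports availability.
  if (chromeAdapter && (await chromeAdapter.isAvailable(request))) {
    return chromeAdapter.generateContent(request);
  }
  return generateContentInCloud(request);
}
```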
diff --git a/packages/ai/src/methods/chrome-adapter.test.ts b/packages/ai/src/methods/chrome-adapter.test.ts
index f8ea80b0e09..fdc84be71be 100644
--- a/packages/ai/src/methods/chrome-adapter.test.ts
+++ b/packages/ai/src/methods/chrome-adapter.test.ts
@@ -19,7 +19,7 @@ import { AIError } from '../errors';
import { expect, use } from 'chai';
import sinonChai from 'sinon-chai';
import chaiAsPromised from 'chai-as-promised';
-import { ChromeAdapter } from './chrome-adapter';
+import { ChromeAdapterImpl } from './chrome-adapter';
import {
Availability,
LanguageModel,
@@ -62,7 +62,7 @@ describe('ChromeAdapter', () => {
languageModelProvider,
'availability'
).resolves(Availability.available);
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
languageModelProvider,
'prefer_on_device'
);
@@ -90,7 +90,7 @@ describe('ChromeAdapter', () => {
// Explicitly sets expected inputs.
expectedInputs: [{ type: 'text' }]
} as LanguageModelCreateOptions;
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
languageModelProvider,
'prefer_on_device',
{
@@ -110,7 +110,7 @@ describe('ChromeAdapter', () => {
});
describe('isAvailable', () => {
it('returns false if mode is undefined', async () => {
- const adapter = new ChromeAdapter();
+ const adapter = new ChromeAdapterImpl();
expect(
await adapter.isAvailable({
contents: []
@@ -118,7 +118,7 @@ describe('ChromeAdapter', () => {
).to.be.false;
});
it('returns false if mode is only cloud', async () => {
- const adapter = new ChromeAdapter(undefined, 'only_in_cloud');
+ const adapter = new ChromeAdapterImpl(undefined, 'only_in_cloud');
expect(
await adapter.isAvailable({
contents: []
@@ -126,7 +126,7 @@ describe('ChromeAdapter', () => {
).to.be.false;
});
it('returns false if LanguageModel API is undefined', async () => {
- const adapter = new ChromeAdapter(undefined, 'prefer_on_device');
+ const adapter = new ChromeAdapterImpl(undefined, 'prefer_on_device');
expect(
await adapter.isAvailable({
contents: []
@@ -134,7 +134,7 @@ describe('ChromeAdapter', () => {
).to.be.false;
});
it('returns false if request contents empty', async () => {
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
{
availability: async () => Availability.available
} as LanguageModel,
@@ -147,7 +147,7 @@ describe('ChromeAdapter', () => {
).to.be.false;
});
it('returns false if request content has "function" role', async () => {
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
{
availability: async () => Availability.available
} as LanguageModel,
@@ -165,13 +165,13 @@ describe('ChromeAdapter', () => {
).to.be.false;
});
it('returns true if request has image with supported mime type', async () => {
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
{
availability: async () => Availability.available
} as LanguageModel,
'prefer_on_device'
);
- for (const mimeType of ChromeAdapter.SUPPORTED_MIME_TYPES) {
+ for (const mimeType of ChromeAdapterImpl.SUPPORTED_MIME_TYPES) {
expect(
await adapter.isAvailable({
contents: [
@@ -195,7 +195,7 @@ describe('ChromeAdapter', () => {
const languageModelProvider = {
availability: () => Promise.resolve(Availability.available)
} as LanguageModel;
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
languageModelProvider,
'prefer_on_device'
);
@@ -224,7 +224,7 @@ describe('ChromeAdapter', () => {
const createOptions = {
expectedInputs: [{ type: 'image' }]
} as LanguageModelCreateOptions;
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
languageModelProvider,
'prefer_on_device',
{ createOptions }
@@ -247,7 +247,7 @@ describe('ChromeAdapter', () => {
const createStub = stub(languageModelProvider, 'create').returns(
downloadPromise
);
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
languageModelProvider,
'prefer_on_device'
);
@@ -271,7 +271,7 @@ describe('ChromeAdapter', () => {
const createStub = stub(languageModelProvider, 'create').returns(
downloadPromise
);
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
languageModelProvider,
'prefer_on_device'
);
@@ -289,7 +289,7 @@ describe('ChromeAdapter', () => {
availability: () => Promise.resolve(Availability.unavailable),
create: () => Promise.resolve({})
} as LanguageModel;
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
languageModelProvider,
'prefer_on_device'
);
@@ -302,7 +302,7 @@ describe('ChromeAdapter', () => {
});
describe('generateContent', () => {
it('throws if Chrome API is undefined', async () => {
- const adapter = new ChromeAdapter(undefined, 'only_on_device');
+ const adapter = new ChromeAdapterImpl(undefined, 'only_on_device');
await expect(
adapter.generateContent({
contents: []
@@ -331,7 +331,7 @@ describe('ChromeAdapter', () => {
systemPrompt: 'be yourself',
expectedInputs: [{ type: 'image' }]
} as LanguageModelCreateOptions;
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
languageModelProvider,
'prefer_on_device',
{ createOptions }
@@ -382,7 +382,7 @@ describe('ChromeAdapter', () => {
systemPrompt: 'be yourself',
expectedInputs: [{ type: 'image' }]
} as LanguageModelCreateOptions;
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
languageModelProvider,
'prefer_on_device',
{ createOptions }
@@ -448,7 +448,7 @@ describe('ChromeAdapter', () => {
properties: {}
})
};
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
languageModelProvider,
'prefer_on_device',
{ promptOptions }
@@ -481,7 +481,7 @@ describe('ChromeAdapter', () => {
const languageModelProvider = {
create: () => Promise.resolve(languageModel)
} as LanguageModel;
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
languageModelProvider,
'prefer_on_device'
);
@@ -517,7 +517,7 @@ describe('ChromeAdapter', () => {
languageModel
);
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
languageModelProvider,
'prefer_on_device'
);
@@ -561,7 +561,7 @@ describe('ChromeAdapter', () => {
const createOptions = {
expectedInputs: [{ type: 'image' }]
} as LanguageModelCreateOptions;
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
languageModelProvider,
'prefer_on_device',
{ createOptions }
@@ -609,7 +609,7 @@ describe('ChromeAdapter', () => {
const createOptions = {
expectedInputs: [{ type: 'image' }]
} as LanguageModelCreateOptions;
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
languageModelProvider,
'prefer_on_device',
{ createOptions }
@@ -668,7 +668,7 @@ describe('ChromeAdapter', () => {
properties: {}
})
};
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
languageModelProvider,
'prefer_on_device',
{ promptOptions }
@@ -703,7 +703,7 @@ describe('ChromeAdapter', () => {
const languageModelProvider = {
create: () => Promise.resolve(languageModel)
} as LanguageModel;
- const adapter = new ChromeAdapter(
+ const adapter = new ChromeAdapterImpl(
languageModelProvider,
'prefer_on_device'
);
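
For orientation, the renamed implementation is constructed exactly as these tests show. A minimal sketch (the stubbed provider stands in for Chrome's `window.LanguageModel`; fixture shapes are assumed from `types/language-model`):

```typescript
import { ChromeAdapterImpl } from './chrome-adapter';
import { Availability, LanguageModel } from '../types/language-model';

async function checkOnDevice(): Promise<boolean> {
  // Stubbed provider, mirroring the test fixtures above; in production this
  // is Chrome's window.LanguageModel.
  const languageModelProvider = {
    availability: async () => Availability.available
  } as LanguageModel;

  const adapter = new ChromeAdapterImpl(
    languageModelProvider,
    'prefer_on_device'
  );

  // Resolves true only when the mode, the API, and the request shape all
  // permit on-device inference.
  return adapter.isAvailable({
    contents: [{ role: 'user', parts: [{ text: 'hi' }] }]
  });
}
```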
diff --git a/packages/ai/src/methods/chrome-adapter.ts b/packages/ai/src/methods/chrome-adapter.ts
index e7bb39c34c8..71a87b55a46 100644
--- a/packages/ai/src/methods/chrome-adapter.ts
+++ b/packages/ai/src/methods/chrome-adapter.ts
@@ -27,6 +27,7 @@ import {
Content,
Role
} from '../types';
+import { ChromeAdapter } from '../types/chrome-adapter';
import {
Availability,
LanguageModel,
@@ -39,7 +40,7 @@ import {
* Defines an inference "backend" that uses Chrome's on-device model,
* and encapsulates logic for detecting when on-device is possible.
*/
-export class ChromeAdapter {
+export class ChromeAdapterImpl implements ChromeAdapter {
// Visible for testing
static SUPPORTED_MIME_TYPES = ['image/jpeg', 'image/png'];
private isDownloading = false;
@@ -99,7 +100,7 @@ export class ChromeAdapter {
);
return false;
}
- if (!ChromeAdapter.isOnDeviceRequest(request)) {
+ if (!ChromeAdapterImpl.isOnDeviceRequest(request)) {
logger.debug(
`On-device inference unavailable because request is incompatible.`
);
@@ -114,19 +115,19 @@ export class ChromeAdapter {
*
   * <p>This is comparable to {@link GenerativeModel.generateContent} for generating content in
   * Cloud.
-  * @param request - a standard Vertex {@link GenerateContentRequest}
+  * @param request - a standard Firebase AI {@link GenerateContentRequest}
   * @returns {@link Response}, so we can reuse common response formatting.
   */
  async generateContent(request: GenerateContentRequest): Promise<Response> {
   * <p>This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
   * Cloud.
-  * @param request - a standard Vertex {@link GenerateContentRequest}
+  * @param request - a standard Firebase AI {@link GenerateContentRequest}
   * @returns {@link Response}, so we can reuse common response formatting.
   */
  async generateContentStream(
@@ -142,13 +143,13 @@ export class ChromeAdapter {
  ): Promise<Response> {
-  * <p>Chrome uses a multi-turn session for all inference. Vertex uses single-turn for all
-  * inference. To map the Vertex API to Chrome's API, the SDK creates a new session for all
+  * <p>Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all
+  * inference. To map the Firebase AI API to Chrome's API, the SDK creates a new session for all
   * inference.
   *
   * <p>Chrome will remove a model from memory if it's no longer in use, so this method ensures a
   * new session is created before an old session is destroyed.
@@ -294,7 +296,7 @@ export class ChromeAdapter {
   private async createSession(): Promise<LanguageModelSession> {
diff --git a/packages/ai/src/methods/chrome-adapter.ts b/packages/ai/src/methods/chrome-adapter.ts
--- a/packages/ai/src/methods/chrome-adapter.ts
+++ b/packages/ai/src/methods/chrome-adapter.ts
   * <p>Pros: callers needn't be concerned with details of on-device availability.</p>
   * <p>Cons: this method spans a few concerns and splits request validation from usage.
   * If instance variables weren't already part of the API, we could consider a better
   * separation of concerns.</p>
-  * <p>This is comparable to {@link GenerativeModel.generateContent} for generating content in
-  * Cloud.</p>
+  * <p>This is comparable to {@link GenerativeModel.generateContent} for generating
+  * content using in-cloud inference.</p>
-  * <p>This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
-  * Cloud.</p>
+  * <p>This is comparable to {@link GenerativeModel.generateContentStream} for generating
+  * content using in-cloud inference.</p>
diff --git a/docs-devsite/ai.chromeadapter.md b/docs-devsite/ai.chromeadapter.md
--- a/docs-devsite/ai.chromeadapter.md
+++ b/docs-devsite/ai.chromeadapter.md
-This is comparable to [GenerativeModel.generateContent()](./ai.generativemodel.md#generativemodelgeneratecontent) for generating content in Cloud.
+This is comparable to [GenerativeModel.generateContent()](./ai.generativemodel.md#generativemodelgeneratecontent) for generating content using in-cloud inference.
-This is comparable to [GenerativeModel.generateContentStream()](./ai.generativemodel.md#generativemodelgeneratecontentstream) for generating content in Cloud.
+This is comparable to [GenerativeModel.generateContentStream()](./ai.generativemodel.md#generativemodelgeneratecontentstream) for generating a content stream using in-cloud inference.
diff --git a/docs-devsite/ai.generativemodel.md b/docs-devsite/ai.generativemodel.md
--- a/docs-devsite/ai.generativemodel.md
+++ b/docs-devsite/ai.generativemodel.md
@@ -30,7 +30,6 @@ export declare class GenerativeModel extends AIModel
-| [DEFAULT\_HYBRID\_IN\_CLOUD\_MODEL](./ai.generativemodel.md#generativemodeldefault_hybrid_in_cloud_model) | <code>static</code> | string | Defines the name of the default in-cloud model to use for hybrid inference. |
| [generationConfig](./ai.generativemodel.md#generativemodelgenerationconfig) | | [GenerationConfig](./ai.generationconfig.md#generationconfig_interface) | |
| [requestOptions](./ai.generativemodel.md#generativemodelrequestoptions) | | [RequestOptions](./ai.requestoptions.md#requestoptions_interface) | |
| [safetySettings](./ai.generativemodel.md#generativemodelsafetysettings) | | [SafetySetting](./ai.safetysetting.md#safetysetting_interface)\[\] | |
@@ -65,16 +64,6 @@ constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions, c
| requestOptions | [RequestOptions](./ai.requestoptions.md#requestoptions_interface) | |
| chromeAdapter | [ChromeAdapter](./ai.chromeadapter.md#chromeadapter_interface) \| undefined | |
-## GenerativeModel.DEFAULT\_HYBRID\_IN\_CLOUD\_MODEL
-
-Defines the name of the default in-cloud model to use for hybrid inference.
-
-Signature:
-
-```typescript
-static DEFAULT_HYBRID_IN_CLOUD_MODEL: string;
-```
-
## GenerativeModel.generationConfig
Signature:
diff --git a/docs-devsite/ai.hybridparams.md b/docs-devsite/ai.hybridparams.md
index 1934b68597f..383e9baafa5 100644
--- a/docs-devsite/ai.hybridparams.md
+++ b/docs-devsite/ai.hybridparams.md
@@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# HybridParams interface
-Configures hybrid inference.
+(EXPERIMENTAL) Configures hybrid inference.
Signature:
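
As a usage sketch (the prompt is illustrative; `mode` accepts the `InferenceMode` values exercised in the tests, and omitting `inCloudParams` falls back to the default in-cloud model):

```typescript
import { getApp } from 'firebase/app';
import { getAI, getGenerativeModel } from 'firebase/ai';

const ai = getAI(getApp());

// Passing HybridParams (rather than ModelParams) opts into hybrid inference.
const model = getGenerativeModel(ai, { mode: 'prefer_on_device' });

const result = await model.generateContent('Summarize hybrid inference.');
console.log(result.response.text());
```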
diff --git a/docs-devsite/ai.languagemodelcreatecoreoptions.md b/docs-devsite/ai.languagemodelcreatecoreoptions.md
index 832ea3b8ca8..e6432543958 100644
--- a/docs-devsite/ai.languagemodelcreatecoreoptions.md
+++ b/docs-devsite/ai.languagemodelcreatecoreoptions.md
@@ -10,6 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# LanguageModelCreateCoreOptions interface
+(EXPERIMENTAL) Used to configure the creation of an on-device language model session.
Signature:
diff --git a/docs-devsite/ai.languagemodelcreateoptions.md b/docs-devsite/ai.languagemodelcreateoptions.md
index 54d1ecaa803..22e88cbfdcd 100644
--- a/docs-devsite/ai.languagemodelcreateoptions.md
+++ b/docs-devsite/ai.languagemodelcreateoptions.md
@@ -10,6 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# LanguageModelCreateOptions interface
+(EXPERIMENTAL) Used to configure the creation of an on-device language model session.
Signature:
diff --git a/docs-devsite/ai.languagemodelexpected.md b/docs-devsite/ai.languagemodelexpected.md
index e33d922007c..ffec338c773 100644
--- a/docs-devsite/ai.languagemodelexpected.md
+++ b/docs-devsite/ai.languagemodelexpected.md
@@ -10,6 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# LanguageModelExpected interface
+(EXPERIMENTAL) Options for an on-device language model's expected inputs.
Signature:
diff --git a/docs-devsite/ai.languagemodelmessage.md b/docs-devsite/ai.languagemodelmessage.md
index efedf369945..5f9e62a8ab0 100644
--- a/docs-devsite/ai.languagemodelmessage.md
+++ b/docs-devsite/ai.languagemodelmessage.md
@@ -10,6 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# LanguageModelMessage interface
+(EXPERIMENTAL) An on-device language model message.
Signature:
diff --git a/docs-devsite/ai.languagemodelmessagecontent.md b/docs-devsite/ai.languagemodelmessagecontent.md
index b87f8a28b3a..1600d312c89 100644
--- a/docs-devsite/ai.languagemodelmessagecontent.md
+++ b/docs-devsite/ai.languagemodelmessagecontent.md
@@ -10,6 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# LanguageModelMessageContent interface
+(EXPERIMENTAL) An on-device language model content object.
Signature:
diff --git a/docs-devsite/ai.languagemodelpromptoptions.md b/docs-devsite/ai.languagemodelpromptoptions.md
index cde9b9af3be..9ff8c5d5941 100644
--- a/docs-devsite/ai.languagemodelpromptoptions.md
+++ b/docs-devsite/ai.languagemodelpromptoptions.md
@@ -10,6 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# LanguageModelPromptOptions interface
+(EXPERIMENTAL) Options for an on-device language model prompt.
Signature:
diff --git a/docs-devsite/ai.md b/docs-devsite/ai.md
index 464c14bf1be..520163a369f 100644
--- a/docs-devsite/ai.md
+++ b/docs-devsite/ai.md
@@ -82,7 +82,7 @@ The Firebase AI Web SDK.
| [GroundingChunk](./ai.groundingchunk.md#groundingchunk_interface) | Represents a chunk of retrieved data that supports a claim in the model's response. This is part of the grounding information provided when grounding is enabled. |
| [GroundingMetadata](./ai.groundingmetadata.md#groundingmetadata_interface) | Metadata returned when grounding is enabled.Currently, only Grounding with Google Search is supported (see [GoogleSearchTool](./ai.googlesearchtool.md#googlesearchtool_interface)).Important: If using Grounding with Google Search, you are required to comply with the "Grounding with Google Search" usage requirements for your chosen API provider: [Gemini Developer API](https://ai.google.dev/gemini-api/terms#grounding-with-google-search) or Vertex AI Gemini API (see [Service Terms](https://cloud.google.com/terms/service-terms) section within the Service Specific Terms). |
| [GroundingSupport](./ai.groundingsupport.md#groundingsupport_interface) | Provides information about how a specific segment of the model's response is supported by the retrieved grounding chunks. |
-| [HybridParams](./ai.hybridparams.md#hybridparams_interface) | Configures hybrid inference. |
+| [HybridParams](./ai.hybridparams.md#hybridparams_interface) | (EXPERIMENTAL) Configures hybrid inference. |
| [ImagenGCSImage](./ai.imagengcsimage.md#imagengcsimage_interface) | (Public Preview) An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.This feature is not available yet. |
| [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface) | (Public Preview) Configuration options for generating images with Imagen.See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images-imagen) for more details. |
| [ImagenGenerationResponse](./ai.imagengenerationresponse.md#imagengenerationresponse_interface) | (Public Preview) The response from a request to generate images with Imagen. |
@@ -90,16 +90,16 @@ The Firebase AI Web SDK.
| [ImagenModelParams](./ai.imagenmodelparams.md#imagenmodelparams_interface) | (Public Preview) Parameters for configuring an [ImagenModel](./ai.imagenmodel.md#imagenmodel_class). |
| [ImagenSafetySettings](./ai.imagensafetysettings.md#imagensafetysettings_interface) | (Public Preview) Settings for controlling the aggressiveness of filtering out sensitive content.See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details. |
| [InlineDataPart](./ai.inlinedatapart.md#inlinedatapart_interface) | Content part interface if the part represents an image. |
-| [LanguageModelCreateCoreOptions](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptions_interface) | |
-| [LanguageModelCreateOptions](./ai.languagemodelcreateoptions.md#languagemodelcreateoptions_interface) | |
-| [LanguageModelExpected](./ai.languagemodelexpected.md#languagemodelexpected_interface) | |
-| [LanguageModelMessage](./ai.languagemodelmessage.md#languagemodelmessage_interface) | |
-| [LanguageModelMessageContent](./ai.languagemodelmessagecontent.md#languagemodelmessagecontent_interface) | |
-| [LanguageModelPromptOptions](./ai.languagemodelpromptoptions.md#languagemodelpromptoptions_interface) | |
+| [LanguageModelCreateCoreOptions](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptions_interface) | (EXPERIMENTAL) Used to configure the creation of an on-device language model session. |
+| [LanguageModelCreateOptions](./ai.languagemodelcreateoptions.md#languagemodelcreateoptions_interface) | (EXPERIMENTAL) Used to configure the creation of an on-device language model session. |
+| [LanguageModelExpected](./ai.languagemodelexpected.md#languagemodelexpected_interface) | (EXPERIMENTAL) Options for an on-device language model's expected inputs. |
+| [LanguageModelMessage](./ai.languagemodelmessage.md#languagemodelmessage_interface) | (EXPERIMENTAL) An on-device language model message. |
+| [LanguageModelMessageContent](./ai.languagemodelmessagecontent.md#languagemodelmessagecontent_interface) | (EXPERIMENTAL) An on-device language model content object. |
+| [LanguageModelPromptOptions](./ai.languagemodelpromptoptions.md#languagemodelpromptoptions_interface) | (EXPERIMENTAL) Options for an on-device language model prompt. |
| [ModalityTokenCount](./ai.modalitytokencount.md#modalitytokencount_interface) | Represents token counting info for a single modality. |
| [ModelParams](./ai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a). |
| [ObjectSchemaRequest](./ai.objectschemarequest.md#objectschemarequest_interface) | Interface for JSON parameters in a schema of [SchemaType](./ai.md#schematype) "object" when not using the Schema.object()
helper. |
-| [OnDeviceParams](./ai.ondeviceparams.md#ondeviceparams_interface) | Encapsulates configuration for on-device inference. |
+| [OnDeviceParams](./ai.ondeviceparams.md#ondeviceparams_interface) | (EXPERIMENTAL) Encapsulates configuration for on-device inference. |
| [PromptFeedback](./ai.promptfeedback.md#promptfeedback_interface) | If the prompt was blocked, this will be populated with blockReason
and the relevant safetyRatings
. |
| [RequestOptions](./ai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a). |
| [RetrievedContextAttribution](./ai.retrievedcontextattribution.md#retrievedcontextattribution_interface) | |
@@ -137,7 +137,7 @@ The Firebase AI Web SDK.
| [ImagenAspectRatio](./ai.md#imagenaspectratio) | (Public Preview) Aspect ratios for Imagen images.To specify an aspect ratio for generated images, set the aspectRatio
property in your [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface).See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details and examples of the supported aspect ratios. |
| [ImagenPersonFilterLevel](./ai.md#imagenpersonfilterlevel) | (Public Preview) A filter level controlling whether generation of images containing people or faces is allowed.See the personGeneration documentation for more details. |
| [ImagenSafetyFilterLevel](./ai.md#imagensafetyfilterlevel) | (Public Preview) A filter level controlling how aggressively to filter sensitive content.Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI are assessed against a list of safety filters, which include 'harmful categories' (for example, violence
, sexual
, derogatory
, and toxic
). This filter level controls how aggressively to filter out potentially harmful content from responses. See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) and the [Responsible AI and usage guidelines](https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters) for more details. |
-| [InferenceMode](./ai.md#inferencemode) | EXPERIMENTAL FEATURE Determines whether inference happens on-device or in-cloud. |
+| [InferenceMode](./ai.md#inferencemode) | (EXPERIMENTAL) Determines whether inference happens on-device or in-cloud. |
| [Modality](./ai.md#modality) | Content part modality. |
| [POSSIBLE\_ROLES](./ai.md#possible_roles) | Possible roles. |
| [ResponseModality](./ai.md#responsemodality) | (Public Preview) Generation modalities to be returned in generation responses. |
@@ -160,10 +160,10 @@ The Firebase AI Web SDK.
| [ImagenAspectRatio](./ai.md#imagenaspectratio) | (Public Preview) Aspect ratios for Imagen images.To specify an aspect ratio for generated images, set the aspectRatio
property in your [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface).See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details and examples of the supported aspect ratios. |
| [ImagenPersonFilterLevel](./ai.md#imagenpersonfilterlevel) | (Public Preview) A filter level controlling whether generation of images containing people or faces is allowed.See the personGeneration documentation for more details. |
| [ImagenSafetyFilterLevel](./ai.md#imagensafetyfilterlevel) | (Public Preview) A filter level controlling how aggressively to filter sensitive content.Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI are assessed against a list of safety filters, which include 'harmful categories' (for example, violence
, sexual
, derogatory
, and toxic
). This filter level controls how aggressively to filter out potentially harmful content from responses. See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) and the [Responsible AI and usage guidelines](https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters) for more details. |
-| [InferenceMode](./ai.md#inferencemode) | EXPERIMENTAL FEATURE Determines whether inference happens on-device or in-cloud. |
-| [LanguageModelMessageContentValue](./ai.md#languagemodelmessagecontentvalue) | |
-| [LanguageModelMessageRole](./ai.md#languagemodelmessagerole) | |
-| [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | |
+| [InferenceMode](./ai.md#inferencemode) | (EXPERIMENTAL) Determines whether inference happens on-device or in-cloud. |
+| [LanguageModelMessageContentValue](./ai.md#languagemodelmessagecontentvalue) | (EXPERIMENTAL) Content formats that can be provided as on-device message content. |
+| [LanguageModelMessageRole](./ai.md#languagemodelmessagerole) | (EXPERIMENTAL) Allowable roles for on-device language model usage. |
+| [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | (EXPERIMENTAL) Allowable types for on-device language model messages. |
| [Modality](./ai.md#modality) | Content part modality. |
| [Part](./ai.md#part) | Content part - includes text, image/video, or function call/response part types. |
| [ResponseModality](./ai.md#responsemodality) | (Public Preview) Generation modalities to be returned in generation responses. |
@@ -504,7 +504,7 @@ ImagenSafetyFilterLevel: {
## InferenceMode
-EXPERIMENTAL FEATURE Determines whether inference happens on-device or in-cloud.
+(EXPERIMENTAL) Determines whether inference happens on-device or in-cloud.
Signature:
@@ -724,7 +724,7 @@ export type ImagenSafetyFilterLevel = (typeof ImagenSafetyFilterLevel)[keyof typ
## InferenceMode
-EXPERIMENTAL FEATURE Determines whether inference happens on-device or in-cloud.
+(EXPERIMENTAL) Determines whether inference happens on-device or in-cloud.
Signature:
@@ -734,6 +734,7 @@ export type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode];
## LanguageModelMessageContentValue
+(EXPERIMENTAL) Content formats that can be provided as on-device message content.
Signature:
@@ -743,6 +744,7 @@ export type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer |
## LanguageModelMessageRole
+(EXPERIMENTAL) Allowable roles for on-device language model usage.
Signature:
@@ -752,6 +754,7 @@ export type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
## LanguageModelMessageType
+(EXPERIMENTAL) Allowable types for on-device language model messages.
Signature:
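
These on-device types mirror Chrome's Prompt API shapes. A sketch of a well-formed message, assuming the field layout follows that API (role constrained to `'system' | 'user' | 'assistant'`, content typed by `LanguageModelMessageType`):

```typescript
import {
  LanguageModelMessage,
  LanguageModelMessageContent
} from 'firebase/ai';

// A single text content part; 'text' is one of the allowable
// LanguageModelMessageType values.
const content: LanguageModelMessageContent = {
  type: 'text',
  value: 'What is the weather like today?'
};

// A user-role message carrying that content.
const message: LanguageModelMessage = {
  role: 'user',
  content: [content]
};
```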
diff --git a/docs-devsite/ai.ondeviceparams.md b/docs-devsite/ai.ondeviceparams.md
index 0e23d1fda98..77a4b8aab85 100644
--- a/docs-devsite/ai.ondeviceparams.md
+++ b/docs-devsite/ai.ondeviceparams.md
@@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# OnDeviceParams interface
-Encapsulates configuration for on-device inference.
+(EXPERIMENTAL) Encapsulates configuration for on-device inference.
Signature:
diff --git a/packages/ai/src/api.test.ts b/packages/ai/src/api.test.ts
index 6ce353107ac..76a9b4523c2 100644
--- a/packages/ai/src/api.test.ts
+++ b/packages/ai/src/api.test.ts
@@ -21,7 +21,7 @@ import { expect } from 'chai';
import { AI } from './public-types';
import { GenerativeModel } from './models/generative-model';
import { VertexAIBackend } from './backend';
-import { AI_TYPE } from './constants';
+import { AI_TYPE, DEFAULT_HYBRID_IN_CLOUD_MODEL } from './constants';
const fakeAI: AI = {
app: {
@@ -107,7 +107,7 @@ describe('Top level API', () => {
mode: 'only_on_device'
});
expect(genModel.model).to.equal(
- `publishers/google/models/${GenerativeModel.DEFAULT_HYBRID_IN_CLOUD_MODEL}`
+ `publishers/google/models/${DEFAULT_HYBRID_IN_CLOUD_MODEL}`
);
});
it('getGenerativeModel with HybridParams honors a model override', () => {
diff --git a/packages/ai/src/api.ts b/packages/ai/src/api.ts
index 6ae0acadf20..62c7c27f07a 100644
--- a/packages/ai/src/api.ts
+++ b/packages/ai/src/api.ts
@@ -18,7 +18,7 @@
import { FirebaseApp, getApp, _getProvider } from '@firebase/app';
import { Provider } from '@firebase/component';
import { getModularInstance } from '@firebase/util';
-import { AI_TYPE } from './constants';
+import { AI_TYPE, DEFAULT_HYBRID_IN_CLOUD_MODEL } from './constants';
import { AIService } from './service';
import { AI, AIOptions } from './public-types';
import {
@@ -105,7 +105,7 @@ export function getGenerativeModel(
let inCloudParams: ModelParams;
if (hybridParams.mode) {
inCloudParams = hybridParams.inCloudParams || {
- model: GenerativeModel.DEFAULT_HYBRID_IN_CLOUD_MODEL
+ model: DEFAULT_HYBRID_IN_CLOUD_MODEL
};
} else {
inCloudParams = modelParams as ModelParams;
@@ -118,6 +118,7 @@ export function getGenerativeModel(
);
}
let chromeAdapter: ChromeAdapterImpl | undefined;
+ // Do not initialize a ChromeAdapter if we are not in hybrid mode.
if (typeof window !== 'undefined' && hybridParams.mode) {
chromeAdapter = new ChromeAdapterImpl(
window.LanguageModel as LanguageModel,
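
The effect of the new guard: an adapter is only constructed in a browser context, and only when the caller actually asked for hybrid inference. Roughly (reusing the `ai` instance from the earlier sketch):

```typescript
// Browser + hybrid mode: a ChromeAdapterImpl wrapping window.LanguageModel
// is constructed and passed into the GenerativeModel.
const hybridModel = getGenerativeModel(ai, { mode: 'prefer_on_device' });

// Plain ModelParams (hybridParams.mode is undefined), or any non-browser
// environment: chromeAdapter stays undefined and all requests go to the
// in-cloud backend.
const cloudModel = getGenerativeModel(ai, { model: 'gemini-2.0-flash' });
```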
diff --git a/packages/ai/src/constants.ts b/packages/ai/src/constants.ts
index cb54567735a..b6bd8e220ad 100644
--- a/packages/ai/src/constants.ts
+++ b/packages/ai/src/constants.ts
@@ -30,3 +30,8 @@ export const PACKAGE_VERSION = version;
export const LANGUAGE_TAG = 'gl-js';
export const DEFAULT_FETCH_TIMEOUT_MS = 180 * 1000;
+
+/**
+ * Defines the name of the default in-cloud model to use for hybrid inference.
+ */
+export const DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.0-flash-lite';
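
With the constant moved off the class, call sites import it directly; the fully-qualified resource name asserted in `api.test.ts` above is formed like this:

```typescript
import { DEFAULT_HYBRID_IN_CLOUD_MODEL } from './constants';

// AIModel normalizes bare model names to fully-qualified resource names.
const fullyQualified = `publishers/google/models/${DEFAULT_HYBRID_IN_CLOUD_MODEL}`;
console.log(fullyQualified); // "publishers/google/models/gemini-2.0-flash-lite"
```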
diff --git a/packages/ai/src/methods/chrome-adapter.test.ts b/packages/ai/src/methods/chrome-adapter.test.ts
index fdc84be71be..fe6b7144724 100644
--- a/packages/ai/src/methods/chrome-adapter.test.ts
+++ b/packages/ai/src/methods/chrome-adapter.test.ts
@@ -27,7 +27,7 @@ import {
LanguageModelMessage
} from '../types/language-model';
import { match, stub } from 'sinon';
-import { GenerateContentRequest, AIErrorCode } from '../types';
+import { GenerateContentRequest, AIErrorCode, InferenceMode } from '../types';
import { Schema } from '../api';
use(sinonChai);
@@ -64,7 +64,7 @@ describe('ChromeAdapter', () => {
).resolves(Availability.available);
const adapter = new ChromeAdapterImpl(
languageModelProvider,
- 'prefer_on_device'
+ InferenceMode.PREFER_ON_DEVICE
);
await adapter.isAvailable({
contents: [
@@ -92,7 +92,7 @@ describe('ChromeAdapter', () => {
} as LanguageModelCreateOptions;
const adapter = new ChromeAdapterImpl(
languageModelProvider,
- 'prefer_on_device',
+ InferenceMode.PREFER_ON_DEVICE,
{
createOptions
}
@@ -126,7 +126,10 @@ describe('ChromeAdapter', () => {
).to.be.false;
});
it('returns false if LanguageModel API is undefined', async () => {
- const adapter = new ChromeAdapterImpl(undefined, 'prefer_on_device');
+ const adapter = new ChromeAdapterImpl(
+ undefined,
+ InferenceMode.PREFER_ON_DEVICE
+ );
expect(
await adapter.isAvailable({
contents: []
@@ -138,7 +141,7 @@ describe('ChromeAdapter', () => {
{
availability: async () => Availability.available
} as LanguageModel,
- 'prefer_on_device'
+ InferenceMode.PREFER_ON_DEVICE
);
expect(
await adapter.isAvailable({
@@ -151,7 +154,7 @@ describe('ChromeAdapter', () => {
{
availability: async () => Availability.available
} as LanguageModel,
- 'prefer_on_device'
+ InferenceMode.PREFER_ON_DEVICE
);
expect(
await adapter.isAvailable({
@@ -169,7 +172,7 @@ describe('ChromeAdapter', () => {
{
availability: async () => Availability.available
} as LanguageModel,
- 'prefer_on_device'
+ InferenceMode.PREFER_ON_DEVICE
);
for (const mimeType of ChromeAdapterImpl.SUPPORTED_MIME_TYPES) {
expect(
@@ -197,7 +200,7 @@ describe('ChromeAdapter', () => {
} as LanguageModel;
const adapter = new ChromeAdapterImpl(
languageModelProvider,
- 'prefer_on_device'
+ InferenceMode.PREFER_ON_DEVICE
);
expect(
await adapter.isAvailable({
@@ -226,7 +229,7 @@ describe('ChromeAdapter', () => {
} as LanguageModelCreateOptions;
const adapter = new ChromeAdapterImpl(
languageModelProvider,
- 'prefer_on_device',
+ InferenceMode.PREFER_ON_DEVICE,
{ createOptions }
);
expect(
@@ -249,7 +252,7 @@ describe('ChromeAdapter', () => {
);
const adapter = new ChromeAdapterImpl(
languageModelProvider,
- 'prefer_on_device'
+ InferenceMode.PREFER_ON_DEVICE
);
await adapter.isAvailable({
contents: [{ role: 'user', parts: [{ text: 'hi' }] }]
@@ -273,7 +276,7 @@ describe('ChromeAdapter', () => {
);
const adapter = new ChromeAdapterImpl(
languageModelProvider,
- 'prefer_on_device'
+ InferenceMode.PREFER_ON_DEVICE
);
await adapter.isAvailable({
contents: [{ role: 'user', parts: [{ text: 'hi' }] }]
@@ -291,7 +294,7 @@ describe('ChromeAdapter', () => {
} as LanguageModel;
const adapter = new ChromeAdapterImpl(
languageModelProvider,
- 'prefer_on_device'
+ InferenceMode.PREFER_ON_DEVICE
);
expect(
await adapter.isAvailable({
@@ -312,7 +315,7 @@ describe('ChromeAdapter', () => {
AIError,
'Chrome AI requested for unsupported browser version.'
)
- .and.have.property('code', AIErrorCode.REQUEST_ERROR);
+ .and.have.property('code', AIErrorCode.UNSUPPORTED);
});
it('generates content', async () => {
const languageModelProvider = {
@@ -333,7 +336,7 @@ describe('ChromeAdapter', () => {
} as LanguageModelCreateOptions;
const adapter = new ChromeAdapterImpl(
languageModelProvider,
- 'prefer_on_device',
+ InferenceMode.PREFER_ON_DEVICE,
{ createOptions }
);
const request = {
@@ -384,7 +387,7 @@ describe('ChromeAdapter', () => {
} as LanguageModelCreateOptions;
const adapter = new ChromeAdapterImpl(
languageModelProvider,
- 'prefer_on_device',
+ InferenceMode.PREFER_ON_DEVICE,
{ createOptions }
);
const request = {
@@ -450,7 +453,7 @@ describe('ChromeAdapter', () => {
};
const adapter = new ChromeAdapterImpl(
languageModelProvider,
- 'prefer_on_device',
+ InferenceMode.PREFER_ON_DEVICE,
{ promptOptions }
);
const request = {
@@ -483,7 +486,7 @@ describe('ChromeAdapter', () => {
} as LanguageModel;
const adapter = new ChromeAdapterImpl(
languageModelProvider,
- 'prefer_on_device'
+ InferenceMode.PREFER_ON_DEVICE
);
const request = {
contents: [{ role: 'model', parts: [{ text: 'unused' }] }]
@@ -519,7 +522,7 @@ describe('ChromeAdapter', () => {
const adapter = new ChromeAdapterImpl(
languageModelProvider,
- 'prefer_on_device'
+ InferenceMode.PREFER_ON_DEVICE
);
const countTokenRequest = {
@@ -563,7 +566,7 @@ describe('ChromeAdapter', () => {
} as LanguageModelCreateOptions;
const adapter = new ChromeAdapterImpl(
languageModelProvider,
- 'prefer_on_device',
+ InferenceMode.PREFER_ON_DEVICE,
{ createOptions }
);
const request = {
@@ -611,7 +614,7 @@ describe('ChromeAdapter', () => {
} as LanguageModelCreateOptions;
const adapter = new ChromeAdapterImpl(
languageModelProvider,
- 'prefer_on_device',
+ InferenceMode.PREFER_ON_DEVICE,
{ createOptions }
);
const request = {
@@ -670,7 +673,7 @@ describe('ChromeAdapter', () => {
};
const adapter = new ChromeAdapterImpl(
languageModelProvider,
- 'prefer_on_device',
+ InferenceMode.PREFER_ON_DEVICE,
{ promptOptions }
);
const request = {
@@ -705,7 +708,7 @@ describe('ChromeAdapter', () => {
} as LanguageModel;
const adapter = new ChromeAdapterImpl(
languageModelProvider,
- 'prefer_on_device'
+ InferenceMode.PREFER_ON_DEVICE
);
const request = {
contents: [{ role: 'model', parts: [{ text: 'unused' }] }]
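
The switch from the `'prefer_on_device'` string literal to `InferenceMode.PREFER_ON_DEVICE` relies on the const-object pattern shown in the `InferenceMode` signature later in this patch. A sketch of that shape (member names inferred from the modes these tests exercise):

```typescript
// The const object provides named members; the derived type is the union of
// its values, so the string literal and the named member are interchangeable
// at the type level.
export const InferenceMode = {
  PREFER_ON_DEVICE: 'prefer_on_device',
  ONLY_ON_DEVICE: 'only_on_device',
  ONLY_IN_CLOUD: 'only_in_cloud'
} as const;

export type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode];
```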
diff --git a/packages/ai/src/models/generative-model.test.ts b/packages/ai/src/models/generative-model.test.ts
index 5c7e80436e1..6ea7470ef5f 100644
--- a/packages/ai/src/models/generative-model.test.ts
+++ b/packages/ai/src/models/generative-model.test.ts
@@ -61,6 +61,7 @@ describe('GenerativeModel', () => {
},
systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] }
},
+ {},
new ChromeAdapterImpl()
);
expect(genModel.tools?.length).to.equal(1);
@@ -99,6 +100,7 @@ describe('GenerativeModel', () => {
model: 'my-model',
systemInstruction: 'be friendly'
},
+ {},
new ChromeAdapterImpl()
);
expect(genModel.systemInstruction?.parts[0].text).to.equal('be friendly');
@@ -142,6 +144,7 @@ describe('GenerativeModel', () => {
},
systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] }
},
+ {},
new ChromeAdapterImpl()
);
expect(genModel.tools?.length).to.equal(1);
@@ -193,6 +196,7 @@ describe('GenerativeModel', () => {
topK: 1
}
},
+ {},
new ChromeAdapterImpl()
);
const chatSession = genModel.startChat();
@@ -210,6 +214,7 @@ describe('GenerativeModel', () => {
topK: 1
}
},
+ {},
new ChromeAdapterImpl()
);
const chatSession = genModel.startChat({
@@ -237,6 +242,7 @@ describe('GenerativeModel', () => {
topK: 1
}
},
+ {},
new ChromeAdapterImpl()
);
expect(genModel.tools?.length).to.equal(1);
@@ -276,6 +282,7 @@ describe('GenerativeModel', () => {
model: 'my-model',
systemInstruction: 'be friendly'
},
+ {},
new ChromeAdapterImpl()
);
expect(genModel.systemInstruction?.parts[0].text).to.equal('be friendly');
@@ -315,6 +322,7 @@ describe('GenerativeModel', () => {
responseMimeType: 'image/jpeg'
}
},
+ {},
new ChromeAdapterImpl()
);
expect(genModel.tools?.length).to.equal(1);
@@ -369,6 +377,7 @@ describe('GenerativeModel', () => {
const genModel = new GenerativeModel(
fakeAI,
{ model: 'my-model' },
+ {},
new ChromeAdapterImpl()
);
const mockResponse = getMockResponse(
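
The added `{}` reflects the constructor order documented earlier (`ai, modelParams, requestOptions, chromeAdapter`): request options now sit between the model params and the adapter. A sketch of the call shape used throughout these tests (`fakeAI` is the test double defined at the top of the file):

```typescript
const genModel = new GenerativeModel(
  fakeAI,                  // AI instance (a fake in these tests)
  { model: 'my-model' },   // ModelParams
  {},                      // RequestOptions, now passed explicitly
  new ChromeAdapterImpl()  // optional on-device adapter
);
```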
diff --git a/packages/ai/src/models/generative-model.ts b/packages/ai/src/models/generative-model.ts
index d0376d9bace..ffce645eeb1 100644
--- a/packages/ai/src/models/generative-model.ts
+++ b/packages/ai/src/models/generative-model.ts
@@ -50,10 +50,6 @@ import { ChromeAdapter } from '../types/chrome-adapter';
* @public
*/
export class GenerativeModel extends AIModel {
- /**
- * Defines the name of the default in-cloud model to use for hybrid inference.
- */
- static DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.0-flash-lite';
generationConfig: GenerationConfig;
safetySettings: SafetySetting[];
requestOptions?: RequestOptions;
diff --git a/packages/ai/src/types/chrome-adapter.ts b/packages/ai/src/types/chrome-adapter.ts
index 6e7a1313535..ec351b3d06a 100644
--- a/packages/ai/src/types/chrome-adapter.ts
+++ b/packages/ai/src/types/chrome-adapter.ts
@@ -1,16 +1,31 @@
-import { CountTokensRequest, GenerateContentRequest } from "./requests";
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { CountTokensRequest, GenerateContentRequest } from './requests';
/**
* Defines an inference "backend" that uses Chrome's on-device model,
* and encapsulates logic for detecting when on-device is possible.
- *
+ *
* @public
*/
export interface ChromeAdapter {
-  isAvailable(request: GenerateContentRequest): Promise<boolean>;
+  /**
+   * Checks if a given request can be made on-device.
+   *
+   * Encapsulates a few concerns:
+   *
+   * <ol>
+   * <li>the mode</li>
+   * <li>API existence</li>
+   * <li>prompt formatting</li>
+   * <li>model availability, including triggering download if necessary</li>
+   * </ol>
+   *
+   * <p>Pros: callers needn't be concerned with details of on-device availability.</p>
+   * <p>Cons: this method spans a few concerns and splits request validation from usage.
+   * If instance variables weren't already part of the API, we could consider a better
+   * separation of concerns.</p>
+   */
+  isAvailable(request: GenerateContentRequest): Promise<boolean>;
  generateContent(request: GenerateContentRequest): Promise<Response>;
  generateContentStream(request: GenerateContentRequest): Promise<Response>;
  countTokens(request: CountTokensRequest): Promise<Response>;
 }
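
Since `ChromeAdapter` is now a public interface, any conforming object can stand in for the Chrome-backed implementation. A hypothetical minimal implementation against the interface as it stands at this point in the patch (`countTokens` is still present here; a later hunk removes it):

```typescript
import {
  ChromeAdapter,
  CountTokensRequest,
  GenerateContentRequest
} from 'firebase/ai';

// A trivial adapter that always reports on-device as unavailable, useful as
// a test double that forces in-cloud inference.
class NeverOnDeviceAdapter implements ChromeAdapter {
  async isAvailable(_request: GenerateContentRequest): Promise<boolean> {
    return false;
  }
  async generateContent(_request: GenerateContentRequest): Promise<Response> {
    throw new Error('on-device inference is disabled');
  }
  async generateContentStream(
    _request: GenerateContentRequest
  ): Promise<Response> {
    throw new Error('on-device inference is disabled');
  }
  async countTokens(_request: CountTokensRequest): Promise<Response> {
    throw new Error('on-device inference is disabled');
  }
}
```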
diff --git a/docs-devsite/ai.md b/docs-devsite/ai.md
--- a/docs-devsite/ai.md
+++ b/docs-devsite/ai.md
@@ -90,16 +90,16 @@ The Firebase AI Web SDK.
| [ObjectSchemaRequest](./ai.objectschemarequest.md#objectschemarequest_interface) | Interface for JSON parameters in a schema of [SchemaType](./ai.md#schematype) "object" when not using the Schema.object()
helper. |
-| [OnDeviceParams](./ai.ondeviceparams.md#ondeviceparams_interface) | (EXPERIMENTAL) Encapsulates configuration for on-device inference. |
+| [OnDeviceParams](./ai.ondeviceparams.md#ondeviceparams_interface) | <b>(EXPERIMENTAL)</b> Encapsulates configuration for on-device inference. |
| [PromptFeedback](./ai.promptfeedback.md#promptfeedback_interface) | If the prompt was blocked, this will be populated with blockReason
and the relevant safetyRatings
. |
| [RequestOptions](./ai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a). |
| [RetrievedContextAttribution](./ai.retrievedcontextattribution.md#retrievedcontextattribution_interface) | |
@@ -137,7 +137,7 @@ The Firebase AI Web SDK.
| [ImagenAspectRatio](./ai.md#imagenaspectratio) | (Public Preview) Aspect ratios for Imagen images.To specify an aspect ratio for generated images, set the aspectRatio
property in your [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface).See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details and examples of the supported aspect ratios. |
| [ImagenPersonFilterLevel](./ai.md#imagenpersonfilterlevel) | (Public Preview) A filter level controlling whether generation of images containing people or faces is allowed.See the personGeneration documentation for more details. |
| [ImagenSafetyFilterLevel](./ai.md#imagensafetyfilterlevel) | (Public Preview) A filter level controlling how aggressively to filter sensitive content.Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI are assessed against a list of safety filters, which include 'harmful categories' (for example, violence
, sexual
, derogatory
, and toxic
). This filter level controls how aggressively to filter out potentially harmful content from responses. See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) and the [Responsible AI and usage guidelines](https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters) for more details. |
-| [InferenceMode](./ai.md#inferencemode) | (EXPERIMENTAL) Determines whether inference happens on-device or in-cloud. |
+| [InferenceMode](./ai.md#inferencemode) | <b>(EXPERIMENTAL)</b> Determines whether inference happens on-device or in-cloud. |
| [Modality](./ai.md#modality) | Content part modality. |
| [POSSIBLE\_ROLES](./ai.md#possible_roles) | Possible roles. |
| [ResponseModality](./ai.md#responsemodality) | (Public Preview) Generation modalities to be returned in generation responses. |
@@ -160,10 +160,10 @@ The Firebase AI Web SDK.
| [ImagenAspectRatio](./ai.md#imagenaspectratio) | (Public Preview) Aspect ratios for Imagen images.To specify an aspect ratio for generated images, set the aspectRatio
property in your [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface).See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details and examples of the supported aspect ratios. |
| [ImagenPersonFilterLevel](./ai.md#imagenpersonfilterlevel) | (Public Preview) A filter level controlling whether generation of images containing people or faces is allowed.See the personGeneration documentation for more details. |
| [ImagenSafetyFilterLevel](./ai.md#imagensafetyfilterlevel) | (Public Preview) A filter level controlling how aggressively to filter sensitive content.Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI are assessed against a list of safety filters, which include 'harmful categories' (for example, violence
, sexual
, derogatory
, and toxic
). This filter level controls how aggressively to filter out potentially harmful content from responses. See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) and the [Responsible AI and usage guidelines](https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters) for more details. |
-| [InferenceMode](./ai.md#inferencemode) | (EXPERIMENTAL) Determines whether inference happens on-device or in-cloud. |
-| [LanguageModelMessageContentValue](./ai.md#languagemodelmessagecontentvalue) | (EXPERIMENTAL) Content formats that can be provided as on-device message content. |
-| [LanguageModelMessageRole](./ai.md#languagemodelmessagerole) | (EXPERIMENTAL) Allowable roles for on-device language model usage. |
-| [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | (EXPERIMENTAL) Allowable types for on-device language model messages. |
+| [InferenceMode](./ai.md#inferencemode) | <b>(EXPERIMENTAL)</b> Determines whether inference happens on-device or in-cloud. |
+| [LanguageModelMessageContentValue](./ai.md#languagemodelmessagecontentvalue) | <b>(EXPERIMENTAL)</b> Content formats that can be provided as on-device message content. |
+| [LanguageModelMessageRole](./ai.md#languagemodelmessagerole) | <b>(EXPERIMENTAL)</b> Allowable roles for on-device language model usage. |
+| [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | <b>(EXPERIMENTAL)</b> Allowable types for on-device language model messages. |
| [Modality](./ai.md#modality) | Content part modality. |
| [Part](./ai.md#part) | Content part - includes text, image/video, or function call/response part types. |
| [ResponseModality](./ai.md#responsemodality) | (Public Preview) Generation modalities to be returned in generation responses. |
@@ -504,7 +504,7 @@ ImagenSafetyFilterLevel: {
## InferenceMode
-(EXPERIMENTAL) Determines whether inference happens on-device or in-cloud.
+<b>(EXPERIMENTAL)</b> Determines whether inference happens on-device or in-cloud.
Signature:
@@ -724,7 +724,7 @@ export type ImagenSafetyFilterLevel = (typeof ImagenSafetyFilterLevel)[keyof typ
## InferenceMode
-(EXPERIMENTAL) Determines whether inference happens on-device or in-cloud.
+<b>(EXPERIMENTAL)</b> Determines whether inference happens on-device or in-cloud.
Signature:
@@ -734,7 +734,7 @@ export type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode];
## LanguageModelMessageContentValue
-(EXPERIMENTAL) Content formats that can be provided as on-device message content.
+<b>(EXPERIMENTAL)</b> Content formats that can be provided as on-device message content.
Signature:
@@ -744,7 +744,7 @@ export type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer |
## LanguageModelMessageRole
-(EXPERIMENTAL) Allowable roles for on-device language model usage.
+<b>(EXPERIMENTAL)</b> Allowable roles for on-device language model usage.
Signature:
@@ -754,7 +754,7 @@ export type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
## LanguageModelMessageType
-(EXPERIMENTAL) Allowable types for on-device language model messages.
+<b>(EXPERIMENTAL)</b> Allowable types for on-device language model messages.
Signature:
diff --git a/docs-devsite/ai.ondeviceparams.md b/docs-devsite/ai.ondeviceparams.md
index 77a4b8aab85..bce68ff8174 100644
--- a/docs-devsite/ai.ondeviceparams.md
+++ b/docs-devsite/ai.ondeviceparams.md
@@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}
# OnDeviceParams interface
-(EXPERIMENTAL) Encapsulates configuration for on-device inference.
+<b>(EXPERIMENTAL)</b> Encapsulates configuration for on-device inference.
Signature:
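
`OnDeviceParams` is what carries these options to Chrome's `LanguageModel.create()`. A sketch mirroring the `createOptions` fixtures in the tests above (reusing `ai` and `getGenerativeModel` from the earlier sketches):

```typescript
import { OnDeviceParams } from 'firebase/ai';

// expectedInputs primes the on-device session for image parts, as in the
// multimodal test cases earlier in this patch.
const onDeviceParams: OnDeviceParams = {
  createOptions: {
    expectedInputs: [{ type: 'image' }]
  }
};

const model = getGenerativeModel(ai, {
  mode: 'prefer_on_device',
  onDeviceParams
});
```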
diff --git a/packages/ai/src/methods/chrome-adapter.ts b/packages/ai/src/methods/chrome-adapter.ts
index a3c13e514d7..4dea4170c0d 100644
--- a/packages/ai/src/methods/chrome-adapter.ts
+++ b/packages/ai/src/methods/chrome-adapter.ts
@@ -38,7 +38,8 @@ import {
/**
* Defines an inference "backend" that uses Chrome's on-device model,
- * and encapsulates logic for detecting when on-device is possible.
+ * and encapsulates logic for detecting when on-device inference is
+ * possible.
*/
export class ChromeAdapterImpl implements ChromeAdapter {
// Visible for testing
diff --git a/packages/ai/src/types/chrome-adapter.ts b/packages/ai/src/types/chrome-adapter.ts
index ffc8186669e..74df8b387c4 100644
--- a/packages/ai/src/types/chrome-adapter.ts
+++ b/packages/ai/src/types/chrome-adapter.ts
@@ -15,13 +15,12 @@
* limitations under the License.
*/
-import { CountTokensRequest, GenerateContentRequest } from './requests';
+import { GenerateContentRequest } from './requests';
/**
- * (EXPERIMENTAL)
- *
- * Defines an inference "backend" that uses Chrome's on-device model,
- * and encapsulates logic for detecting when on-device is possible.
+ * <b>(EXPERIMENTAL)</b> Defines an inference "backend" that uses Chrome's on-device model,
+ * and encapsulates logic for detecting when on-device inference is
+ * possible.
*
* These methods should not be called directly by the user.
*
@@ -29,44 +28,27 @@ import { CountTokensRequest, GenerateContentRequest } from './requests';
*/
export interface ChromeAdapter {
/**
- * Checks if a given request can be made on-device.
- *
- * Encapsulates a few concerns:
- *
- *
- *