17 changes: 16 additions & 1 deletion common/api-review/vertexai.api.md
@@ -344,7 +344,7 @@ export class GenerativeModel extends VertexAIModel {
}

// @public
export function getGenerativeModel(vertexAI: VertexAI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel;
export function getGenerativeModel(vertexAI: VertexAI, onCloudOrHybridParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel;

// @beta
export function getImagenModel(vertexAI: VertexAI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel;
@@ -416,6 +416,18 @@ export enum HarmSeverity {
HARM_SEVERITY_NEGLIGIBLE = "HARM_SEVERITY_NEGLIGIBLE"
}

// @public
export interface HybridParams {
// (undocumented)
mode?: InferenceMode;
// (undocumented)
onCloudParams?: ModelParams;
// Warning: (ae-forgotten-export) The symbol "LanguageModelCreateOptions" needs to be exported by the entry point index.d.ts
//
// (undocumented)
onDeviceParams?: LanguageModelCreateOptions;
}

// @beta
export enum ImagenAspectRatio {
LANDSCAPE_16x9 = "16:9",
@@ -500,6 +512,9 @@ export interface ImagenSafetySettings {
safetyFilterLevel?: ImagenSafetyFilterLevel;
}

// @public
export type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud';

// @public
export interface InlineDataPart {
// (undocumented)
2 changes: 2 additions & 0 deletions docs-devsite/_toc.yaml
@@ -536,6 +536,8 @@ toc:
path: /docs/reference/js/vertexai.groundingattribution.md
- title: GroundingMetadata
path: /docs/reference/js/vertexai.groundingmetadata.md
- title: HybridParams
path: /docs/reference/js/vertexai.hybridparams.md
- title: ImagenGCSImage
path: /docs/reference/js/vertexai.imagengcsimage.md
- title: ImagenGenerationConfig
51 changes: 51 additions & 0 deletions docs-devsite/vertexai.hybridparams.md
@@ -0,0 +1,51 @@
Project: /docs/reference/js/_project.yaml
Book: /docs/reference/_book.yaml
page_type: reference

{% comment %}
DO NOT EDIT THIS FILE!
This is generated by the JS SDK team, and any local changes will be
overwritten. Changes should be made in the source code at
https://github.com/firebase/firebase-js-sdk
{% endcomment %}

# HybridParams interface
Configures on-device and on-cloud inference.

<b>Signature:</b>

```typescript
export interface HybridParams
```

## Properties

| Property | Type | Description |
| --- | --- | --- |
| [mode](./vertexai.hybridparams.md#hybridparamsmode) | [InferenceMode](./vertexai.md#inferencemode) | |
| [onCloudParams](./vertexai.hybridparams.md#hybridparamsoncloudparams) | [ModelParams](./vertexai.modelparams.md#modelparams_interface) | |
| [onDeviceParams](./vertexai.hybridparams.md#hybridparamsondeviceparams) | LanguageModelCreateOptions | |

## HybridParams.mode

<b>Signature:</b>

```typescript
mode?: InferenceMode;
```

## HybridParams.onCloudParams

<b>Signature:</b>

```typescript
onCloudParams?: ModelParams;
```

## HybridParams.onDeviceParams

<b>Signature:</b>

```typescript
onDeviceParams?: LanguageModelCreateOptions;
```
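
The generated reference page above has no usage sample, so here is a minimal sketch of how the new `HybridParams` overload of `getGenerativeModel()` might be called. The field names and `InferenceMode` values come from this PR; the `firebase/vertexai` import path, model name, and option values are assumptions for illustration only.

```typescript
import { getVertexAI, getGenerativeModel, HybridParams } from 'firebase/vertexai';

const vertexAI = getVertexAI();

// `mode` selects where inference runs, `onCloudParams` configures the
// cloud model, and `onDeviceParams` is forwarded to the on-device
// language model API declared in language-model.ts.
const hybridParams: HybridParams = {
  mode: 'prefer_on_device',
  onCloudParams: { model: 'gemini-2.0-flash-lite' },
  onDeviceParams: { temperature: 0.8, topK: 10 }
};

const model = getGenerativeModel(vertexAI, hybridParams);

// Top-level await assumes an ES module context.
const result = await model.generateContent('Summarize this page in one sentence.');
```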
24 changes: 18 additions & 6 deletions docs-devsite/vertexai.md
@@ -19,7 +19,7 @@ The Vertex AI in Firebase Web SDK.
| <b>function(app, ...)</b> |
| [getVertexAI(app, options)](./vertexai.md#getvertexai_04094cf) | Returns a [VertexAI](./vertexai.vertexai.md#vertexai_interface) instance for the given app. |
| <b>function(vertexAI, ...)</b> |
| [getGenerativeModel(vertexAI, modelParams, requestOptions)](./vertexai.md#getgenerativemodel_e3037c9) | Returns a [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. |
| [getGenerativeModel(vertexAI, onCloudOrHybridParams, requestOptions)](./vertexai.md#getgenerativemodel_202434f) | Returns a [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. |
| [getImagenModel(vertexAI, modelParams, requestOptions)](./vertexai.md#getimagenmodel_812c375) | <b><i>(Public Preview)</i></b> Returns an [ImagenModel](./vertexai.imagenmodel.md#imagenmodel_class) class with methods for using Imagen.<!-- -->Only Imagen 3 models (named <code>imagen-3.0-*</code>) are supported. |

## Classes
@@ -91,6 +91,7 @@ The Vertex AI in Firebase Web SDK.
| [GenerativeContentBlob](./vertexai.generativecontentblob.md#generativecontentblob_interface) | Interface for sending an image. |
| [GroundingAttribution](./vertexai.groundingattribution.md#groundingattribution_interface) | |
| [GroundingMetadata](./vertexai.groundingmetadata.md#groundingmetadata_interface) | Metadata returned to client when grounding is enabled. |
| [HybridParams](./vertexai.hybridparams.md#hybridparams_interface) | Configures on-device and on-cloud inference. |
| [ImagenGCSImage](./vertexai.imagengcsimage.md#imagengcsimage_interface) | An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.<!-- -->This feature is not available yet. |
| [ImagenGenerationConfig](./vertexai.imagengenerationconfig.md#imagengenerationconfig_interface) | <b><i>(Public Preview)</i></b> Configuration options for generating images with Imagen.<!-- -->See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images-imagen) for more details. |
| [ImagenGenerationResponse](./vertexai.imagengenerationresponse.md#imagengenerationresponse_interface) | <b><i>(Public Preview)</i></b> The response from a request to generate images with Imagen. |
@@ -99,10 +100,10 @@ The Vertex AI in Firebase Web SDK.
| [ImagenSafetySettings](./vertexai.imagensafetysettings.md#imagensafetysettings_interface) | <b><i>(Public Preview)</i></b> Settings for controlling the aggressiveness of filtering out sensitive content.<!-- -->See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details. |
| [InlineDataPart](./vertexai.inlinedatapart.md#inlinedatapart_interface) | Content part interface if the part represents an image. |
| [ModalityTokenCount](./vertexai.modalitytokencount.md#modalitytokencount_interface) | Represents token counting info for a single modality. |
| [ModelParams](./vertexai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_e3037c9)<!-- -->. |
| [ModelParams](./vertexai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_202434f)<!-- -->. |
| [ObjectSchemaInterface](./vertexai.objectschemainterface.md#objectschemainterface_interface) | Interface for [ObjectSchema](./vertexai.objectschema.md#objectschema_class) class. |
| [PromptFeedback](./vertexai.promptfeedback.md#promptfeedback_interface) | If the prompt was blocked, this will be populated with <code>blockReason</code> and the relevant <code>safetyRatings</code>. |
| [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_e3037c9)<!-- -->. |
| [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_202434f)<!-- -->. |
| [RetrievedContextAttribution](./vertexai.retrievedcontextattribution.md#retrievedcontextattribution_interface) | |
| [SafetyRating](./vertexai.safetyrating.md#safetyrating_interface) | A safety rating associated with a [GenerateContentCandidate](./vertexai.generatecontentcandidate.md#generatecontentcandidate_interface) |
| [SafetySetting](./vertexai.safetysetting.md#safetysetting_interface) | Safety setting that can be sent as part of request parameters. |
@@ -130,6 +131,7 @@ The Vertex AI in Firebase Web SDK.

| Type Alias | Description |
| --- | --- |
| [InferenceMode](./vertexai.md#inferencemode) | Determines whether inference happens on-device or in-cloud. |
| [Part](./vertexai.md#part) | Content part - includes text, image/video, or function call/response part types. |
| [Role](./vertexai.md#role) | Role is the producer of the content. |
| [Tool](./vertexai.md#tool) | Defines a tool that model can call to access external knowledge. |
@@ -160,22 +162,22 @@ export declare function getVertexAI(app?: FirebaseApp, options?: VertexAIOptions

## function(vertexAI, ...)

### getGenerativeModel(vertexAI, modelParams, requestOptions) {:#getgenerativemodel_e3037c9}
### getGenerativeModel(vertexAI, onCloudOrHybridParams, requestOptions) {:#getgenerativemodel_202434f}

Returns a [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality.

<b>Signature:</b>

```typescript
export declare function getGenerativeModel(vertexAI: VertexAI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel;
export declare function getGenerativeModel(vertexAI: VertexAI, onCloudOrHybridParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel;
```

#### Parameters

| Parameter | Type | Description |
| --- | --- | --- |
| vertexAI | [VertexAI](./vertexai.vertexai.md#vertexai_interface) | |
| modelParams | [ModelParams](./vertexai.modelparams.md#modelparams_interface) | |
| onCloudOrHybridParams | [ModelParams](./vertexai.modelparams.md#modelparams_interface) \| [HybridParams](./vertexai.hybridparams.md#hybridparams_interface) | |
| requestOptions | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | |

<b>Returns:</b>
@@ -223,6 +225,16 @@ Possible roles.
POSSIBLE_ROLES: readonly ["user", "model", "function", "system"]
```

## InferenceMode

Determines whether inference happens on-device or in-cloud.

<b>Signature:</b>

```typescript
export type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud';
```

## Part

Content part - includes text, image/video, or function call/response part types.
2 changes: 1 addition & 1 deletion docs-devsite/vertexai.modelparams.md
@@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}

# ModelParams interface
Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_e3037c9)<!-- -->.
Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_202434f)<!-- -->.

<b>Signature:</b>

2 changes: 1 addition & 1 deletion docs-devsite/vertexai.requestoptions.md
@@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk
{% endcomment %}

# RequestOptions interface
Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_e3037c9)<!-- -->.
Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_202434f)<!-- -->.

<b>Signature:</b>

18 changes: 15 additions & 3 deletions packages/vertexai/src/api.ts
@@ -23,6 +23,7 @@ import { VertexAIService } from './service';
import { VertexAI, VertexAIOptions } from './public-types';
import {
ImagenModelParams,
HybridParams,
ModelParams,
RequestOptions,
VertexAIErrorCode
@@ -70,16 +71,27 @@ export function getVertexAI(
*/
export function getGenerativeModel(
vertexAI: VertexAI,
modelParams: ModelParams,
onCloudOrHybridParams: ModelParams | HybridParams,
requestOptions?: RequestOptions
): GenerativeModel {
if (!modelParams.model) {
// Disambiguates onCloudOrHybridParams input.
const hybridParams = onCloudOrHybridParams as HybridParams;
let onCloudParams: ModelParams;
if (hybridParams.mode) {
onCloudParams = hybridParams.onCloudParams || {
model: 'gemini-2.0-flash-lite'
};
} else {
onCloudParams = onCloudOrHybridParams as ModelParams;
}

if (!onCloudParams.model) {
throw new VertexAIError(
VertexAIErrorCode.NO_MODEL,
`Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })`
);
}
return new GenerativeModel(vertexAI, modelParams, requestOptions);
return new GenerativeModel(vertexAI, onCloudParams, requestOptions);
}

/**
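
To make the effect of the disambiguation above concrete, here is a sketch of the two call shapes the updated signature accepts. The app setup and model names are illustrative; the fallback to `'gemini-2.0-flash-lite'` is the default applied in the branch above when `mode` is set but `onCloudParams` is omitted.

```typescript
import { getVertexAI, getGenerativeModel } from 'firebase/vertexai';

const vertexAI = getVertexAI();

// Existing call shape: a plain ModelParams object, unchanged by this PR.
const cloudModel = getGenerativeModel(vertexAI, {
  model: 'gemini-2.0-flash'
});

// New call shape: a HybridParams object. Because `mode` is set and
// `onCloudParams` is omitted, getGenerativeModel() falls back to the
// default 'gemini-2.0-flash-lite' cloud model.
const hybridModel = getGenerativeModel(vertexAI, {
  mode: 'prefer_on_device'
});
```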
84 changes: 84 additions & 0 deletions packages/vertexai/src/types/language-model.ts
@@ -0,0 +1,84 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

export interface LanguageModel extends EventTarget {
create(options?: LanguageModelCreateOptions): Promise<LanguageModel>;
availability(options?: LanguageModelCreateCoreOptions): Promise<Availability>;
prompt(
input: LanguageModelPrompt,
options?: LanguageModelPromptOptions
): Promise<string>;
promptStreaming(
input: LanguageModelPrompt,
options?: LanguageModelPromptOptions
): ReadableStream;
measureInputUsage(
input: LanguageModelPrompt,
options?: LanguageModelPromptOptions
): Promise<number>;
destroy(): undefined;
}
enum Availability {
'unavailable',
'downloadable',
'downloading',
'available'
}
export interface LanguageModelCreateCoreOptions {
topK?: number;
temperature?: number;
expectedInputs?: LanguageModelExpectedInput[];
}
export interface LanguageModelCreateOptions
extends LanguageModelCreateCoreOptions {
signal?: AbortSignal;
systemPrompt?: string;
initialPrompts?: LanguageModelInitialPrompts;
}
interface LanguageModelPromptOptions {
signal?: AbortSignal;
}
interface LanguageModelExpectedInput {
type: LanguageModelMessageType;
languages?: string[];
}
type LanguageModelPrompt =
| LanguageModelMessage[]
| LanguageModelMessageShorthand[]
| string;
type LanguageModelInitialPrompts =
| LanguageModelMessage[]
| LanguageModelMessageShorthand[];
interface LanguageModelMessage {
role: LanguageModelMessageRole;
content: LanguageModelMessageContent[];
}
interface LanguageModelMessageShorthand {
role: LanguageModelMessageRole;
content: string;
}
interface LanguageModelMessageContent {
type: LanguageModelMessageType;
content: LanguageModelMessageContentValue;
}
type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
type LanguageModelMessageType = 'text' | 'image' | 'audio';
type LanguageModelMessageContentValue =
| ImageBitmapSource
| AudioBuffer
| BufferSource
| string;
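
The declarations above mirror the shape of the browser's on-device language model (Prompt) API. As a sanity check, the sketch below builds a `LanguageModelCreateOptions` value that type-checks against them; the prompt text and option values are invented for illustration, and the relative import path assumes a file placed alongside this one in `src/types/`.

```typescript
import { LanguageModelCreateOptions } from './language-model';

// A create-options value matching the interfaces declared above.
// `expectedInputs` says the session should accept plain text in English;
// `initialPrompts` uses the shorthand message form (string content).
const onDeviceParams: LanguageModelCreateOptions = {
  temperature: 0.7,
  topK: 5,
  expectedInputs: [{ type: 'text', languages: ['en'] }],
  systemPrompt: 'You are a concise assistant.',
  initialPrompts: [
    { role: 'user', content: 'Hello!' },
    { role: 'assistant', content: 'Hi! How can I help?' }
  ]
};
```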
19 changes: 19 additions & 0 deletions packages/vertexai/src/types/requests.ts
@@ -17,6 +17,7 @@

import { TypedSchema } from '../requests/schema-builder';
import { Content, Part } from './content';
import { LanguageModelCreateOptions } from './language-model';
import {
FunctionCallingMode,
HarmBlockMethod,
@@ -213,3 +214,21 @@ export interface FunctionCallingConfig {
mode?: FunctionCallingMode;
allowedFunctionNames?: string[];
}

/**
* Configures on-device and on-cloud inference.
* @public
*/
export interface HybridParams {
mode?: InferenceMode;
onDeviceParams?: LanguageModelCreateOptions;
onCloudParams?: ModelParams;
}

/**
* Determines whether inference happens on-device or in-cloud.
*/
export type InferenceMode =
| 'prefer_on_device'
| 'only_on_device'
| 'only_in_cloud';
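
The union above is also what api.ts keys on to tell the two parameter types apart. A hypothetical type-guard helper, not part of this PR, that captures the same rule might look like this:

```typescript
import { HybridParams, ModelParams } from './requests';

// Hypothetical helper mirroring the check in getGenerativeModel():
// a params object with `mode` set is treated as HybridParams,
// anything else as plain ModelParams.
function isHybridParams(
  params: ModelParams | HybridParams
): params is HybridParams {
  return (params as HybridParams).mode !== undefined;
}
```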