diff --git a/.github/workflows/manual-publish.yml b/.github/workflows/manual-publish.yml index be2ce44c61..09c66d0f5f 100644 --- a/.github/workflows/manual-publish.yml +++ b/.github/workflows/manual-publish.yml @@ -35,6 +35,7 @@ on: - packages/tooling/jest - packages/sdk/browser - packages/sdk/server-ai + - packages/ai-providers/server-ai-vercel - packages/ai-providers/server-ai-langchain - packages/telemetry/browser-telemetry - packages/sdk/combined-browser diff --git a/.github/workflows/server-ai-vercel.yml b/.github/workflows/server-ai-vercel.yml new file mode 100644 index 0000000000..caf346d736 --- /dev/null +++ b/.github/workflows/server-ai-vercel.yml @@ -0,0 +1,27 @@ +name: ai-providers/server-ai-vercel + +on: + push: + branches: [main, 'feat/**'] + paths-ignore: + - '**.md' #Do not need to run CI for markdown changes. + pull_request: + branches: [main, 'feat/**'] + paths-ignore: + - '**.md' + +jobs: + build-test-vercel-provider: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0 + with: + node-version: 22.x + registry-url: 'https://registry.npmjs.org' + - id: shared + name: Shared CI Steps + uses: ./actions/ci + with: + workspace_name: '@launchdarkly/server-sdk-ai-vercel' + workspace_path: packages/ai-providers/server-ai-vercel diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 3b5cccdac7..8adc53c3ab 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,5 +1,6 @@ { "packages/ai-providers/server-ai-langchain": "0.1.0", + "packages/ai-providers/server-ai-vercel": "0.0.0", "packages/sdk/akamai-base": "3.0.10", "packages/sdk/akamai-edgekv": "1.4.12", "packages/sdk/browser": "0.8.1", diff --git a/README.md b/README.md index 8d8f3fdce7..6d26cac6af 100644 --- a/README.md +++ b/README.md @@ -38,6 +38,7 @@ This includes shared libraries, used by SDKs and other tools, as 
well as SDKs. | AI Providers | npm | issues | tests | | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------- | ------------------------------------------- | ------------------------------------------------------------------- | | [@launchdarkly/server-sdk-ai-langchain](packages/ai-providers/server-ai-langchain/README.md) | [![NPM][server-ai-langchain-npm-badge]][server-ai-langchain-npm-link] | [server-ai-langchain][package-ai-providers-server-ai-langchain-issues] | [![Actions Status][server-ai-langchain-ci-badge]][server-ai-langchain-ci] | +| [@launchdarkly/server-sdk-ai-vercel](packages/ai-providers/server-ai-vercel/README.md) | [![NPM][server-ai-vercel-npm-badge]][server-ai-vercel-npm-link] | [server-ai-vercel][package-ai-providers-server-ai-vercel-issues] | [![Actions Status][server-ai-vercel-ci-badge]][server-ai-vercel-ci] | ## Organization @@ -229,4 +230,10 @@ We encourage pull requests and other contributions from the community. 
Check out [server-ai-langchain-ci]: https://github.com/launchdarkly/js-core/actions/workflows/server-ai-langchain.yml [server-ai-langchain-npm-badge]: https://img.shields.io/npm/v/@launchdarkly/server-sdk-ai-langchain.svg?style=flat-square [server-ai-langchain-npm-link]: https://www.npmjs.com/package/@launchdarkly/server-sdk-ai-langchain -[package-ai-providers-server-ai-langchain-issues]: https://github.com/launchdarkly/js-core/issues?q=is%3Aissue+is%3Aopen+label%3A%22package%3A+ai-providers%2Fserver-ai-langchain%22+ \ No newline at end of file +[package-ai-providers-server-ai-langchain-issues]: https://github.com/launchdarkly/js-core/issues?q=is%3Aissue+is%3Aopen+label%3A%22package%3A+ai-providers%2Fserver-ai-langchain%22+ +[//]: # 'ai-providers/server-ai-vercel' +[server-ai-vercel-ci-badge]: https://github.com/launchdarkly/js-core/actions/workflows/server-ai-vercel.yml/badge.svg +[server-ai-vercel-ci]: https://github.com/launchdarkly/js-core/actions/workflows/server-ai-vercel.yml +[server-ai-vercel-npm-badge]: https://img.shields.io/npm/v/@launchdarkly/server-sdk-ai-vercel.svg?style=flat-square +[server-ai-vercel-npm-link]: https://www.npmjs.com/package/@launchdarkly/server-sdk-ai-vercel +[package-ai-providers-server-ai-vercel-issues]: https://github.com/launchdarkly/js-core/issues?q=is%3Aissue+is%3Aopen+label%3A%22package%3A+ai-providers%2Fserver-ai-vercel%22+ \ No newline at end of file diff --git a/package.json b/package.json index e05ef41e6b..6ecf0bb14f 100644 --- a/package.json +++ b/package.json @@ -2,6 +2,7 @@ "name": "@launchdarkly/js-core", "workspaces": [ "packages/ai-providers/server-ai-langchain", + "packages/ai-providers/server-ai-vercel", "packages/shared/common", "packages/shared/sdk-client", "packages/shared/sdk-server", diff --git a/packages/ai-providers/server-ai-vercel/README.md b/packages/ai-providers/server-ai-vercel/README.md new file mode 100644 index 0000000000..12e7dd12d2 --- /dev/null +++ 
b/packages/ai-providers/server-ai-vercel/README.md @@ -0,0 +1,111 @@ +# LaunchDarkly AI SDK Vercel Provider for Server-Side JavaScript + +[![NPM][server-ai-vercel-npm-badge]][server-ai-vercel-npm-link] +[![Actions Status][server-ai-vercel-ci-badge]][server-ai-vercel-ci] +[![Documentation][server-ai-vercel-ghp-badge]][server-ai-vercel-ghp-link] +[![NPM][server-ai-vercel-dm-badge]][server-ai-vercel-npm-link] +[![NPM][server-ai-vercel-dt-badge]][server-ai-vercel-npm-link] + +# ⛔️⛔️⛔️⛔️ + +> [!CAUTION] +> This library is an alpha version and should not be considered ready for production use while this message is visible. + +# ☝️☝️☝️☝️☝️☝️ + +## LaunchDarkly overview + +[LaunchDarkly](https://www.launchdarkly.com) is a feature management platform that serves over 100 billion feature flags daily to help teams build better software, faster. [Get started](https://docs.launchdarkly.com/home/getting-started) using LaunchDarkly today! + +[![Twitter Follow](https://img.shields.io/twitter/follow/launchdarkly.svg?style=social&label=Follow&maxAge=2592000)](https://twitter.com/intent/follow?screen_name=launchdarkly) + +## Quick Setup + +This package provides Vercel AI SDK integration for the LaunchDarkly AI SDK. The simplest way to use it is with the LaunchDarkly AI SDK's `initChat` method: + +1. Install the required packages: + +```shell +npm install @launchdarkly/server-sdk-ai @launchdarkly/server-sdk-ai-vercel --save +# or +yarn add @launchdarkly/server-sdk-ai @launchdarkly/server-sdk-ai-vercel +``` + +2. 
Create a chat session and use it: + +```typescript +import { init } from '@launchdarkly/node-server-sdk'; +import { initAi } from '@launchdarkly/server-sdk-ai'; + +// Initialize LaunchDarkly client +const ldClient = init(sdkKey); +const aiClient = initAi(ldClient); + +// Create a chat session +const defaultConfig = { + enabled: true, + model: { name: 'gpt-4' }, + provider: { name: 'openai' } +}; +const chat = await aiClient.initChat('my-chat-config', context, defaultConfig); + +if (chat) { + const response = await chat.invoke('What is the capital of France?'); + console.log(response.message.content); +} +``` + +For more information about using the LaunchDarkly AI SDK, see the [LaunchDarkly AI SDK documentation](https://github.com/launchdarkly/js-core/tree/main/packages/sdk/server-ai/README.md). + +## Advanced Usage + +For more control, you can use the Vercel AI provider package directly with LaunchDarkly configurations: + +```typescript +import { VercelProvider } from '@launchdarkly/server-sdk-ai-vercel'; +import { generateText } from 'ai'; + +// Create a Vercel AI model from LaunchDarkly configuration +const model = await VercelProvider.createVercelModel(aiConfig); + +// Convert LaunchDarkly messages and add user message +const configMessages = aiConfig.messages || []; +const userMessage = { role: 'user', content: 'What is the capital of France?' }; +const allMessages = [...configMessages, userMessage]; + +// Track the model call with LaunchDarkly tracking +const response = await aiConfig.tracker.trackMetricsOf( + (result) => VercelProvider.createAIMetrics(result), + () => generateText({ model, messages: allMessages }) +); + +console.log('AI Response:', response.text); +``` + +## Contributing + +We encourage pull requests and other contributions from the community. Check out our [contributing guidelines](CONTRIBUTING.md) for instructions on how to contribute to this SDK. 
+ +## About LaunchDarkly + +- LaunchDarkly is a continuous delivery platform that provides feature flags as a service and allows developers to iterate quickly and safely. We allow you to easily flag your features and manage them from the LaunchDarkly dashboard. With LaunchDarkly, you can: + - Roll out a new feature to a subset of your users (like a group of users who opt-in to a beta tester group), gathering feedback and bug reports from real-world use cases. + - Gradually roll out a feature to an increasing percentage of users, and track the effect that the feature has on key metrics (for instance, how likely is a user to complete a purchase if they have feature A versus feature B?). + - Turn off a feature that you realize is causing performance problems in production, without needing to re-deploy, or even restart the application with a changed configuration file. + - Grant access to certain features based on user attributes, like payment plan (eg: users on the 'gold' plan get access to more features than users in the 'silver' plan). + - Disable parts of your application to facilitate maintenance, without taking everything offline. +- LaunchDarkly provides feature flag SDKs for a wide variety of languages and technologies. Check out [our documentation](https://docs.launchdarkly.com/sdk) for a complete list. 
+- Explore LaunchDarkly + - [launchdarkly.com](https://www.launchdarkly.com/ 'LaunchDarkly Main Website') for more information + - [docs.launchdarkly.com](https://docs.launchdarkly.com/ 'LaunchDarkly Documentation') for our documentation and SDK reference guides + - [apidocs.launchdarkly.com](https://apidocs.launchdarkly.com/ 'LaunchDarkly API Documentation') for our API documentation + - [blog.launchdarkly.com](https://blog.launchdarkly.com/ 'LaunchDarkly Blog Documentation') for the latest product updates + +[server-ai-vercel-ci-badge]: https://github.com/launchdarkly/js-core/actions/workflows/server-ai-vercel.yml/badge.svg +[server-ai-vercel-ci]: https://github.com/launchdarkly/js-core/actions/workflows/server-ai-vercel.yml +[server-ai-vercel-npm-badge]: https://img.shields.io/npm/v/@launchdarkly/server-sdk-ai-vercel.svg?style=flat-square +[server-ai-vercel-npm-link]: https://www.npmjs.com/package/@launchdarkly/server-sdk-ai-vercel +[server-ai-vercel-ghp-badge]: https://img.shields.io/static/v1?label=GitHub+Pages&message=API+reference&color=00add8 +[server-ai-vercel-ghp-link]: https://launchdarkly.github.io/js-core/packages/ai-providers/server-ai-vercel/docs/ +[server-ai-vercel-dm-badge]: https://img.shields.io/npm/dm/@launchdarkly/server-sdk-ai-vercel.svg?style=flat-square +[server-ai-vercel-dt-badge]: https://img.shields.io/npm/dt/@launchdarkly/server-sdk-ai-vercel.svg?style=flat-square diff --git a/packages/ai-providers/server-ai-vercel/__tests__/VercelProvider.test.ts b/packages/ai-providers/server-ai-vercel/__tests__/VercelProvider.test.ts new file mode 100644 index 0000000000..65a423a161 --- /dev/null +++ b/packages/ai-providers/server-ai-vercel/__tests__/VercelProvider.test.ts @@ -0,0 +1,203 @@ +import { generateText } from 'ai'; + +import { VercelProvider } from '../src/VercelProvider'; + +// Mock Vercel AI SDK +jest.mock('ai', () => ({ + generateText: jest.fn(), +})); + +describe('VercelProvider', () => { + let mockModel: any; + let provider: 
VercelProvider; + + beforeEach(() => { + mockModel = { name: 'test-model' }; + provider = new VercelProvider(mockModel, {}); + }); + + describe('createAIMetrics', () => { + it('creates metrics with success=true and token usage', () => { + const mockResponse = { + usage: { + promptTokens: 50, + completionTokens: 50, + totalTokens: 100, + }, + }; + + const result = VercelProvider.createAIMetrics(mockResponse); + + expect(result).toEqual({ + success: true, + usage: { + total: 100, + input: 50, + output: 50, + }, + }); + }); + + it('creates metrics with success=true and no usage when usage is missing', () => { + const mockResponse = {}; + + const result = VercelProvider.createAIMetrics(mockResponse); + + expect(result).toEqual({ + success: true, + usage: undefined, + }); + }); + + it('handles partial usage data', () => { + const mockResponse = { + usage: { + promptTokens: 30, + // completionTokens and totalTokens missing + }, + }; + + const result = VercelProvider.createAIMetrics(mockResponse); + + expect(result).toEqual({ + success: true, + usage: { + total: 0, + input: 30, + output: 0, + }, + }); + }); + }); + + describe('invokeModel', () => { + it('invokes Vercel AI generateText and returns response', async () => { + const mockResponse = { + text: 'Hello! How can I help you today?', + usage: { + promptTokens: 10, + completionTokens: 15, + totalTokens: 25, + }, + }; + + (generateText as jest.Mock).mockResolvedValue(mockResponse); + + const messages = [{ role: 'user' as const, content: 'Hello!' }]; + + const result = await provider.invokeModel(messages); + + expect(generateText).toHaveBeenCalledWith({ + model: mockModel, + messages: [{ role: 'user', content: 'Hello!' }], + }); + + expect(result).toEqual({ + message: { + role: 'assistant', + content: 'Hello! 
How can I help you today?', + }, + metrics: { + success: true, + usage: { + total: 25, + input: 10, + output: 15, + }, + }, + }); + }); + + it('handles response without usage data', async () => { + const mockResponse = { + text: 'Hello! How can I help you today?', + }; + + (generateText as jest.Mock).mockResolvedValue(mockResponse); + + const messages = [{ role: 'user' as const, content: 'Hello!' }]; + + const result = await provider.invokeModel(messages); + + expect(result).toEqual({ + message: { + role: 'assistant', + content: 'Hello! How can I help you today?', + }, + metrics: { + success: true, + usage: undefined, + }, + }); + }); + }); + + describe('getModel', () => { + it('returns the underlying Vercel AI model', () => { + const model = provider.getModel(); + expect(model).toBe(mockModel); + }); + }); + + describe('createVercelModel', () => { + it('creates OpenAI model for openai provider', async () => { + const mockAiConfig = { + model: { name: 'gpt-4', parameters: {} }, + provider: { name: 'openai' }, + enabled: true, + tracker: {} as any, + toVercelAISDK: jest.fn(), + }; + + // Mock the dynamic import + jest.doMock('@ai-sdk/openai', () => ({ + openai: jest.fn().mockReturnValue(mockModel), + })); + + const result = await VercelProvider.createVercelModel(mockAiConfig); + expect(result).toBe(mockModel); + }); + + it('throws error for unsupported provider', async () => { + const mockAiConfig = { + model: { name: 'test-model', parameters: {} }, + provider: { name: 'unsupported' }, + enabled: true, + tracker: {} as any, + toVercelAISDK: jest.fn(), + }; + + await expect(VercelProvider.createVercelModel(mockAiConfig)).rejects.toThrow( + 'Unsupported Vercel AI provider: unsupported', + ); + }); + }); + + describe('create', () => { + it('creates VercelProvider with correct model and parameters', async () => { + const mockAiConfig = { + model: { + name: 'gpt-4', + parameters: { + temperature: 0.7, + maxTokens: 1000, + }, + }, + provider: { name: 'openai' }, + 
enabled: true, + tracker: {} as any, + toVercelAISDK: jest.fn(), + }; + + // Mock the dynamic import + jest.doMock('@ai-sdk/openai', () => ({ + openai: jest.fn().mockReturnValue(mockModel), + })); + + const result = await VercelProvider.create(mockAiConfig); + + expect(result).toBeInstanceOf(VercelProvider); + expect(result.getModel()).toBeDefined(); + }); + }); +}); diff --git a/packages/ai-providers/server-ai-vercel/jest.config.js b/packages/ai-providers/server-ai-vercel/jest.config.js new file mode 100644 index 0000000000..f106eb3bc9 --- /dev/null +++ b/packages/ai-providers/server-ai-vercel/jest.config.js @@ -0,0 +1,7 @@ +module.exports = { + transform: { '^.+\\.ts?$': 'ts-jest' }, + testMatch: ['**/__tests__/**/*test.ts?(x)'], + testEnvironment: 'node', + moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'], + collectCoverageFrom: ['src/**/*.ts'], +}; diff --git a/packages/ai-providers/server-ai-vercel/package.json b/packages/ai-providers/server-ai-vercel/package.json new file mode 100644 index 0000000000..37329c1d37 --- /dev/null +++ b/packages/ai-providers/server-ai-vercel/package.json @@ -0,0 +1,67 @@ +{ + "name": "@launchdarkly/server-sdk-ai-vercel", + "version": "0.0.0", + "description": "LaunchDarkly AI SDK Vercel Provider for Server-Side JavaScript", + "homepage": "https://github.com/launchdarkly/js-core/tree/main/packages/ai-providers/server-ai-vercel", + "repository": { + "type": "git", + "url": "https://github.com/launchdarkly/js-core.git" + }, + "main": "dist/index.js", + "types": "dist/index.d.ts", + "type": "commonjs", + "scripts": { + "build": "npx tsc", + "lint": "npx eslint . 
--ext .ts", + "prettier": "prettier --write '**/*.@(js|ts|tsx|json|css)' --ignore-path ../../../.prettierignore", + "lint:fix": "yarn run lint --fix", + "check": "yarn prettier && yarn lint && yarn build && yarn test", + "test": "jest" + }, + "keywords": [ + "launchdarkly", + "ai", + "llm", + "vercel" + ], + "author": "LaunchDarkly", + "license": "Apache-2.0", + "dependencies": { + "@ai-sdk/provider": "^2.0.0", + "@launchdarkly/server-sdk-ai": "^0.12.0", + "ai": "^5.0.0" + }, + "optionalDependencies": { + "@ai-sdk/anthropic": "^2.0.0", + "@ai-sdk/cohere": "^2.0.0", + "@ai-sdk/google": "^2.0.0", + "@ai-sdk/mistral": "^2.0.0", + "@ai-sdk/openai": "^2.0.0" + }, + "devDependencies": { + "@ai-sdk/anthropic": "^2.0.0", + "@ai-sdk/cohere": "^2.0.0", + "@ai-sdk/google": "^2.0.0", + "@ai-sdk/mistral": "^2.0.0", + "@ai-sdk/openai": "^2.0.0", + "@launchdarkly/js-server-sdk-common": "2.16.2", + "@trivago/prettier-plugin-sort-imports": "^4.1.1", + "@types/jest": "^29.5.3", + "@typescript-eslint/eslint-plugin": "^6.20.0", + "@typescript-eslint/parser": "^6.20.0", + "eslint": "^8.45.0", + "eslint-config-airbnb-base": "^15.0.0", + "eslint-config-airbnb-typescript": "^17.1.0", + "eslint-config-prettier": "^8.8.0", + "eslint-plugin-import": "^2.27.5", + "eslint-plugin-jest": "^27.6.3", + "eslint-plugin-prettier": "^5.0.0", + "jest": "^29.6.1", + "prettier": "^3.0.0", + "ts-jest": "^29.1.1", + "typescript": "5.1.6" + }, + "peerDependencies": { + "@launchdarkly/js-server-sdk-common": "2.x" + } +} diff --git a/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts b/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts new file mode 100644 index 0000000000..a2ac1695bb --- /dev/null +++ b/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts @@ -0,0 +1,176 @@ +import { LanguageModelV2 } from '@ai-sdk/provider'; +import { generateText } from 'ai'; + +import { LDLogger } from '@launchdarkly/js-server-sdk-common'; +import { + AIProvider, + ChatResponse, + LDAIConfig, + 
LDAIMetrics, + LDMessage, + LDTokenUsage, +} from '@launchdarkly/server-sdk-ai'; + +/** + * Vercel AI implementation of AIProvider. + * This provider integrates Vercel AI SDK with LaunchDarkly's tracking capabilities. + */ +export class VercelProvider extends AIProvider { + private _model: LanguageModelV2; + private _parameters: Record; + + constructor(model: LanguageModelV2, parameters: Record, logger?: LDLogger) { + super(logger); + this._model = model; + this._parameters = parameters; + } + + // ============================================================================= + // MAIN FACTORY METHOD + // ============================================================================= + + /** + * Static factory method to create a Vercel AIProvider from an AI configuration. + */ + static async create(aiConfig: LDAIConfig, logger?: LDLogger): Promise { + const model = await VercelProvider.createVercelModel(aiConfig); + const parameters = aiConfig.model?.parameters || {}; + return new VercelProvider(model, parameters, logger); + } + + // ============================================================================= + // INSTANCE METHODS (AIProvider Implementation) + // ============================================================================= + + /** + * Invoke the Vercel AI model with an array of messages. + */ + async invokeModel(messages: LDMessage[]): Promise { + // Call Vercel AI generateText + const result = await generateText({ + model: this._model, + messages, + ...this._parameters, + }); + + // Create the assistant message + const assistantMessage: LDMessage = { + role: 'assistant', + content: result.text, + }; + + // Extract metrics including token usage and success status + const metrics = VercelProvider.createAIMetrics(result); + + return { + message: assistantMessage, + metrics, + }; + } + + /** + * Get the underlying Vercel AI model instance. 
+ */ + getModel(): LanguageModelV2 { + return this._model; + } + + // ============================================================================= + // STATIC UTILITY METHODS + // ============================================================================= + + /** + * Map LaunchDarkly provider names to Vercel AI SDK provider names. + * This method enables seamless integration between LaunchDarkly's standardized + * provider naming and the Vercel AI SDK's naming conventions. + */ + static mapProvider(ldProviderName: string): string { + const lowercasedName = ldProviderName.toLowerCase(); + + const mapping: Record = { + gemini: 'google', + }; + + return mapping[lowercasedName] || lowercasedName; + } + + /** + * Create AI metrics information from a Vercel AI response. + * This method extracts token usage information and success status from Vercel AI responses + * and returns a LaunchDarkly AIMetrics object. + */ + static createAIMetrics(vercelResponse: any): LDAIMetrics { + // Extract token usage if available + let usage: LDTokenUsage | undefined; + if (vercelResponse?.usage) { + const { promptTokens, completionTokens, totalTokens } = vercelResponse.usage; + usage = { + total: totalTokens || 0, + input: promptTokens || 0, + output: completionTokens || 0, + }; + } + + // Vercel AI responses that complete successfully are considered successful + return { + success: true, + usage, + }; + } + + /** + * Create a Vercel AI model from an AI configuration. + * This method creates a Vercel AI model based on the provider configuration. 
+ * + * @param aiConfig The LaunchDarkly AI configuration + * @returns A Promise that resolves to a configured Vercel AI model + */ + static async createVercelModel(aiConfig: LDAIConfig): Promise { + const providerName = VercelProvider.mapProvider(aiConfig.provider?.name || ''); + const modelName = aiConfig.model?.name || ''; + // Parameters are not used in model creation but kept for future use + // eslint-disable-next-line @typescript-eslint/no-unused-vars + const parameters = aiConfig.model?.parameters || {}; + + // Map provider names to their corresponding Vercel AI SDK imports + switch (providerName) { + case 'openai': + try { + const { openai } = await import('@ai-sdk/openai'); + return openai(modelName); + } catch (error) { + throw new Error(`Failed to load @ai-sdk/openai: ${error}`); + } + case 'anthropic': + try { + const { anthropic } = await import('@ai-sdk/anthropic' as any); + return anthropic(modelName); + } catch (error) { + throw new Error(`Failed to load @ai-sdk/anthropic: ${error}`); + } + case 'google': + try { + const { google } = await import('@ai-sdk/google' as any); + return google(modelName); + } catch (error) { + throw new Error(`Failed to load @ai-sdk/google: ${error}`); + } + case 'cohere': + try { + const { cohere } = await import('@ai-sdk/cohere' as any); + return cohere(modelName); + } catch (error) { + throw new Error(`Failed to load @ai-sdk/cohere: ${error}`); + } + case 'mistral': + try { + const { mistral } = await import('@ai-sdk/mistral' as any); + return mistral(modelName); + } catch (error) { + throw new Error(`Failed to load @ai-sdk/mistral: ${error}`); + } + default: + throw new Error(`Unsupported Vercel AI provider: ${providerName}`); + } + } +} diff --git a/packages/ai-providers/server-ai-vercel/src/index.ts b/packages/ai-providers/server-ai-vercel/src/index.ts new file mode 100644 index 0000000000..3dde0dc683 --- /dev/null +++ b/packages/ai-providers/server-ai-vercel/src/index.ts @@ -0,0 +1 @@ +export { VercelProvider } 
from './VercelProvider'; diff --git a/packages/ai-providers/server-ai-vercel/tsconfig.eslint.json b/packages/ai-providers/server-ai-vercel/tsconfig.eslint.json new file mode 100644 index 0000000000..56c9b38305 --- /dev/null +++ b/packages/ai-providers/server-ai-vercel/tsconfig.eslint.json @@ -0,0 +1,5 @@ +{ + "extends": "./tsconfig.json", + "include": ["/**/*.ts"], + "exclude": ["node_modules"] +} diff --git a/packages/ai-providers/server-ai-vercel/tsconfig.json b/packages/ai-providers/server-ai-vercel/tsconfig.json new file mode 100644 index 0000000000..6238d6a0f5 --- /dev/null +++ b/packages/ai-providers/server-ai-vercel/tsconfig.json @@ -0,0 +1,20 @@ +{ + "compilerOptions": { + "target": "ES2020", + "module": "CommonJS", + "lib": ["ES2020"], + "moduleResolution": "node", + "esModuleInterop": true, + "allowSyntheticDefaultImports": true, + "strict": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "outDir": "./dist", + "rootDir": "./src", + "declaration": true, + "declarationMap": true, + "sourceMap": true + }, + "include": ["src/**/*"], + "exclude": ["dist", "node_modules", "**/*.test.ts", "**/*.spec.ts"] +} diff --git a/packages/ai-providers/server-ai-vercel/tsconfig.ref.json b/packages/ai-providers/server-ai-vercel/tsconfig.ref.json new file mode 100644 index 0000000000..0c86b2c554 --- /dev/null +++ b/packages/ai-providers/server-ai-vercel/tsconfig.ref.json @@ -0,0 +1,7 @@ +{ + "extends": "./tsconfig.json", + "include": ["src/**/*"], + "compilerOptions": { + "composite": true + } +} diff --git a/packages/ai-providers/server-ai-vercel/typedoc.json b/packages/ai-providers/server-ai-vercel/typedoc.json new file mode 100644 index 0000000000..7ac616b544 --- /dev/null +++ b/packages/ai-providers/server-ai-vercel/typedoc.json @@ -0,0 +1,5 @@ +{ + "extends": ["../../../typedoc.base.json"], + "entryPoints": ["src/index.ts"], + "out": "docs" +} diff --git a/release-please-config.json b/release-please-config.json index 0a0725f91b..efd01f11cc 
100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -4,6 +4,10 @@ "bump-minor-pre-major": true, "prerelease": true }, + "packages/ai-providers/server-ai-vercel": { + "bump-minor-pre-major": true, + "prerelease": true + }, "packages/shared/common": {}, "packages/shared/sdk-client": {}, "packages/shared/sdk-server": {}, diff --git a/tsconfig.json b/tsconfig.json index ccc30f0944..183b6ba379 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -81,6 +81,9 @@ }, { "path": "./packages/ai-providers/server-ai-langchain/tsconfig.ref.json" + }, + { + "path": "./packages/ai-providers/server-ai-vercel/tsconfig.ref.json" } ] }