diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index b49ac584..81faccd5 100755 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: f42cb8e6-e2ce-4565-b975-5a9f38b94d5a management: - docChecksum: 75c4d04b99256e1e68d9940a982e1583 - docVersion: 1.1.68 - speakeasyVersion: 1.634.0 - generationVersion: 2.721.0 - releaseVersion: 0.27.0 - configChecksum: 27956e70414cd99591e801c7e4701835 + docChecksum: f69e063f6bdd37a7b29266aa34bb30a4 + docVersion: 1.2.0 + speakeasyVersion: 1.636.3 + generationVersion: 2.723.11 + releaseVersion: 0.28.0 + configChecksum: 2f7d038efc0c06515032eb8b02c062d8 repoURL: https://github.com/Unstructured-IO/unstructured-js-client.git repoSubDirectory: . installationURL: https://github.com/Unstructured-IO/unstructured-js-client @@ -53,10 +53,8 @@ generatedFiles: - docs/sdk/models/shared/security.md - docs/sdk/models/shared/strategy.md - docs/sdk/models/shared/validationerror.md - - docs/sdk/models/shared/vlmmodel.md - docs/sdk/models/shared/vlmmodelprovider.md - docs/sdks/general/README.md - - docs/sdks/unstructuredclient/README.md - eslint.config.mjs - examples/.env.template - examples/README.md @@ -141,3 +139,4 @@ examples: application/json: {"detail": "An error occurred"} examplesVersion: 1.0.2 generatedTests: {} +releaseNotes: "## Typescript SDK Changes Detected:\n* `unstructured_client.general.partition()`: \n * `request.partitionParameters.vlmModel` **Changed** **Breaking** :warning:\n" diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index f8d42045..601c181a 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,21 +1,21 @@ -speakeasyVersion: 1.634.0 +speakeasyVersion: 1.636.3 sources: my-source: sourceNamespace: my-source - sourceRevisionDigest: sha256:bb79c0280ddde01e2c792461935790617b4867ce4d03bf000bdee3dffc43b9e2 - sourceBlobDigest: sha256:3040373c9d49d3dc7a4d1a497f82579fa11252afd263ebbe6e3a3c728fefe519 + sourceRevisionDigest: sha256:05de7030a5d70689f6aa854ba186d44d83fe0951c5ae72a2310e91686946cadf + sourceBlobDigest: sha256:cfb1e6a24482556f98c7b5845db67aaf57ecbf7556a0d9904e673bfe0fc3ba7d tags: - latest - - speakeasy-sdk-regen-1759192246 - - 1.1.68 + - speakeasy-sdk-regen-1760488307 + - 1.2.0 targets: unstructed-typescript: source: my-source sourceNamespace: my-source - sourceRevisionDigest: sha256:bb79c0280ddde01e2c792461935790617b4867ce4d03bf000bdee3dffc43b9e2 - sourceBlobDigest: sha256:3040373c9d49d3dc7a4d1a497f82579fa11252afd263ebbe6e3a3c728fefe519 + sourceRevisionDigest: sha256:05de7030a5d70689f6aa854ba186d44d83fe0951c5ae72a2310e91686946cadf + sourceBlobDigest: sha256:cfb1e6a24482556f98c7b5845db67aaf57ecbf7556a0d9904e673bfe0fc3ba7d codeSamplesNamespace: my-source-typescript-code-samples - codeSamplesRevisionDigest: sha256:48b37447a8858ac12025cfd1098aa4f1e9a8e86a4c7bc1147aaad3a3bdc7c0ab + codeSamplesRevisionDigest: sha256:c138bec1c39605da1156d9adb9a7ee21d9c28952f59b3f713b5400a97316ac96 workflow: workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/FUNCTIONS.md b/FUNCTIONS.md index 03906a13..a3eedbde 100644 --- a/FUNCTIONS.md +++ b/FUNCTIONS.md @@ -22,7 +22,7 @@ specific category of applications. 
import { openAsBlob } from "node:fs"; import { UnstructuredClientCore } from "unstructured-client/core.js"; import { generalPartition } from "unstructured-client/funcs/generalPartition.js"; -import { Strategy, VLMModel, VLMModelProvider } from "unstructured-client/sdk/models/shared"; +import { Strategy, VLMModelProvider } from "unstructured-client/sdk/models/shared"; // Use `UnstructuredClientCore` for best tree-shaking performance. // You can create one instance of it to use across an application. @@ -38,7 +38,7 @@ async function run() { 10, ], strategy: Strategy.Auto, - vlmModel: VLMModel.Gpt4o, + vlmModel: "gpt-4o", vlmModelProvider: VLMModelProvider.Openai, }, }); diff --git a/README.md b/README.md index 5d6d2bf6..a9551ff6 100644 --- a/README.md +++ b/README.md @@ -207,7 +207,6 @@ import { openAsBlob } from "node:fs"; import { UnstructuredClient } from "unstructured-client"; import { Strategy, - VLMModel, VLMModelProvider, } from "unstructured-client/sdk/models/shared"; @@ -223,7 +222,7 @@ async function run() { 10, ], strategy: Strategy.Auto, - vlmModel: VLMModel.Gpt4o, + vlmModel: "gpt-4o", vlmModelProvider: VLMModelProvider.Openai, }, }, { @@ -252,7 +251,6 @@ import { openAsBlob } from "node:fs"; import { UnstructuredClient } from "unstructured-client"; import { Strategy, - VLMModel, VLMModelProvider, } from "unstructured-client/sdk/models/shared"; @@ -279,7 +277,7 @@ async function run() { 10, ], strategy: Strategy.Auto, - vlmModel: VLMModel.Gpt4o, + vlmModel: "gpt-4o", vlmModelProvider: VLMModelProvider.Openai, }, }); @@ -381,7 +379,6 @@ import { openAsBlob } from "node:fs"; import { UnstructuredClient } from "unstructured-client"; import { Strategy, - VLMModel, VLMModelProvider, } from "unstructured-client/sdk/models/shared"; @@ -397,7 +394,7 @@ async function run() { 10, ], strategy: Strategy.Auto, - vlmModel: VLMModel.Gpt4o, + vlmModel: "gpt-4o", vlmModelProvider: VLMModelProvider.Openai, }, }); diff --git a/RELEASES.md b/RELEASES.md index 0475ce7f..373ac3bf 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -694,4 +694,14 @@ Based on: ### Generated - [typescript v0.27.0] . ### Releases -- [NPM v0.27.0] https://www.npmjs.com/package/unstructured-client/v/0.27.0 - . \ No newline at end of file +- [NPM v0.27.0] https://www.npmjs.com/package/unstructured-client/v/0.27.0 - . + +## 2025-10-16 00:31:05 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.636.3 (2.723.11) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v0.28.0] . +### Releases +- [NPM v0.28.0] https://www.npmjs.com/package/unstructured-client/v/0.28.0 - . \ No newline at end of file diff --git a/RUNTIMES.md b/RUNTIMES.md index db7ea942..27731c3b 100644 --- a/RUNTIMES.md +++ b/RUNTIMES.md @@ -2,9 +2,9 @@ This SDK is intended to be used in JavaScript runtimes that support ECMAScript 2020 or newer. 
The SDK uses the following features: -* [Web Fetch API][web-fetch] -* [Web Streams API][web-streams] and in particular `ReadableStream` -* [Async iterables][async-iter] using `Symbol.asyncIterator` +- [Web Fetch API][web-fetch] +- [Web Streams API][web-streams] and in particular `ReadableStream` +- [Async iterables][async-iter] using `Symbol.asyncIterator` [web-fetch]: https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API [web-streams]: https://developer.mozilla.org/en-US/docs/Web/API/Streams_API @@ -25,7 +25,7 @@ Runtime environments that are explicitly supported are: The following `tsconfig.json` options are recommended for projects using this SDK in order to get static type support for features like async iterables, -streams and `fetch`-related APIs ([`for await...of`][for-await-of], +streams and `fetch`-related APIs ([`for await...of`][for-await-of], [`AbortSignal`][abort-signal], [`Request`][request], [`Response`][response] and so on): @@ -38,11 +38,11 @@ so on): { "compilerOptions": { "target": "es2020", // or higher - "lib": ["es2020", "dom", "dom.iterable"], + "lib": ["es2020", "dom", "dom.iterable"] } } ``` While `target` can be set to older ECMAScript versions, it may result in extra, unnecessary compatibility code being generated if you are not targeting old -runtimes. \ No newline at end of file +runtimes. diff --git a/USAGE.md b/USAGE.md index 9ea4c10b..b66509bd 100644 --- a/USAGE.md +++ b/USAGE.md @@ -4,7 +4,6 @@ import { openAsBlob } from "node:fs"; import { UnstructuredClient } from "unstructured-client"; import { Strategy, - VLMModel, VLMModelProvider, } from "unstructured-client/sdk/models/shared"; @@ -20,7 +19,7 @@ async function run() { 10, ], strategy: Strategy.Auto, - vlmModel: VLMModel.Gpt4o, + vlmModel: "gpt-4o", vlmModelProvider: VLMModelProvider.Openai, }, }); diff --git a/codeSamples.yaml b/codeSamples.yaml index 2e99eb00..b5dd8b24 100644 --- a/codeSamples.yaml +++ b/codeSamples.yaml @@ -8,4 +8,4 @@ actions: "x-codeSamples": - "lang": "typescript" "label": "partition" - "source": "import { openAsBlob } from \"node:fs\";\nimport { UnstructuredClient } from \"unstructured-client\";\nimport { Strategy, VLMModel, VLMModelProvider } from \"unstructured-client/sdk/models/shared\";\n\nconst unstructuredClient = new UnstructuredClient();\n\nasync function run() {\n const result = await unstructuredClient.general.partition({\n partitionParameters: {\n files: await openAsBlob(\"example.file\"),\n strategy: Strategy.Auto,\n vlmModelProvider: VLMModelProvider.Openai,\n vlmModel: VLMModel.Gpt4o,\n chunkingStrategy: \"by_title\",\n splitPdfPageRange: [\n 1,\n 10,\n ],\n },\n });\n\n console.log(result);\n}\n\nrun();" + "source": "import { openAsBlob } from \"node:fs\";\nimport { UnstructuredClient } from \"unstructured-client\";\nimport { Strategy, VLMModelProvider } from \"unstructured-client/sdk/models/shared\";\n\nconst unstructuredClient = new UnstructuredClient();\n\nasync function run() {\n const result = await unstructuredClient.general.partition({\n partitionParameters: {\n files: await openAsBlob(\"example.file\"),\n strategy: Strategy.Auto,\n vlmModelProvider: VLMModelProvider.Openai,\n vlmModel: \"gpt-4o\",\n chunkingStrategy: \"by_title\",\n splitPdfPageRange: [\n 1,\n 10,\n ],\n },\n });\n\n console.log(result);\n}\n\nrun();" diff --git a/docs/sdk/models/shared/partitionparameters.md b/docs/sdk/models/shared/partitionparameters.md index 4acecc49..9bb32446 100644 --- a/docs/sdk/models/shared/partitionparameters.md +++ 
b/docs/sdk/models/shared/partitionparameters.md @@ -47,6 +47,6 @@ import { PartitionParameters } from "unstructured-client/sdk/models/shared"; | `strategy` | [shared.Strategy](../../../sdk/models/shared/strategy.md) | :heavy_minus_sign: | The strategy to use for partitioning PDF/image. Options are fast, hi_res, auto. Default: hi_res | auto | | `tableOcrAgent` | *string* | :heavy_minus_sign: | The OCR agent to use for table ocr inference. | | | `uniqueElementIds` | *boolean* | :heavy_minus_sign: | When `True`, assign UUIDs to element IDs, which guarantees their uniqueness (useful when using them as primary keys in database). Otherwise a SHA-256 of element text is used. Default: `False` | | -| `vlmModel` | [shared.VLMModel](../../../sdk/models/shared/vlmmodel.md) | :heavy_minus_sign: | The VLM Model to use. | gpt-4o | +| `vlmModel` | *string* | :heavy_minus_sign: | The VLM Model to use. | gpt-4o | | `vlmModelProvider` | [shared.VLMModelProvider](../../../sdk/models/shared/vlmmodelprovider.md) | :heavy_minus_sign: | The VLM Model provider to use. | openai | | `xmlKeepTags` | *boolean* | :heavy_minus_sign: | If `True`, will retain the XML tags in the output. Otherwise it will simply extract the text from within the tags. Only applies to XML documents. | | \ No newline at end of file diff --git a/docs/sdk/models/shared/vlmmodel.md b/docs/sdk/models/shared/vlmmodel.md deleted file mode 100644 index 69371cc1..00000000 --- a/docs/sdk/models/shared/vlmmodel.md +++ /dev/null @@ -1,33 +0,0 @@ -# VLMModel - -The VLM Model to use. - -## Example Usage - -```typescript -import { VLMModel } from "unstructured-client/sdk/models/shared"; - -let value: VLMModel = VLMModel.Gpt4o; -``` - -## Values - -This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. 
- -| Name | Value | -| -------------------------------------------- | -------------------------------------------- | -| `Claude35Sonnet20241022` | claude-3-5-sonnet-20241022 | -| `Claude37Sonnet20250219` | claude-3-7-sonnet-20250219 | -| `Gpt4o` | gpt-4o | -| `Gemini15Pro` | gemini-1.5-pro | -| `UsAmazonNovaProV10` | us.amazon.nova-pro-v1:0 | -| `UsAmazonNovaLiteV10` | us.amazon.nova-lite-v1:0 | -| `UsAnthropicClaude37Sonnet20250219V10` | us.anthropic.claude-3-7-sonnet-20250219-v1:0 | -| `UsAnthropicClaude35Sonnet20241022V20` | us.anthropic.claude-3-5-sonnet-20241022-v2:0 | -| `UsAnthropicClaude3Opus20240229V10` | us.anthropic.claude-3-opus-20240229-v1:0 | -| `UsAnthropicClaude3Haiku20240307V10` | us.anthropic.claude-3-haiku-20240307-v1:0 | -| `UsAnthropicClaude3Sonnet20240229V10` | us.anthropic.claude-3-sonnet-20240229-v1:0 | -| `UsMetaLlama3290bInstructV10` | us.meta.llama3-2-90b-instruct-v1:0 | -| `UsMetaLlama3211bInstructV10` | us.meta.llama3-2-11b-instruct-v1:0 | -| `Gemini20Flash001` | gemini-2.0-flash-001 | -| - | `Unrecognized` | \ No newline at end of file diff --git a/docs/sdks/general/README.md b/docs/sdks/general/README.md index 67511c18..ffbcc392 100644 --- a/docs/sdks/general/README.md +++ b/docs/sdks/general/README.md @@ -17,7 +17,7 @@ Description ```typescript import { openAsBlob } from "node:fs"; import { UnstructuredClient } from "unstructured-client"; -import { Strategy, VLMModel, VLMModelProvider } from "unstructured-client/sdk/models/shared"; +import { Strategy, VLMModelProvider } from "unstructured-client/sdk/models/shared"; const unstructuredClient = new UnstructuredClient(); @@ -31,7 +31,7 @@ async function run() { 10, ], strategy: Strategy.Auto, - vlmModel: VLMModel.Gpt4o, + vlmModel: "gpt-4o", vlmModelProvider: VLMModelProvider.Openai, }, }); @@ -50,7 +50,7 @@ The standalone function version of this method: import { openAsBlob } from "node:fs"; import { UnstructuredClientCore } from "unstructured-client/core.js"; import { generalPartition } from "unstructured-client/funcs/generalPartition.js"; -import { Strategy, VLMModel, VLMModelProvider } from "unstructured-client/sdk/models/shared"; +import { Strategy, VLMModelProvider } from "unstructured-client/sdk/models/shared"; // Use `UnstructuredClientCore` for best tree-shaking performance. // You can create one instance of it to use across an application. 
@@ -66,7 +66,7 @@ async function run() { 10, ], strategy: Strategy.Auto, - vlmModel: VLMModel.Gpt4o, + vlmModel: "gpt-4o", vlmModelProvider: VLMModelProvider.Openai, }, }); diff --git a/docs/sdks/unstructuredclient/README.md b/docs/sdks/unstructuredclient/README.md deleted file mode 100644 index 895459f0..00000000 --- a/docs/sdks/unstructuredclient/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# UnstructuredClient SDK - -## Overview - -### Available Operations diff --git a/examples/generalPartition.example.ts b/examples/generalPartition.example.ts index 1e1222dc..76ab90b7 100644 --- a/examples/generalPartition.example.ts +++ b/examples/generalPartition.example.ts @@ -15,7 +15,6 @@ import { openAsBlob } from "node:fs"; import { UnstructuredClient } from "unstructured-client"; import { Strategy, - VLMModel, VLMModelProvider, } from "unstructured-client/sdk/models/shared"; @@ -31,7 +30,7 @@ async function main() { 10, ], strategy: Strategy.Auto, - vlmModel: VLMModel.Gpt4o, + vlmModel: "gpt-4o", vlmModelProvider: VLMModelProvider.Openai, }, }); diff --git a/examples/package-lock.json b/examples/package-lock.json index 290c1a87..a67ecea4 100644 --- a/examples/package-lock.json +++ b/examples/package-lock.json @@ -18,7 +18,7 @@ }, "..": { "name": "unstructured-client", - "version": "0.27.0", + "version": "0.28.0", "dependencies": { "async": "^3.2.5", "pdf-lib": "^1.17.1", diff --git a/gen.yaml b/gen.yaml index 442291dd..25bbca43 100644 --- a/gen.yaml +++ b/gen.yaml @@ -19,7 +19,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false typescript: - version: 0.27.0 + version: 0.28.0 acceptHeaderEnum: true additionalDependencies: dependencies: diff --git a/jsr.json b/jsr.json index 1d902ea6..a9959a35 100644 --- a/jsr.json +++ b/jsr.json @@ -2,7 +2,7 @@ { "name": "unstructured-client", - "version": "0.27.0", + "version": "0.28.0", "exports": { ".": "./src/index.ts", "./sdk/models/errors": "./src/sdk/models/errors/index.ts", diff --git a/package-lock.json b/package-lock.json index b3a690fb..de7b07aa 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "unstructured-client", - "version": "0.27.0", + "version": "0.28.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "unstructured-client", - "version": "0.27.0", + "version": "0.28.0", "dependencies": { "async": "^3.2.5", "pdf-lib": "^1.17.1", diff --git a/package.json b/package.json index 4bbe26cf..c2b6cb1c 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "unstructured-client", - "version": "0.27.0", + "version": "0.28.0", "author": "Unstructured", "type": "module", "bin": { diff --git a/src/funcs/generalPartition.ts b/src/funcs/generalPartition.ts index 38d19649..22d3983d 100644 --- a/src/funcs/generalPartition.ts +++ b/src/funcs/generalPartition.ts @@ -377,7 +377,7 @@ async function $do( options: client._options, baseURL: options?.serverURL ?? client._baseURL ?? 
"", operationID: "partition", - oAuth2Scopes: [], + oAuth2Scopes: null, resolvedSecurity: requestSecurity, diff --git a/src/lib/config.ts b/src/lib/config.ts index a5e2e398..c5c934a7 100644 --- a/src/lib/config.ts +++ b/src/lib/config.ts @@ -67,9 +67,9 @@ export function serverURLFromOptions(options: SDKOptions): URL | null { export const SDK_METADATA = { language: "typescript", - openapiDocVersion: "1.1.68", - sdkVersion: "0.27.0", - genVersion: "2.721.0", + openapiDocVersion: "1.2.0", + sdkVersion: "0.28.0", + genVersion: "2.723.11", userAgent: - "speakeasy-sdk/typescript 0.27.0 2.721.0 1.1.68 unstructured-client", + "speakeasy-sdk/typescript 0.28.0 2.723.11 1.2.0 unstructured-client", } as const; diff --git a/src/mcp-server/mcp-server.ts b/src/mcp-server/mcp-server.ts index 0b452cea..0db3e576 100644 --- a/src/mcp-server/mcp-server.ts +++ b/src/mcp-server/mcp-server.ts @@ -19,7 +19,7 @@ const routes = buildRouteMap({ export const app = buildApplication(routes, { name: "mcp", versionInfo: { - currentVersion: "0.27.0", + currentVersion: "0.28.0", }, }); diff --git a/src/mcp-server/server.ts b/src/mcp-server/server.ts index da3ce40c..6288da77 100644 --- a/src/mcp-server/server.ts +++ b/src/mcp-server/server.ts @@ -27,7 +27,7 @@ export function createMCPServer(deps: { }) { const server = new McpServer({ name: "UnstructuredClient", - version: "0.27.0", + version: "0.28.0", }); const client = new UnstructuredClientCore({ diff --git a/src/sdk/models/shared/partitionparameters.ts b/src/sdk/models/shared/partitionparameters.ts index 48d694be..e82bf794 100644 --- a/src/sdk/models/shared/partitionparameters.ts +++ b/src/sdk/models/shared/partitionparameters.ts @@ -47,35 +47,6 @@ export enum Strategy { */ export type StrategyOpen = OpenEnum; -/** - * The VLM Model to use. - */ -export enum VLMModel { - Claude35Sonnet20241022 = "claude-3-5-sonnet-20241022", - Claude37Sonnet20250219 = "claude-3-7-sonnet-20250219", - Gpt4o = "gpt-4o", - Gemini15Pro = "gemini-1.5-pro", - UsAmazonNovaProV10 = "us.amazon.nova-pro-v1:0", - UsAmazonNovaLiteV10 = "us.amazon.nova-lite-v1:0", - UsAnthropicClaude37Sonnet20250219V10 = - "us.anthropic.claude-3-7-sonnet-20250219-v1:0", - UsAnthropicClaude35Sonnet20241022V20 = - "us.anthropic.claude-3-5-sonnet-20241022-v2:0", - UsAnthropicClaude3Opus20240229V10 = - "us.anthropic.claude-3-opus-20240229-v1:0", - UsAnthropicClaude3Haiku20240307V10 = - "us.anthropic.claude-3-haiku-20240307-v1:0", - UsAnthropicClaude3Sonnet20240229V10 = - "us.anthropic.claude-3-sonnet-20240229-v1:0", - UsMetaLlama3290bInstructV10 = "us.meta.llama3-2-90b-instruct-v1:0", - UsMetaLlama3211bInstructV10 = "us.meta.llama3-2-11b-instruct-v1:0", - Gemini20Flash001 = "gemini-2.0-flash-001", -} -/** - * The VLM Model to use. - */ -export type VLMModelOpen = OpenEnum; - /** * The VLM Model provider to use. */ @@ -237,7 +208,7 @@ export type PartitionParameters = { /** * The VLM Model to use. */ - vlmModel?: VLMModelOpen | undefined; + vlmModel?: string | undefined; /** * The VLM Model provider to use. 
*/ @@ -372,38 +343,6 @@ export namespace Strategy$ { export const outboundSchema = Strategy$outboundSchema; } -/** @internal */ -export const VLMModel$inboundSchema: z.ZodType< - VLMModelOpen, - z.ZodTypeDef, - unknown -> = z - .union([ - z.nativeEnum(VLMModel), - z.string().transform(catchUnrecognizedEnum), - ]); - -/** @internal */ -export const VLMModel$outboundSchema: z.ZodType< - VLMModelOpen, - z.ZodTypeDef, - VLMModelOpen -> = z.union([ - z.nativeEnum(VLMModel), - z.string().and(z.custom>()), -]); - -/** - * @internal - * @deprecated This namespace will be removed in future versions. Use schemas and types that are exported directly from this module. - */ -export namespace VLMModel$ { - /** @deprecated use `VLMModel$inboundSchema` instead. */ - export const inboundSchema = VLMModel$inboundSchema; - /** @deprecated use `VLMModel$outboundSchema` instead. */ - export const outboundSchema = VLMModel$outboundSchema; -} - /** @internal */ export const VLMModelProvider$inboundSchema: z.ZodType< VLMModelProviderOpen, @@ -479,7 +418,7 @@ export const PartitionParameters$inboundSchema: z.ZodType< strategy: Strategy$inboundSchema.default(Strategy.HiRes), table_ocr_agent: z.nullable(z.string()).optional(), unique_element_ids: z.boolean().default(false), - vlm_model: VLMModel$inboundSchema.optional(), + vlm_model: z.string().optional(), vlm_model_provider: VLMModelProvider$inboundSchema.optional(), xml_keep_tags: z.boolean().default(false), }).transform((v) => { @@ -604,7 +543,7 @@ export const PartitionParameters$outboundSchema: z.ZodType< strategy: Strategy$outboundSchema.default(Strategy.HiRes), tableOcrAgent: z.nullable(z.string()).optional(), uniqueElementIds: z.boolean().default(false), - vlmModel: VLMModel$outboundSchema.optional(), + vlmModel: z.string().optional(), vlmModelProvider: VLMModelProvider$outboundSchema.optional(), xmlKeepTags: z.boolean().default(false), }).transform((v) => {
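
For readers applying this release: the only breaking change above is that `vlmModel` is now a plain string instead of the open `VLMModel` enum. The sketch below is adapted from the updated README and codeSamples examples in this diff and shows the before/after for calling code; `example.file` is a placeholder path.

```typescript
import { openAsBlob } from "node:fs";
import { UnstructuredClient } from "unstructured-client";
// Note: `VLMModel` is no longer exported; only `Strategy` and `VLMModelProvider` remain.
import { Strategy, VLMModelProvider } from "unstructured-client/sdk/models/shared";

const client = new UnstructuredClient();

async function run() {
  const result = await client.general.partition({
    partitionParameters: {
      files: await openAsBlob("example.file"),
      strategy: Strategy.Auto,
      // Before (0.27.x): vlmModel: VLMModel.Gpt4o
      // After (0.28.0): pass the model name directly as a string.
      vlmModel: "gpt-4o",
      vlmModelProvider: VLMModelProvider.Openai,
      splitPdfPageRange: [1, 10],
    },
  });
  console.log(result);
}

run();
```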