Skip to content
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions output/typescript/types.ts

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

12 changes: 10 additions & 2 deletions specification/inference/put/PutRequest.ts
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,11 @@
* under the License.
*/

import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import { InferenceEndpoint } from '@inference/_types/Services'
import { TaskType } from '@inference/_types/TaskType'
import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import { Duration } from '@_types/Time'

/**
* Create an inference endpoint.
Expand Down Expand Up @@ -74,6 +75,13 @@ export interface Request extends RequestBase {
*/
inference_id: Id
}
query_parameters: {
/**
* Specifies the amount of time to wait for the inference endpoint to be created.
* @server_default 30s
*/
timeout?: Duration
}
/** @codegen_name inference_config */
body: InferenceEndpoint
}
12 changes: 10 additions & 2 deletions specification/inference/put_alibabacloud/PutAlibabaCloudRequest.ts
Original file line number Diff line number Diff line change
Expand Up @@ -17,15 +17,16 @@
* under the License.
*/

import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import {
AlibabaCloudServiceSettings,
AlibabaCloudServiceType,
AlibabaCloudTaskSettings,
AlibabaCloudTaskType
} from '@inference/_types/CommonTypes'
import { InferenceChunkingSettings } from '@inference/_types/Services'
import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import { Duration } from '@_types/Time'

/**
* Create an AlibabaCloud AI Search inference endpoint.
Expand Down Expand Up @@ -54,6 +55,13 @@ export interface Request extends RequestBase {
*/
alibabacloud_inference_id: Id
}
query_parameters: {
/**
* Specifies the amount of time to wait for the inference endpoint to be created.
* @server_default 30s
*/
timeout?: Duration
}
body: {
/**
* The chunking configuration object.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,15 +17,16 @@
* under the License.
*/

import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import {
AmazonBedrockServiceSettings,
AmazonBedrockServiceType,
AmazonBedrockTaskSettings,
AmazonBedrockTaskType
} from '@inference/_types/CommonTypes'
import { InferenceChunkingSettings } from '@inference/_types/Services'
import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import { Duration } from '@_types/Time'

/**
* Create an Amazon Bedrock inference endpoint.
Expand Down Expand Up @@ -57,6 +58,13 @@ export interface Request extends RequestBase {
*/
amazonbedrock_inference_id: Id
}
query_parameters: {
/**
* Specifies the amount of time to wait for the inference endpoint to be created.
* @server_default 30s
*/
timeout?: Duration
}
body: {
/**
* The chunking configuration object.
Expand Down
12 changes: 10 additions & 2 deletions specification/inference/put_anthropic/PutAnthropicRequest.ts
Original file line number Diff line number Diff line change
Expand Up @@ -17,15 +17,16 @@
* under the License.
*/

import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import {
AnthropicServiceSettings,
AnthropicServiceType,
AnthropicTaskSettings,
AnthropicTaskType
} from '@inference/_types/CommonTypes'
import { InferenceChunkingSettings } from '@inference/_types/Services'
import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import { Duration } from '@_types/Time'

/**
* Create an Anthropic inference endpoint.
Expand Down Expand Up @@ -55,6 +56,13 @@ export interface Request extends RequestBase {
*/
anthropic_inference_id: Id
}
query_parameters: {
/**
* Specifies the amount of time to wait for the inference endpoint to be created.
* @server_default 30s
*/
timeout?: Duration
}
body: {
/**
* The chunking configuration object.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,15 +17,16 @@
* under the License.
*/

import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import {
AzureAiStudioServiceSettings,
AzureAiStudioServiceType,
AzureAiStudioTaskSettings,
AzureAiStudioTaskType
} from '@inference/_types/CommonTypes'
import { InferenceChunkingSettings } from '@inference/_types/Services'
import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import { Duration } from '@_types/Time'

/**
 * Create an Azure AI Studio inference endpoint.
Expand Down Expand Up @@ -54,6 +55,13 @@ export interface Request extends RequestBase {
*/
azureaistudio_inference_id: Id
}
query_parameters: {
/**
* Specifies the amount of time to wait for the inference endpoint to be created.
* @server_default 30s
*/
timeout?: Duration
}
body: {
/**
* The chunking configuration object.
Expand Down
12 changes: 10 additions & 2 deletions specification/inference/put_azureopenai/PutAzureOpenAiRequest.ts
Original file line number Diff line number Diff line change
Expand Up @@ -17,15 +17,16 @@
* under the License.
*/

import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import {
AzureOpenAIServiceSettings,
AzureOpenAIServiceType,
AzureOpenAITaskSettings,
AzureOpenAITaskType
} from '@inference/_types/CommonTypes'
import { InferenceChunkingSettings } from '@inference/_types/Services'
import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import { Duration } from '@_types/Time'

/**
* Create an Azure OpenAI inference endpoint.
Expand Down Expand Up @@ -62,6 +63,13 @@ export interface Request extends RequestBase {
*/
azureopenai_inference_id: Id
}
query_parameters: {
/**
* Specifies the amount of time to wait for the inference endpoint to be created.
* @server_default 30s
*/
timeout?: Duration
}
body: {
/**
* The chunking configuration object.
Expand Down
12 changes: 10 additions & 2 deletions specification/inference/put_cohere/PutCohereRequest.ts
Original file line number Diff line number Diff line change
Expand Up @@ -17,15 +17,16 @@
* under the License.
*/

import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import {
CohereServiceSettings,
CohereServiceType,
CohereTaskSettings,
CohereTaskType
} from '@inference/_types/CommonTypes'
import { InferenceChunkingSettings } from '@inference/_types/Services'
import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import { Duration } from '@_types/Time'

/**
* Create a Cohere inference endpoint.
Expand Down Expand Up @@ -54,6 +55,13 @@ export interface Request extends RequestBase {
*/
cohere_inference_id: Id
}
query_parameters: {
/**
* Specifies the amount of time to wait for the inference endpoint to be created.
* @server_default 30s
*/
timeout?: Duration
}
body: {
/**
* The chunking configuration object.
Expand Down
12 changes: 10 additions & 2 deletions specification/inference/put_deepseek/PutDeepSeekRequest.ts
Original file line number Diff line number Diff line change
Expand Up @@ -17,14 +17,15 @@
* under the License.
*/

import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import {
DeepSeekServiceSettings,
DeepSeekServiceType
} from '@inference/_types/CommonTypes'
import { InferenceChunkingSettings } from '@inference/_types/Services'
import { TaskTypeDeepSeek } from '@inference/_types/TaskType'
import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import { Duration } from '@_types/Time'

/**
* Create a DeepSeek inference endpoint.
Expand Down Expand Up @@ -53,6 +54,13 @@ export interface Request extends RequestBase {
*/
deepseek_inference_id: Id
}
query_parameters: {
/**
* Specifies the amount of time to wait for the inference endpoint to be created.
* @server_default 30s
*/
timeout?: Duration
}
body: {
/**
* The chunking configuration object.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,15 +17,16 @@
* under the License.
*/

import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import {
ElasticsearchServiceSettings,
ElasticsearchServiceType,
ElasticsearchTaskSettings,
ElasticsearchTaskType
} from '@inference/_types/CommonTypes'
import { InferenceChunkingSettings } from '@inference/_types/Services'
import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import { Duration } from '@_types/Time'

/**
* Create an Elasticsearch inference endpoint.
Expand Down Expand Up @@ -68,6 +69,13 @@ export interface Request extends RequestBase {
*/
elasticsearch_inference_id: Id
}
query_parameters: {
/**
* Specifies the amount of time to wait for the inference endpoint to be created.
* @server_default 30s
*/
timeout?: Duration
}
body: {
/**
* The chunking configuration object.
Expand Down
12 changes: 10 additions & 2 deletions specification/inference/put_elser/PutElserRequest.ts
Original file line number Diff line number Diff line change
Expand Up @@ -17,14 +17,15 @@
* under the License.
*/

import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import {
ElserServiceSettings,
ElserServiceType,
ElserTaskType
} from '@inference/_types/CommonTypes'
import { InferenceChunkingSettings } from '@inference/_types/Services'
import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import { Duration } from '@_types/Time'

/**
* Create an ELSER inference endpoint.
Expand Down Expand Up @@ -68,6 +69,13 @@ export interface Request extends RequestBase {
*/
elser_inference_id: Id
}
query_parameters: {
/**
* Specifies the amount of time to wait for the inference endpoint to be created.
* @server_default 30s
*/
timeout?: Duration
}
body: {
/**
* The chunking configuration object.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,14 +17,15 @@
* under the License.
*/

import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import {
GoogleAiServiceType,
GoogleAiStudioServiceSettings,
GoogleAiStudioTaskType
} from '@inference/_types/CommonTypes'
import { InferenceChunkingSettings } from '@inference/_types/Services'
import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import { Duration } from '@_types/Time'

/**
 * Create a Google AI Studio inference endpoint.
Expand Down Expand Up @@ -53,6 +54,13 @@ export interface Request extends RequestBase {
*/
googleaistudio_inference_id: Id
}
query_parameters: {
/**
* Specifies the amount of time to wait for the inference endpoint to be created.
* @server_default 30s
*/
timeout?: Duration
}
body: {
/**
* The chunking configuration object.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,15 +17,16 @@
* under the License.
*/

import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import {
GoogleVertexAIServiceSettings,
GoogleVertexAIServiceType,
GoogleVertexAITaskSettings,
GoogleVertexAITaskType
} from '@inference/_types/CommonTypes'
import { InferenceChunkingSettings } from '@inference/_types/Services'
import { RequestBase } from '@_types/Base'
import { Id } from '@_types/common'
import { Duration } from '@_types/Time'

/**
* Create a Google Vertex AI inference endpoint.
Expand Down Expand Up @@ -54,6 +55,13 @@ export interface Request extends RequestBase {
*/
googlevertexai_inference_id: Id
}
query_parameters: {
/**
* Specifies the amount of time to wait for the inference endpoint to be created.
* @server_default 30s
*/
timeout?: Duration
}
body: {
/**
* The chunking configuration object.
Expand Down
Loading
Loading