diff --git a/fern/apis/api/openapi.json b/fern/apis/api/openapi.json index 39dbfd65f..db61683f5 100644 --- a/fern/apis/api/openapi.json +++ b/fern/apis/api/openapi.json @@ -2757,9 +2757,9 @@ } }, "/analytics": { - "post": { - "operationId": "AnalyticsController_query", - "summary": "Create Analytics Queries", + "get": { + "operationId": "AnalyticsController_getQuery", + "summary": "Get Analytics", "parameters": [], "requestBody": { "required": true, @@ -2784,9 +2784,6 @@ } } } - }, - "201": { - "description": "" } }, "tags": [ @@ -2802,7 +2799,7 @@ "/logs": { "get": { "operationId": "LoggingController_queryLogs", - "summary": "List logs", + "summary": "Get Logs", "parameters": [ { "name": "orgId", @@ -5600,6 +5597,116 @@ "provider" ] }, + "InflectionAIModel": { + "type": "object", + "properties": { + "messages": { + "description": "This is the starting state for the conversation.", + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIMessage" + } + }, + "tools": { + "type": "array", + "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.", + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/CreateDtmfToolDTO", + "title": "DtmfTool" + }, + { + "$ref": "#/components/schemas/CreateEndCallToolDTO", + "title": "EndCallTool" + }, + { + "$ref": "#/components/schemas/CreateVoicemailToolDTO", + "title": "VoicemailTool" + }, + { + "$ref": "#/components/schemas/CreateFunctionToolDTO", + "title": "FunctionTool" + }, + { + "$ref": "#/components/schemas/CreateGhlToolDTO", + "title": "GhlTool" + }, + { + "$ref": "#/components/schemas/CreateMakeToolDTO", + "title": "MakeTool" + }, + { + "$ref": "#/components/schemas/CreateTransferCallToolDTO", + "title": "TransferTool" + } + ] + } + }, + "toolIds": { + "description": "These are the tools that the assistant can use during the call. 
To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.", + "type": "array", + "items": { + "type": "string" + } + }, + "knowledgeBase": { + "description": "These are the options for the knowledge base.", + "oneOf": [ + { + "$ref": "#/components/schemas/CreateCanonicalKnowledgeBaseDTO", + "title": "Canonical" + }, + { + "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO", + "title": "Custom" + } + ] + }, + "knowledgeBaseId": { + "type": "string", + "description": "This is the ID of the knowledge base the model will use." + }, + "model": { + "type": "string", + "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b", + "enum": [ + "inflection_3_pi" + ] + }, + "provider": { + "type": "string", + "enum": [ + "inflection-ai" + ] + }, + "temperature": { + "type": "number", + "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.", + "minimum": 0, + "maximum": 2 + }, + "maxTokens": { + "type": "number", + "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.", + "minimum": 50, + "maximum": 10000 + }, + "emotionRecognitionEnabled": { + "type": "boolean", + "description": "This determines whether we detect user's emotion while they speak and send it as an additional info to model.\n\nDefault `false` because the model is usually are good at understanding the user's emotion from text.\n\n@default false" + }, + "numFastTurns": { + "type": "number", + "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. 
Example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0", + "minimum": 0 + } + }, + "required": [ + "model", + "provider" + ] + }, "OpenAIModel": { "type": "object", "properties": { @@ -6229,6 +6336,116 @@ "model" ] }, + "XaiModel": { + "type": "object", + "properties": { + "messages": { + "description": "This is the starting state for the conversation.", + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIMessage" + } + }, + "tools": { + "type": "array", + "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.", + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/CreateDtmfToolDTO", + "title": "DtmfTool" + }, + { + "$ref": "#/components/schemas/CreateEndCallToolDTO", + "title": "EndCallTool" + }, + { + "$ref": "#/components/schemas/CreateVoicemailToolDTO", + "title": "VoicemailTool" + }, + { + "$ref": "#/components/schemas/CreateFunctionToolDTO", + "title": "FunctionTool" + }, + { + "$ref": "#/components/schemas/CreateGhlToolDTO", + "title": "GhlTool" + }, + { + "$ref": "#/components/schemas/CreateMakeToolDTO", + "title": "MakeTool" + }, + { + "$ref": "#/components/schemas/CreateTransferCallToolDTO", + "title": "TransferTool" + } + ] + } + }, + "toolIds": { + "description": "These are the tools that the assistant can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.", + "type": "array", + "items": { + "type": "string" + } + }, + "knowledgeBase": { + "description": "These are the options for the knowledge base.", + "oneOf": [ + { + "$ref": "#/components/schemas/CreateCanonicalKnowledgeBaseDTO", + "title": "Canonical" + }, + { + "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO", + "title": "Custom" + } + ] + }, + "knowledgeBaseId": { + "type": "string", + "description": "This is the ID of the knowledge base the model will use." 
+ }, + "model": { + "type": "string", + "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b", + "enum": [ + "grok-beta" + ] + }, + "provider": { + "type": "string", + "enum": [ + "xai" + ] + }, + "temperature": { + "type": "number", + "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.", + "minimum": 0, + "maximum": 2 + }, + "maxTokens": { + "type": "number", + "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.", + "minimum": 50, + "maximum": 10000 + }, + "emotionRecognitionEnabled": { + "type": "boolean", + "description": "This determines whether we detect user's emotion while they speak and send it as an additional info to model.\n\nDefault `false` because the model is usually are good at understanding the user's emotion from text.\n\n@default false" + }, + "numFastTurns": { + "type": "number", + "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. 
Example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0", + "minimum": 0 + } + }, + "required": [ + "model", + "provider" + ] + }, "ExactReplacement": { "type": "object", "properties": { @@ -8787,6 +9004,10 @@ "$ref": "#/components/schemas/GroqModel", "title": "Groq" }, + { + "$ref": "#/components/schemas/InflectionAIModel", + "title": "InflectionAI" + }, { "$ref": "#/components/schemas/OpenAIModel", "title": "OpenAI" @@ -8806,6 +9027,10 @@ { "$ref": "#/components/schemas/VapiModel", "title": "Vapi" + }, + { + "$ref": "#/components/schemas/XaiModel", + "title": "XAI" } ] }, @@ -9191,6 +9416,10 @@ "$ref": "#/components/schemas/GroqModel", "title": "Groq" }, + { + "$ref": "#/components/schemas/InflectionAIModel", + "title": "InflectionAI" + }, { "$ref": "#/components/schemas/OpenAIModel", "title": "OpenAI" @@ -9210,6 +9439,10 @@ { "$ref": "#/components/schemas/VapiModel", "title": "Vapi" + }, + { + "$ref": "#/components/schemas/XaiModel", + "title": "XAI" } ] }, @@ -10625,6 +10858,10 @@ "$ref": "#/components/schemas/GroqModel", "title": "Groq" }, + { + "$ref": "#/components/schemas/InflectionAIModel", + "title": "InflectionAI" + }, { "$ref": "#/components/schemas/OpenAIModel", "title": "OpenAI" @@ -10644,6 +10881,10 @@ { "$ref": "#/components/schemas/VapiModel", "title": "Vapi" + }, + { + "$ref": "#/components/schemas/XaiModel", + "title": "XAI" } ] }, @@ -11053,6 +11294,10 @@ "$ref": "#/components/schemas/GroqModel", "title": "Groq" }, + { + "$ref": "#/components/schemas/InflectionAIModel", + "title": "InflectionAI" + }, { "$ref": "#/components/schemas/OpenAIModel", "title": "OpenAI" @@ -11072,6 +11317,10 @@ { "$ref": "#/components/schemas/VapiModel", "title": "Vapi" + }, + { + "$ref": "#/components/schemas/XaiModel", + "title": "XAI" } ] }, @@ -12661,11 +12910,11 @@ "properties": { "removeStopWords": { "type": "boolean", - "description": "If true, stop words (specified in server/src/stop-words.txt in the git repo) will be removed. 
Queries that are entirely stop words will be preserved." + "description": "If true, stop words (specified in server/src/stop-words.txt in the git repo) will be removed. This will preserve queries that are entirely stop words." }, "scoreThreshold": { "type": "number", - "description": "Set score_threshold to a float to filter out chunks with a score below the threshold for cosine distance metric. For Manhattan Distance, Euclidean Distance, and Dot Product, it will filter out scores above the threshold distance. This threshold applies before weight and bias modifications. If not specified, this defaults to no threshold. A threshold of 0 will default to no threshold." + "description": "This is the score threshold to filter out chunks with a score below the threshold for cosine distance metric. For Manhattan Distance, Euclidean Distance, and Dot Product, it will filter out scores above the threshold distance. This threshold applies before weight and bias modifications. If not specified, this defaults to no threshold. A threshold of 0 will default to no threshold." }, "searchType": { "type": "string", @@ -12686,6 +12935,7 @@ "type": "object", "properties": { "fileIds": { + "description": "These are the file ids that will be used to create the vector store. To upload files, use the `POST /files` endpoint.", "type": "array", "items": { "type": "string" @@ -12693,10 +12943,10 @@ }, "targetSplitsPerChunk": { "type": "number", - "description": "Target splits per chunk. This is an optional field which allows you to specify the number of splits you want per chunk. If not specified, the default 20 is used. However, you may want to use a different number." + "description": "This is an optional field which allows you to specify the number of splits you want per chunk. If not specified, the default 20 is used. However, you may want to use a different number." 
}, "splitDelimiters": { - "description": "Split delimiters is an optional field which allows you to specify the delimiters to use when splitting the file before chunking the text. If not specified, the default [.!?\\n] are used to split into sentences. However, you may want to use spaces or other delimiters.", + "description": "This is an optional field which allows you to specify the delimiters to use when splitting the file before chunking the text. If not specified, the default [.!?\\n] are used to split into sentences. However, you may want to use spaces or other delimiters.", "type": "array", "items": { "type": "string" @@ -12704,7 +12954,7 @@ }, "rebalanceChunks": { "type": "boolean", - "description": "Rebalance chunks is an optional field which allows you to specify whether or not to rebalance the chunks created from the file. If not specified, the default true is used. If true, Trieve will evenly distribute remainder splits across chunks such that 66 splits with a target_splits_per_chunk of 20 will result in 3 chunks with 22 splits each." + "description": "This is an optional field which allows you to specify whether or not to rebalance the chunks created from the file. If not specified, the default true is used. If true, Trieve will evenly distribute remainder splits across chunks such that 66 splits with a target_splits_per_chunk of 20 will result in 3 chunks with 22 splits each." } }, "required": [ @@ -12716,7 +12966,7 @@ "properties": { "provider": { "type": "string", - "description": "This knowledge base is provided by Trieve.\nhttps://trieve.ai", + "description": "This knowledge base is provided by Trieve.\n\nTo learn more about Trieve, visit https://trieve.ai.", "enum": [ "trieve" ] }, @@ -12742,7 +12992,8 @@ ] }, "vectorStoreProviderId": { - "type": "string" + "type": "string", + "description": "This is a vector store that you already have on your account with the provider. 
To create a new vector store, use vectorStoreCreatePlan.\n\nUsage:\n- To bring your own vector store from Trieve, go to https://trieve.ai\n- Create a dataset, and use the datasetId here." }, "id": { "type": "string", @@ -12838,7 +13089,7 @@ "properties": { "provider": { "type": "string", - "description": "This knowledge base is provided by Trieve.\nhttps://trieve.ai", + "description": "This knowledge base is provided by Trieve.\n\nTo learn more about Trieve, visit https://trieve.ai.", "enum": [ "trieve" ] }, @@ -12864,7 +13115,8 @@ ] }, "vectorStoreProviderId": { - "type": "string" + "type": "string", + "description": "This is a vector store that you already have on your account with the provider. To create a new vector store, use vectorStoreCreatePlan.\n\nUsage:\n- To bring your own vector store from Trieve, go to https://trieve.ai\n- Create a dataset, and use the datasetId here." } }, "required": [