diff --git a/dev-docs.json b/dev-docs.json new file mode 100644 index 000000000..ad795ee99 --- /dev/null +++ b/dev-docs.json @@ -0,0 +1,9 @@ +{ + "gitHubApp": { + "approvalWorkflow": true, + "userDocsWorkflows": [ + "generateUserDocs" + ], + "issues": true + } +} \ No newline at end of file diff --git a/fern/apis/api/generators.yml b/fern/apis/api/generators.yml index 7d578f35c..783674777 100644 --- a/fern/apis/api/generators.yml +++ b/fern/apis/api/generators.yml @@ -10,7 +10,7 @@ groups: python-sdk: generators: - name: fernapi/fern-python-sdk - version: 4.3.15 + version: 4.14.2 disable-examples: true api: settings: diff --git a/fern/docs.yml b/fern/docs.yml index 07111610c..84e4b84a2 100644 --- a/fern/docs.yml +++ b/fern/docs.yml @@ -239,16 +239,18 @@ navigation: path: workflows/nodes/gather.mdx - page: API Request path: workflows/nodes/api-request.mdx + - page: Assistant + path: workflows/nodes/assistant.mdx - page: Transfer path: workflows/nodes/transfer.mdx - page: Hangup path: workflows/nodes/hangup.mdx - - section: Edges contents: - page: Logical Conditions path: workflows/edges/logical-conditions.mdx - + - page: AI Conditions + path: workflows/edges/ai-conditions.mdx - section: Squads path: squads.mdx contents: diff --git a/fern/providers/observability/langfuse.mdx b/fern/providers/observability/langfuse.mdx index 6381fe5e9..2e7c6faf5 100644 --- a/fern/providers/observability/langfuse.mdx +++ b/fern/providers/observability/langfuse.mdx @@ -76,3 +76,39 @@ To make the most out of this integration, you can now use Langfuse's [evaluation + +## Enrich Traces +Vapi allows you to enrich Langfuse traces by integrating [Metadata](https://langfuse.com/docs/tracing-features/metadata) and [Tags](https://langfuse.com/docs/tracing-features/tags). 
+ +By default, we will add the following values to the metadata of each trace: + +- `call.metadata` +- `assistant.metadata` +- `assistantOverrides.metadata` +- `assistantOverrides.variableValues` + +### Usage +You can enhance your observability in Langfuse by adding metadata and tags: + +**Metadata** + +Use the [`assistant.observabilityPlan.metadata`](https://docs.vapi.ai/api-reference/assistants/create#request.body.observabilityPlan.metadata) field to attach custom key-value pairs. + +Examples: +- Track experiment versions ("experiment": "v2.1") +- Store user segments ("user_type": "beta_tester") +- Log environment details ("env": "production") + +**Tags** + +Use the [`assistant.observabilityPlan.tags`](https://docs.vapi.ai/api-reference/assistants/create#request.body.observabilityPlan.tags) field to add searchable labels. + +Examples: +- Mark important runs ("priority") +- Group related sessions ("onboarding", "A/B_test") +- Filter by feature ("voice_assistant") + +Adding metadata and tags makes it easier to filter, analyze, and monitor your assistant's activity in Langfuse. 
+ +### Example +![Langfuse Metadata Example](../../static/images/providers/langfuse-example.png) \ No newline at end of file diff --git a/fern/sdk/mcp-server.mdx b/fern/sdk/mcp-server.mdx index aada3e8a4..e72e8a587 100644 --- a/fern/sdk/mcp-server.mdx +++ b/fern/sdk/mcp-server.mdx @@ -163,16 +163,43 @@ async function main() { }); // Create SSE transport for connection to remote Vapi MCP server - const transport = new SSEClientTransport({ - url: 'https://mcp.vapi.ai/sse', - headers: { - 'Authorization': `Bearer ${process.env.VAPI_TOKEN}` - } - }); + const serverUrl = 'https://mcp.vapi.ai/sse'; + const headers = { + Authorization: `Bearer ${process.env.VAPI_TOKEN}`, + }; + const options = { + requestInit: { headers: headers }, + eventSourceInit: { + fetch: (url, init) => { + return fetch(url, { + ...(init || {}), + headers: { + ...(init?.headers || {}), + ...headers, + }, + }); + }, + }, + }; + const transport = new SSEClientTransport(new URL(serverUrl), options); console.log('Connecting to Vapi MCP server via SSE...'); await mcpClient.connect(transport); console.log('Connected successfully'); + + // Helper function to parse tool responses + function parseToolResponse(response) { + if (!response?.content) return response; + const textItem = response.content.find(item => item.type === 'text'); + if (textItem?.text) { + try { + return JSON.parse(textItem.text); + } catch { + return textItem.text; + } + } + return response; + } try { // List available tools @@ -189,7 +216,7 @@ async function main() { arguments: {}, }); - const assistants = assistantsResponse.content; + const assistants = parseToolResponse(assistantsResponse); if (!(Array.isArray(assistants) && assistants.length > 0)) { console.log('No assistants found. 
Please create an assistant in the Vapi dashboard first.'); return; @@ -207,7 +234,7 @@ async function main() { arguments: {}, }); - const phoneNumbers = phoneNumbersResponse.content; + const phoneNumbers = parseToolResponse(phoneNumbersResponse); if (!(Array.isArray(phoneNumbers) && phoneNumbers.length > 0)) { console.log('No phone numbers found. Please add a phone number in the Vapi dashboard first.'); return; @@ -236,7 +263,8 @@ async function main() { }, }); - console.log('Call created:', JSON.stringify(createCallResponse.content, null, 2)); + const createdCall = parseToolResponse(createCallResponse); + console.log('Call created:', JSON.stringify(createdCall, null, 2)); } finally { console.log('\nDisconnecting from server...'); await mcpClient.close(); diff --git a/fern/squads/silent-transfers.mdx b/fern/squads/silent-transfers.mdx index 6d28c4931..e589f2980 100644 --- a/fern/squads/silent-transfers.mdx +++ b/fern/squads/silent-transfers.mdx @@ -8,9 +8,13 @@ slug: squads/silent-transfers If you want to allow your call flow to move seamlessly from one assistant to another _without_ the caller hearing `Please hold while we transfer you` here’s what to do: 1. **Update the Destination Assistant’s First Message** - - Set the assistant’s `firstMessage` to an _empty string_. - - Make sure the `firstMessageMode` is set to `assistant-speaks-first-with-model-generated-message`. -2. **Trigger the Transfer from the Source Assistant** + - Set the assistant's `firstMessage` to an _empty string_. + - Set the assistant's `firstMessageMode` to `assistant-speaks-first-with-model-generated-message`. + +2. **Update the Squad's assistant destinations messages** + - For every `members[*].assistantDestinations[*]`, set the `message` property to an _empty string_. + +3. 
**Trigger the Transfer from the Source Assistant** - In that assistant’s prompt, include a line instructing it to transfer to the desired assistant: @@ -20,7 +24,7 @@ If you want to allow your call flow to move seamlessly from one assistant to ano - Replace `'assistantName'` with the exact name of the next assistant. -3. **Direct the Destination Assistant’s Behavior** +4. **Direct the Destination Assistant’s Behavior** - In that assistant’s prompt, include a line instructing it to _`Proceed directly to the Task section without any greetings or small talk.`_ - This ensures there’s no awkward greeting or “Hello!” when the next assistant begins speaking. @@ -38,6 +42,10 @@ Below are the key JSON examples you’ll need. These show how to structure your ### **HP Payment Squad With SubAgent** + + Make sure the `members[*].assistantDestinations[*].message` properties are set to an _empty string_. + + ```json { "members": [ @@ -97,6 +105,7 @@ Below are the key JSON examples you’ll need. These show how to structure your "temperature": 0.3 }, "firstMessage": "", + "firstMessageMode": "assistant-speaks-first-with-model-generated-message", "transcriber": { "model": "nova-2", "language": "en", diff --git a/fern/static/images/providers/langfuse-example.png b/fern/static/images/providers/langfuse-example.png new file mode 100644 index 000000000..28a488376 Binary files /dev/null and b/fern/static/images/providers/langfuse-example.png differ diff --git a/fern/static/images/workflows/logic-condition.png b/fern/static/images/workflows/logic-condition.png new file mode 100644 index 000000000..224100b57 Binary files /dev/null and b/fern/static/images/workflows/logic-condition.png differ diff --git a/fern/tools/default-tools.mdx b/fern/tools/default-tools.mdx index 1bfce8080..3d98b7720 100644 --- a/fern/tools/default-tools.mdx +++ b/fern/tools/default-tools.mdx @@ -62,9 +62,9 @@ This function is provided when `endCall` is included in the assistant's list of } ``` -#### Dial Keypad (DTMF) 
+#### Send Text -This function is provided when `dtmf` is included in the assistant's list of available tools (see configuration options [here](/api-reference/assistants/create#request.body.model.openai.tools.dtmf)). The assistant will be able to enter digits on the keypad. +This function is provided when `sms` is included in the assistant's list of available tool (see configuration options [here](/api-reference/assistants/create#request.body.model.openai.tools.sms)). The assistant can use this function to send SMS messages using a configured Twilio account. ```json { @@ -74,21 +74,24 @@ This function is provided when `dtmf` is included in the assistant's list of ava "messages": [ { "role": "system", - "content": "You are an assistant at a law firm. When you hit a menu, use the dtmf function to enter the digits." + "content": "You are an assistant. When the user asks you to send a text message, use the sms function." } ], "tools": [ { - "type": "dtmf" + "type": "sms", + "metadata": { + "from": "+15551234567" + } } ] } } ``` -#### Send Text +#### Dial Keypad (DTMF) -This function is provided when `sms` is included in the assistant’s list of available tool (see configuration options [here](/api-reference/assistants/create#request.body.model.openai.tools.sms)). The assistant can use this function to send SMS messages using a configured Twilio account. +This function is provided when `dtmf` is included in the assistant's list of available tools (see configuration options [here](/api-reference/assistants/create#request.body.model.openai.tools.dtmf)). The assistant will be able to enter digits on the keypad. ```json { @@ -98,21 +101,60 @@ This function is provided when `sms` is included in the assistant’s list of av "messages": [ { "role": "system", - "content": "You are an assistant. When the user asks you to send a text message, use the sms function." + "content": "You are an assistant at a law firm. When you hit a menu, use the dtmf function to enter the digits." 
} ], "tools": [ { - "type": "sms", - "metadata": { - "from": "+15551234567" - } + "type": "dtmf" } ] } } ``` + +There are three methods for sending DTMF in a phone call: + +1. **In-band DTMF**: DTMF tones are transmitted as part of the regular audio stream. This is the simplest method, but it can suffer from quality issues if the audio stream is compressed or degraded. +2. **Out-of-band DTMF via RFC 2833**: This method sends DTMF tones separately from the audio stream, within RTP (Real-Time Protocol) packets. It's typically more reliable than in-band DTMF, particularly for VoIP applications where the audio stream might be compressed. RFC 2833 is the standard that initially defined this method. It is now replaced by RFC 4733 but this method is still referred by RFC 2833. +3. **Out-of-band DTMF via SIP INFO messages**: In this approach, DTMF tones are sent as separate SIP INFO messages. While this can be more reliable than in-band DTMF, it's not as widely supported as the RFC 2833 method. + +As of writing, Vapi's DTMF tool uses in-band DTMF. Please note that this method may not work with certain IVRs. If you are running into this issue, the recommended approach is to have your assistant say the options out loud if available. For example, when an IVR says "Press 1 or say Sales for the Sales department," prefer having the assistant say "Sales." + + +#### Send Text + +1. **In-band**: tones are transmitted as part of the regular audio stream. This is the simplest method, but it can suffer from quality issues if the audio stream is compressed or degraded. +2. **Out-of-band via RFC 2833**: tones are transmitted separately from the audio stream, within RTP (Real-Time Protocol) packets. It's typically more reliable than in-band DTMF, particularly for VoIP applications where the audio stream might be compressed. RFC 2833 is the standard that initially defined this method. It is now replaced by RFC 4733 but this method is still referred by RFC 2833. +3. 
**Out-of-band via SIP INFO messages**: tones are sent as separate SIP INFO messages. While this can be more reliable than in-band DTMF, it's not as widely supported as the RFC 2833 method. + + +Vapi's DTMF tool uses in-band method. Please note that this method may not work with certain IVRs. If you are running into this issue, the recommended approach is to have your assistant say the options out loud if available. For example, when an IVR says "Press 1 or say Sales for the Sales department," prefer having the assistant say "Sales." + + +##### Tool Effectiveness + +To evaluate this tool, we set up a Vapi assistant with the DTMF tool enabled and conducted calls to a range of IVR systems, including a Twilio IVR (configured via Studio Flows) and several third-party IVRs such as pharmacies and insurance companies. + +**Testing Methodology** + +We called and navigated through the IVRs using three different strategies: + +1. **Direct Dialpad**: calling from a personal phone and dialing options using the dialpad. +2. **Vapi DTMF Tool**: an assistant configured with the DTMF tool. +3. **Manual DTMF Sound**: calling from a personal phone and playing DTMF tones generated by software. _(similar approach as the Vapi DTMF Tool)_ + +**Key Findings** + +- The assistant successfully navigated some of the third-party IVRs. +- The assistant encountered issues with Twilio IVRs, likely due to Twilio’s preference for RFC 2833. +- Observed occasional delays in DTMF tone transmission, which may affect effectiveness with IVRs that have short timeouts. + +**Conclusion** + +The tool's effectiveness depends on the IVR system's configuration and DTMF capturing method. We are working to improve compatibility and reduce transmission delays for broader and more reliable support. 
+ ### Custom Functions diff --git a/fern/workflows/edges/ai-conditions.mdx b/fern/workflows/edges/ai-conditions.mdx index 8c141b747..48474b481 100644 --- a/fern/workflows/edges/ai-conditions.mdx +++ b/fern/workflows/edges/ai-conditions.mdx @@ -1,32 +1,29 @@ --- title: AI Conditions -subtitle: Dynamic AI-driven branching in workflows +subtitle: Smart workflow branching powered by AI slug: /workflows/edges/ai-conditions --- ## Overview -The **AI Conditions** feature leverages artificial intelligence to determine the next step in your workflow based on conversation context. Unlike traditional logical conditions—which rely on explicit rules—AI Conditions allow your voice agent to evaluate complex or ambiguous scenarios, making branching decisions dynamically. +AI Conditions use artificial intelligence to decide the next step in your workflow based on the conversation. Instead of using fixed rules, they can understand complex situations and make smart decisions in real-time. ## How It Works -- **Contextual Evaluation:** The AI considers data from previous steps (e.g., user input, API responses) to gauge the conversation context. -- **Adaptive Decision-Making:** It uses its judgment to choose the most appropriate branch without relying solely on fixed comparisons. -- **Seamless Integration:** AI Conditions can complement existing logical conditions, offering a balance between predictable rules and adaptive behavior. +1. The AI looks at the conversation history and context +2. It makes a smart decision about which path to take, based on variables collected from Gather verbs and data returned from API requests. +3. Works alongside your existing rules for maximum flexibility -## Configuration -- **Activation:** Enable AI Conditions on a condition node where you want the AI to drive the branching logic. -- **Context Input:** The AI will utilize variables collected from Gather verbs and data returned from API requests. 
-- **Decision Logic:** No manual rules are required—the AI interprets context in real time to select the optimal branch. -- **Fallback:** You can combine AI Conditions with traditional logical conditions for added control. +## Configuration +- **Condition Node:** Start by inserting a condition node into your workflow. +- **Branch Setup:** Attach one or more nodes to the condition node. +- **AI Tag:** Click on the connecting edge and choose `AI` from the `Condition Type` dropdown +- **AI Condition** Use the input to define when the chosen branch should be taken. ## Usage -Deploy AI Conditions when your workflow requires flexibility and context-sensitive decision-making, such as: - -- Handling ambiguous or multi-faceted user responses. -- Addressing scenarios where strict rules may not capture the conversation's nuances. -- Enhancing the user experience by providing more natural, human-like interactions. - -For detailed configuration instructions and best practices, please refer to our dedicated documentation on AI-driven workflows. \ No newline at end of file +Use AI Conditions when you need: +- To handle unclear or complex user responses +- More flexibility than traditional rules can provide +- More natural, human-like conversations diff --git a/fern/workflows/edges/logical-conditions.mdx b/fern/workflows/edges/logical-conditions.mdx index ce1a96d0f..c352e3b65 100644 --- a/fern/workflows/edges/logical-conditions.mdx +++ b/fern/workflows/edges/logical-conditions.mdx @@ -12,10 +12,22 @@ Logical Conditions enable you to create branching paths within your workflow. Th - **Condition Node:** Start by inserting a condition node into your workflow. - **Branch Setup:** Attach one or more nodes to the condition node. -- **Logic Tag:** Click the "Logic" tag on each connecting edge to define rules or comparisons (e.g., equals, greater than) using variables collected from previous steps. 
+- **Logic Tag:** Click the "Logic" tag on each connecting edge and select `Logic` from the `Condition Type` dropdown. +- **Condition Type:** Choose between requiring ALL conditions to be met (AND logic) or ANY condition to be met (OR logic) +- **Logic Conditions** Use the panel to define one or more rules or comparisons (e.g., equals, greater than) using variables collected from previous steps. + + +To remove a comparison, click on the Trash icon to the right of the comparison. + + + + + ## Usage Implement Logical Conditions to guide your conversation dynamically. They allow your workflow to adjust its path based on real-time data, ensuring more personalized and responsive interactions. -For detailed configuration instructions and advanced usage, please refer to our dedicated documentation on condition configuration. \ No newline at end of file + +When [`Gathering`](/workflows/nodes/gather) string values that will be used in conditions, consider using `enum` types to ensure consistent value comparison. This helps prevent issues with case sensitivity, whitespace, or formatting differences that could affect condition evaluation. + \ No newline at end of file diff --git a/fern/workflows/nodes/assistant.mdx b/fern/workflows/nodes/assistant.mdx new file mode 100644 index 000000000..d55b3938a --- /dev/null +++ b/fern/workflows/nodes/assistant.mdx @@ -0,0 +1,23 @@ +--- +title: Assistant +subtitle: Speak to a configured assistant +slug: /workflows/nodes/assistant +--- + +## Overview + +The **Assistant** node enables a persistent conversation with one of your configured assistants. + +## Configuration + +- **Select an Assistant** Use the dropdown to select a pre-configured assistant. + +## Usage + +Add **Assistant** nodes as leaf nodes to enable ongoing conversations with your configured assistants. Currently, Assistant nodes must be placed at the end of a workflow branch, as they don't support transitioning to other nodes. 
This means the conversation with the assistant will continue until either the user ends the call or the assistant reaches a natural conclusion point. + +The assistant will use its configured system prompt while inheriting the transcriber and voice settings from the global workflow assistant. + + +Assistant nodes are currently designed as terminal nodes - they cannot be connected to other nodes in the workflow. This means the conversation will remain with the assistant until the call ends. Future updates will add support for AI-powered conditional branching. + \ No newline at end of file diff --git a/fern/workflows/nodes/gather.mdx b/fern/workflows/nodes/gather.mdx index cfc87d4ee..94d626788 100644 --- a/fern/workflows/nodes/gather.mdx +++ b/fern/workflows/nodes/gather.mdx @@ -20,4 +20,6 @@ Define one or more variables to gather from the user with: Use **Gather** to extract specific details from user responses—such as their name, email, or ZIP code—to inform subsequent steps in your conversation. The Gather node doesn't directly prompt users; instead, it analyzes the conversation history to find the requested information and will ask follow-up questions if the user's response isn't clear. Make sure to precede it with a [`Say`](/workflows/nodes/say) node that explicitly prompts the user for the information you want to gather. -To use an extracted string variable in a [`Conditional`](/workflows/edges/logical-conditions) branch, we recommend using the `enum` option. This ensures the extracted value will reliably match your conditions later in the workflow. \ No newline at end of file + +To use an extracted string variable in a [`Conditional`](/workflows/edges/logical-conditions) branch, we recommend using the `enum` option. This ensures the extracted value will reliably match your conditions later in the workflow. + \ No newline at end of file