New Components - vapi #15469 (Merged)
Changes from all commits · 6 commits
New file: vapi-create-call action (+63 lines)
```javascript
import { ConfigurationError } from "@pipedream/platform";
import vapi from "../../vapi.app.mjs";

export default {
  key: "vapi-create-call",
  name: "Create Call",
  description: "Starts a new conversation with an assistant. [See the documentation](https://docs.vapi.ai/api-reference/calls/create)",
  version: "0.0.1",
  type: "action",
  props: {
    vapi,
    name: {
      type: "string",
      label: "Conversation Name",
      description: "Name of the new conversation",
      optional: true,
    },
    assistantId: {
      propDefinition: [
        vapi,
        "assistantId",
      ],
      optional: true,
    },
    squadId: {
      propDefinition: [
        vapi,
        "squadId",
      ],
      optional: true,
    },
    phoneNumberId: {
      propDefinition: [
        vapi,
        "phoneNumberId",
      ],
    },
    customerId: {
      type: "string",
      label: "Customer ID",
      description: "ID of the customer for the conversation",
      optional: true,
    },
  },
  async run({ $ }) {
    // The call needs a target: either a single assistant or a squad.
    if (!this.assistantId && !this.squadId) {
      throw new ConfigurationError("Specify either `Assistant Id` or `Squad Id`");
    }

    const response = await this.vapi.startConversation({
      $,
      data: {
        assistantId: this.assistantId,
        squadId: this.squadId,
        phoneNumberId: this.phoneNumberId,
        name: this.name,
        customerId: this.customerId,
      },
    });
    $.export("$summary", `Conversation created with ID ${response.id}`);
    return response;
  },
};
```
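The shared app file `vapi.app.mjs` is not part of this diff view, so `startConversation` is opaque here. A minimal sketch of what such a method plausibly looks like, assuming an axios-style `_makeRequest` helper and Vapi's `POST /call` endpoint; the `_baseUrl`, `_makeRequest`, and `$auth.api_key` names are assumptions, not confirmed by the PR:

```javascript
// Hypothetical sketch of the app method the action calls above.
// The real vapi.app.mjs is not shown in this diff.
import { axios } from "@pipedream/platform";

export default {
  type: "app",
  app: "vapi",
  methods: {
    _baseUrl() {
      return "https://api.vapi.ai";
    },
    // Shared request helper: attaches the base URL and bearer auth,
    // then forwards everything else (method, path, data) to axios.
    _makeRequest({ $ = this, path, ...opts }) {
      return axios($, {
        url: `${this._baseUrl()}${path}`,
        headers: {
          Authorization: `Bearer ${this.$auth.api_key}`, // assumed auth field name
        },
        ...opts,
      });
    },
    // POST /call creates an outbound call per the linked Vapi docs.
    startConversation(opts = {}) {
      return this._makeRequest({
        method: "POST",
        path: "/call",
        ...opts,
      });
    },
  },
};
```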
        
          
          
New file: components/vapi/actions/update-assistant-settings/update-assistant-settings.mjs (+265 lines)
```javascript
import {
  BACKGROUND_SOUND,
  CLIENT_MESSAGE_OPTIONS,
  FIRST_MESSAGE_MODE_OPTIONS,
  SERVER_MESSAGE_OPTIONS,
} from "../../common/constants.mjs";
import {
  clearObj,
  parseObject,
} from "../../common/utils.mjs";
import vapi from "../../vapi.app.mjs";

export default {
  key: "vapi-update-assistant-settings",
  name: "Update Assistant Settings",
  description: "Updates the configuration settings for a specific assistant. [See the documentation](https://docs.vapi.ai/api-reference/assistants/update)",
  version: "0.0.1",
  type: "action",
  props: {
    vapi,
    assistantId: {
      propDefinition: [
        vapi,
        "assistantId",
      ],
    },
    transcriber: {
      type: "object",
      label: "Transcriber",
      description: "A formatted JSON object for the assistant's transcriber. **Example: { \"provider\": \"talkscriber\", \"language\": \"en\", \"model\": \"whisper\" }**. [See the documentation](https://docs.vapi.ai/api-reference/assistants/update) for further details",
      optional: true,
    },
    model: {
      type: "object",
      label: "Model",
      description: "A formatted JSON object for the assistant's LLM. **Example: {\"provider\": \"xai\", \"model\": \"grok-beta\", \"emotionRecognitionEnabled\": true, \"knowledgeBase\": {\"server\": {\"url\": \"url\", \"timeoutSeconds\": 20}}, \"knowledgeBaseId\": \"model\", \"maxTokens\": 1.1, \"messages\": [{\"role\": \"assistant\"}], \"numFastTurns\": 1.1, \"temperature\": 1.1, \"toolIds\": [\"model\"], \"tools\": [{\"type\": \"transferCall\", \"async\": false}]}**. [See the documentation](https://docs.vapi.ai/api-reference/assistants/update) for further details",
      optional: true,
    },
    voice: {
      type: "object",
      label: "Voice",
      description: "A formatted JSON object for the assistant's voice. **Example: {\"provider\":\"tavus\",\"voiceId\":\"r52da2535a\",\"callbackUrl\":\"voice\",\"chunkPlan\":{\"enabled\":true,\"minCharacters\":30,\"punctuationBoundaries\":[\"。\",\",\",\".\",\"!\",\"?\",\";\",\"،\",\",\",\"।\",\"॥\",\"|\",\"||\",\",\",\":\"],\"formatPlan\":{\"enabled\":true,\"numberToDigitsCutoff\":2025}},\"conversationName\":\"voice\",\"conversationalContext\":\"voice\",\"customGreeting\":\"voice\",\"fallbackPlan\":{\"voices\":[{\"provider\":\"tavus\",\"voiceId\":\"r52da2535a\"}]},\"personaId\":\"voice\",\"properties\":{\"maxCallDuration\":1.1,\"participantLeftTimeout\":1.1,\"participantAbsentTimeout\":1.1,\"enableRecording\":true,\"enableTranscription\":true,\"applyGreenscreen\":true,\"language\":\"language\",\"recordingS3BucketName\":\"recordingS3BucketName\",\"recordingS3BucketRegion\":\"recordingS3BucketRegion\",\"awsAssumeRoleArn\":\"awsAssumeRoleArn\"}}**. [See the documentation](https://docs.vapi.ai/api-reference/assistants/update) for further details",
      optional: true,
    },
    firstMessage: {
      type: "string",
      label: "First Message",
      description: "The first message the assistant will say, or a URL to an audio file. If unspecified, the assistant will wait for the user to speak and use the model to respond once they do.",
      optional: true,
    },
    firstMessageMode: {
      type: "string",
      label: "First Message Mode",
      description: "Mode for the first message",
      optional: true,
      options: FIRST_MESSAGE_MODE_OPTIONS,
    },
    hipaaEnabled: {
      type: "boolean",
      label: "HIPAA Enabled",
      description: "When this is enabled, no logs, recordings, or transcriptions will be stored. At the end of the call, you will still receive an end-of-call-report message to store on your server.",
      optional: true,
    },
    clientMessages: {
      type: "string[]",
      label: "Client Messages",
      description: "These are the messages that will be sent to your Client SDKs",
      options: CLIENT_MESSAGE_OPTIONS,
      optional: true,
    },
    serverMessages: {
      type: "string[]",
      label: "Server Messages",
      description: "These are the messages that will be sent to your Server URL",
      options: SERVER_MESSAGE_OPTIONS,
      optional: true,
    },
    silenceTimeoutSeconds: {
      type: "integer",
      label: "Silence Timeout Seconds",
      description: "How many seconds of silence to wait before ending the call.",
      optional: true,
      default: 30,
      min: 10,
      max: 3600,
    },
    maxDurationSeconds: {
      type: "integer",
      label: "Max Duration Seconds",
      description: "This is the maximum number of seconds that the call will last. When the call reaches this duration, it will be ended.",
      optional: true,
      default: 600,
      min: 10,
      max: 43200,
    },
    backgroundSound: {
      type: "string",
      label: "Background Sound",
      description: "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.",
      optional: true,
      options: BACKGROUND_SOUND,
    },
    backgroundDenoisingEnabled: {
      type: "boolean",
      label: "Background Denoising Enabled",
      description: "This enables filtering of noise and background speech while the user is talking. Default false while in beta.",
      optional: true,
    },
    modelOutputInMessagesEnabled: {
      type: "boolean",
      label: "Model Output in Messages Enabled",
      description: "This determines whether the model's output is used in conversation history rather than the transcription of the assistant's speech. Default false while in beta.",
      optional: true,
    },
    transportConfigurations: {
      type: "string[]",
      label: "Transport Configurations",
      description: "These are the configurations to be passed to the transport providers of the assistant's calls, like Twilio. You can store multiple configurations for different transport providers. For a call, only the configuration matching the call transport provider is used. **Example: [{\"provider\":\"twilio\",\"timeout\":60,\"record\":false,\"recordingChannels\":\"mono\"}]**. [See the documentation](https://docs.vapi.ai/api-reference/assistants/update) for further details",
      optional: true,
    },
    credentials: {
      type: "string[]",
      label: "Credentials",
      description: "These are dynamic credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call, but you can supplement additional credentials using this. Dynamic credentials override existing credentials. **Example: [{\"provider\":\"xai\",\"apiKey\":\"credentials\",\"name\":\"credentials\"}]**. [See the documentation](https://docs.vapi.ai/api-reference/assistants/update) for further details",
      optional: true,
    },
    name: {
      type: "string",
      label: "Name",
      description: "Name of the assistant. This is required when you want to transfer between assistants in a call.",
      optional: true,
    },
    voicemailDetection: {
      type: "object",
      label: "Voicemail Detection",
      description: "These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using model.tools=[VoicemailTool]. This uses Twilio's built-in detection, while the VoicemailTool relies on the model to detect if a voicemail was reached. You can use neither of them, one of them, or both of them. By default, Twilio built-in detection is enabled while VoicemailTool is not. **Example: {\"provider\":\"twilio\",\"voicemailDetectionTypes\":[\"machine_end_beep\",\"machine_end_silence\"],\"enabled\":true,\"machineDetectionTimeout\":1.1,\"machineDetectionSpeechThreshold\":1.1,\"machineDetectionSpeechEndThreshold\":1.1,\"machineDetectionSilenceTimeout\":1.1}**. [See the documentation](https://docs.vapi.ai/api-reference/assistants/update) for further details",
      optional: true,
    },
    voicemailMessage: {
      type: "string",
      label: "Voicemail Message",
      description: "This is the message that the assistant will say if the call is forwarded to voicemail. If unspecified, it will hang up",
      optional: true,
    },
    endCallMessage: {
      type: "string",
      label: "End Call Message",
      description: "This is the message that the assistant will say if it ends the call. If unspecified, it will hang up without saying anything",
      optional: true,
    },
    endCallPhrases: {
      type: "string[]",
      label: "End Call Phrases",
      description: "A list containing phrases that, if spoken by the assistant, will trigger the call to be hung up. Case insensitive.",
      optional: true,
    },
    metadata: {
      type: "object",
      label: "Metadata",
      description: "This is for metadata you want to store on the assistant.",
      optional: true,
    },
    analysisPlan: {
      type: "object",
      label: "Analysis Plan",
      description: "This is the plan for analysis of the assistant's calls. Stored in `call.analysis`. **Example: {\"summaryPlan\":{\"messages\":[{\"key\":\"value\"}],\"enabled\":true,\"timeoutSeconds\":1.1},\"structuredDataPlan\":{\"messages\":[{\"key\":\"value\"}],\"enabled\":true,\"schema\":{\"type\":\"string\"},\"timeoutSeconds\":1.1},\"successEvaluationPlan\":{\"rubric\":\"NumericScale\",\"messages\":[{\"key\":\"value\"}],\"enabled\":true,\"timeoutSeconds\":1.1}}**. [See the documentation](https://docs.vapi.ai/api-reference/assistants/update) for further details",
      optional: true,
    },
    artifactPlan: {
      type: "object",
      label: "Artifact Plan",
      description: "This is the plan for artifacts generated during the assistant's calls. Stored in call.artifact. **Note:** `recordingEnabled` is currently at the root level. It will be moved to `artifactPlan` in the future, but will remain backwards compatible. **Example: {\"recordingEnabled\":true,\"videoRecordingEnabled\":false,\"transcriptPlan\":{\"enabled\":true,\"assistantName\":\"assistantName\",\"userName\":\"userName\"},\"recordingPath\":\"recordingPath\"}**. [See the documentation](https://docs.vapi.ai/api-reference/assistants/update) for further details",
      optional: true,
    },
    messagePlan: {
      type: "object",
      label: "Message Plan",
      description: "This is the plan for static predefined messages that can be spoken by the assistant during the call, like idleMessages. **Note:** `firstMessage`, `voicemailMessage`, and `endCallMessage` are currently at the root level. They will be moved to `messagePlan` in the future, but will remain backwards compatible. **Example: {\"idleMessages\":[\"idleMessages\"],\"idleMessageMaxSpokenCount\":1.1,\"idleTimeoutSeconds\":1.1}**. [See the documentation](https://docs.vapi.ai/api-reference/assistants/update) for further details",
      optional: true,
    },
    startSpeakingPlan: {
      type: "object",
      label: "Start Speaking Plan",
      description: "This is the plan for when the assistant should start talking. **Example: {\"waitSeconds\":0.4,\"smartEndpointingEnabled\":false,\"customEndpointingRules\":[{\"type\":\"both\",\"assistantRegex\":\"customEndpointingRules\",\"customerRegex\":\"customEndpointingRules\",\"timeoutSeconds\":1.1}],\"transcriptionEndpointingPlan\":{\"onPunctuationSeconds\":0.1,\"onNoPunctuationSeconds\":1.5,\"onNumberSeconds\":0.5}}**. [See the documentation](https://docs.vapi.ai/api-reference/assistants/update) for further details",
      optional: true,
    },
    stopSpeakingPlan: {
      type: "object",
      label: "Stop Speaking Plan",
      description: "This is the plan for when the assistant should stop talking on customer interruption. **Example: {\"numWords\":0,\"voiceSeconds\":0.2,\"backoffSeconds\":1}**. [See the documentation](https://docs.vapi.ai/api-reference/assistants/update) for further details",
      optional: true,
    },
    monitorPlan: {
      type: "object",
      label: "Monitor Plan",
      description: "This is the plan for real-time monitoring of the assistant's calls. **Note:** `serverMessages`, `clientMessages`, `serverUrl` and `serverUrlSecret` are currently at the root level but will be moved to `monitorPlan` in the future. Will remain backwards compatible. **Example: {\"listenEnabled\":false,\"controlEnabled\":false}**. [See the documentation](https://docs.vapi.ai/api-reference/assistants/update) for further details",
      optional: true,
    },
    credentialIds: {
      type: "string[]",
      label: "Credential IDs",
      description: "These are the credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call, but you can provide a subset using this.",
      optional: true,
    },
    server: {
      type: "object",
      label: "Server",
      description: "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema. **Example: {\"url\":\"url\",\"timeoutSeconds\":20,\"secret\":\"secret\",\"headers\":{\"key\":\"value\"}}**. [See the documentation](https://docs.vapi.ai/api-reference/assistants/update) for further details",
      optional: true,
    },
  },
  async run({ $ }) {
    // Split the props: JSON-like props are parsed below, while simple
    // scalar props pass straight through in ...data.
    const {
      vapi,
      assistantId,
      transcriber,
      model,
      voice,
      clientMessages,
      serverMessages,
      transportConfigurations,
      credentials,
      voicemailDetection,
      endCallPhrases,
      metadata,
      analysisPlan,
      artifactPlan,
      messagePlan,
      startSpeakingPlan,
      stopSpeakingPlan,
      monitorPlan,
      credentialIds,
      server,
      ...data
    } = this;

    const response = await vapi.updateAssistant({
      $,
      assistantId,
      data: clearObj({
        ...data,
        transcriber: parseObject(transcriber),
        model: parseObject(model),
        voice: parseObject(voice),
        clientMessages: parseObject(clientMessages),
        serverMessages: parseObject(serverMessages),
        transportConfigurations: parseObject(transportConfigurations),
        credentials: parseObject(credentials),
        voicemailDetection: parseObject(voicemailDetection),
        endCallPhrases: parseObject(endCallPhrases),
        metadata: parseObject(metadata),
        analysisPlan: parseObject(analysisPlan),
        artifactPlan: parseObject(artifactPlan),
        messagePlan: parseObject(messagePlan),
        startSpeakingPlan: parseObject(startSpeakingPlan),
        stopSpeakingPlan: parseObject(stopSpeakingPlan),
        monitorPlan: parseObject(monitorPlan),
        credentialIds: parseObject(credentialIds),
        server: parseObject(server),
      }),
    });
    $.export("$summary", `Updated assistant ${this.assistantId} successfully`);
    return response;
  },
};
```
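`clearObj` and `parseObject` come from `common/utils.mjs`, which this diff view does not include. Judging from how they are used above — props typed `object`/`string[]` may arrive as JSON strings, and blank optional props should not be sent to the API — a plausible reconstruction looks like the following (hypothetical; the merged implementations may differ). The option lists imported from `common/constants.mjs` are likewise not shown here.

```javascript
// Hypothetical reconstructions of the helpers used above; the real
// common/utils.mjs is not part of this diff view.

// parseObject: accept a value that may arrive as a JSON string,
// an array of JSON strings, or an already-parsed object, and
// return the parsed form. Leaves unparseable strings untouched.
export function parseObject(obj) {
  if (!obj) return undefined;
  if (Array.isArray(obj)) {
    return obj.map((item) => {
      if (typeof item === "string") {
        try {
          return JSON.parse(item);
        } catch (e) {
          return item;
        }
      }
      return item;
    });
  }
  if (typeof obj === "string") {
    try {
      return JSON.parse(obj);
    } catch (e) {
      return obj;
    }
  }
  return obj;
}

// clearObj: drop undefined/empty entries so optional props the user
// left blank are not sent to the API as empty values.
export function clearObj(obj) {
  return Object.fromEntries(
    Object.entries(obj).filter(([, v]) => v !== undefined && v !== ""),
  );
}
```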
  
    
New file: vapi-upload-file action (+34 lines)
```javascript
import FormData from "form-data";
import fs from "fs";
import { checkTmp } from "../../common/utils.mjs";
import vapi from "../../vapi.app.mjs";

export default {
  key: "vapi-upload-file",
  name: "Upload File",
  description: "Uploads a new file. [See the documentation](https://docs.vapi.ai/api-reference)",
  version: "0.0.1",
  type: "action",
  props: {
    vapi,
    file: {
      type: "string",
      label: "File",
      description: "The path to the file saved to the `/tmp` directory (e.g. `/tmp/example.txt`). [See the documentation](https://pipedream.com/docs/workflows/steps/code/nodejs/working-with-files/#the-tmp-directory).",
    },
  },
  async run({ $ }) {
    const formData = new FormData();
    const filePath = checkTmp(this.file);

    // Stream the file from /tmp rather than buffering it in memory.
    formData.append("file", fs.createReadStream(filePath));

    const response = await this.vapi.uploadFile({
      $,
      data: formData,
      headers: formData.getHeaders(),
    });
    $.export("$summary", `File uploaded successfully with ID ${response.id} and status ${response.status}`);
    return response;
  },
};
```
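`checkTmp` is another `common/utils.mjs` helper missing from this view. Given the `file` prop's description, it most likely just normalizes the supplied path so it points into the `/tmp` directory; a hypothetical sketch:

```javascript
// Hypothetical sketch of checkTmp from common/utils.mjs (not in this diff).
// Ensures a user-supplied path is rooted in Pipedream's writable /tmp dir.
export function checkTmp(filename) {
  if (!filename.startsWith("/tmp")) {
    return `/tmp/${filename}`;
  }
  return filename;
}
```

With that behavior, passing either `example.txt` or `/tmp/example.txt` to the action resolves to the same on-disk file; `uploadFile` on the app presumably wraps a multipart `POST` to Vapi's file-upload endpoint, which is why the action forwards `formData.getHeaders()`.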
      