@@ -1210,6 +1210,99 @@ app.post('/api/chat', requireAuth, async (req, res) => {
12101210 }
12111211} ) ;
12121212
// Planner workflow generation endpoint.
// Accepts the client's current application context and asks the AI (via the
// internal /api/chat endpoint) to produce a workflow graph (title, nodes,
// edges) that fits that context. Falls back to a minimal two-node workflow
// when the AI response contains no parseable JSON.
app.post('/api/planner/generate-from-context', requireAuth, async (req, res) => {
  try {
    const { context } = req.body;
    if (!context) {
      return res.status(400).json({ error: 'Context is required' });
    }

    // System prompt: describes the JSON contract the model must return.
    const systemPrompt = `You are a workflow generation assistant. Based on the provided application context, generate a workflow graph with nodes and connections that would be useful for the user's current situation.

Return a JSON object with:
- title: A descriptive title for the workflow
- nodes: Array of node objects with id, type, position {x, y}, and data {label, description}
- edges: Array of edge objects with id, source, target

Available node types: module, assistant, task, tool, workflow, connector, source, model-provider

Focus on creating practical, actionable workflows that relate to the user's current context.`;

    // Slice BEFORE formatting so only the last three history entries are
    // processed; guard h.parts with ?. so a malformed entry cannot throw.
    const recentHistory = context.orchestratorHistory
      ?.slice(-3)
      .map((h) => `${h.role}: ${h.parts?.[0]?.text}`)
      .join('\n') || 'No recent messages';

    const userPrompt = `Current Application Context:
- Active App: ${context.activeApp}
- Active Module: ${context.activeModule ? `${context.activeModule.id} (${context.activeModule['Module Title']})` : 'None'}
- Connected Services: ${context.connectedServices?.join(', ') || 'None'}
- Recent Chat History: ${recentHistory}

Generate a workflow that would be helpful for this context.`;

    // Use the chat endpoint internally (loopback HTTP request) so model
    // routing stays in one place; forward the session cookie for requireAuth.
    const chatResponse = await fetch(`${req.protocol}://${req.get('host')}/api/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Cookie': req.headers.cookie || ''
      },
      body: JSON.stringify({
        model: 'gemini-2.5-flash',
        messages: [
          { role: 'system', content: systemPrompt },
          { role: 'user', content: userPrompt }
        ]
      })
    });

    if (!chatResponse.ok) {
      throw new Error('Failed to generate workflow via AI');
    }

    const aiResponse = await chatResponse.text();

    // Try to extract JSON from the response: prefer a fenced ```json block,
    // otherwise fall back to the first {...} span in the raw text.
    let workflowData;
    try {
      const jsonMatch =
        aiResponse.match(/```json\n([\s\S]*?)\n```/) ||
        aiResponse.match(/\{[\s\S]*\}/);
      if (jsonMatch) {
        workflowData = JSON.parse(jsonMatch[1] || jsonMatch[0]);
      } else {
        throw new Error('No JSON found in AI response');
      }
    } catch (parseError) {
      // Fallback: build a simple default workflow so the caller always
      // receives a usable graph even when the AI output is unparseable.
      workflowData = {
        title: `Workflow for ${context.activeApp || 'Current Context'}`,
        nodes: [
          {
            id: 'start-1',
            type: 'default',
            position: { x: 100, y: 100 },
            data: { label: 'Start', description: 'Generated workflow starting point' }
          },
          {
            id: 'context-1',
            type: 'default',
            position: { x: 300, y: 100 },
            data: { label: 'Current Context', description: `Working with ${context.activeApp}` }
          }
        ],
        edges: [
          { id: 'e1-2', source: 'start-1', target: 'context-1' }
        ]
      };
    }

    res.json(workflowData);
  } catch (error) {
    logger.error('Workflow generation failed:', error);
    res.status(500).json({ error: 'Failed to generate workflow' });
  }
});
12131306// Universal endpoint to get available models from all connected services
12141307app . get ( '/api/models' , requireAuth , async ( req , res ) => {
12151308 try {
@@ -1277,6 +1370,87 @@ app.get('/api/models', requireAuth, async (req, res) => {
12771370 }
12781371 }
12791372
// Statically-listed models for additional connected providers. Each group is
// appended to availableModels only when the user's connection for that
// provider is configured (API key — or service URL for self-hosted ones).
const extraProviderCatalog = [
  {
    enabled: connections.huggingface?.apiKey,
    models: [
      { id: 'microsoft/DialoGPT-medium', name: 'DialoGPT Medium', provider: 'Hugging Face', category: 'text', available: true },
      { id: 'facebook/blenderbot-400M-distill', name: 'BlenderBot 400M', provider: 'Hugging Face', category: 'text', available: true },
      { id: 'microsoft/CodeBERT-base', name: 'CodeBERT Base', provider: 'Hugging Face', category: 'code', available: true }
    ]
  },
  {
    enabled: connections.replicate?.apiKey,
    models: [
      { id: 'meta/llama-2-70b-chat', name: 'Llama 2 70B Chat', provider: 'Replicate', category: 'text', available: true },
      { id: 'stability-ai/stable-diffusion', name: 'Stable Diffusion', provider: 'Replicate', category: 'image', available: true },
      { id: 'replicate/musicgen', name: 'MusicGen', provider: 'Replicate', category: 'audio', available: true }
    ]
  },
  {
    enabled: connections.together?.apiKey,
    models: [
      { id: 'togethercomputer/llama-2-70b-chat', name: 'Llama 2 70B Chat', provider: 'Together AI', category: 'text', available: true },
      { id: 'togethercomputer/falcon-40b-instruct', name: 'Falcon 40B Instruct', provider: 'Together AI', category: 'text', available: true },
      { id: 'togethercomputer/RedPajama-INCITE-7B-Chat', name: 'RedPajama 7B Chat', provider: 'Together AI', category: 'text', available: true }
    ]
  },
  {
    enabled: connections.mistral?.apiKey,
    models: [
      { id: 'mistral-large-latest', name: 'Mistral Large', provider: 'Mistral AI', category: 'text', available: true },
      { id: 'mistral-medium-latest', name: 'Mistral Medium', provider: 'Mistral AI', category: 'text', available: true },
      { id: 'mistral-small-latest', name: 'Mistral Small', provider: 'Mistral AI', category: 'text', available: true }
    ]
  },
  {
    enabled: connections.cohere?.apiKey,
    models: [
      { id: 'command', name: 'Command', provider: 'Cohere', category: 'text', available: true },
      { id: 'command-light', name: 'Command Light', provider: 'Cohere', category: 'text', available: true },
      { id: 'summarize-xlarge', name: 'Summarize XLarge', provider: 'Cohere', category: 'text', available: true }
    ]
  },
  {
    // Self-hosted runtimes are keyed on a configured URL, not an API key.
    enabled: connections.vllm?.url,
    models: [
      { id: 'vllm-hosted-model', name: 'vLLM Hosted Model', provider: 'vLLM', category: 'text', available: true }
    ]
  },
  {
    enabled: connections.localai?.url,
    models: [
      { id: 'localai-model', name: 'LocalAI Model', provider: 'LocalAI', category: 'text', available: true }
    ]
  },
  {
    enabled: connections.stability?.apiKey,
    models: [
      { id: 'stable-diffusion-xl-1024-v1-0', name: 'SDXL 1.0', provider: 'Stability AI', category: 'image', available: true },
      { id: 'stable-diffusion-v1-6', name: 'Stable Diffusion 1.6', provider: 'Stability AI', category: 'image', available: true }
    ]
  },
  {
    enabled: connections.midjourney?.apiKey,
    models: [
      { id: 'midjourney-v6', name: 'Midjourney v6', provider: 'Midjourney', category: 'image', available: true }
    ]
  },
  {
    enabled: connections.runway?.apiKey,
    models: [
      { id: 'runway-gen3', name: 'Runway Gen-3', provider: 'Runway ML', category: 'video', available: true }
    ]
  }
];

// Append each enabled group in catalog order (same order as before).
for (const { enabled, models } of extraProviderCatalog) {
  if (enabled) {
    availableModels.push(...models);
  }
}
12801454 res . json ( { models : availableModels } ) ;
12811455 } catch ( error ) {
12821456 logger . error ( 'Error fetching available models:' , { errorMessage : error . message } ) ;
0 commit comments