Skip to content

Commit 90f5e0e

Browse files
committed
Fix DeepResearch errors: extend Ollama generation timeouts to 3 minutes, use the correct `num_predict` option (instead of `max_tokens`), and handle both string and object response formats
1 parent 64f1915 commit 90f5e0e

File tree

2 files changed

+30
-13
lines changed

2 files changed

+30
-13
lines changed

lib/AIAssistant/AIAssistant-Backend.js

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1142,9 +1142,9 @@ When user asks for something, generate creative, compact p5.js code that creates
11421142
// Console log the formatted prompt for debugging
11431143
console.log('🦙 Ollama Query (Creative):', chatPrompt);
11441144

1145-
// Ollama API call for creative coding with timeout
1146-
const controller = new AbortController();
1147-
const timeoutId = setTimeout(() => controller.abort(), 30000); // 30 second timeout for generation
1145+
// Ollama API call for creative coding with timeout
1146+
const controller = new AbortController();
1147+
const timeoutId = setTimeout(() => controller.abort(), 180000); // 3 minute timeout for generation
11481148

11491149
const response = await fetch(`${this.aiSession.baseURL}/api/generate`, {
11501150
method: 'POST',
@@ -1160,7 +1160,7 @@ When user asks for something, generate creative, compact p5.js code that creates
11601160
options: {
11611161
temperature: 0.7,
11621162
top_p: 0.9,
1163-
max_tokens: 1200,
1163+
num_predict: 1200,
11641164
stop: ['<|im_end|>']
11651165
}
11661166
})
@@ -1214,7 +1214,7 @@ When user asks for something, generate creative, compact p5.js code that creates
12141214
} catch (error) {
12151215
console.error('Ollama generation error:', error);
12161216
if (error.name === 'AbortError') {
1217-
throw new Error('Ollama generation timeout (>30s). The model may be too slow or overloaded. Try using a smaller model or check system resources.');
1217+
throw new Error('Ollama generation timeout (>3 minutes). The model may be too slow or overloaded. Try using a smaller model or check system resources.');
12181218
} else {
12191219
throw new Error(`Ollama generation failed: ${error.message}`);
12201220
}
@@ -1400,9 +1400,9 @@ ${userMessage}<|im_end|>
14001400
<|im_start|>assistant
14011401
`;
14021402

1403-
// Add timeout for generation
1404-
const controller = new AbortController();
1405-
const timeoutId = setTimeout(() => controller.abort(), 60000); // 1 minute timeout
1403+
// Add timeout for generation
1404+
const controller = new AbortController();
1405+
const timeoutId = setTimeout(() => controller.abort(), 180000); // 3 minute timeout
14061406

14071407
const response = await fetch(`${this.aiSession.baseURL}/api/generate`, {
14081408
method: 'POST',
@@ -1417,7 +1417,7 @@ ${userMessage}<|im_end|>
14171417
options: {
14181418
temperature: 0.7,
14191419
top_p: 0.9,
1420-
max_tokens: 2000,
1420+
num_predict: 2000,
14211421
stop: ['<|im_end|>']
14221422
}
14231423
})
@@ -1441,7 +1441,7 @@ ${userMessage}<|im_end|>
14411441
} catch (error) {
14421442
console.error('General content generation error:', error);
14431443
if (error.name === 'AbortError') {
1444-
throw new Error('Content generation timeout (>1 minute). The model may be too slow or overloaded. Try using a smaller/faster model like llama3.2:3b or check system resources.');
1444+
throw new Error('Content generation timeout (>3 minutes). The model may be too slow or overloaded. Try using a smaller/faster model like llama3.2:3b or check system resources.');
14451445
} else {
14461446
throw error;
14471447
}

lib/Pages/DeepResearch/deepresearch.js

Lines changed: 20 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -673,14 +673,30 @@ class DeepResearchApp {
673673
// Generate research using AI
674674
const response = await this.aiAssistant.generateContent(prompt, 'research');
675675

676-
if (response && response.content) {
677-
this.researchResults.research = response.content;
676+
console.log('🔍 Generated response:', {
677+
type: typeof response,
678+
hasContent: !!response?.content,
679+
responseLength: typeof response === 'string' ? response.length : 'N/A',
680+
preview: typeof response === 'string' ? response.substring(0, 100) + '...' : response
681+
});
682+
683+
// Handle both string response and object response formats
684+
let content = '';
685+
if (typeof response === 'string' && response.trim()) {
686+
content = response.trim();
687+
} else if (response && response.content) {
688+
content = response.content;
689+
}
690+
691+
if (content) {
692+
this.researchResults.research = content;
678693
this.researchResults.sources = this.generateSources(hasDocuments);
679694
this.researchResults.notes = this.generateNotes(hasDocuments);
680695
this.researchResults.timestamp = new Date().toISOString();
681696
this.researchResults.hasDocuments = hasDocuments;
682697
this.researchResults.documentCount = hasDocuments ? this.vectorStore.collections.documents.size : 0;
683698

699+
console.log('✅ Research results stored, calling renderOutput()');
684700
this.renderOutput();
685701
this.saveToStorage();
686702

@@ -691,7 +707,8 @@ class DeepResearchApp {
691707
this.updateStatus('✅ Research generated successfully!');
692708
}
693709
} else {
694-
throw new Error('No content generated');
710+
console.error('❌ No valid content found in response:', response);
711+
throw new Error('No content generated - response was empty or invalid');
695712
}
696713
} catch (error) {
697714
console.error('Research generation failed:', error);

0 commit comments

Comments (0)