// cli-app-commonjs-fixed.cjs
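//
// Prerequisites (a sketch of the expected setup, inferred from the constants
// and error messages below; not verified against this repo):
//   export PINECONE_API_KEY=<your key>   # read from the environment, never hardcoded
//   ollama pull deepseek-r1:8b           # local model used to answer questions
//   node cli-app-commonjs-fixed.cjs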
const fs = require('fs');
const readline = require('readline');
const { Pinecone } = require('@pinecone-database/pinecone');

// Use the built-in fetch on Node 18+, falling back to node-fetch on older
// runtimes. Hoisted here so it is defined once instead of inside each method.
const fetch = globalThis.fetch
  ? globalThis.fetch.bind(globalThis)
  : (...args) => import('node-fetch').then(({ default: f }) => f(...args));
// Configuration
// Read the API key from the environment rather than committing a secret to source.
const PINECONE_API_KEY = process.env.PINECONE_API_KEY;
const PINECONE_INDEX = 'code-embedddings'; // Note the three d's, as per the test files
const OLLAMA_URL = 'http://localhost:11434/api/generate';
const OLLAMA_MODEL = 'deepseek-r1:8b';
// Initialize the CLI interface
const rl = readline.createInterface({
  input: process.stdin,
  output: process.stdout
});
class CodeAIAssistant {
  constructor() {
    this.pineconeClient = null;
    this.pineconeIndex = null;
    this.isInitialized = false;
    // The embedding model is deliberately not loaded here, to avoid dependency
    // issues; querySimilarCode uses a mock embedding instead.
  }

  async initialize() {
    console.log('Initializing Code AI Assistant...');
    try {
      // Initialize Pinecone
      console.log('Connecting to Pinecone...');
      this.pineconeClient = new Pinecone({
        apiKey: PINECONE_API_KEY,
      });

      // Get index
      try {
        const indexes = await this.pineconeClient.listIndexes();
        console.log('Available indexes:', JSON.stringify(indexes, null, 2));
        const indexExists = indexes.indexes?.some(idx => idx.name === PINECONE_INDEX);
        if (!indexExists) {
          console.log(`Warning: Pinecone index '${PINECONE_INDEX}' not found. Please create it in the Pinecone dashboard with dimension 768 (CodeT5).`);
          console.log('Will attempt to use the index anyway');
        } else {
          console.log(`Connected to Pinecone index '${PINECONE_INDEX}'`);
        }
        this.pineconeIndex = this.pineconeClient.index(PINECONE_INDEX);
        const stats = await this.pineconeIndex.describeIndexStats();
        console.log('Index statistics:', JSON.stringify(stats, null, 2));
        console.log(`Total vectors in index: ${stats.totalRecordCount}`);
      } catch (error) {
        console.error(`Error with Pinecone index: ${error.message}`);
        throw error;
      }

      this.isInitialized = true;
      console.log('Initialization complete!');
    } catch (error) {
      console.error('Failed to initialize:', error);
      throw error;
    }
  }
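
  // --- Sketch: how vectors are expected to get into the index ---------------
  // Nothing in this CLI writes to Pinecone; the query path in querySimilarCode
  // below assumes each record carries filePath/startLine/endLine/content
  // metadata. This hypothetical helper (not part of the original app) shows one
  // way an indexer could upsert a chunk with that schema.
  async upsertCodeChunk(id, embedding, filePath, startLine, endLine, content) {
    // `embedding` must match the index dimension (768 for CodeT5).
    await this.pineconeIndex.upsert([
      {
        id,
        values: embedding,
        metadata: { filePath, startLine, endLine, content },
      },
    ]);
  }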
  async testOllama() {
    try {
      console.log('Testing Ollama connection...');
      const response = await fetch(OLLAMA_URL, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          model: OLLAMA_MODEL,
          prompt: 'Say hello and introduce yourself in one sentence',
          stream: false
        }),
      });

      if (!response.ok) {
        throw new Error(`HTTP error! Status: ${response.status}`);
      }

      const data = await response.json();
      console.log('Ollama response:', data.response);
      return true;
    } catch (error) {
      console.error('Failed to connect to Ollama:', error);
      console.log('Make sure Ollama is running and the deepseek-r1:8b model is available');
      console.log('Install Ollama from https://ollama.ai/download');
      console.log('Then run: ollama pull deepseek-r1:8b');
      return false;
    }
  }
  async querySimilarCode(code, question) {
    if (!this.isInitialized) {
      throw new Error('AI Assistant not initialized');
    }
    try {
      // Since the embedding model is not loaded, use a mock embedding. In a
      // real implementation you would embed the code with CodeT5 (or another
      // 768-dimension model); see the getOllamaEmbedding sketch below.
      console.log('Generating mock embedding for the query code...');

      // A deterministic mock embedding derived from the code length. This is
      // just for testing - the matches will not be semantically meaningful.
      const mockEmbedding = Array(768).fill(0).map((_, i) =>
        Math.sin(i + (code.length % 10)) // simple deterministic function
      );

      // Query similar code
      console.log('Querying similar code from Pinecone...');
      const queryResult = await this.pineconeIndex.query({
        vector: mockEmbedding,
        topK: 5,
        includeMetadata: true
      });
      console.log('Query result:', JSON.stringify(queryResult, null, 2));

      // Prepare context from matched chunks
      let context = '';
      if (queryResult.matches && queryResult.matches.length > 0) {
        context = queryResult.matches.map(match => {
          const metadata = match.metadata;
          return `From ${metadata.filePath} (lines ${metadata.startLine}-${metadata.endLine}):\n${metadata.content}`;
        }).join('\n\n');
        console.log(`Found ${queryResult.matches.length} similar code segments`);
      } else {
        context = 'No similar code found in the indexed files.';
        console.log('No similar code found');
      }

      // Call Ollama with the context and question
      console.log('Asking Ollama for explanation...');
      const response = await fetch(OLLAMA_URL, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          model: OLLAMA_MODEL,
          prompt: `You are a programming assistant. Based on this context:\n\n${context}\n\nPlease answer: ${question}`,
          stream: false
        }),
      });

      if (!response.ok) {
        throw new Error(`HTTP error! Status: ${response.status}`);
      }

      const data = await response.json();
      console.log('\n------ AI Response ------');
      console.log(data.response);
      console.log('-------------------------\n');
      return data.response;
    } catch (error) {
      console.error('Error querying similar code:', error);
      throw error;
    }
  }
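
  // --- Sketch: real embeddings instead of the mock ---------------------------
  // A hedged alternative to the mock embedding above, not part of the original
  // app: Ollama exposes an /api/embeddings endpoint that returns
  // { embedding: number[] } for a given model and prompt. The default model
  // name here is an assumption; nomic-embed-text happens to produce
  // 768-dimension vectors matching this index, but its vector space differs
  // from CodeT5's, so matches against CodeT5-indexed vectors would not be
  // meaningful. Treat this as a starting point only.
  async getOllamaEmbedding(text, model = 'nomic-embed-text') {
    const response = await fetch('http://localhost:11434/api/embeddings', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ model, prompt: text }),
    });
    if (!response.ok) {
      throw new Error(`HTTP error! Status: ${response.status}`);
    }
    const data = await response.json();
    return data.embedding;
  }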
}
// Main function to run the CLI
async function main() {
  const assistant = new CodeAIAssistant();
  try {
    await assistant.initialize();
    // Pass the initialized assistant down so the menu can reuse it instead of
    // constructing and re-initializing a new one for every action.
    await showMenu(assistant);
  } catch (error) {
    console.error('Application failed:', error);
    rl.close();
    process.exit(1);
  }
}
async function showMenu(assistant) {
  console.log('\n------ Code AI Assistant CLI (CommonJS Fixed) ------');
  console.log('1. Test Pinecone Connection (Already Tested)');
  console.log('2. Test Ollama Connection');
  console.log('3. Query Similar Code (Demo Mode)');
  console.log('4. Exit');
  console.log('----------------------------------------------\n');

  rl.question('Choose an option (1-4): ', async (answer) => {
    switch (answer) {
      case '1':
        // Already tested during initialization
        console.log('Pinecone connection already tested during initialization');
        await showMenu(assistant);
        break;
      case '2':
        await assistant.testOllama();
        await showMenu(assistant);
        break;
      case '3':
        rl.question('Enter path to the file with code you want to find similar code for: ', async (filePath) => {
          try {
            if (!fs.existsSync(filePath)) {
              throw new Error(`File not found: ${filePath}`);
            }
            const code = fs.readFileSync(filePath, 'utf8');
            rl.question('Enter your question about this code: ', async (question) => {
              try {
                // The assistant was initialized in main(), so query directly
                await assistant.querySimilarCode(code, question);
                await showMenu(assistant);
              } catch (error) {
                console.error('Error querying similar code:', error);
                await showMenu(assistant);
              }
            });
          } catch (error) {
            console.error('Error reading file:', error);
            await showMenu(assistant);
          }
        });
        break;
      case '4':
        console.log('Goodbye!');
        rl.close();
        process.exit(0);
        break;
      default:
        console.log('Invalid option. Please try again.');
        await showMenu(assistant);
        break;
    }
  });
}
// Run the application
main().catch(error => {
  console.error('Unhandled error:', error);
  process.exit(1);
});