Skip to content

Commit d2362be

Browse files
committed
Add a test script for Ollama; install latest Ollama and test it with llama3.2; verify and fix error handling; regression-test with Google gemini-2.5-pro.
1 parent c5ada1e commit d2362be

File tree

2 files changed

+45
-11
lines changed

2 files changed

+45
-11
lines changed

pi2llm-main.js

Lines changed: 29 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -107,29 +107,45 @@ LLMCommunicator.prototype.sendMessage = function (payload, onComplete, onError)
107107
// 2. if we have a responseObject but failed to find "choices" in JSON,
108108
// then try to handle as Cloudflare AI Gateway's simpler format: {"result":{"response":"foo bar baz" ...
109109
messageContent = responseObject.result.response;
110-
} else if (responseObject && transfer.responseCode > 200) { // response contains an error message
111-
let errorObject = responseObject[0];
112-
var error = errorObject.error;
113-
var errorMsg = error.message
114-
var errorCode = error.code;
115-
var errorStatus = error.status;
116-
117-
messageContent = "AI Error: " + errorMsg + ", code: " + errorCode + ", status: " + errorStatus;
110+
} else {
111+
// Now we know it's not a success response, so we parse for known error structures.
112+
let errorMessage = "An unknown error occurred.";
113+
114+
// ** THE FIX IS HERE: Check for Ollama-style errors first **
115+
// This is for errors like: {"error":{"message":"..."}}
116+
if (responseObject.error && typeof responseObject.error === 'object' && responseObject.error.message) {
117+
errorMessage = responseObject.error.message;
118+
// ** THE FIX IS HERE: Then, check for OpenAI-style array errors **
119+
// This is for errors like: [{"error":{"message":"..."}}]
120+
} else if (Array.isArray(responseObject) && responseObject.length > 0 && responseObject[0].error) {
121+
errorMessage = responseObject[0].error.message;
122+
// ** THE FIX IS HERE: And for simpler Ollama errors **
123+
// This is for errors like: {"error":"model '...' is not loaded"}
124+
} else if (responseObject.error && typeof responseObject.error === 'string') {
125+
errorMessage = responseObject.error;
126+
}
127+
128+
messageContent = "AI Error: " + errorMessage;
129+
console.criticalln("Parsed error from LLM backend: ", errorMessage);
118130
}
119131
if (onComplete) {
120132
onComplete(messageContent);
121133
}
122134
} catch (e) {
123135
let errorMsg = "Error parsing LLM JSON response: " + e.message;
124-
messageContent = errorMsg;
125136
console.criticalln(errorMsg);
126-
137+
console.writeln("Raw Response String: ", responseString); // Log the raw string on parse failure
127138
if (onError) {
128139
onError(errorMsg);
129140
}
130141
}
131142
} else {
132143
let errorMsg = "NetworkTransfer POST failed. HTTP Status: " + transfer.responseCode + "\nError Info: " + transfer.errorInformation;
144+
// Attempt to read the response body even on network failure, as it often contains useful error details.
145+
let responseString = transfer.response.utf8ToString();
146+
if (responseString) {
147+
errorMsg += "\nResponse Body: " + responseString;
148+
}
133149
console.criticalln(errorMsg);
134150
if (onError) {
135151
onError(errorMsg);
@@ -139,12 +155,14 @@ LLMCommunicator.prototype.sendMessage = function (payload, onComplete, onError)
139155
transfer.closeConnection();
140156
};
141157

142-
// common api response errors:
158+
// Google api response errors:
143159
// 1. wrong / non-existent model name:
144160
// [{"error":{"code":404,"message":"models/foo is not found for API version v1main, or is not supported for generateContent. Call ListModels to see the list of available models and their supported methods.","status":"NOT_FOUND"}}]
145161
// 2. no model name supplied:
146162
// [{"error":{"code":400,"message":"model is not specified","status":"INVALID_ARGUMENT"}}]
147163

164+
// ollama error example:
165+
// {"error":{"message":"model \"llama3.2\" not found, try pulling it first","type":"api_error","param":null,"code":null}}
148166

149167
function pi2llmMain() {
150168
console.show();

tests/test-ollama.sh

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
#!/bin/bash
2+
# install ollama per instructions.
3+
# run:
4+
# pull llama3.2
5+
# To pull down a sample model to test.
6+
7+
echo "Listing models deployed on ollama .."
8+
curl http://localhost:11434/v1/models
9+
10+
echo "Calling ollama non-streaming /v1/chat/completions .."
11+
curl -v http://localhost:11434/v1/chat/completions -d '{
12+
"model": "llama3.2",
13+
"messages": [
14+
{ "role": "user", "content": "why is the sky blue?" }
15+
]
16+
}'

0 commit comments

Comments
 (0)