Skip to content

Commit 8522bcb

Browse files
authored
Merge pull request #137 from attogram/fix-streaming-newline-bug
Fix incorrect newline handling in streaming functions
2 parents a1cb014 + 45553ae commit 8522bcb

File tree

6 files changed

+258
-62
lines changed

ollama_bash_lib.sh

Lines changed: 9 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -773,24 +773,17 @@ ollama_generate_stream() {
773773
# first thinking input received
774774
is_thinking=true
775775
printf '\n#### %b' "$thinking"
776-
else
776+
else
777777
# subsequent thinking input received
778778
printf '%b' "$thinking"
779779
fi
780780
fi
781-
781+
782782
response="$(jq '.response // empty' <<<"$line")"
783783
response=${response#\"} # strip first "
784784
response=${response%\"} # strip last "
785785
if [[ -n "$response" ]]; then
786-
if [[ "$is_responding" == 'false' ]]; then
787-
# first response input received
788-
is_responding=true
789-
printf '\n\n%b' "$response"
790-
else
791-
# subsequent response input received
792-
printf '%b' "$response"
793-
fi
786+
printf '%b' "$response"
794787
fi
795788
done
796789
rc=$? # exit status of the whole pipeline
@@ -1361,8 +1354,12 @@ EOF
13611354
if [[ "$OBL_THINKING" == 'on' ]]; then
13621355
printf '%s' "$(jq -r '.thinking // empty' <<<"$line")" >&2
13631356
fi
1364-
read -r -d '' content < <(jq -r '.message.content // empty' <<<"$line")
1365-
printf '%s' "$content"
1357+
content="$(jq '.message.content // empty' <<<"$line")"
1358+
content=${content#\"} # strip first "
1359+
content=${content%\"} # strip last "
1360+
if [[ -n "$content" && "$content" != "null" ]]; then
1361+
printf '%b' "$content"
1362+
fi
13661363
done
13671364
exit "${PIPESTATUS[0]}"
13681365
) 2> >( _ollama_thinking_stream )

tests/call_curl.bats

Lines changed: 19 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,18 @@
11
#!/usr/bin/env bats
22

33
source ./ollama_bash_lib.sh
4+
source tests/test_helper.bash
5+
6+
setup() {
7+
if [[ "$OLLAMA_TEST_MODE" != "mock" ]]; then
8+
if ! ollama_app_installed; then
9+
skip "Ollama is not installed"
10+
fi
11+
if ! ollama_api_ping; then
12+
skip "Ollama API is not reachable"
13+
fi
14+
fi
15+
}
416

517
@test "_call_curl: should return error for invalid method" {
618
run _call_curl "INVALID_METHOD" "/api/tags"
@@ -18,19 +30,23 @@ source ./ollama_bash_lib.sh
1830
}
1931

2032
@test "_call_curl: should make a successful GET request" {
21-
# This will make a real request to the running Ollama instance
2233
run _call_curl "GET" "/api/tags"
2334
[ "$status" -eq 0 ]
2435
_is_valid_json "$output"
2536
local is_valid_json_status=$?
2637
[ "$is_valid_json_status" -eq 0 ]
38+
if [[ "$OLLAMA_TEST_MODE" == "mock" ]]; then
39+
[[ "$output" == '{"models":[{"name":"mock-model:latest","modified_at":"2023-11-20T15:07:52.871123-08:00","size":123456789,"digest":"abcdef1234567890"}]}' ]]
40+
fi
2741
}
2842

2943
@test "_call_curl: should make a successful POST request" {
30-
# This will make a real request to the running Ollama instance
31-
run _call_curl "POST" "/api/show" '{"model": "phi3"}'
44+
run _call_curl "POST" "/api/show" '{"model": "mock-model:latest"}'
3245
[ "$status" -eq 0 ]
3346
_is_valid_json "$output"
3447
local is_valid_json_status=$?
3548
[ "$is_valid_json_status" -eq 0 ]
49+
if [[ "$OLLAMA_TEST_MODE" == "mock" ]]; then
50+
[[ "$output" == '{"modelfile":"FROM mock-model:latest\n","parameters":"stop [INST]\nstop [/INST]\nstop <<SYS>>\nstop <</SYS>>\n","template":"[INST] {{ .Prompt }} [/INST] "}' ]]
51+
fi
3652
}

tests/multiline_test.bats

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
#!/usr/bin/env bats
2+
3+
@test "multiline string assertion" {
4+
multiline_string="hello
5+
world"
6+
7+
run echo "$multiline_string"
8+
9+
[ "$status" -eq 0 ]
10+
[[ "$output" == "hello
11+
world" ]]
12+
}

tests/ollama_chat.bats

100644100755
Lines changed: 87 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,9 @@
11
#!/usr/bin/env bats
22

33
source ./ollama_bash_lib.sh
4+
if [ -n "$OLLAMA_TEST_MODE" ]; then
5+
source ./tests/test_helper.bash
6+
fi
47

58
setup() {
69
ollama_messages_clear
@@ -13,9 +16,9 @@ setup() {
1316
[ "$status" -eq 0 ]
1417
[ "$output" -eq 1 ]
1518

16-
run ollama_messages_last
19+
run ollama_messages_last_json
1720
[ "$status" -eq 0 ]
18-
[[ "$output" =~ "hello" ]]
21+
[[ $(echo "$output" | jq -r '.content') == "hello" ]]
1922

2023
ollama_messages_clear
2124

@@ -27,58 +30,103 @@ setup() {
2730
@test "ollama_chat: should have a conversation" {
2831
ollama_messages_add -r "user" -c "what is 1+1?"
2932

30-
local tmp_file
31-
tmp_file=$(mktemp)
32-
ollama_chat -m phi3 > "$tmp_file"
33-
local chat_status=$?
34-
local chat_output
35-
chat_output=$(cat "$tmp_file")
36-
rm "$tmp_file"
37-
38-
[ "$chat_status" -eq 0 ]
39-
[ -n "$chat_output" ]
40-
[[ "$chat_output" =~ "2" ]]
41-
42-
run ollama_messages_count
43-
[ "$status" -eq 0 ]
44-
[ "$output" -eq 2 ] # user message + assistant response
33+
if [[ "$OLLAMA_TEST_MODE" == "mock" ]]; then
34+
run ollama_chat -m "mock-model:latest"
35+
[ "$status" -eq 0 ]
36+
[[ "$output" == "This is a mock chat response." ]]
37+
else
38+
local tmp_file
39+
tmp_file=$(mktemp)
40+
ollama_chat -m phi3 > "$tmp_file"
41+
local chat_status=$?
42+
local chat_output
43+
chat_output=$(cat "$tmp_file")
44+
rm "$tmp_file"
45+
46+
[ "$chat_status" -eq 0 ]
47+
[ -n "$chat_output" ]
48+
[[ "$chat_output" =~ "2" ]]
49+
fi
4550
}
4651

47-
@test "ollama_chat_json: should have a conversation and return json" {
52+
@test "ollama_chat_json: should have a conversation (non-streaming)" {
4853
ollama_messages_add -r "user" -c "what is 1+1?"
4954
OBL_STREAM=0 # ensure we get a single json response back
5055

51-
ollama_chat_json -m phi3
52-
local chat_json_status=$?
53-
[ "$chat_json_status" -eq 0 ]
54-
55-
run ollama_messages_last
56-
[ "$status" -eq 0 ]
57-
[[ "$output" =~ "2" ]]
56+
if [[ "$OLLAMA_TEST_MODE" == "mock" ]]; then
57+
# This function modifies history but doesn't print to stdout
58+
ollama_chat_json -m "mock-model:latest"
59+
local status=$?
60+
[ "$status" -eq 0 ]
61+
62+
# Check that history was modified correctly
63+
run ollama_messages_count
64+
[ "$output" -eq 2 ]
65+
66+
run ollama_messages_last_json
67+
[ "$status" -eq 0 ]
68+
_is_valid_json "$output"
69+
[ $? -eq 0 ]
70+
[[ $(echo "$output" | jq -r '.content') == "This is a mock chat response." ]]
71+
else
72+
ollama_chat_json -m phi3
73+
local chat_json_status=$?
74+
[ "$chat_json_status" -eq 0 ]
75+
fi
5876
}
5977

78+
6079
@test "ollama_chat_stream: should have a streaming conversation" {
6180
ollama_messages_add -r "user" -c "what is 1+1?"
62-
run ollama_chat_stream -m phi3
63-
[ "$status" -eq 0 ]
64-
[ -n "$output" ]
65-
[[ "$output" =~ "2" ]]
81+
if [[ "$OLLAMA_TEST_MODE" == "mock" ]]; then
82+
run ollama_chat_stream -m "mock-model:latest"
83+
[ "$status" -eq 0 ]
84+
[[ "$output" == "This is a mock streaming chat response." ]]
85+
else
86+
run ollama_chat_stream -m phi3
87+
[ "$status" -eq 0 ]
88+
[ -n "$output" ]
89+
[[ "$output" =~ "2" ]]
90+
fi
6691
}
6792

6893
@test "ollama_chat_stream_json: should have a streaming conversation and return json" {
6994
ollama_messages_add -r "user" -c "what is 1+1?"
70-
run ollama_chat_stream_json -m phi3
71-
[ "$status" -eq 0 ]
72-
[ -n "$output" ]
73-
first_line=$(echo "$output" | head -n 1)
74-
_is_valid_json "$first_line"
75-
local is_valid_json_status=$?
76-
[ "$is_valid_json_status" -eq 0 ]
95+
if [[ "$OLLAMA_TEST_MODE" == "mock" ]]; then
96+
run ollama_chat_stream_json -m "mock-model:latest"
97+
[ "$status" -eq 0 ]
98+
[ -n "$output" ]
99+
first_line=$(echo "$output" | head -n 1)
100+
_is_valid_json "$first_line"
101+
local is_valid_json_status=$?
102+
[ "$is_valid_json_status" -eq 0 ]
103+
[[ $(echo "$first_line" | jq -r '.message.content') == "This is a mock streaming chat response." ]]
104+
else
105+
run ollama_chat_stream_json -m phi3
106+
[ "$status" -eq 0 ]
107+
[ -n "$output" ]
108+
first_line=$(echo "$output" | head -n 1)
109+
_is_valid_json "$first_line"
110+
local is_valid_json_status=$?
111+
[ "$is_valid_json_status" -eq 0 ]
112+
fi
77113
}
78114

79115
@test "ollama_chat_stream: should handle newlines correctly" {
80116
ollama_messages_add -r "user" -c "generate a list of three fruits, each on a new line."
81-
run ollama_chat_stream -m phi3
82-
[ "$status" -eq 0 ]
83-
[[ "$output" =~ .*\n.* ]]
117+
if [[ "$OLLAMA_TEST_MODE" == "mock" ]]; then
118+
run ollama_chat_stream -m "mock-model:latest"
119+
[ "$status" -eq 0 ]
120+
expected_output=$'Apple\nBanana\nCherry'
121+
echo "---Actual Output---"
122+
echo "$output" | cat -A
123+
echo "---Expected Output---"
124+
echo "$expected_output" | cat -A
125+
echo "-------------------"
126+
[[ "$output" == "$expected_output" ]]
127+
else
128+
run ollama_chat_stream -m phi3
129+
[ "$status" -eq 0 ]
130+
[[ "$output" =~ .*\n.* ]]
131+
fi
84132
}

tests/ollama_generate.bats

100644100755
Lines changed: 44 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,35 +1,71 @@
11
#!/usr/bin/env bats
22

33
source ./ollama_bash_lib.sh
4+
if [ -n "$OLLAMA_TEST_MODE" ]; then
5+
source ./tests/test_helper.bash
6+
fi
7+
8+
setup() {
9+
if [[ "$OLLAMA_TEST_MODE" != "mock" ]]; then
10+
if ! ollama_app_installed; then
11+
skip "Ollama is not installed"
12+
fi
13+
if ! ollama_api_ping; then
14+
skip "Ollama API is not reachable"
15+
fi
16+
fi
17+
}
418

519
@test "ollama_generate: should generate a response" {
6-
run ollama_generate -m phi3 -p "why is the sky blue?"
20+
run ollama_generate -m "mock-model:latest" -p "why is the sky blue?"
721
[ "$status" -eq 0 ]
8-
[ -n "$output" ]
22+
if [[ "$OLLAMA_TEST_MODE" == "mock" ]]; then
23+
[[ "$output" == "This is a mock response." ]]
24+
else
25+
[ -n "$output" ]
26+
fi
927
}
1028

1129
@test "ollama_generate_json: should generate a response in JSON format" {
12-
run ollama_generate_json -m phi3 -p "why is the sky blue?"
30+
OBL_STREAM=0
31+
run ollama_generate_json -m "mock-model:latest" -p "why is the sky blue?"
1332
[ "$status" -eq 0 ]
1433
_is_valid_json "$output"
1534
local is_valid_json_status=$?
1635
[ "$is_valid_json_status" -eq 0 ]
1736
}
1837

1938
@test "ollama_generate_stream: should handle newlines correctly" {
20-
run ollama_generate_stream -m phi3 -p "generate a list of three fruits, each on a new line."
39+
run ollama_generate_stream -m "mock-model:latest" -p "generate a list of three fruits, each on a new line."
2140
[ "$status" -eq 0 ]
22-
[[ "$output" =~ .*\n.* ]]
41+
if [[ "$OLLAMA_TEST_MODE" == "mock" ]]; then
42+
expected_output=$'Apple\nBanana\nCherry'
43+
echo "---Actual Output for test 3---"
44+
echo "$output" | cat -A
45+
echo "---Expected Output for test 3---"
46+
echo "$expected_output" | cat -A
47+
echo "--------------------------------"
48+
[[ "$output" == "$expected_output" ]]
49+
else
50+
[[ "$output" =~ .*\n.* ]]
51+
fi
2352
}
2453

2554
@test "ollama_generate_stream: should generate a streaming response" {
26-
run ollama_generate_stream -m phi3 -p "why is the sky blue?"
55+
run ollama_generate_stream -m "mock-model:latest" -p "why is the sky blue?"
2756
[ "$status" -eq 0 ]
28-
[ -n "$output" ]
57+
if [[ "$OLLAMA_TEST_MODE" == "mock" ]]; then
58+
echo "---Actual Output for test 4---"
59+
echo "$output" | cat -A
60+
echo "--------------------------------"
61+
[[ "$output" == "This is a mock streaming response." ]]
62+
else
63+
[ -n "$output" ]
64+
fi
2965
}
3066

3167
@test "ollama_generate_stream_json: should generate a streaming response in JSON format" {
32-
run ollama_generate_stream_json -m phi3 -p "why is the sky blue?"
68+
run ollama_generate_stream_json -m "mock-model:latest" -p "why is the sky blue?"
3369
[ "$status" -eq 0 ]
3470
# In a stream, we get multiple JSON objects. We can check the first one.
3571
first_line=$(echo "$output" | head -n 1)

0 commit comments

Comments (0)