 // Note that this is static. We will use these options for other generations.
 static ollama::options options;
 
+static std::string test_model = "llama3:8b", image_test_model = "llava";
+
 TEST_SUITE("Ollama Tests") {
 
     TEST_CASE("Initialize Options") {
@@ -52,19 +54,19 @@ TEST_SUITE("Ollama Tests") {
 
     TEST_CASE("Load Model") {
 
-        CHECK( ollama::load_model("llama3:8b") );
+        CHECK( ollama::load_model(test_model) );
     }
 
     TEST_CASE("Pull, Copy, and Delete Models") {
 
         // Pull a model by specifying a model name.
-        CHECK( ollama::pull_model("llama3:8b") == true );
+        CHECK( ollama::pull_model(test_model) == true );
 
         // Copy a model by specifying a source model and destination model name.
-        CHECK( ollama::copy_model("llama3:8b", "llama3_copy") == true );
+        CHECK( ollama::copy_model(test_model, test_model + "_copy") == true );
 
         // Delete a model by specifying a model name.
-        CHECK( ollama::delete_model("llama3_copy") == true );
+        CHECK( ollama::delete_model(test_model + "_copy") == true );
     }
 
     TEST_CASE("Model Info") {
@@ -81,7 +83,7 @@ TEST_SUITE("Ollama Tests") {
         // List the models available locally in the ollama server
         std::vector<std::string> models = ollama::list_models();
 
-        bool contains_model = ( std::find(models.begin(), models.end(), "llama3:8b") != models.end() );
+        bool contains_model = ( std::find(models.begin(), models.end(), test_model) != models.end() );
 
         CHECK( contains_model );
     }
@@ -101,12 +103,9 @@ TEST_SUITE("Ollama Tests") {
 
     TEST_CASE("Basic Generation") {
 
-        ollama::response response = ollama::generate("llama3:8b", "Why is the sky blue?", options);
-        //std::cout << response << std::endl;
-
-        std::string expected_response = "What a great question!\n\nThe sky appears blue because of a phenomenon called Rayleigh scattering,";
+        ollama::response response = ollama::generate(test_model, "Why is the sky blue?", options);
 
-        CHECK( response.as_simple_string() == expected_response );
+        CHECK( response.as_json().contains("response") == true );
     }
 
 
@@ -124,35 +123,34 @@ TEST_SUITE("Ollama Tests") {
     TEST_CASE("Streaming Generation") {
 
         std::function<void(const ollama::response&)> response_callback = on_receive_response;
-        ollama::generate("llama3:8b", "Why is the sky blue?", response_callback, options);
+        ollama::generate(test_model, "Why is the sky blue?", response_callback, options);
 
         std::string expected_response = "What a great question!\n\nThe sky appears blue because of a phenomenon called Rayleigh scattering,";
 
-        CHECK( streamed_response == expected_response );
+        CHECK( streamed_response != "" );
     }
 
     TEST_CASE("Non-Singleton Generation") {
 
         Ollama my_ollama_server("http://localhost:11434");
 
         // You can use all of the same functions from this instanced version of the class.
-        ollama::response response = my_ollama_server.generate("llama3:8b", "Why is the sky blue?", options);
-        //std::cout << response << std::endl;
+        ollama::response response = my_ollama_server.generate(test_model, "Why is the sky blue?", options);
 
         std::string expected_response = "What a great question!\n\nThe sky appears blue because of a phenomenon called Rayleigh scattering,";
 
-        CHECK( response.as_simple_string() == expected_response );
+        CHECK( response.as_json().contains("response") == true );
     }
 
     TEST_CASE("Single-Message Chat") {
 
         ollama::message message("user", "Why is the sky blue?");
 
-        ollama::response response = ollama::chat("llama3:8b", message, options);
+        ollama::response response = ollama::chat(test_model, message, options);
 
         std::string expected_response = "What a great question!\n\nThe sky appears blue because of a phenomenon called Rayleigh scattering,";
 
-        CHECK( response.as_simple_string() != "" );
+        CHECK( response.as_json().contains("message") == true );
     }
 
     TEST_CASE("Multi-Message Chat") {
@@ -163,11 +161,11 @@ TEST_SUITE("Ollama Tests") {
 
         ollama::messages messages = {message1, message2, message3};
 
-        ollama::response response = ollama::chat("llama3:8b", messages, options);
+        ollama::response response = ollama::chat(test_model, messages, options);
 
         std::string expected_response = "";
 
-        CHECK( response.as_simple_string() != "" );
+        CHECK( response.as_json().contains("message") == true );
     }
 
     TEST_CASE("Chat with Streaming Response") {
@@ -182,7 +180,7 @@ TEST_SUITE("Ollama Tests") {
 
         ollama::message message("user", "Why is the sky blue?");
 
-        ollama::chat("llama3:8b", message, response_callback, options);
+        ollama::chat(test_model, message, response_callback, options);
 
         CHECK( streamed_response != "" );
     }
@@ -195,12 +193,9 @@ TEST_SUITE("Ollama Tests") {
 
         ollama::image image = ollama::image::from_file("llama.jpg");
 
-        //ollama::images images={image};
-
-        ollama::response response = ollama::generate("llava", "What do you see in this image?", options, image);
-        std::string expected_response = "The image features a large, fluffy white llama";
+        ollama::response response = ollama::generate(image_test_model, "What do you see in this image?", options, image);
 
-        CHECK( response.as_simple_string() == expected_response );
+        CHECK( response.as_json().contains("response") == true );
     }
 
     TEST_CASE("Generation with Multiple Images") {
@@ -214,10 +209,10 @@ TEST_SUITE("Ollama Tests") {
 
         ollama::images images = {image, base64_image};
 
-        ollama::response response = ollama::generate("llava", "What do you see in this image?", options, images);
+        ollama::response response = ollama::generate(image_test_model, "What do you see in this image?", options, images);
         std::string expected_response = "The image features a large, fluffy white and gray llama";
 
-        CHECK( response.as_simple_string() == expected_response );
+        CHECK( response.as_json().contains("response") == true );
     }
 
     TEST_CASE("Chat with Image") {
@@ -230,18 +225,18 @@ TEST_SUITE("Ollama Tests") {
 
         // We can optionally include images with each message. Vision-enabled models will be able to utilize these.
         ollama::message message_with_image("user", "What do you see in this image?", image);
-        ollama::response response = ollama::chat("llava", message_with_image, options);
+        ollama::response response = ollama::chat(image_test_model, message_with_image, options);
 
         std::string expected_response = "The image features a large, fluffy white llama";
 
-        CHECK( response.as_simple_string() != "" );
+        CHECK( response.as_json().contains("message") == true );
     }
 
     TEST_CASE("Embedding Generation") {
 
         options["num_predict"] = 18;
 
-        ollama::response response = ollama::generate_embeddings("llama3:8b", "Why is the sky blue?");
+        ollama::response response = ollama::generate_embeddings(test_model, "Why is the sky blue?");
 
         CHECK( response.as_json().contains("embeddings") == true );
     }
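
Taken together, these hunks replace hard-coded model names with the shared test_model / image_test_model variables and swap brittle exact-match assertions for structural checks on the returned JSON. As an illustrative sketch (not part of the commit), the pattern boils down to the following self-contained test; the file name is hypothetical, and it assumes ollama.hpp and the doctest single header are on the include path with an Ollama server listening on the default port:

// robust_generation_test.cpp -- illustrative sketch only, not part of this commit.
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "doctest.h"
#include "ollama.hpp"

#include <string>

// Parameterizing the model name keeps the choice of model in one place
// when the whole suite needs to target a different model.
static std::string test_model = "llama3:8b";

TEST_CASE("Generation is validated structurally") {
    ollama::response response = ollama::generate(test_model, "Why is the sky blue?");

    // Model output is nondeterministic, so assert on the shape of the reply
    // rather than its exact text: a successful generation should carry a
    // "response" field in the JSON payload and a non-empty simple string.
    CHECK( response.as_json().contains("response") );
    CHECK( response.as_simple_string() != "" );
}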