@@ -265,8 +265,8 @@ void convert_tensor(void* src,
         } else {
             auto qtype = ggml_get_type_traits(src_type);
             if (qtype->to_float == nullptr) {
-                throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available",
-                                                ggml_type_name(src_type)));
+                throw std::runtime_error(sd_format("type %s unsupported for integer quantization: no dequantization available",
+                                                   ggml_type_name(src_type)));
             }
             qtype->to_float(src, (float*)dst, n);
         }
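This hunk swaps `format` for `sd_format` (the second hunk below makes the same change); presumably the same printf-style string helper, renamed with an `sd_` prefix to avoid symbol collisions when the library is embedded alongside other ggml-based projects. A minimal sketch of what such a helper typically looks like (the real definition lives elsewhere in the codebase and may differ):

```cpp
#include <cstdarg>
#include <cstdio>
#include <string>
#include <vector>

// Sketch only: printf-style formatting into a std::string, the usual shape
// of a helper like sd_format. Not the actual definition from this repo.
static std::string sd_format(const char* fmt, ...) {
    va_list ap, ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap);
    int size = vsnprintf(nullptr, 0, fmt, ap);  // measure required length
    va_end(ap);
    if (size < 0) {
        va_end(ap2);
        return std::string();
    }
    std::vector<char> buf(size + 1);
    vsnprintf(buf.data(), buf.size(), fmt, ap2);  // format into the buffer
    va_end(ap2);
    return std::string(buf.data(), size);
}
```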
@@ -275,8 +275,8 @@ void convert_tensor(void* src,
         // src_type is quantized => dst_type == GGML_TYPE_F16 or dst_type is quantized
         auto qtype = ggml_get_type_traits(src_type);
         if (qtype->to_float == nullptr) {
-            throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available",
-                                            ggml_type_name(src_type)));
+            throw std::runtime_error(sd_format("type %s unsupported for integer quantization: no dequantization available",
+                                               ggml_type_name(src_type)));
         }
         std::vector<char> buf;
         buf.resize(sizeof(float) * n);
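The trailing context here hints at the conversion strategy: a temporary buffer is sized for `n` floats so that quantized or f16 source data can be dequantized to f32 and then re-encoded into the destination type. A rough sketch of that round trip, assuming `nrows` and `n_per_row` (with `n = nrows * n_per_row`) are in scope and that ggml's `ggml_quantize_chunk` is used for the re-encode; a null importance matrix works for the classic quant types but not the iq-style ones:

```cpp
// Sketch only: quantized src_type -> quantized dst_type, staged through f32.
// This mirrors the pattern the buffer above is allocated for; the actual
// function body in the repo may differ.
std::vector<char> buf(sizeof(float) * n);
float* f32 = (float*)buf.data();
qtype->to_float(src, f32, n);                                 // dequantize src -> f32
ggml_quantize_chunk(dst_type, f32, dst, 0, nrows, n_per_row,  // requantize f32 -> dst
                    nullptr);                                 // no importance matrix
```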
@@ -1355,7 +1355,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
     std::atomic<int64_t> copy_to_backend_time_ms(0);
     std::atomic<int64_t> convert_time_ms(0);
 
-    int num_threads_to_use = n_threads_p > 0 ? n_threads_p : get_num_physical_cores();
+    int num_threads_to_use = n_threads_p > 0 ? n_threads_p : sd_get_num_physical_cores();
     LOG_DEBUG("using %d threads for model loading", num_threads_to_use);
 
     int64_t start_time = ggml_time_ms();
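The thread-count fallback gets the same `sd_` prefix treatment. Model loading is largely I/O- and memory-bound, so defaulting to physical rather than logical cores typically avoids oversubscribing SMT siblings. If `sd_get_num_physical_cores` follows the common pattern for such helpers, it approximates the physical count from the logical one; a conservative sketch, not the real implementation:

```cpp
#include <algorithm>
#include <thread>

// Sketch only: approximate physical cores as half the hardware threads
// (assumes 2-way SMT), clamped to at least 1. The real helper may instead
// parse /proc/cpuinfo or query platform-specific APIs.
static int sd_get_num_physical_cores() {
    unsigned n = std::thread::hardware_concurrency();  // logical threads; 0 if unknown
    return (int)std::max(1u, n / 2);
}
```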