 #endif
 #include <cstring>

-#define UNUSED GGML_UNUSED
-
-#define GGML_DEBUG 0
-#if (GGML_DEBUG >= 1)
-#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
-#else
-#define GGML_PRINT_DEBUG(...)
-#endif
-
 #ifdef _WIN32
 typedef SOCKET sockfd_t;
 using ssize_t = __int64;
@@ -411,7 +402,7 @@ static std::shared_ptr<socket_t> get_socket(const std::string & endpoint) {
         initialized = true;
     }
 #else
-    UNUSED(initialized);
+    GGML_UNUSED(initialized);
 #endif
     auto sock = socket_connect(host.c_str(), port);
     if (sock == nullptr) {
@@ -640,7 +631,7 @@ static void ggml_backend_rpc_free(ggml_backend_t backend) {
 }

 static void ggml_backend_rpc_synchronize(ggml_backend_t backend) {
-    UNUSED(backend);
+    GGML_UNUSED(backend);
     // this is no-op because we don't have any async operations
 }

@@ -850,7 +841,7 @@ void rpc_server::alloc_buffer(const rpc_msg_alloc_buffer_req & request, rpc_msg_
         GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> remote_ptr: %" PRIx64 ", remote_size: %" PRIu64 "\n", __func__, request.size, response.remote_ptr, response.remote_size);
         buffers.insert(buffer);
     } else {
-        GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> failed\n", __func__, request.size);
+        GGML_LOG_ERROR("[%s] size: %" PRIu64 " -> failed\n", __func__, request.size);
     }
 }

@@ -872,7 +863,7 @@ bool rpc_server::buffer_get_base(const rpc_msg_buffer_get_base_req & request, rp
     GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, request.remote_ptr);
     ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(request.remote_ptr);
     if (buffers.find(buffer) == buffers.end()) {
-        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
+        GGML_LOG_ERROR("[%s] buffer not found\n", __func__);
         return false;
     }
     void * base = ggml_backend_buffer_get_base(buffer);
@@ -884,7 +875,7 @@ bool rpc_server::free_buffer(const rpc_msg_free_buffer_req & request) {
     GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, request.remote_ptr);
     ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(request.remote_ptr);
     if (buffers.find(buffer) == buffers.end()) {
-        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
+        GGML_LOG_ERROR("[%s] buffer not found\n", __func__);
         return false;
     }
     ggml_backend_buffer_free(buffer);
@@ -896,7 +887,7 @@ bool rpc_server::buffer_clear(const rpc_msg_buffer_clear_req & request) {
     GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 ", value: %u\n", __func__, request.remote_ptr, request.value);
     ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(request.remote_ptr);
     if (buffers.find(buffer) == buffers.end()) {
-        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
+        GGML_LOG_ERROR("[%s] buffer not found\n", __func__);
         return false;
     }
     ggml_backend_buffer_clear(buffer, request.value);
@@ -952,7 +943,7 @@ bool rpc_server::set_tensor(const std::vector<uint8_t> & input) {
     struct ggml_context * ctx = ggml_init(params);
     ggml_tensor * tensor = deserialize_tensor(ctx, in_tensor);
     if (tensor == nullptr) {
-        GGML_PRINT_DEBUG("[%s] error deserializing tensor\n", __func__);
+        GGML_LOG_ERROR("[%s] error deserializing tensor\n", __func__);
         ggml_free(ctx);
         return false;
     }
@@ -1017,7 +1008,7 @@ bool rpc_server::get_tensor(const rpc_msg_get_tensor_req & request, std::vector<
     struct ggml_context * ctx = ggml_init(params);
     ggml_tensor * tensor = deserialize_tensor(ctx, &request.tensor);
     if (tensor == nullptr) {
-        GGML_PRINT_DEBUG("[%s] error deserializing tensor\n", __func__);
+        GGML_LOG_ERROR("[%s] error deserializing tensor\n", __func__);
         ggml_free(ctx);
         return false;
     }
@@ -1051,7 +1042,7 @@ bool rpc_server::copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_co
     ggml_tensor * src = deserialize_tensor(ctx, &request.src);
     ggml_tensor * dst = deserialize_tensor(ctx, &request.dst);
     if (src == nullptr || dst == nullptr) {
-        GGML_PRINT_DEBUG("[%s] error deserializing tensors\n", __func__);
+        GGML_LOG_ERROR("[%s] error deserializing tensors\n", __func__);
         ggml_free(ctx);
         return false;
     }
@@ -1385,14 +1376,14 @@ static void ggml_backend_rpc_device_get_memory(ggml_backend_dev_t dev, size_t *

     ggml_backend_rpc_get_device_memory(ctx->endpoint.c_str(), free, total);

-    UNUSED(dev);
+    GGML_UNUSED(dev);
 }

 static enum ggml_backend_dev_type ggml_backend_rpc_device_get_type(ggml_backend_dev_t dev) {
     // TODO: obtain value from the server
     return GGML_BACKEND_DEVICE_TYPE_GPU;

-    UNUSED(dev);
+    GGML_UNUSED(dev);
 }

 static void ggml_backend_rpc_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
@@ -1413,20 +1404,20 @@ static ggml_backend_t ggml_backend_rpc_device_init(ggml_backend_dev_t dev, const

     return ggml_backend_rpc_init(ctx->endpoint.c_str());

-    UNUSED(params);
+    GGML_UNUSED(params);
 }

 static ggml_backend_buffer_type_t ggml_backend_rpc_device_get_buffer_type(ggml_backend_dev_t dev) {
     ggml_backend_rpc_device_context * ctx = (ggml_backend_rpc_device_context *)dev->context;

     return ggml_backend_rpc_buffer_type(ctx->endpoint.c_str());

-    UNUSED(dev);
+    GGML_UNUSED(dev);
 }

 static bool ggml_backend_rpc_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) {
-    UNUSED(dev);
-    UNUSED(op);
+    GGML_UNUSED(dev);
+    GGML_UNUSED(op);
     // TODO: call the remote backend and cache the results
     return true;
 }
@@ -1463,20 +1454,20 @@ static const struct ggml_backend_device_i ggml_backend_rpc_device_i = {
 static const char * ggml_backend_rpc_reg_get_name(ggml_backend_reg_t reg) {
     return "RPC";

-    UNUSED(reg);
+    GGML_UNUSED(reg);
 }

 static size_t ggml_backend_rpc_reg_get_device_count(ggml_backend_reg_t reg) {
     return 0;

-    UNUSED(reg);
+    GGML_UNUSED(reg);
 }

 static ggml_backend_dev_t ggml_backend_rpc_reg_get_device(ggml_backend_reg_t reg, size_t index) {
     GGML_ABORT("The RPC backend does not have enumerated devices - use ggml_backend_add_device instead");

-    UNUSED(reg);
-    UNUSED(index);
+    GGML_UNUSED(reg);
+    GGML_UNUSED(index);
 }

 static void * ggml_backend_rpc_get_proc_address(ggml_backend_reg_t reg, const char * name) {
@@ -1485,7 +1476,7 @@ static void * ggml_backend_rpc_get_proc_address(ggml_backend_reg_t reg, const ch
     }
     return NULL;

-    UNUSED(reg);
+    GGML_UNUSED(reg);
 }

 static const struct ggml_backend_reg_i ggml_backend_rpc_reg_i = {
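
The macro definitions themselves are not part of this diff, so as background: an unused-parameter macro of the GGML_UNUSED kind conventionally expands its argument to a void cast, while a GGML_PRINT_DEBUG-style macro is compiled out unless a debug flag is set, which is why the failure branches above move to an always-on error log. Below is a minimal, self-contained sketch of the two patterns; the SKETCH_* names are hypothetical and stand in for the real ggml definitions, which live elsewhere in the tree and may differ in detail.

// Illustrative sketch only, not the actual ggml macros.
#include <cstdio>

// Unused-parameter suppression: evaluate the argument as void so the compiler
// does not warn about parameters that a stub implementation ignores.
#define SKETCH_UNUSED(x) (void)(x)

// Compile-time-gated debug print: compiled out unless SKETCH_DEBUG >= 1,
// so anything logged through it disappears in normal builds.
#define SKETCH_DEBUG 0
#if (SKETCH_DEBUG >= 1)
#define SKETCH_PRINT_DEBUG(...) printf(__VA_ARGS__)
#else
#define SKETCH_PRINT_DEBUG(...)
#endif

// Always-on error log: failure paths stay visible in release builds.
#define SKETCH_LOG_ERROR(...) fprintf(stderr, __VA_ARGS__)

static bool free_buffer_stub(void * buffer) {
    SKETCH_UNUSED(buffer);                                   // silences -Wunused-parameter
    SKETCH_PRINT_DEBUG("[%s] buffer not found\n", __func__); // invisible with SKETCH_DEBUG == 0
    SKETCH_LOG_ERROR("[%s] buffer not found\n", __func__);   // always reported
    return false;
}

int main() {
    free_buffer_stub(nullptr);
    return 0;
}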