@@ -1047,7 +1047,6 @@ bool rpc_server::set_tensor(const std::vector<uint8_t> & input) {
         const size_t p1 = p0 + ggml_backend_buffer_get_size(tensor->buffer);
 
         if (in_tensor->data + offset < p0 || in_tensor->data + offset >= p1 || size > (p1 - in_tensor->data - offset)) {
-            // Replace GGML_ABORT with logging and error return
             GGML_LOG_ERROR("[%s] tensor data region (data=0x%" PRIx64 ", offset=%" PRIu64 ", size=%zu) out of buffer bounds [0x%zx, 0x%zx)\n",
                            __func__, in_tensor->data, offset, size, p0, p1);
             return false;
@@ -1125,10 +1124,8 @@ bool rpc_server::set_tensor_hash(const std::vector<uint8_t> & input, rpc_msg_set
         const size_t p1 = p0 + ggml_backend_buffer_get_size(tensor->buffer);
 
         if (in_tensor->data + offset < p0 || in_tensor->data + offset >= p1 || size > (p1 - in_tensor->data - offset)) {
-            // Replace GGML_ABORT with logging and error return
             GGML_LOG_ERROR("[%s] tensor data region (data=0x%" PRIx64 ", offset=%" PRIu64 ", size=%zu, hash=0x%" PRIx64 ") out of buffer bounds [0x%zx, 0x%zx)\n",
                            __func__, in_tensor->data, offset, size, *hash, p0, p1);
-            response.result = 0;
             return false;
         }
     }
@@ -1194,7 +1191,6 @@ bool rpc_server::get_tensor(const rpc_msg_get_tensor_req & request, std::vector<
         if (request.tensor.data + request.offset < p0 ||
             request.tensor.data + request.offset >= p1 ||
             request.size > (p1 - request.tensor.data - request.offset)) {
-                // Replace GGML_ABORT with logging and error return
                 GGML_LOG_ERROR("[%s] requested tensor region (data=0x%" PRIx64 ", offset=%" PRIu64 ", size=%" PRIu64 ") out of buffer bounds [0x%zx, 0x%zx)\n",
                                __func__, request.tensor.data, request.offset, request.size, p0, p1);
                 return false;
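
The three hunks above share the same bounds check: a client-supplied (data, offset, size) triple is accepted only if it lies entirely inside the backend buffer [p0, p1). A minimal standalone sketch of that predicate (the helper name is illustrative, not part of the patch):

#include <cstddef>
#include <cstdint>

// Sketch only: same predicate as in the hunks above, without the ggml tensor types.
static bool region_in_bounds(uint64_t data, uint64_t offset, size_t size,
                             size_t p0, size_t p1) {
    if (data + offset < p0 || data + offset >= p1) {
        return false;                        // region starts outside the buffer
    }
    return size <= (p1 - data - offset);     // and must not run past its end
}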
@@ -1220,7 +1216,6 @@ bool rpc_server::copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_co
     ggml_tensor * dst = deserialize_tensor(ctx, &request.dst);
     if (src == nullptr || dst == nullptr) {
         GGML_LOG_ERROR("[%s] error deserializing tensors\n", __func__);
-        response.result = 0;
         return false;
     }
 
@@ -1238,7 +1233,6 @@ bool rpc_server::copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_co
                        dst_data + src_size,
                        dst_base,
                        dst_base + dst_buf_sz);
-        response.result = 0;
         return false;
     }
 
@@ -1398,8 +1392,7 @@ bool rpc_server::graph_compute(const std::vector<uint8_t> & input, rpc_msg_graph
     }
     ggml_status status = ggml_backend_graph_compute(backend, graph);
     response.result = status;
-    // Return true only if computation succeeded
-    return status == GGML_STATUS_SUCCESS;
+    return true;
 }
 
 rpc_server::~rpc_server() {
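
In the last hunk, graph_compute now reports the compute outcome only through response.result and returns true unconditionally: the boolean return signals that the request was handled and a response can be sent, not that the computation succeeded. A hedged sketch of that split (struct and function names are illustrative, not the server's actual types):

#include <cstdint>
#include "ggml-backend.h"

// Sketch only: the status travels in the response payload; the bool is
// transport-level ("a reply was produced"), matching the hunk above.
struct graph_compute_rsp_sketch {
    uint8_t result;   // holds a ggml_status value for the client to inspect
};

static bool handle_graph_compute_sketch(ggml_backend_t backend, ggml_cgraph * graph,
                                        graph_compute_rsp_sketch & response) {
    const ggml_status status = ggml_backend_graph_compute(backend, graph);
    response.result = status;   // compute failure is reported here ...
    return true;                // ... not by failing the RPC round-trip itself
}

A client would then compare response.result against GGML_STATUS_SUCCESS rather than expecting the RPC call itself to fail.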