@@ -698,7 +698,7 @@ extern "C" {
 
     GGML_API size_t ggml_used_mem (const struct ggml_context * ctx);
 
-    GGML_API bool ggml_get_no_alloc (struct ggml_context * ctx);
+    GGML_API bool ggml_get_no_alloc (const struct ggml_context * ctx);
     GGML_API void ggml_set_no_alloc (struct ggml_context * ctx, bool no_alloc);
 
     GGML_API void * ggml_get_mem_buffer (const struct ggml_context * ctx);
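A minimal sketch (not part of the patch) of how the const-qualified context accessors might be used once this change lands; the helper name print_ctx_info and the 16 MiB pool size are made up for illustration.

    #include "ggml.h"
    #include <stdio.h>

    // Read-only queries now only need a const context.
    static void print_ctx_info(const struct ggml_context * ctx) {
        printf("no_alloc: %d, used mem: %zu bytes\n",
               ggml_get_no_alloc(ctx), ggml_used_mem(ctx));
    }

    int main(void) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 16 * 1024 * 1024,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
        };
        struct ggml_context * ctx = ggml_init(params);
        print_ctx_info(ctx);
        ggml_free(ctx);
        return 0;
    }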
@@ -745,7 +745,7 @@ extern "C" {
     // Context tensor enumeration and lookup
     GGML_API struct ggml_tensor * ggml_get_first_tensor (const struct ggml_context * ctx);
     GGML_API struct ggml_tensor * ggml_get_next_tensor (const struct ggml_context * ctx, struct ggml_tensor * tensor);
-    GGML_API struct ggml_tensor * ggml_get_tensor (struct ggml_context * ctx, const char * name);
+    GGML_API struct ggml_tensor * ggml_get_tensor (const struct ggml_context * ctx, const char * name);
 
     // Converts a flat index into coordinates
     GGML_API void ggml_unravel_index (const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3);
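Illustrative sketch of the enumeration/lookup API above with the const-qualified context; it assumes a context that already holds named tensors, and the tensor name "weight" is a placeholder.

    #include "ggml.h"
    #include <stdio.h>

    // With ggml_get_tensor now taking a const context, read-only inspection
    // helpers can accept `const struct ggml_context *` end to end.
    static void dump_tensors(const struct ggml_context * ctx) {
        for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL;
             t = ggml_get_next_tensor(ctx, t)) {
            printf("%-16s %lld elements\n", t->name, (long long) ggml_nelements(t));
        }
        // lookup by name works on the same const context
        struct ggml_tensor * w = ggml_get_tensor(ctx, "weight");
        if (w == NULL) {
            printf("no tensor named 'weight'\n");
        }
    }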
@@ -763,7 +763,7 @@ extern "C" {
     // Tensor flags
     GGML_API void ggml_set_input (struct ggml_tensor * tensor);
     GGML_API void ggml_set_output (struct ggml_tensor * tensor);
-    GGML_API void ggml_set_param (struct ggml_context * ctx, struct ggml_tensor * tensor);
+    GGML_API void ggml_set_param (const struct ggml_context * ctx, struct ggml_tensor * tensor);
     GGML_API void ggml_set_loss (struct ggml_tensor * tensor);
 
     //
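A short sketch of the flag setters above; the helper name mark_io and the tensor roles (x, w, out) are placeholders. Note that ggml_set_param still mutates the tensor it receives, so only the context argument gains const.

    #include "ggml.h"

    static void mark_io(struct ggml_context * ctx,
                        struct ggml_tensor * x,    // model input
                        struct ggml_tensor * w,    // trainable weight
                        struct ggml_tensor * out)  // graph output
    {
        ggml_set_input (x);       // x is filled by the user before graph execution
        ggml_set_param (ctx, w);  // mark w as a parameter; w is mutated, ctx is only read
        ggml_set_output(out);     // keep the result tensor's data available after execution
    }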
@@ -927,13 +927,13 @@ extern "C" {
     GGML_API struct ggml_tensor * ggml_repeat (
             struct ggml_context * ctx,
             struct ggml_tensor * a,
-            struct ggml_tensor * b);
+            const struct ggml_tensor * b);
 
     // sums repetitions in a into shape of b
     GGML_API struct ggml_tensor * ggml_repeat_back (
             struct ggml_context * ctx,
             struct ggml_tensor * a,
-            struct ggml_tensor * b);
+            const struct ggml_tensor * b);
 
     // concat a and b along dim
     // used in stable-diffusion
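Sketch of ggml_repeat from the hunk above: `b` only supplies the target shape, which is why it can be const-qualified here. The helper name and the 4x3 shape are arbitrary, and a valid context `ctx` is assumed.

    #include "ggml.h"

    static struct ggml_tensor * broadcast_row(struct ggml_context * ctx) {
        struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);     // one row of 4
        struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3);  // 3 rows of 4
        // result has the shape of b; the data of b is never read
        return ggml_repeat(ctx, a, b);
    }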
@@ -1243,7 +1243,7 @@ extern "C" {
     GGML_API struct ggml_tensor * ggml_reshape (
             struct ggml_context * ctx,
             struct ggml_tensor * a,
-            struct ggml_tensor * b);
+            const struct ggml_tensor * b);
 
     // return view(a)
     // TODO: when we start computing gradient, make a copy instead of view
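Sketch of ggml_reshape from the hunk above: as with ggml_repeat, `b` is read only for its shape (it must have the same number of elements as `a`). The helper name and the 12-element example are placeholders.

    #include "ggml.h"

    static struct ggml_tensor * reshape_to_matrix(struct ggml_context * ctx,
                                                  struct ggml_tensor * a /* 12 elements */) {
        struct ggml_tensor * shape = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3); // 4 * 3 = 12
        return ggml_reshape(ctx, a, shape); // returns a view of a with the shape of `shape`
    }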
@@ -1335,7 +1335,7 @@ extern "C" {
             struct ggml_context * ctx,
             struct ggml_tensor * a, // gradients of ggml_get_rows result
             struct ggml_tensor * b, // row indices
-            struct ggml_tensor * c); // data for ggml_get_rows, only used for its shape
+            const struct ggml_tensor * c); // data for ggml_get_rows, only used for its shape
 
     GGML_API struct ggml_tensor * ggml_diag (
             struct ggml_context * ctx,
@@ -1563,7 +1563,7 @@ extern "C" {
             struct ggml_context * ctx,
             struct ggml_tensor * a,  // convolution kernel
             struct ggml_tensor * b,  // gradient of im2col output
-            int64_t * ne, // shape of im2col input
+            const int64_t * ne,      // shape of im2col input
             int s0, // stride dimension 0
             int s1, // stride dimension 1
             int p0, // padding dimension 0
@@ -2062,15 +2062,16 @@ extern "C" {
     // graph allocation in a context
     GGML_API struct ggml_cgraph * ggml_new_graph (struct ggml_context * ctx); // size = GGML_DEFAULT_GRAPH_SIZE, grads = false
     GGML_API struct ggml_cgraph * ggml_new_graph_custom (struct ggml_context * ctx, size_t size, bool grads);
-    GGML_API struct ggml_cgraph * ggml_graph_dup (struct ggml_context * ctx, struct ggml_cgraph * cgraph);
-    GGML_API void ggml_graph_cpy (struct ggml_cgraph * src, struct ggml_cgraph * dst);
-    GGML_API void ggml_graph_reset (struct ggml_cgraph * cgraph); // set regular grads + optimizer momenta to 0, set loss grad to 1
+    GGML_API struct ggml_cgraph * ggml_graph_dup (struct ggml_context * ctx, const struct ggml_cgraph * cgraph);
+    GGML_API void ggml_graph_cpy (const struct ggml_cgraph * src, struct ggml_cgraph * dst);
+    GGML_API void ggml_graph_reset (
+            const struct ggml_cgraph * cgraph); // set regular grads + optimizer momenta to 0, set loss grad to 1
     GGML_API void ggml_graph_clear (struct ggml_cgraph * cgraph);
 
-    GGML_API int ggml_graph_size (struct ggml_cgraph * cgraph);
-    GGML_API struct ggml_tensor * ggml_graph_node (struct ggml_cgraph * cgraph, int i); // if i < 0, returns nodes[n_nodes + i]
-    GGML_API struct ggml_tensor ** ggml_graph_nodes (struct ggml_cgraph * cgraph);
-    GGML_API int ggml_graph_n_nodes (struct ggml_cgraph * cgraph);
+    GGML_API int ggml_graph_size (const struct ggml_cgraph * cgraph);
+    GGML_API struct ggml_tensor * ggml_graph_node (const struct ggml_cgraph * cgraph, int i); // if i < 0, returns nodes[n_nodes + i]
+    GGML_API struct ggml_tensor ** ggml_graph_nodes (const struct ggml_cgraph * cgraph);
+    GGML_API int ggml_graph_n_nodes (const struct ggml_cgraph * cgraph);
 
     GGML_API void ggml_graph_add_node (struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
 
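Sketch of the graph inspection API above with the const-qualified cgraph pointers. It assumes `ctx` already contains some expression tensor `f` (e.g. built from ggml ops such as ggml_mul_mat and ggml_add); the helper names print_graph and build_and_print are made up.

    #include "ggml.h"
    #include <stdio.h>

    // Accessors that only read the graph now take `const struct ggml_cgraph *`.
    static void print_graph(const struct ggml_cgraph * gf) {
        printf("nodes: %d (capacity %d)\n", ggml_graph_n_nodes(gf), ggml_graph_size(gf));
        for (int i = 0; i < ggml_graph_n_nodes(gf); ++i) {
            printf("  node %d: %s\n", i, ggml_graph_node(gf, i)->name);
        }
        // ggml_graph_node(gf, -1) would return the last node, i.e. the graph output
    }

    static void build_and_print(struct ggml_context * ctx, struct ggml_tensor * f) {
        struct ggml_cgraph * gf = ggml_new_graph(ctx);
        ggml_build_forward_expand(gf, f);  // record f and its dependencies as graph nodes
        print_graph(gf);                   // non-const pointer converts implicitly to const
    }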