@@ -2018,15 +2018,11 @@ struct ggml_context {
     void * mem_buffer;
     bool mem_buffer_owned;
     bool no_alloc;
-    bool no_alloc_save; // this is used to save the no_alloc state when using scratch buffers

     int n_objects;

     struct ggml_object * objects_begin;
     struct ggml_object * objects_end;
-
-    struct ggml_scratch scratch;
-    struct ggml_scratch scratch_save;
 };

 struct ggml_context_container {
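With the scratch fields gone, a ggml_context owns exactly one memory pool (mem_buffer) and every object is carved from it in order. A minimal sketch of the remaining allocation model, assuming only the public ggml.h API that this diff touches (ggml_init, ggml_new_tensor_1d, ggml_used_mem, ggml_free):

    #include "ggml.h"
    #include <stdio.h>

    int main(void) {
        // one context == one memory pool; there is no secondary scratch pool anymore
        struct ggml_init_params params = {
            /*.mem_size   =*/ 16*1024*1024,   // 16 MiB pool
            /*.mem_buffer =*/ NULL,           // let ggml allocate it (mem_buffer_owned = true)
            /*.no_alloc   =*/ false,          // tensor data lives in the pool
        };
        struct ggml_context * ctx = ggml_init(params);

        struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);

        // used = object header + tensor struct + data, all from the single pool
        printf("tensor bytes: %zu, used: %zu\n", ggml_nbytes(a), ggml_used_mem(ctx));

        ggml_free(ctx);
        return 0;
    }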
@@ -3879,12 +3875,9 @@ struct ggml_context * ggml_init(struct ggml_init_params params) {
         /*.mem_buffer       =*/ params.mem_buffer ? params.mem_buffer : ggml_aligned_malloc(mem_size),
         /*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
         /*.no_alloc         =*/ params.no_alloc,
-        /*.no_alloc_save    =*/ params.no_alloc,
         /*.n_objects        =*/ 0,
         /*.objects_begin    =*/ NULL,
         /*.objects_end      =*/ NULL,
-        /*.scratch          =*/ { 0, 0, NULL, },
-        /*.scratch_save     =*/ { 0, 0, NULL, },
     };

     GGML_ASSERT(ctx->mem_buffer != NULL);
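The initializer keeps both ownership modes: if the caller passes a buffer, mem_buffer_owned is false and ggml_free leaves it alone; otherwise ggml allocates the pool and frees it. A sketch of the caller-owned variant, assuming the same ggml.h API as above:

    #include "ggml.h"

    static char pool[8*1024*1024];  // caller-owned memory, e.g. a static arena

    void example(void) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ sizeof(pool),
            /*.mem_buffer =*/ pool,    // mem_buffer_owned will be false
            /*.no_alloc   =*/ false,
        };
        struct ggml_context * ctx = ggml_init(params);

        // ... create tensors ...

        ggml_free(ctx);  // releases context bookkeeping only; `pool` remains valid
    }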
@@ -3904,8 +3897,6 @@ void ggml_reset(struct ggml_context * ctx) {
     ctx->n_objects     = 0;
     ctx->objects_begin = NULL;
     ctx->objects_end   = NULL;
-    ctx->scratch       = (struct ggml_scratch) { 0, 0, NULL, };
-    ctx->scratch_save  = (struct ggml_scratch) { 0, 0, NULL, };
 }

 void ggml_free(struct ggml_context * ctx) {
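ggml_reset now only rewinds the object list; there is no scratch state left to clear. That makes reset-and-reuse the simple way to recycle one pool across iterations, roughly:

    #include "ggml.h"

    // reuse one pool across iterations instead of rotating scratch buffers:
    // after ggml_reset the next allocation starts at offset 0 again
    void run_iterations(struct ggml_context * ctx, int n_iters) {
        for (int i = 0; i < n_iters; ++i) {
            struct ggml_tensor * t = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4096);
            (void) t;  // ... build and evaluate this iteration's tensors ...

            ggml_reset(ctx);  // invalidates every object created above
        }
    }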
@@ -3924,14 +3915,6 @@ size_t ggml_used_mem(const struct ggml_context * ctx) {
     return ctx->objects_end == NULL ? 0 : ctx->objects_end->offs + ctx->objects_end->size;
 }

-size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) {
-    const size_t result = ctx->scratch.data ? ctx->scratch.offs : 0;
-
-    ctx->scratch = scratch;
-
-    return result;
-}
-
 bool ggml_get_no_alloc(struct ggml_context * ctx) {
     return ctx->no_alloc;
 }
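For callers migrating: this is the public API being deleted. Judging from the removed code, struct ggml_scratch was { offs, size, data } and ggml_set_scratch returned the previous offset; attaching a scratch pool looked roughly like the sketch below. The intended replacement is a no_alloc context combined with the ggml-alloc allocator:

    // the calling pattern this commit removes -- it no longer compiles afterwards
    static char scratch_mem[16*1024*1024];

    void old_pattern(struct ggml_context * ctx) {
        struct ggml_scratch scratch = {
            /*.offs =*/ 0,
            /*.size =*/ sizeof(scratch_mem),
            /*.data =*/ scratch_mem,
        };
        ggml_set_scratch(ctx, scratch);  // tensor data now comes from scratch_mem

        // ... create intermediate tensors ...

        ggml_set_scratch(ctx, (struct ggml_scratch) { 0, 0, NULL, });  // detach
    }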
@@ -3959,27 +3942,6 @@ size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) {
     return max_size;
 }

-// IMPORTANT:
-// when creating "opt" tensors, always save and load the scratch buffer
-// this is an error prone process, but it is necessary to support inplace
-// operators when using scratch buffers
-// TODO: implement a better way
-static void ggml_scratch_save(struct ggml_context * ctx) {
-    // this is needed to allow opt tensors to store their data
-    // TODO: again, need to find a better way
-    ctx->no_alloc_save = ctx->no_alloc;
-    ctx->no_alloc      = false;
-
-    ctx->scratch_save = ctx->scratch;
-    ctx->scratch.data = NULL;
-}
-
-static void ggml_scratch_load(struct ggml_context * ctx) {
-    ctx->no_alloc = ctx->no_alloc_save;
-
-    ctx->scratch = ctx->scratch_save;
-}
-
 ////////////////////////////////////////////////////////////////////////////////

 static struct ggml_object * ggml_new_object(struct ggml_context * ctx, enum ggml_object_type type, size_t size) {
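The deleted pair existed because, with a scratch attached, every new tensor's data landed in the scratch and was eventually overwritten; op-parameter ('opt') tensors had to outlive that, so their creation was bracketed as below. This is the internal, ggml.c-only pattern that ggml_new_i32 used before this commit (both helpers were static):

    // old internal pattern (removed): force a small parameter tensor into the
    // context's own pool even while a scratch buffer is attached
    static void make_opt_tensor(struct ggml_context * ctx) {
        ggml_scratch_save(ctx);  // stash scratch + no_alloc, then detach the scratch

        struct ggml_tensor * p = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);

        ggml_scratch_load(ctx);  // restore the stashed scratch state

        ggml_set_i32(p, 42);     // safe: p->data is in the pool, not the scratch
    }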
@@ -4060,29 +4022,13 @@ static struct ggml_tensor * ggml_new_tensor_impl(
     size_t obj_alloc_size = 0;

     if (view_src == NULL && !ctx->no_alloc) {
-        if (ctx->scratch.data != NULL) {
-            // allocate tensor data in the scratch buffer
-            if (ctx->scratch.offs + data_size > ctx->scratch.size) {
-                GGML_LOG_WARN("%s: not enough space in the scratch memory pool (needed %zu, available %zu)\n",
-                        __func__, ctx->scratch.offs + data_size, ctx->scratch.size);
-                assert(false);
-                return NULL;
-            }
-
-            data = (char * const) ctx->scratch.data + ctx->scratch.offs;
-
-            ctx->scratch.offs += data_size;
-        } else {
-            // allocate tensor data in the context's memory pool
-            obj_alloc_size = data_size;
-        }
+        // allocate tensor data in the context's memory pool
+        obj_alloc_size = data_size;
     }

     struct ggml_object * const obj_new = ggml_new_object(ctx, GGML_OBJECT_TYPE_TENSOR, GGML_TENSOR_SIZE + obj_alloc_size);
     GGML_ASSERT(obj_new);

-    // TODO: for recoverable errors, we would need to free the data allocated from the scratch buffer here
-
     struct ggml_tensor * const result = (struct ggml_tensor *)((char *)ctx->mem_buffer + obj_new->offs);

 #ifdef __clang__
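After the simplification there is a single accounting rule: a tensor object consumes its header plus, unless no_alloc is set or it is a view, its data, all from the context pool. A small check of that rule, assuming ggml_tensor_overhead() from the public header:

    #include "ggml.h"
    #include <stdio.h>

    int main(void) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 16*1024*1024,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,   // with true, obj_alloc_size stays 0 and t->data is NULL
        };
        struct ggml_context * ctx = ggml_init(params);

        size_t before = ggml_used_mem(ctx);
        struct ggml_tensor * t = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 64);
        size_t delta  = ggml_used_mem(ctx) - before;

        // delta ~= ggml_tensor_overhead() + ggml_nbytes(t), plus alignment padding
        printf("delta=%zu overhead=%zu data=%zu\n", delta, ggml_tensor_overhead(), ggml_nbytes(t));

        ggml_free(ctx);
        return 0;
    }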
@@ -4178,24 +4124,16 @@ struct ggml_tensor * ggml_new_tensor_4d(
 }

 struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
-    ggml_scratch_save(ctx);
-
     struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);

-    ggml_scratch_load(ctx);
-
     ggml_set_i32(result, value);

     return result;
 }

 struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
-    ggml_scratch_save(ctx);
-
     struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);

-    ggml_scratch_load(ctx);
-
     ggml_set_f32(result, value);

     return result;
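With the save/load bracket gone, the scalar helpers are plain wrappers over ggml_new_tensor_1d, so they require a context that actually allocates data (no_alloc == false). Typical use, assuming the ggml_get_i32_1d/ggml_get_f32_1d accessors from ggml.h:

    #include "ggml.h"
    #include <stdio.h>

    int main(void) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 1024*1024,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
        };
        struct ggml_context * ctx = ggml_init(params);

        struct ggml_tensor * i = ggml_new_i32(ctx, 42);     // 1-element I32 tensor
        struct ggml_tensor * f = ggml_new_f32(ctx, 1.5f);   // 1-element F32 tensor

        printf("%d %f\n", ggml_get_i32_1d(i, 0), (double) ggml_get_f32_1d(f, 0));

        ggml_free(ctx);
        return 0;
    }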
@@ -20263,7 +20201,6 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) {
     uint64_t size_eval = 0;

     // compute size of intermediate results
-    // TODO: does not take into account scratch buffers !!!!
     for (int i = 0; i < cgraph->n_nodes; ++i) {
         size_eval += ggml_nbytes_pad(cgraph->nodes[i]);
     }
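With scratch buffers gone, the padded per-node sum is the whole story for the eval size, which is why the TODO could be dropped. The same accounting pulled out as a helper, a sketch that assumes the cgraph fields visible inside ggml.c:

    #include "ggml.h"

    // ggml_nbytes_pad rounds each node's ggml_nbytes up to GGML_MEM_ALIGN
    static uint64_t graph_eval_size(const struct ggml_cgraph * cgraph) {
        uint64_t size_eval = 0;
        for (int i = 0; i < cgraph->n_nodes; ++i) {
            size_eval += ggml_nbytes_pad(cgraph->nodes[i]);
        }
        return size_eval;
    }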