@@ -239,24 +239,30 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops,
     return ret;
 }
 
-void umfPoolDestroy(umf_memory_pool_handle_t hPool) {
+umf_result_t umfPoolDestroy(umf_memory_pool_handle_t hPool) {
     if (umf_ba_global_is_destroyed()) {
-        return;
+        return UMF_RESULT_ERROR_UNKNOWN;
     }
 
-    hPool->ops.finalize(hPool->pool_priv);
+    umf_result_t ret = hPool->ops.finalize(hPool->pool_priv);
 
     umf_memory_provider_handle_t hUpstreamProvider = NULL;
     umfPoolGetMemoryProvider(hPool, &hUpstreamProvider);
 
     if (!(hPool->flags & UMF_POOL_CREATE_FLAG_DISABLE_TRACKING)) {
         // Destroy tracking provider.
-        umfMemoryProviderDestroy(hPool->provider);
+        umf_result_t ret2 = umfMemoryProviderDestroy(hPool->provider);
+        if (ret == UMF_RESULT_SUCCESS) {
+            ret = ret2;
+        }
     }
 
     if (hPool->flags & UMF_POOL_CREATE_FLAG_OWN_PROVIDER) {
         // Destroy associated memory provider.
-        umfMemoryProviderDestroy(hUpstreamProvider);
+        umf_result_t ret2 = umfMemoryProviderDestroy(hUpstreamProvider);
+        if (ret == UMF_RESULT_SUCCESS) {
+            ret = ret2;
+        }
     }
 
     utils_mutex_destroy_not_free(&hPool->lock);
@@ -265,6 +271,7 @@ void umfPoolDestroy(umf_memory_pool_handle_t hPool) {
 
     // TODO: this free keeps memory in base allocator, so it can lead to OOM in some scenarios (it should be optimized)
     umf_ba_global_free(hPool);
+    return ret;
 }
 
 umf_result_t umfFree(void *ptr) {