Skip to content

Commit 064c369

Browse files
authored
Make global scope not thread-safe (#4850)
* Make global scope not thread-safe 1. There is no need to make the global scope thread-safe, since it will only be invoked from the Python main thread. 2. Do not free the global scope when C++ exits. Let the OS free the memory; otherwise, we would need to handle the destruction-order dependencies. See https://google.github.io/styleguide/cppguide.html#Static_and_Global_Variables * Revert "FIX: Release CPU/GPU memory via deleter" This reverts commit 8f80f5b.
1 parent f43b1a9 commit 064c369

File tree

2 files changed

+20
-45
lines changed

2 files changed

+20
-45
lines changed

paddle/framework/scope.cc

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -65,16 +65,12 @@ void Scope::DropKids() {
6565
kids_.clear();
6666
}
6767

68-
std::once_flag feed_variable_flag;
69-
7068
framework::Scope& GetGlobalScope() {
  // Process-wide scope, created lazily on first use and never freed:
  // the OS reclaims the memory at exit, which sidesteps destruction-order
  // problems among global objects (see the Google C++ style guide on
  // static and global variables).  Initialization is intentionally
  // unguarded — this is expected to be called only from the Python
  // main thread, so no thread-safe init machinery is needed.
  static framework::Scope* global_scope = nullptr;
  if (global_scope == nullptr) {
    global_scope = new framework::Scope();
  }
  return *global_scope;
}
7975

8076
} // namespace framework

paddle/memory/memory.cc

Lines changed: 15 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -14,11 +14,6 @@ limitations under the License. */
1414

1515
#include "paddle/memory/memory.h"
1616

17-
#include <algorithm> // for transform
18-
#include <cstring> // for memcpy
19-
#include <memory> // for unique_ptr
20-
#include <mutex> // for call_once
21-
2217
#include "glog/logging.h"
2318

2419
#include "paddle/memory/detail/buddy_allocator.h"
@@ -32,19 +27,14 @@ namespace memory {
3227

3328
using BuddyAllocator = detail::BuddyAllocator;
3429

35-
std::once_flag cpu_allocator_flag;
36-
std::once_flag gpu_allocator_flag;
37-
3830
BuddyAllocator* GetCPUBuddyAllocator() {
  // Singleton buddy allocator for host (CPU) memory, created lazily on
  // first use and intentionally leaked: freeing it at program exit would
  // require ordering its destruction against other globals.
  // NOTE(review): initialization is not guarded — presumably first
  // touched from a single thread; confirm against callers.
  // Use the file-level `BuddyAllocator` alias (rather than spelling out
  // detail::BuddyAllocator) for consistency with the return type.
  static BuddyAllocator* a = nullptr;
  if (a == nullptr) {
    a = new BuddyAllocator(new detail::CPUAllocator,
                           platform::CpuMinChunkSize(),
                           platform::CpuMaxChunkSize());
  }
  return a;
}
4939

5040
template <>
@@ -65,35 +55,24 @@ size_t Used<platform::CPUPlace>(platform::CPUPlace place) {
6555
#ifdef PADDLE_WITH_CUDA
6656

6757
BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) {
  // One buddy allocator per visible CUDA device, all created on the
  // first call and intentionally leaked (the OS reclaims the memory at
  // exit, avoiding destruction-order issues among globals).
  // NOTE(review): initialization is unguarded by design; gpu_id is
  // assumed to be in [0, GetCUDADeviceCount()) — no bounds check is
  // performed here, so verify at the call sites.
  static BuddyAllocator** as = nullptr;  // nullptr, not NULL: C++11 file
  if (as == nullptr) {
    int gpu_num = platform::GetCUDADeviceCount();
    as = new BuddyAllocator*[gpu_num];
    for (int gpu = 0; gpu < gpu_num; gpu++) {
      // Each allocator must be constructed with its own device current.
      platform::SetDeviceId(gpu);
      as[gpu] = new BuddyAllocator(new detail::GPUAllocator,
                                   platform::GpuMinChunkSize(),
                                   platform::GpuMaxChunkSize());
    }
    VLOG(3) << "\n\nNOTE: each GPU device use "
            << FLAGS_fraction_of_gpu_memory_to_use * 100 << "% of GPU memory.\n"
            << "You can set environment variable '"
            << platform::kEnvFractionGpuMemoryToUse
            << "' to change the fraction of GPU usage.\n\n";
  }
  // Restore the requested device before handing back its allocator.
  platform::SetDeviceId(gpu_id);
  return as[gpu_id];
}
9877

9978
template <>

0 commit comments

Comments
 (0)