
Commit 0c43a37

Fix cpplint errors with paddle/fluid/platform/gpu_info.* (#9710)
* Fix cpplint errors with paddle/fluid/platform/gpu_info.*
* Update
1 parent 55ffcea commit 0c43a37
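
For context, the lint failure behind the signature change appears to be cpplint's [runtime/references] check, which flags non-const reference parameters and asks for output arguments to be passed by pointer. A minimal sketch of the before/after pattern (the rule name is my reading of the change, not stated in the commit message):

    // Flagged by cpplint [runtime/references]: outputs passed by non-const reference.
    void GpuMemoryUsage(size_t &available, size_t &total);

    // Accepted: outputs passed by pointer, so call sites make the mutation visible with "&".
    void GpuMemoryUsage(size_t *available, size_t *total);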

File tree

3 files changed: +9 / -9 lines

paddle/fluid/memory/memory.cc

Lines changed: 1 addition & 1 deletion

@@ -95,7 +95,7 @@ void* Alloc<platform::CUDAPlace>(platform::CUDAPlace place, size_t size) {
     int cur_dev = platform::GetCurrentDeviceId();
     platform::SetDeviceId(place.device);
     size_t avail, total;
-    platform::GpuMemoryUsage(avail, total);
+    platform::GpuMemoryUsage(&avail, &total);
     LOG(WARNING) << "Cannot allocate " << size << " bytes in GPU "
                  << place.device << ", available " << avail << " bytes";
     LOG(WARNING) << "total " << total;

paddle/fluid/platform/gpu_info.cc

Lines changed: 6 additions & 5 deletions

@@ -14,8 +14,9 @@ limitations under the License. */

 #include "paddle/fluid/platform/gpu_info.h"

-#include "gflags/gflags.h"
+#include <algorithm>

+#include "gflags/gflags.h"
 #include "paddle/fluid/platform/enforce.h"

 DEFINE_double(fraction_of_gpu_memory_to_use, 0.92,

@@ -77,16 +78,16 @@ void SetDeviceId(int id) {
                  "cudaSetDevice failed in paddle::platform::SetDeviceId");
 }

-void GpuMemoryUsage(size_t &available, size_t &total) {
-  PADDLE_ENFORCE(cudaMemGetInfo(&available, &total),
+void GpuMemoryUsage(size_t *available, size_t *total) {
+  PADDLE_ENFORCE(cudaMemGetInfo(available, total),
                  "cudaMemGetInfo failed in paddle::platform::GetMemoryUsage");
 }

 size_t GpuMaxAllocSize() {
   size_t total = 0;
   size_t available = 0;

-  GpuMemoryUsage(available, total);
+  GpuMemoryUsage(&available, &total);

   // Reserve the rest for page tables, etc.
   return static_cast<size_t>(total * FLAGS_fraction_of_gpu_memory_to_use);

@@ -101,7 +102,7 @@ size_t GpuMaxChunkSize() {
   size_t total = 0;
   size_t available = 0;

-  GpuMemoryUsage(available, total);
+  GpuMemoryUsage(&available, &total);
   VLOG(10) << "GPU Usage " << available / 1024 / 1024 << "M/"
            << total / 1024 / 1024 << "M";
   size_t reserving = static_cast<size_t>(0.05 * total);
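
As a hedged usage sketch (mirroring the call sites updated above, not additional code from the commit), a caller queries free and total device memory with the pointer-based signature like this:

    size_t available = 0;
    size_t total = 0;
    // Outputs are filled in through the pointers (backed by cudaMemGetInfo).
    paddle::platform::GpuMemoryUsage(&available, &total);
    VLOG(10) << "GPU Usage " << available / 1024 / 1024 << "M/"
             << total / 1024 / 1024 << "M";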

paddle/fluid/platform/gpu_info.h

Lines changed: 2 additions & 3 deletions

@@ -24,8 +24,7 @@ namespace paddle {
 namespace platform {

 //! Environment variable: fraction of GPU memory to use on each device.
-const std::string kEnvFractionGpuMemoryToUse =
-    "PADDLE_FRACTION_GPU_MEMORY_TO_USE";
+const char kEnvFractionGpuMemoryToUse[] = "PADDLE_FRACTION_GPU_MEMORY_TO_USE";

 //! Get the total number of GPU devices in system.
 int GetCUDADeviceCount();

@@ -46,7 +45,7 @@ int GetCurrentDeviceId();
 void SetDeviceId(int device_id);

 //! Get the memory usage of current GPU device.
-void GpuMemoryUsage(size_t &available, size_t &total);
+void GpuMemoryUsage(size_t *available, size_t *total);

 //! Get the maximum allocation size of current GPU device.
 size_t GpuMaxAllocSize();
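
The header constant change fits the same cleanup: cpplint's [runtime/string] check flags static and global std::string constants, since they require a constructor to run before main() and a destructor at exit, and a plain char array avoids that. A minimal sketch of the rule using the constant from this header (the rationale is the usual Google style guide one, not spelled out in the commit):

    // Flagged by cpplint [runtime/string]: global object of class type.
    const std::string kEnvFractionGpuMemoryToUse =
        "PADDLE_FRACTION_GPU_MEMORY_TO_USE";

    // Accepted: a plain char array needs no dynamic initialization.
    const char kEnvFractionGpuMemoryToUse[] = "PADDLE_FRACTION_GPU_MEMORY_TO_USE";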
