
Commit b9995a7

Merge branch 'windows/build' into windows/online

test=develop

2 parents eb57780 + abbf1eb commit b9995a7

6 files changed: +2, -34 lines


paddle/fluid/framework/lod_tensor.cc

Lines changed: 1 addition & 16 deletions
@@ -26,10 +26,8 @@ limitations under the License. */
 #include "paddle/fluid/memory/memcpy.h"
 #include "paddle/fluid/memory/memory.h"
 
-#if !defined(_WIN32)
 #include "paddle/fluid/recordio/scanner.h"
 #include "paddle/fluid/recordio/writer.h"
-#endif // _WIN32
 
 namespace paddle {
 namespace framework {
@@ -305,7 +303,6 @@ void DeserializeFromStream(std::istream &is, LoDTensor *tensor,
   TensorFromStream(is, static_cast<Tensor *>(tensor), dev_ctx);
 }
 
-#if !defined(_WIN32)
 void WriteToRecordIO(recordio::Writer *writer,
                      const std::vector<LoDTensor> &tensor,
                      const platform::DeviceContext &dev_ctx) {
@@ -335,19 +332,7 @@ bool ReadFromRecordIO(recordio::Scanner *scanner,
 
   return true;
 }
-#else
-class Writer {};
-class Scanner {};
-void WriteToRecordIO(recordio::Writer *writer,
-                     const std::vector<LoDTensor> &tensor,
-                     const platform::DeviceContext &dev_ctx) {}
-bool ReadFromRecordIO(recordio::Scanner *scanner,
-                      const platform::DeviceContext &dev_ctx,
-                      std::vector<LoDTensor> *result_ptr) {
-  PADDLE_ENFORCE("windows didn't supported recordio!.");
-  return true;
-}
-#endif // _WIN32
+
 std::vector<LoDTensor> LoDTensor::SplitLoDTensor(
     const std::vector<platform::Place> places) const {
   check_memory_size();

paddle/fluid/framework/lod_tensor_test.cc

Lines changed: 0 additions & 2 deletions
@@ -274,7 +274,6 @@ TEST(LoD, ConvertToOffsetBasedLoD) {
   EXPECT_EQ(offset_lod, expected);
 }
 
-#if !defined(_WIN32)
 template <typename T>
 static void TestRecordIO() {
   LoDTensor tensor;
@@ -321,7 +320,6 @@ TEST(LoDTensor, RecordIO) {
   TestRecordIO<float>();
   TestRecordIO<double>();
 }
-#endif // !defined(_WIN32)
 
 } // namespace framework
 } // namespace paddle

paddle/fluid/framework/operator.cc

Lines changed: 1 addition & 4 deletions
@@ -156,14 +156,11 @@ void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
   // The profile has a process-wide mutex, results in serious performance issue
   // in concurrency scenerio. Here use an `if` to fix this issue.
   // Please not remove the `if`, ask @Superjomn if there are any concern.
-#ifndef _WIN32
   if (platform::IsProfileEnabled()) {
     platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
     platform::RecordEvent record_event(Type(), pool.Get(place));
     RunImpl(scope, place);
-  } else
-#endif
-  {
+  } else {
     RunImpl(scope, place);
   }
   VLOG(30) << place << " " << DebugStringEx(&scope);
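
The comment kept in this hunk is why the `if` survives the cleanup: the profiler takes a process-wide mutex, so `RecordEvent` is only constructed when profiling is actually enabled, and the previously Windows-only guard now applies on every platform. As a reading aid, this is how the guarded section of `OperatorBase::Run` reads once the patch is applied (reassembled from the hunk above; the rest of the function is elided):

  if (platform::IsProfileEnabled()) {
    // Profiling path: RecordEvent times this op, at the cost of the
    // profiler's process-wide lock mentioned in the comment above.
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    platform::RecordEvent record_event(Type(), pool.Get(place));
    RunImpl(scope, place);
  } else {
    // Fast path: no profiler involvement at all.
    RunImpl(scope, place);
  }
  VLOG(30) << place << " " << DebugStringEx(&scope);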

paddle/fluid/inference/api/analysis_predictor.cc

Lines changed: 0 additions & 4 deletions
@@ -56,15 +56,13 @@ bool AnalysisPredictor::Init(
     const std::shared_ptr<framework::Scope> &parent_scope,
     const std::shared_ptr<framework::ProgramDesc> &program) {
   VLOG(30) << "Predictor::init()";
-#if !defined(_WIN32)
   if (FLAGS_profile) {
     LOG(WARNING) << "Profiler is actived, might affect the performance";
     LOG(INFO) << "You can turn off by set gflags '-profile false'";
     auto tracking_device = config_.use_gpu ? platform::ProfilerState::kAll
                                            : platform::ProfilerState::kCPU;
     platform::EnableProfiler(tracking_device);
   }
-#endif
 
   // no matter with or without MKLDNN
   paddle::platform::SetNumThreads(FLAGS_paddle_num_threads);
@@ -501,12 +499,10 @@ bool AnalysisPredictor::LoadParameters() {
 }
 
 AnalysisPredictor::~AnalysisPredictor() {
-#if !defined(_WIN32)
   if (FLAGS_profile) {
     platform::DisableProfiler(platform::EventSortingKey::kTotal,
                               "./profile.log");
   }
-#endif
   if (sub_scope_) {
     scope_->DeleteScope(sub_scope_);
   }
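
Both hunks drop the `_WIN32` guards around the `FLAGS_profile` lifecycle, so the profiler is now enabled in `Init()` and flushed in the destructor on every platform. A minimal sketch of that lifecycle, reassembled from the hunks above (members such as `config_` and the surrounding initialization and cleanup are elided; the same pattern recurs in `api_impl.cc` below):

  // In AnalysisPredictor::Init(): start the profiler when -profile is set.
  if (FLAGS_profile) {
    auto tracking_device = config_.use_gpu ? platform::ProfilerState::kAll
                                           : platform::ProfilerState::kCPU;
    platform::EnableProfiler(tracking_device);
  }

  // In ~AnalysisPredictor(): dump the collected events (sorted by
  // EventSortingKey::kTotal) to ./profile.log.
  if (FLAGS_profile) {
    platform::DisableProfiler(platform::EventSortingKey::kTotal,
                              "./profile.log");
  }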

paddle/fluid/inference/api/api_impl.cc

Lines changed: 0 additions & 4 deletions
@@ -64,7 +64,6 @@ void NativePaddlePredictor::PrepareFeedFetch() {
 bool NativePaddlePredictor::Init(
     std::shared_ptr<framework::Scope> parent_scope) {
   VLOG(3) << "Predictor::init()";
-#if !defined(_WIN32)
   if (FLAGS_profile) {
     LOG(WARNING) << "Profiler is actived, might affect the performance";
     LOG(INFO) << "You can turn off by set gflags '-profile false'";
@@ -73,7 +72,6 @@ bool NativePaddlePredictor::Init(
                                            : platform::ProfilerState::kCPU;
     platform::EnableProfiler(tracking_device);
   }
-#endif
 
   // no matter with or without MKLDNN
   paddle::platform::SetNumThreads(FLAGS_paddle_num_threads);
@@ -121,12 +119,10 @@ bool NativePaddlePredictor::Init(
 }
 
 NativePaddlePredictor::~NativePaddlePredictor() {
-#if !defined(_WIN32)
   if (FLAGS_profile) {
     platform::DisableProfiler(platform::EventSortingKey::kTotal,
                               "./profile.log");
   }
-#endif
   if (sub_scope_) {
     scope_->DeleteScope(sub_scope_);
   }

paddle/fluid/inference/tests/api/tester_helper.h

Lines changed: 0 additions & 4 deletions
@@ -177,11 +177,9 @@ void TestOneThreadPrediction(
     warmup_timer.tic();
     predictor->Run(inputs[0], outputs, batch_size);
     PrintTime(batch_size, 1, 1, 0, warmup_timer.toc(), 1);
-#if !defined(_WIN32)
     if (FLAGS_profile) {
       paddle::platform::ResetProfiler();
     }
-#endif
   }
 
   LOG(INFO) << "Run " << num_times << " times...";
@@ -230,11 +228,9 @@ void TestMultiThreadPrediction(
       warmup_timer.tic();
       predictor->Run(inputs[0], outputs, batch_size);
       PrintTime(batch_size, 1, num_threads, tid, warmup_timer.toc(), 1);
-#if !defined(_WIN32)
       if (FLAGS_profile) {
         paddle::platform::ResetProfiler();
       }
-#endif
     }
 
     LOG(INFO) << "Thread " << tid << " run " << num_times << " times...";
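
In both test helpers the removed guard sits right after the warmup pass, so `ResetProfiler()` now also runs on Windows when `-profile` is set, presumably to keep warmup events out of the measured profile. The resulting warmup pattern, reassembled from the first hunk above (`Timer`, `PrintTime`, and `predictor` come from the surrounding header):

    Timer warmup_timer;
    warmup_timer.tic();
    predictor->Run(inputs[0], outputs, batch_size);         // one warmup pass
    PrintTime(batch_size, 1, 1, 0, warmup_timer.toc(), 1);
    if (FLAGS_profile) {
      // Clear events recorded during warmup before the timed runs start.
      paddle::platform::ResetProfiler();
    }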
