Skip to content

Commit d6f67f2

Browse files
authored
Merge pull request #6409 from typhoonzero/change_release_version
Change release version
2 parents 36fcc95 + ac18580 commit d6f67f2

File tree

9 files changed

+200
-7
lines changed

9 files changed

+200
-7
lines changed

paddle/capi/Main.cpp

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,4 +43,11 @@ paddle_error paddle_init(int argc, char** argv) {
4343
isInit = true;
4444
return kPD_NO_ERROR;
4545
}
46+
47+
// Initialize the calling thread's Paddle runtime state. On GPU-enabled runs
// this binds the thread to the globally configured GPU device via hl_init();
// on CPU-only runs it is a no-op. Always reports success.
paddle_error paddle_init_thread() {
  if (!FLAGS_use_gpu) {
    return kPD_NO_ERROR;
  }
  hl_init(FLAGS_gpu_id);
  return kPD_NO_ERROR;
}
4653
}

paddle/capi/Matrix.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ paddle_error paddle_matrix_destroy(paddle_matrix mat) {
4040
paddle_error paddle_matrix_set_row(paddle_matrix mat,
4141
uint64_t rowID,
4242
paddle_real* rowArray) {
43-
if (mat == nullptr) return kPD_NULLPTR;
43+
if (mat == nullptr || rowArray == nullptr) return kPD_NULLPTR;
4444
auto ptr = cast(mat);
4545
if (ptr->mat == nullptr) return kPD_NULLPTR;
4646
if (rowID >= ptr->mat->getHeight()) return kPD_OUT_OF_RANGE;

paddle/capi/error.cpp

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
2+
3+
Licensed under the Apache License, Version 2.0 (the "License");
4+
you may not use this file except in compliance with the License.
5+
You may obtain a copy of the License at
6+
7+
http://www.apache.org/licenses/LICENSE-2.0
8+
9+
Unless required by applicable law or agreed to in writing, software
10+
distributed under the License is distributed on an "AS IS" BASIS,
11+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
See the License for the specific language governing permissions and
13+
limitations under the License. */
14+
15+
#include "error.h"
16+
17+
/**
 * Map a paddle_error code to a short human-readable description.
 * Codes without an entry here (including kPD_NO_ERROR, which has no case
 * in the original switch either) yield the empty string.
 */
const char* paddle_error_string(paddle_error err) {
  if (err == kPD_NULLPTR) return "nullptr error";
  if (err == kPD_OUT_OF_RANGE) return "out of range error";
  if (err == kPD_PROTOBUF_ERROR) return "protobuf error";
  if (err == kPD_NOT_SUPPORTED) return "not supported error";
  if (err == kPD_UNDEFINED_ERROR) return "undefined error";
  return "";
}

paddle/capi/error.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,8 @@ limitations under the License. */
1515
#ifndef __PADDLE_CAPI_ERROR_H__
1616
#define __PADDLE_CAPI_ERROR_H__
1717

18+
#include "config.h"
19+
1820
/**
1921
* Error Type for Paddle API.
2022
*/
@@ -27,4 +29,9 @@ typedef enum {
2729
kPD_UNDEFINED_ERROR = -1,
2830
} paddle_error;
2931

32+
/**
33+
* Error string for Paddle API.
34+
*/
35+
PD_API const char* paddle_error_string(paddle_error err);
36+
3037
#endif
Lines changed: 25 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,29 @@
11
# CMake requires cmake_minimum_required() to be called before project();
# the original had the two lines in the opposite order.
cmake_minimum_required(VERSION 2.8)
project(multi_thread)

find_package (Threads)

# Locate the Paddle installation, from -DPADDLE_ROOT or the environment.
if(NOT PADDLE_ROOT)
  set(PADDLE_ROOT $ENV{PADDLE_ROOT} CACHE PATH "Paddle Path")
endif()
if(PADDLE_ROOT)
  include_directories(${PADDLE_ROOT}/include)
  link_directories(${PADDLE_ROOT}/lib)
endif()

# CPU inference demo.
set(CPU_SRCS main.c)
add_executable(${PROJECT_NAME} ${CPU_SRCS})
set_property(TARGET ${PROJECT_NAME} PROPERTY C_STANDARD 99)
target_link_libraries(${PROJECT_NAME}
                      -lpaddle_capi_shared
                      ${CMAKE_THREAD_LIBS_INIT})

# GPU inference demo, built only when a CUDA toolkit is found.
find_package(CUDA QUIET)
if(CUDA_FOUND)
  set(GPU_SRCS main_gpu.c)
  cuda_add_executable(${PROJECT_NAME}_gpu ${GPU_SRCS})
  set_property(TARGET ${PROJECT_NAME}_gpu PROPERTY C_STANDARD 99)
  target_link_libraries(${PROJECT_NAME}_gpu
                        -lpaddle_capi_shared
                        ${CMAKE_THREAD_LIBS_INIT})
endif(CUDA_FOUND)
Lines changed: 113 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,113 @@
1+
#include <paddle/capi.h>
2+
#include <pthread.h>
3+
#include <time.h>
4+
#include "../common/common.h"
5+
6+
#define CONFIG_BIN "./trainer_config.bin"
7+
#define NUM_THREAD 4
8+
#define NUM_ITER 1000
9+
10+
/* Serializes the printf() output below so result lines from different
 * threads do not interleave. */
pthread_mutex_t mutex;

/*
 * @brief A simple inference example that runs multiple threads on one GPU.
 *        Each thread holds its own local gradient_machine but shares the
 *        same parameters.
 *        If you want to run on different GPUs, you need to launch
 *        multi-processes or set trainer_count > 1.
 *
 * @param gm_ptr  a paddle_gradient_machine created with
 *                paddle_gradient_machine_create_shared_param; this thread
 *                takes ownership and destroys it before returning.
 * @return always NULL.
 */
void* thread_main(void* gm_ptr) {
  // Initialize the thread environment of Paddle. Required before any GPU
  // work on this thread (binds it to the configured device; see
  // paddle_init_thread in Main.cpp).
  CHECK(paddle_init_thread());

  paddle_gradient_machine machine = (paddle_gradient_machine)(gm_ptr);
  // Create input arguments.
  paddle_arguments in_args = paddle_arguments_create_none();
  // Create input matrix: one sample of width 784, resident on the GPU.
  paddle_matrix mat = paddle_matrix_create(/* sample_num */ 1,
                                           /* size */ 784,
                                           /* useGPU */ true);
  // Create output arguments.
  paddle_arguments out_args = paddle_arguments_create_none();
  // Create output matrix (filled by the forward pass below).
  paddle_matrix prob = paddle_matrix_create_none();

  // CPU buffers used to stage the input and read back the 10-wide output.
  paddle_real* cpu_input = (paddle_real*)malloc(784 * sizeof(paddle_real));
  paddle_real* cpu_output = (paddle_real*)malloc(10 * sizeof(paddle_real));
  for (int iter = 0; iter < NUM_ITER; ++iter) {
    // There is only one input layer of this network.
    CHECK(paddle_arguments_resize(in_args, 1));
    CHECK(paddle_arguments_set_value(in_args, 0, mat));

    // Fill the sample with uniform random values in [0, 1].
    // NOTE(review): rand() shares hidden state across threads — acceptable
    // for a demo, but rand_r() would give reproducible per-thread streams.
    for (int i = 0; i < 784; ++i) {
      cpu_input[i] = rand() / ((float)RAND_MAX);
    }
    CHECK(paddle_matrix_set_value(mat, cpu_input));

    CHECK(paddle_gradient_machine_forward(machine,
                                          in_args,
                                          out_args,
                                          /* isTrain */ false));

    // Copy the first output argument back to host memory.
    CHECK(paddle_arguments_get_value(out_args, 0, prob));
    CHECK(paddle_matrix_get_value(prob, cpu_output));

    // Guard stdout so each thread's result line stays intact.
    pthread_mutex_lock(&mutex);
    printf("Prob: ");
    for (int i = 0; i < 10; ++i) {
      printf("%.2f ", cpu_output[i]);
    }
    printf("\n");
    pthread_mutex_unlock(&mutex);
  }

  // Release all C-API handles; this thread owns its local gradient machine
  // and must destroy it here.
  CHECK(paddle_matrix_destroy(prob));
  CHECK(paddle_arguments_destroy(out_args));
  CHECK(paddle_matrix_destroy(mat));
  CHECK(paddle_arguments_destroy(in_args));
  CHECK(paddle_gradient_machine_destroy(machine));

  free(cpu_input);
  free(cpu_output);

  return NULL;
}
76+
77+
int main() {
78+
// Initalize Paddle
79+
char* argv[] = {"--use_gpu=True"};
80+
CHECK(paddle_init(1, (char**)argv));
81+
82+
// Reading config binary file. It is generated by `convert_protobin.sh`
83+
long size;
84+
void* buf = read_config(CONFIG_BIN, &size);
85+
86+
// Create a gradient machine for inference.
87+
paddle_gradient_machine machine;
88+
CHECK(paddle_gradient_machine_create_for_inference(&machine, buf, (int)size));
89+
CHECK(paddle_gradient_machine_randomize_param(machine));
90+
91+
// Loading parameter. Uncomment the following line and change the directory.
92+
// CHECK(paddle_gradient_machine_load_parameter_from_disk(machine,
93+
// "./some_where_to_params"));
94+
srand(time(0));
95+
pthread_mutex_init(&mutex, NULL);
96+
97+
pthread_t threads[NUM_THREAD];
98+
99+
for (int i = 0; i < NUM_THREAD; ++i) {
100+
paddle_gradient_machine thread_local_machine;
101+
CHECK(paddle_gradient_machine_create_shared_param(
102+
machine, buf, size, &thread_local_machine));
103+
pthread_create(&threads[i], NULL, thread_main, thread_local_machine);
104+
}
105+
106+
for (int i = 0; i < NUM_THREAD; ++i) {
107+
pthread_join(threads[i], NULL);
108+
}
109+
110+
pthread_mutex_destroy(&mutex);
111+
112+
return 0;
113+
}

paddle/capi/main.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,13 @@ extern "C" {
2626
*/
2727
PD_API paddle_error paddle_init(int argc, char** argv);
2828

29+
/**
30+
* Initialize the thread environment of Paddle.
31+
* @note it is requisite for GPU runs but optional for CPU runs.
32+
* For GPU runs, all threads will run on the same GPU devices.
33+
*/
34+
PD_API paddle_error paddle_init_thread();
35+
2936
#ifdef __cplusplus
3037
}
3138
#endif

python/CMakeLists.txt

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,12 @@ if(WITH_MKLDNN)
3333
list(APPEND MKL_DEPENDS mkldnn)
3434
endif()
3535

36+
# Name the wheel "paddlepaddle-gpu" for CUDA builds so the CPU and GPU
# packages can coexist; plain "paddlepaddle" otherwise. Lowercase set()
# matches the command style used elsewhere in this file (list, endif, ...).
if(WITH_GPU)
  set(PACKAGE_NAME "paddlepaddle-gpu")
else()
  set(PACKAGE_NAME "paddlepaddle")
endif()
41+
3642
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in
3743
${CMAKE_CURRENT_BINARY_DIR}/setup.py)
3844

python/setup.py.in

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ class BinaryDistribution(Distribution):
55
return True
66

77
MAJOR = 0
8-
MINOR = 10
8+
MINOR = 11
99
PATCH = 0
1010
RC = 0
1111
ISTAGED = False
@@ -89,7 +89,7 @@ paddle_rt_libs = ['${WARPCTC_LIBRARIES}']
8989
if '${MKL_SHARED_LIBS}'!= '':
9090
paddle_rt_libs += '${MKL_SHARED_LIBS}'.split(';')
9191

92-
setup(name='paddlepaddle',
92+
setup(name='${PACKAGE_NAME}',
9393
version='${PADDLE_VERSION}',
9494
description='Parallel Distributed Deep Learning',
9595
install_requires=setup_requires,

0 commit comments

Comments
 (0)