Commit 3423022

feature/add print op (#6799)
1 parent b4d1811 commit 3423022

File tree

4 files changed: +284, -1 lines changed

paddle/operators/CMakeLists.txt

Lines changed: 1 addition & 0 deletions

@@ -135,6 +135,7 @@ op_library(detection_output_op DEPS softmax)
 op_library(sequence_softmax_op DEPS softmax)
 op_library(sum_op DEPS selected_rows_functor)
 op_library(sgd_op DEPS selected_rows_functor)
+op_library(print_op DEPS lod_tensor)
 op_library(adagrad_op DEPS selected_rows_functor)
 op_library(conv_op DEPS vol2col)
 op_library(pool_op DEPS pooling)

paddle/operators/print_op.cc

Lines changed: 206 additions & 0 deletions

@@ -0,0 +1,206 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <algorithm>
+#include <ctime>
+#include <typeindex>
+
+#include "paddle/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+
+#define CLOG std::cout
+
+// Formats one log record for a tensor: timestamp, message, name,
+// shape, dtype, LoD, and the (possibly summarized) data.
+struct Formater {
+  std::string message;
+  std::string name;
+  std::vector<int> dims;
+  std::type_index dtype{typeid(char)};  // typeid(char) means "not set"
+  framework::LoD lod;
+  int summarize;
+  void* data{nullptr};
+
+  void operator()(size_t size) {
+    PrintMessage();
+    PrintName();
+    PrintDims();
+    PrintDtype();
+    PrintLod();
+    PrintData(size);
+  }
+
+ private:
+  void PrintMessage() { CLOG << std::time(nullptr) << "\t" << message; }
+  void PrintName() {
+    if (!name.empty()) {
+      CLOG << "Tensor[" << name << "]" << std::endl;
+    }
+  }
+  void PrintDims() {
+    if (!dims.empty()) {
+      CLOG << "\tshape: [";
+      for (auto i : dims) {
+        CLOG << i << ",";
+      }
+      CLOG << "]" << std::endl;
+    }
+  }
+  void PrintDtype() {
+    if (dtype.hash_code() != typeid(char).hash_code()) {
+      CLOG << "\tdtype: " << dtype.name() << std::endl;
+    }
+  }
+  void PrintLod() {
+    if (!lod.empty()) {
+      CLOG << "\tLoD: [";
+      for (auto level : lod) {
+        CLOG << "[ ";
+        for (auto i : level) {
+          CLOG << i << ",";
+        }
+        CLOG << " ]";
+      }
+      CLOG << "]" << std::endl;
+    }
+  }
+
+  void PrintData(size_t size) {
+    PADDLE_ENFORCE_NOT_NULL(data);
+    // Dispatch on the runtime element type.
+    if (dtype.hash_code() == typeid(float).hash_code()) {
+      Display<float>(size);
+    } else if (dtype.hash_code() == typeid(double).hash_code()) {
+      Display<double>(size);
+    } else if (dtype.hash_code() == typeid(int).hash_code()) {
+      Display<int>(size);
+    } else if (dtype.hash_code() == typeid(int64_t).hash_code()) {
+      Display<int64_t>(size);
+    }
+  }
+
+  template <typename T>
+  void Display(size_t size) {
+    auto* d = reinterpret_cast<T*>(data);
+    CLOG << "\tdata: ";
+    if (summarize != -1) {
+      // Print at most `summarize` leading elements.
+      summarize = std::min(size, static_cast<size_t>(summarize));
+      for (int i = 0; i < summarize; i++) {
+        CLOG << d[i] << ",";
+      }
+    } else {
+      for (size_t i = 0; i < size; i++) {
+        CLOG << d[i] << ",";
+      }
+    }
+    CLOG << std::endl;
+  }
+};
+
+// TODO(ChunweiYan) there should be some other printers for TensorArray
+class TensorPrintOp : public framework::OperatorBase {
+ public:
+  TensorPrintOp(const std::string& type,
+                const framework::VariableNameMap& inputs,
+                const framework::VariableNameMap& outputs,
+                const framework::AttributeMap& attrs)
+      : OperatorBase(type, inputs, outputs, attrs) {}
+
+  TensorPrintOp(const TensorPrintOp& o)
+      : framework::OperatorBase(
+            static_cast<const framework::OperatorBase&>(o)) {
+    PADDLE_THROW("Not implemented");
+  }
+
+  void Run(const framework::Scope& scope,
+           const platform::Place& place) const override {
+    // Only run for the first `first_n` executions.
+    int first_n = Attr<int>("first_n");
+    if (first_n > 0 && ++times_ > first_n) return;
+
+    PADDLE_ENFORCE(!Inputs("input").empty(), "input should be set");
+    auto* input_var = scope.FindVar(Input("input"));
+    PADDLE_ENFORCE_NOT_NULL(input_var);
+    auto& tensor = input_var->Get<framework::LoDTensor>();
+
+    // TODO(ChunweiYan) support GPU
+    PADDLE_ENFORCE(platform::is_cpu_place(tensor.place()));
+
+    Formater formater;
+    formater.message = Attr<std::string>("message");
+    if (Attr<bool>("print_tensor_name")) {
+      formater.name = Inputs("input").front();
+    }
+    if (Attr<bool>("print_tensor_type")) {
+      formater.dtype = tensor.type();
+    }
+    if (Attr<bool>("print_tensor_shape")) {
+      // Copy every dimension of the shape (dims.assign(count, value)
+      // would fill the vector with a single repeated value).
+      auto dims = tensor.dims();
+      formater.dims.resize(dims.size());
+      for (int i = 0; i < dims.size(); ++i) formater.dims[i] = dims[i];
+    }
+    if (Attr<bool>("print_tensor_lod")) {
+      formater.lod = tensor.lod();
+    }
+    formater.summarize = Attr<int>("summarize");
+    formater.data = (void*)tensor.data<void>();
+    formater(tensor.numel());
+  }
+
+ private:
+  mutable int times_{0};
+};
+
+class PrintOpProtoAndCheckMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  PrintOpProtoAndCheckMaker(OpProto* proto, OpAttrChecker* op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("input", "the tensor that will be displayed.");
+    AddAttr<int>("first_n", "Only log `first_n` number of times.");
+    AddAttr<std::string>("message", "A string message to print as a prefix.");
+    AddAttr<int>("summarize", "Print this number of elements in the tensor.");
+    AddAttr<bool>("print_tensor_name", "Whether to print the tensor name.");
+    AddAttr<bool>("print_tensor_type", "Whether to print the tensor's dtype.");
+    AddAttr<bool>("print_tensor_shape", "Whether to print the tensor's shape.");
+    AddAttr<bool>("print_tensor_lod", "Whether to print the tensor's lod.");
+    AddComment(R"DOC(
+Creates a print op that will print when a tensor is accessed.
+
+Wraps the tensor passed in so that whenever the tensor is accessed,
+the message `message` is printed, along with the current value of
+the tensor.)DOC");
+  }
+};
+
+class InferShape : public framework::InferShapeBase {
+ public:
+  void operator()(framework::InferShapeContext* context) const override {
+    PADDLE_ENFORCE(context->HasInput("input"), "input should be set");
+  }
+};
+
+class InferVarType : public framework::VarTypeInference {
+ public:
+  void operator()(const framework::OpDesc& op_desc,
+                  framework::BlockDesc* block) const override {}
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+REGISTER_OPERATOR(print, paddle::operators::TensorPrintOp,
+                  paddle::operators::PrintOpProtoAndCheckMaker,
+                  paddle::operators::InferShape,
+                  paddle::operators::InferVarType,
+                  paddle::framework::EmptyGradOpMaker);
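
To see concretely what one log record looks like, here is a rough Python mirror of the Formater logic above. This is a sketch only: the tensor name and values are made-up placeholders, and the dtype string printed by typeid(float).name() is compiler-specific (GCC prints "f").

    import time

    # Hypothetical stand-ins for Formater's fields.
    message = "The content of some_layer: "
    name = "some_layer_out"
    dims = [2, 10]
    summarize = 10
    data = [0.0] * 20

    # One record: PrintMessage, PrintName, PrintDims, PrintDtype, PrintData.
    print("%d\t%sTensor[%s]" % (int(time.time()), message, name))
    print("\tshape: [%s]" % "".join("%d," % d for d in dims))
    print("\tdtype: f")
    shown = data[:summarize] if summarize != -1 else data
    print("\tdata: " + "".join("%s," % v for v in shown))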

python/paddle/v2/fluid/layers/control_flow.py

Lines changed: 56 additions & 1 deletion
@@ -12,7 +12,7 @@
     'array_to_lod_tensor', 'increment', 'array_write', 'create_array',
     'less_than', 'array_read', 'shrink_memory', 'array_length', 'IfElse',
     'DynamicRNN', 'ConditionalBlock', 'StaticRNN', 'reorder_lod_tensor_by_rank',
-    'ParallelDo'
+    'ParallelDo', 'Print'
 ]


@@ -110,6 +110,61 @@ def merge_lod_tensor(in_true, in_false, x, mask, level=0):
     return out


+def Print(input,
+          first_n=-1,
+          message=None,
+          summarize=-1,
+          print_tensor_name=True,
+          print_tensor_type=True,
+          print_tensor_shape=True,
+          print_tensor_lod=True):
+    '''
+    **Print operator**
+
+    This creates a print op that will print when a tensor is accessed.
+
+    Wraps the tensor passed in so that whenever the tensor is accessed,
+    the message `message` is printed, along with the current value of
+    the tensor.
+
+    Args:
+        input (Variable): A Tensor to print.
+        summarize (int): Number of elements in the tensor to print; prints
+            all elements if negative.
+        message (str): A string message to print as a prefix.
+        first_n (int): Only log `first_n` number of times.
+        print_tensor_name (bool): Whether to print the tensor name.
+        print_tensor_type (bool): Whether to print the tensor dtype.
+        print_tensor_shape (bool): Whether to print the tensor shape.
+        print_tensor_lod (bool): Whether to print the tensor LoD.
+
+    Returns:
+        Variable: a temporary int32 variable created as the op's output;
+            the printing itself happens as a side effect.
+
+    Examples:
+        .. code-block:: python
+
+            value = some_layer(...)
+            Print(value, summarize=10,
+                  message="The content of some_layer: ")
+    '''
+    helper = LayerHelper('print', **locals())
+    out = helper.create_tmp_variable(dtype='int32')
+    helper.append_op(
+        type='print',
+        inputs={'input': input},
+        attrs={
+            'first_n': first_n,
+            'summarize': summarize,
+            'message': message or "",
+            'print_tensor_name': print_tensor_name,
+            'print_tensor_type': print_tensor_type,
+            'print_tensor_shape': print_tensor_shape,
+            'print_tensor_lod': print_tensor_lod,
+        })
+    return out
+
+
 class BlockGuard(object):
     """
     BlockGuard class.
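
As a quick usage sketch of the new layer (mirroring the unit test below; the messages here are placeholders, and summarize=-1 falls back to printing every element, per the C++ Display above):

    import paddle.v2.fluid.core as core
    import paddle.v2.fluid.layers as pd
    from paddle.v2.fluid.executor import Executor

    # Two print ops over the same tensor: one summarized, one full.
    x = pd.zeros(shape=[2, 10], dtype='float32')
    pd.Print(x, message="first 10 elements: ", summarize=10)
    pd.Print(x, message="all 20 elements: ")  # summarize=-1 prints everything

    exe = Executor(core.CPUPlace())
    exe.run()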
python/paddle/v2/fluid/tests/test_print_op.py

Lines changed: 21 additions & 0 deletions

@@ -0,0 +1,21 @@
+import unittest
+from paddle.v2.fluid.executor import Executor
+import paddle.v2.fluid.core as core
+import paddle.v2.fluid.layers as pd
+
+
+class TestPrintOp(unittest.TestCase):
+    def test_tensor(self):
+        i = pd.zeros(shape=[2, 10], dtype='float32')
+
+        pd.Print(i, message="I am a message", summarize=10)
+
+        cpu = core.CPUPlace()
+        exe = Executor(cpu)
+
+        exe.run()
+
+
+if __name__ == '__main__':
+    unittest.main()
