|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */
| 14 | + |
| 15 | +#include <algorithm> |
| 16 | +#include <ctime> |
| 17 | + |
| 18 | +#include "paddle/framework/op_registry.h" |
| 19 | + |
| 20 | +namespace paddle { |
| 21 | +namespace operators { |
| 22 | + |
| 23 | +#define CLOG std::cout |
| 24 | + |
| 25 | +struct Formater { |
| 26 | + std::string message; |
| 27 | + std::string name; |
| 28 | + std::vector<int> dims; |
| 29 | + std::type_index dtype{typeid(char)}; |
| 30 | + framework::LoD lod; |
| 31 | + int summarize; |
| 32 | + void* data{nullptr}; |
| 33 | + |
| 34 | + void operator()(size_t size) { |
| 35 | + PrintMessage(); |
| 36 | + PrintName(); |
| 37 | + PrintDims(); |
| 38 | + PrintDtype(); |
| 39 | + PrintLod(); |
| 40 | + PrintData(size); |
| 41 | + } |
| 42 | + |
| 43 | + private: |
| 44 | + void PrintMessage() { CLOG << std::time(nullptr) << "\t" << message; } |
| 45 | + void PrintName() { |
| 46 | + if (!name.empty()) { |
| 47 | + CLOG << "Tensor[" << name << "]" << std::endl; |
| 48 | + } |
| 49 | + } |
| 50 | + void PrintDims() { |
| 51 | + if (!dims.empty()) { |
| 52 | + CLOG << "\tshape: ["; |
| 53 | + for (auto i : dims) { |
| 54 | + CLOG << i << ","; |
| 55 | + } |
| 56 | + CLOG << "]" << std::endl; |
| 57 | + } |
| 58 | + } |
| 59 | + void PrintDtype() { |
| 60 | + if (dtype.hash_code() != typeid(char).hash_code()) { |
| 61 | + CLOG << "\tdtype: " << dtype.name() << std::endl; |
| 62 | + } |
| 63 | + } |
| 64 | + void PrintLod() { |
| 65 | + if (!lod.empty()) { |
| 66 | + CLOG << "\tLoD: ["; |
| 67 | + for (auto level : lod) { |
| 68 | + CLOG << "[ "; |
| 69 | + for (auto i : level) { |
| 70 | + CLOG << i << ","; |
| 71 | + } |
| 72 | + CLOG << " ]"; |
| 73 | + } |
| 74 | + CLOG << "]" << std::endl; |
| 75 | + } |
| 76 | + } |
| 77 | + |
| 78 | + void PrintData(size_t size) { |
| 79 | + PADDLE_ENFORCE_NOT_NULL(data); |
| 80 | + // print float |
| 81 | + if (dtype.hash_code() == typeid(float).hash_code()) { |
| 82 | + Display<float>(size); |
| 83 | + } |
| 84 | + if (dtype.hash_code() == typeid(double).hash_code()) { |
| 85 | + Display<double>(size); |
| 86 | + } |
| 87 | + if (dtype.hash_code() == typeid(int).hash_code()) { |
| 88 | + Display<int>(size); |
| 89 | + } |
| 90 | + if (dtype.hash_code() == typeid(int64_t).hash_code()) { |
| 91 | + Display<int64_t>(size); |
| 92 | + } |
| 93 | + } |
| 94 | + |
| 95 | + template <typename T> |
| 96 | + void Display(size_t size) { |
| 97 | + auto* d = (T*)data; |
| 98 | + CLOG << "\tdata: "; |
| 99 | + if (summarize != -1) { |
| 100 | + summarize = std::min(size, (size_t)summarize); |
| 101 | + for (int i = 0; i < summarize; i++) { |
| 102 | + CLOG << d[i] << ","; |
| 103 | + } |
| 104 | + } else { |
| 105 | + for (size_t i = 0; i < size; i++) { |
| 106 | + CLOG << d[i] << ","; |
| 107 | + } |
| 108 | + } |
| 109 | + CLOG << std::endl; |
| 110 | + } |
| 111 | +}; |
| 112 | + |
| 113 | +// TODO(ChunweiYan) there should be some other printers for TensorArray |
| 114 | +class TensorPrintOp : public framework::OperatorBase { |
| 115 | + public: |
| 116 | + TensorPrintOp(const std::string& type, |
| 117 | + const framework::VariableNameMap& inputs, |
| 118 | + const framework::VariableNameMap& outputs, |
| 119 | + const framework::AttributeMap& attrs) |
| 120 | + : OperatorBase(type, inputs, outputs, attrs) {} |
| 121 | + |
| 122 | + TensorPrintOp(const TensorPrintOp& o) |
| 123 | + : framework::OperatorBase( |
| 124 | + static_cast<const framework::OperatorBase&>(o)) { |
| 125 | + PADDLE_THROW("Not implemented"); |
| 126 | + } |
| 127 | + |
| 128 | + void Run(const framework::Scope& scope, |
| 129 | + const platform::Place& place) const override { |
| 130 | + // Only run the `first_n` times. |
| 131 | + int first_n = Attr<int>("first_n"); |
| 132 | + if (first_n > 0 && ++times_ > first_n) return; |
| 133 | + |
| 134 | + PADDLE_ENFORCE(!Inputs("input").empty(), "input should be set"); |
| 135 | + auto* input_var = scope.FindVar(Input("input")); |
| 136 | + PADDLE_ENFORCE_NOT_NULL(input_var); |
| 137 | + auto& tensor = input_var->Get<framework::LoDTensor>(); |
| 138 | + |
| 139 | + // TODO(ChunweiYan) support GPU |
| 140 | + PADDLE_ENFORCE(platform::is_cpu_place(tensor.place())); |
| 141 | + |
| 142 | + Formater formater; |
| 143 | + if (Attr<bool>("print_tensor_name")) { |
| 144 | + formater.name = Inputs("input").front(); |
| 145 | + } |
| 146 | + if (Attr<bool>("print_tensor_type")) { |
| 147 | + formater.dtype = tensor.type(); |
| 148 | + } |
| 149 | + if (Attr<bool>("print_tensor_shape")) { |
| 150 | + formater.dims.assign(tensor.dims()[0], |
| 151 | + tensor.dims()[tensor.dims().size() - 1]); |
| 152 | + } |
| 153 | + if (Attr<bool>("print_tensor_lod")) { |
| 154 | + formater.lod = tensor.lod(); |
| 155 | + } |
| 156 | + formater.summarize = Attr<int>("summarize"); |
| 157 | + formater.data = (void*)tensor.data<void>(); |
| 158 | + formater(tensor.numel()); |
| 159 | + } |
| 160 | + |
| 161 | + private: |
| 162 | + mutable int times_{0}; |
| 163 | +}; |
| 164 | + |
// Declares the op's single input and its control attributes: `first_n`
// limits how many times the op logs, `message` is a text prefix,
// `summarize` caps how many elements are printed, and the four
// `print_tensor_*` flags toggle sections of the report.
class PrintOpProtoAndCheckMaker : public framework::OpProtoAndCheckerMaker {
 public:
  PrintOpProtoAndCheckMaker(OpProto* proto, OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("input", "the tensor that will be displayed.");
    AddAttr<int>("first_n", "Only log `first_n` number of times.");
    AddAttr<std::string>("message", "A string message to print as a prefix.");
    AddAttr<int>("summarize", "Print this number of elements in the tensor.");
    AddAttr<bool>("print_tensor_name", "Whether to print the tensor name.");
    AddAttr<bool>("print_tensor_type", "Whether to print the tensor's dtype.");
    AddAttr<bool>("print_tensor_shape", "Whether to print the tensor's shape.");
    AddAttr<bool>("print_tensor_lod", "Whether to print the tensor's lod.");
    AddComment(R"DOC(
    Creates a print op that will print when a tensor is accessed.

    Wraps the tensor passed in so that whenever that a tensor is accessed,
    the message `message` is printed, along with the current value of the
    tensor `t`.)DOC");
  }
};
| 185 | + |
// The print op produces no outputs, so shape inference only verifies that
// the "input" variable is wired in.
class InferShape : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext* context) const override {
    PADDLE_ENFORCE(context->HasInput("input"), "input should be set");
  }
};
| 192 | + |
// Intentionally a no-op: the op declares no outputs, so there is no
// variable type to propagate.
class InferVarType : public framework::VarTypeInference {
 public:
  void operator()(const framework::OpDesc& op_desc,
                  framework::BlockDesc* block) const override {}
};
| 198 | + |
| 199 | +} // namespace operators |
| 200 | +} // namespace paddle |
| 201 | + |
// Register the `print` operator with EmptyGradOpMaker: printing is not
// differentiable, so no gradient op is generated.
REGISTER_OPERATOR(print, paddle::operators::TensorPrintOp,
                  paddle::operators::PrintOpProtoAndCheckMaker,
                  paddle::operators::InferShape,
                  paddle::operators::InferVarType,
                  paddle::framework::EmptyGradOpMaker);
0 commit comments