@@ -23,7 +23,7 @@
 namespace paddle {
 namespace inference {
 
-DEFINE_int32(tensorrt_max_batchsize, 300, "TensorRT maximum batch size");
+DEFINE_int32(tensorrt_max_batchsize, 3, "TensorRT maximum batch size");
 DEFINE_int32(tensorrt_workspace_size, 2048, "TensorRT workspace size");
 
 namespace analysis {
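Since these are ordinary gflags definitions, both limits can be overridden on the command line of whatever binary links this pass, without recompiling. A hypothetical invocation (the binary name is assumed):

    ./your_inference_binary --tensorrt_max_batchsize=8 --tensorrt_workspace_size=4096
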
@@ -87,34 +87,113 @@ void DataFlowGraphToFluidPass::AddFluidOp(Node *node) {
 }
 
 void CreateTrtEngineOp(Node *node, const DataFlowGraph &graph,
-                       const framework::proto::BlockDesc &block) {
+                       framework::proto::BlockDesc *block) {
   static int counter{0};
   PADDLE_ENFORCE(node->IsFunctionBlock());
   framework::OpDesc desc;
   auto *func = static_cast<FunctionBlock *>(node);
 
   // collect inputs
-  std::vector<std::string> io;
+  std::unordered_set<std::string> input_names;
   for (auto *x : func->inlinks) {
-    io.push_back(x->name());
+    input_names.insert(x->name());
   }
-  desc.SetInput("Xs", io);
+  desc.SetInput(
+      "Xs", std::vector<std::string>(input_names.begin(), input_names.end()));
 
-  // collect outputs
-  io.clear();
+  std::unordered_set<std::string> output_names;
   for (auto *x : func->outlinks) {
-    io.push_back(x->name());
+    output_names.insert(x->name());
   }
-  desc.SetOutput("Ys", io);
+
+  std::vector<std::string> output_temp(output_names.begin(),
+                                       output_names.end());
+  desc.SetOutput("Ys", output_temp);
   desc.SetType("tensorrt_engine");
 
-  PADDLE_ENFORCE(!block.vars().empty(), "the block has no var-desc");
+  std::unordered_map<std::string, std::string> output_name_map;
+
+  // The following procedure renames all the intermediate variables
+  // and the output variables of the subgraph.
+  // Why do we do this?
+  // During the transition from a fluid OP to a TensorRT OP, we map
+  // the input and output Tensor (fluid data structure) of the fluid
+  // OP to the corresponding ITensor (TRT data structure) through the
+  // Tensor name. When we set up an ITensor for a variable, we must
+  // ensure that it has not been set before.
+  // If a variable in the fluid graph is both the input of one OP and
+  // the output of another OP, there will be problems.
+  // So we have to rename the variables in the subgraph to make sure
+  // each one is either an OP's input or an OP's output.
+
+  auto subgraph_nodes = func->subgraph;
+  for (int index = 0; index < block->ops_size(); index++) {
+    framework::proto::OpDesc *op = block->mutable_ops(index);
+    auto correspond_node = subgraph_nodes[index];
+    PADDLE_ENFORCE_EQ(correspond_node->name(), op->type());
+
+    std::unordered_map<std::string, size_t> var2id;
+    for (auto *in_var : correspond_node->inlinks) {
+      var2id[in_var->name()] = in_var->id();
+    }
+    // rename the input variables of the op inside the subgraph
+    for (int i = 0; i < op->inputs_size(); i++) {
+      framework::proto::OpDesc_Var *in_var = op->mutable_inputs(i);
+      std::vector<std::string> replaced_names;
+      for (int k = 0; k < in_var->arguments_size(); k++) {
+        std::string arg_value = in_var->arguments(k);
+        if (input_names.count(arg_value)) {
+          replaced_names.push_back(arg_value);
+        } else {
+          replaced_names.push_back(arg_value +
+                                   std::to_string(var2id[arg_value]));
+        }
+      }
+      in_var->clear_arguments();
+      for (size_t k = 0; k < replaced_names.size(); k++) {
+        in_var->add_arguments(replaced_names[k]);
+      }
+    }
+    var2id.clear();
+    for (auto out_var : correspond_node->outlinks) {
+      var2id[out_var->name()] = out_var->id();
+    }
+
+    // rename the output variables of the op inside the subgraph
+    for (int i = 0; i < op->outputs_size(); i++) {
+      framework::proto::OpDesc_Var *out_var = op->mutable_outputs(i);
+      std::vector<std::string> replaced_names;
+      for (int k = 0; k < out_var->arguments_size(); k++) {
+        std::string arg_value = out_var->arguments(k);
+        if (output_names.count(arg_value)) {
+          output_name_map[arg_value] =
+              arg_value + std::to_string(var2id[arg_value]);
+        }
+        replaced_names.push_back(arg_value + std::to_string(var2id[arg_value]));
+      }
+      out_var->clear_arguments();
+      for (size_t k = 0; k < replaced_names.size(); k++) {
+        out_var->add_arguments(replaced_names[k]);
+      }
+    }
+  }
+  // When the tensorrt_engine op runs at the end of this sequence,
+  // output_mapping helps us copy the data from the renamed ITensors
+  // back to the output Tensors.
+  std::vector<std::string> output_mapping;
+  for (auto name : output_names) {
+    PADDLE_ENFORCE(output_name_map.count(name) != 0);
+    output_mapping.push_back(output_name_map[name]);
+  }
+
+  PADDLE_ENFORCE(!block->vars().empty(), "the block has no var-desc");
   // Set attrs
-  SetAttr(desc.Proto(), "subgraph", block.SerializeAsString());
+  SetAttr(desc.Proto(), "subgraph", block->SerializeAsString());
   SetAttr(desc.Proto(), "engine_uniq_key", "trt-" + std::to_string(counter++));
   SetAttr(desc.Proto(), "max_batch", FLAGS_tensorrt_max_batchsize);
   SetAttr(desc.Proto(), "max_workspace", FLAGS_tensorrt_workspace_size);
   SetAttr(desc.Proto(), "parameters", ExtractParameters(graph.nodes.nodes()));
+  SetAttr(desc.Proto(), "output_name_mapping", output_mapping);
   node->SetPbMsg(desc.Proto()->SerializeAsString());
 }
 
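To make the renaming scheme above concrete, here is a minimal standalone sketch of the rule those loops apply; the helper name, variable names, and node ids are hypothetical, not taken from the patch:

    #include <string>
    #include <unordered_map>
    #include <unordered_set>

    // Hypothetical distillation of the per-argument rename rule above:
    // subgraph-level inputs keep their names; every other variable gets
    // its unique node id appended, so a name that is one op's output and
    // the next op's input resolves to the same fresh name on both sides.
    std::string Rename(const std::string &arg,
                       const std::unordered_set<std::string> &graph_inputs,
                       const std::unordered_map<std::string, size_t> &var2id) {
      if (graph_inputs.count(arg)) return arg;
      return arg + std::to_string(var2id.at(arg));
    }

    // e.g. with graph_inputs = {"image"} and var2id = {{"fc_out", 5}}:
    //   Rename("image", ...)  -> "image"    (kept: a subgraph input)
    //   Rename("fc_out", ...) -> "fc_out5"  (renamed the same way at its
    //                                        producer and its consumer)

Subgraph outputs are additionally recorded in output_name_map, which is what the output_name_mapping attribute set above is built from.
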
@@ -146,15 +225,17 @@ void DataFlowGraphToFluidPass::AddEngineOp(Node *node) {
   LOG(INFO) << "transformed variable size: "
             << block_desc.Proto()->vars().size();
   // copy ops.
+
   for (auto *node : block_node->subgraph) {
     auto *op = block_desc.AppendOp();
     PADDLE_ENFORCE(!node->pb_msg().empty());
     op->Proto()->ParseFromString(node->pb_msg());
   }
+
   *block_desc.Proto()->mutable_vars() =
       argument_->origin_program_desc->blocks(0).vars();
   PADDLE_ENFORCE(!block_desc.Proto()->vars().empty());
-  CreateTrtEngineOp(node, *argument_->main_dfg, *block_desc.Proto());
+  CreateTrtEngineOp(node, *argument_->main_dfg, block_desc.Proto());
   auto *main_block = desc_->mutable_blocks(framework::kRootBlockIndex);
   auto *op = main_block->add_ops();
   PADDLE_ENFORCE(!node->pb_msg().empty(), "failed to set desc for block");
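For orientation, a rough sketch of how a consumer of this op desc could unpack what was stored here; op_desc is a hypothetical framework::OpDesc for the generated tensorrt_engine op, this is not the actual engine-op implementation, and the attribute API may differ across Paddle versions:

    // Recover the serialized sub-block and the output name mapping that
    // CreateTrtEngineOp packed into the op's attributes.
    framework::proto::BlockDesc subgraph;
    subgraph.ParseFromString(
        boost::get<std::string>(op_desc.GetAttr("subgraph")));
    auto output_maps = boost::get<std::vector<std::string>>(
        op_desc.GetAttr("output_name_mapping"));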