@@ -23,7 +23,7 @@
 namespace paddle {
 namespace inference {
 
-DEFINE_int32(tensorrt_max_batchsize, 300, "TensorRT maximum batch size");
+DEFINE_int32(tensorrt_max_batchsize, 3, "TensorRT maximum batch size");
 DEFINE_int32(tensorrt_workspace_size, 2048, "TensorRT workspace size");
 
 namespace analysis {
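Aside: a minimal sketch of how limits like these typically reach a TensorRT builder (illustrative only, not code from this patch; the ConfigureBuilder helper and the MiB unit assumed for the workspace flag are made up for the example):

#include <NvInfer.h>
#include <gflags/gflags.h>

DECLARE_int32(tensorrt_max_batchsize);
DECLARE_int32(tensorrt_workspace_size);

// Sketch: hand the flag values on to the TensorRT builder.
void ConfigureBuilder(nvinfer1::IBuilder *builder) {
  builder->setMaxBatchSize(FLAGS_tensorrt_max_batchsize);
  // The unit of the workspace flag is assumed to be MiB here.
  builder->setMaxWorkspaceSize(
      static_cast<size_t>(FLAGS_tensorrt_workspace_size) << 20);
}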
@@ -88,34 +88,113 @@ void DataFlowGraphToFluidPass::AddFluidOp(Node *node) {
 }
 
 void CreateTrtEngineOp(Node *node, const DataFlowGraph &graph,
-                       const framework::proto::BlockDesc &block) {
+                       framework::proto::BlockDesc *block) {
   static int counter{0};
   PADDLE_ENFORCE(node->IsFunctionBlock());
   framework::OpDesc desc;
   auto *func = static_cast<FunctionBlock *>(node);
 
   // collect inputs
-  std::vector<std::string> io;
+  std::unordered_set<std::string> input_names;
   for (auto *x : func->inlinks) {
-    io.push_back(x->name());
+    input_names.insert(x->name());
   }
-  desc.SetInput("Xs", io);
+  desc.SetInput(
+      "Xs", std::vector<std::string>(input_names.begin(), input_names.end()));
 
-  // collect outputs
-  io.clear();
+  std::unordered_set<std::string> output_names;
   for (auto *x : func->outlinks) {
-    io.push_back(x->name());
+    output_names.insert(x->name());
   }
-  desc.SetOutput("Ys", io);
+
+  std::vector<std::string> output_temp(output_names.begin(),
+                                       output_names.end());
+  desc.SetOutput("Ys", output_temp);
   desc.SetType("tensorrt_engine");
 
-  PADDLE_ENFORCE(!block.vars().empty(), "the block has no var-desc");
+  std::unordered_map<std::string, std::string> output_name_map;
+
+  // The following procedure renames all the intermediate variables
+  // and the output variables of the subgraph.
+  // Why do we do this?
+  // During the transition from a fluid op to a tensorrt op, we map
+  // each input and output Tensor (fluid data structure) of a fluid
+  // op to the corresponding ITensor (trt data structure) through the
+  // Tensor name. When we set up an ITensor for a variable, we must
+  // ensure that it has not been set before.
+  // If a variable in the fluid graph is both the input of one op and
+  // the output of another op, there will be problems.
+  // So we rename each variable in the subgraph to make sure it is
+  // either an op's input or an op's output, but never both.
+
+  auto subgraph_nodes = func->subgraph;
+  for (int index = 0; index < block->ops_size(); index++) {
+    framework::proto::OpDesc *op = block->mutable_ops(index);
+    auto correspond_node = subgraph_nodes[index];
+    PADDLE_ENFORCE_EQ(correspond_node->name(), op->type());
+
+    std::unordered_map<std::string, size_t> var2id;
+    for (auto *in_var : correspond_node->inlinks) {
+      var2id[in_var->name()] = in_var->id();
+    }
+    // rename the input variables of the op inside the subgraph
+    for (int i = 0; i < op->inputs_size(); i++) {
+      framework::proto::OpDesc_Var *in_var = op->mutable_inputs(i);
+      std::vector<std::string> replaced_names;
+      for (int k = 0; k < in_var->arguments_size(); k++) {
+        std::string arg_value = in_var->arguments(k);
+        if (input_names.count(arg_value)) {
+          replaced_names.push_back(arg_value);
+        } else {
+          replaced_names.push_back(arg_value +
+                                   std::to_string(var2id[arg_value]));
+        }
+      }
+      in_var->clear_arguments();
+      for (size_t k = 0; k < replaced_names.size(); k++) {
+        in_var->add_arguments(replaced_names[k]);
+      }
+    }
+    var2id.clear();
+    for (auto out_var : correspond_node->outlinks) {
+      var2id[out_var->name()] = out_var->id();
+    }
+
+    // rename the output variables of the op inside the subgraph
+    for (int i = 0; i < op->outputs_size(); i++) {
+      framework::proto::OpDesc_Var *out_var = op->mutable_outputs(i);
+      std::vector<std::string> replaced_names;
+      for (int k = 0; k < out_var->arguments_size(); k++) {
+        std::string arg_value = out_var->arguments(k);
+        if (output_names.count(arg_value)) {
+          output_name_map[arg_value] =
+              arg_value + std::to_string(var2id[arg_value]);
+        }
+        replaced_names.push_back(arg_value + std::to_string(var2id[arg_value]));
+      }
+      out_var->clear_arguments();
+      for (size_t k = 0; k < replaced_names.size(); k++) {
+        out_var->add_arguments(replaced_names[k]);
+      }
+    }
+  }
+  // When the tensorrt engine op finishes running, output_mapping
+  // helps us copy the data from the renamed ITensors back to the
+  // original fluid Tensors.
+  std::vector<std::string> output_mapping;
+  for (auto name : output_names) {
+    PADDLE_ENFORCE(output_name_map.count(name) != 0);
+    output_mapping.push_back(output_name_map[name]);
+  }
+
+  PADDLE_ENFORCE(!block->vars().empty(), "the block has no var-desc");
   // Set attrs
-  SetAttr(desc.Proto(), "subgraph", block.SerializeAsString());
+  SetAttr(desc.Proto(), "subgraph", block->SerializeAsString());
   SetAttr(desc.Proto(), "engine_uniq_key", "trt-" + std::to_string(counter++));
   SetAttr(desc.Proto(), "max_batch", FLAGS_tensorrt_max_batchsize);
   SetAttr(desc.Proto(), "max_workspace", FLAGS_tensorrt_workspace_size);
   SetAttr(desc.Proto(), "parameters", ExtractParameters(graph.nodes.nodes()));
+  SetAttr(desc.Proto(), "output_name_mapping", output_mapping);
   node->SetPbMsg(desc.Proto()->SerializeAsString());
 }
 
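To make the renaming rule above concrete, here is a small self-contained sketch of the same idea outside the pass (the variable names and ids are made up for illustration): every argument that is not a subgraph input gets the producing variable's id appended, and renamed subgraph outputs are recorded in output_name_map so the engine op can copy results back afterwards.

#include <initializer_list>
#include <iostream>
#include <string>
#include <unordered_map>
#include <unordered_set>

int main() {
  // Subgraph inputs keep their names; everything else is renamed.
  std::unordered_set<std::string> input_names{"image"};
  std::unordered_set<std::string> output_names{"relu_out"};
  std::unordered_map<std::string, size_t> var2id{{"conv_out", 7},
                                                 {"relu_out", 9}};
  std::unordered_map<std::string, std::string> output_name_map;

  for (std::string arg : {"image", "conv_out", "relu_out"}) {
    if (!input_names.count(arg)) {
      std::string renamed = arg + std::to_string(var2id[arg]);
      if (output_names.count(arg)) output_name_map[arg] = renamed;
      arg = renamed;
    }
    std::cout << arg << "\n";  // image, conv_out7, relu_out9
  }
  // output_name_map now holds {"relu_out" -> "relu_out9"}, the
  // counterpart of the "output_name_mapping" attribute set above.
  return 0;
}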
@@ -147,15 +226,17 @@ void DataFlowGraphToFluidPass::AddEngineOp(Node *node) {
   LOG(INFO) << "transformed variable size: "
             << block_desc.Proto()->vars().size();
   // copy ops.
+
   for (auto *node : block_node->subgraph) {
     auto *op = block_desc.AppendOp();
     PADDLE_ENFORCE(!node->pb_msg().empty());
     op->Proto()->ParseFromString(node->pb_msg());
   }
+
   *block_desc.Proto()->mutable_vars() =
       argument_->origin_program_desc->blocks(0).vars();
   PADDLE_ENFORCE(!block_desc.Proto()->vars().empty());
-  CreateTrtEngineOp(node, *argument_->main_dfg, *block_desc.Proto());
+  CreateTrtEngineOp(node, *argument_->main_dfg, block_desc.Proto());
   auto *main_block = desc_->mutable_blocks(framework::kRootBlockIndex);
   auto *op = main_block->add_ops();
   PADDLE_ENFORCE(!node->pb_msg().empty(), "failed to set desc for block");
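The op-copy loop in this hunk works by round-tripping each op's protobuf message through its wire format. A generic sketch of that pattern, where MyProto is a stand-in for any generated message type such as framework::proto::OpDesc:

#include <string>

// Clone a protobuf message via serialize/parse, the same pattern as
// op->Proto()->ParseFromString(node->pb_msg()) above.
template <typename MyProto>
MyProto CloneViaWire(const MyProto &src) {
  std::string wire = src.SerializeAsString();  // wire-format bytes
  MyProto dst;
  dst.ParseFromString(wire);  // reconstruct an equivalent message
  return dst;
}

Parsing from the serialized string gives the appended op its own copy of the message, so later edits to the sub-block never alias the original node's stored proto.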