 // limitations under the License.

 #include "paddle/fluid/framework/ir/attention_lstm_fuse_pass.h"
+
+#include <string>
+
 #include "paddle/fluid/framework/ir/graph_pattern_detector.h"
 #include "paddle/fluid/framework/ir/graph_viz_pass.h"
 #include "paddle/fluid/framework/lod_tensor.h"
@@ -216,11 +219,11 @@ void PrepareLSTMWeight(const LoDTensor& W_forget_w0,
 
   float* out_data = out->mutable_data<float>(platform::CPUPlace());
   std::array<const float*, 4> tensors(
-      {W_forget_w0.data<float>(), W_input_w0.data<float>(),
-       W_output_w0.data<float>(), W_cell_w0.data<float>()});
+      {{W_forget_w0.data<float>(), W_input_w0.data<float>(),
+        W_output_w0.data<float>(), W_cell_w0.data<float>()}});
   std::array<const float*, 4> tensors1(
-      {W_forget_w1.data<float>(), W_input_w1.data<float>(),
-       W_output_w1.data<float>(), W_cell_w1.data<float>()});
+      {{W_forget_w1.data<float>(), W_input_w1.data<float>(),
+        W_output_w1.data<float>(), W_cell_w1.data<float>()}});
 
   for (int row = 0; row < D; row++) {
     for (int col = 0; col < 4; col++) {
@@ -243,8 +246,8 @@ void PrepareLSTMBias(const LoDTensor& B_forget, const LoDTensor& B_input,
                      const LoDTensor& B_output, const LoDTensor& B_cell,
                      LoDTensor* out) {
   std::array<const float*, 4> tensors(
-      {B_forget.data<float>(), B_input.data<float>(), B_output.data<float>(),
-       B_cell.data<float>()});
+      {{B_forget.data<float>(), B_input.data<float>(), B_output.data<float>(),
+        B_cell.data<float>()}});
 
   PADDLE_ENFORCE_EQ(B_forget.dims().size(), 1);
   int D = B_forget.dims()[0];
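The repeated change in this diff swaps the single-brace initializer of `std::array` for the double-brace form: the outer braces initialize the `std::array` aggregate and the inner braces its underlying built-in array member, which avoids missing-braces warnings on some compilers. A minimal self-contained sketch of the same pattern, using plain floats as placeholders rather than Paddle's `LoDTensor` data:

```cpp
// Sketch only: illustrates the double-brace std::array initialization
// used in the diff above, with local floats standing in for tensor data.
#include <array>
#include <cstdio>

int main() {
  float forget = 1.f, input = 2.f, output = 3.f, cell = 4.f;

  // Outer braces -> the std::array aggregate; inner braces -> its
  // internal C array. A single set of braces can trigger
  // -Wmissing-braces on some compilers.
  std::array<const float*, 4> tensors(
      {{&forget, &input, &output, &cell}});

  // Gathering the four pointers lets them be indexed in a loop, in the
  // spirit of how PrepareLSTMWeight/PrepareLSTMBias walk the four gates.
  for (int col = 0; col < 4; col++) {
    std::printf("gate %d -> %f\n", col, *tensors[col]);
  }
  return 0;
}
```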