@@ -46,7 +46,8 @@ std::vector<paddle::Tensor> EncoderForward(
     const int64_t& num_layer,
     const int64_t& layer_idx,
     const bool& allow_gemm_test,
-    const bool& use_trt_kernel) {
+    const bool& use_trt_kernel,
+    const bool& normalize_before) {
   if (input.place() == paddle::PlaceType::kGPU) {
     auto shape = input.shape();
     auto encoder_out = paddle::Tensor(paddle::PlaceType::kGPU, shape);
@@ -80,7 +81,8 @@ std::vector<paddle::Tensor> EncoderForward(
         num_layer,
         layer_idx,
         allow_gemm_test,
-        use_trt_kernel);
+        use_trt_kernel,
+        normalize_before);
   } else {
     PD_THROW("Not implemented place. Only GPU is supported.");
   }
@@ -116,7 +118,8 @@ std::vector<std::vector<int64_t>> EncoderInferShape(
     const int64_t& num_layer,
     const int64_t& layer_idx,
     const bool& allow_gemm_test,
-    const bool& use_trt_kernel) {
+    const bool& use_trt_kernel,
+    const bool& normalize_before) {
   return {input_shape};
 }
 
@@ -179,7 +182,8 @@ PD_BUILD_OP(fusion_encoder)
             "num_layer: int64_t",
            "layer_idx: int64_t",
            "allow_gemm_test: bool",
-           "use_trt_kernel: bool"})
+           "use_trt_kernel: bool",
+           "normalize_before: bool"})
    .SetKernelFn(PD_KERNEL(EncoderForward))
    .SetInferShapeFn(PD_INFER_SHAPE(EncoderInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(EncoderInferDtype));
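
For context, the new normalize_before attribute is the usual pre-layernorm switch in Transformer encoder implementations: when true, layer normalization is applied before the sub-layer and the residual is added afterwards; when false, the classic post-layernorm ordering is used. The sketch below illustrates that convention only; it is not code from this patch, and EncoderLayer, layer_norm, and self_attention are hypothetical placeholders, not the op's actual types or kernels.

#include <functional>

// Minimal sketch of pre-LN vs. post-LN ordering selected by normalize_before.
// Tensor is any type with operator+; layer_norm/self_attention stand in for
// the real normalization and attention kernels.
template <typename Tensor>
Tensor EncoderLayer(const Tensor& x,
                    const std::function<Tensor(const Tensor&)>& layer_norm,
                    const std::function<Tensor(const Tensor&)>& self_attention,
                    bool normalize_before) {
  if (normalize_before) {
    // Pre-LN: normalize the input first, then add the residual connection.
    return x + self_attention(layer_norm(x));
  }
  // Post-LN: run attention on the raw input, then normalize the residual sum.
  return layer_norm(x + self_attention(x));
}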