
Commit 49c04cf

fix typos causual causal (PaddlePaddle#76086)
* fix typos

* fix typos
1 parent 11fff57 commit 49c04cf

8 files changed: +38 -36 lines changed

paddle/phi/backends/gpu/cuda/cuda_graph.cc

Lines changed: 3 additions & 3 deletions
@@ -311,16 +311,16 @@ void CUDAGraph::PrintToDotFiles(const std::string &dirname,
 }
 
 void CUDAGraphNodeLauncher::KernelNodeLaunch(
-    parameterSetter_t parameterSetter, gpuKernelCallback_t cudakernelCallback) {
+    parameterSetter_t parameterSetter, gpuKernelCallback_t cudaKernelCallback) {
   if (UNLIKELY(phi::backends::gpu::CUDAGraph::IsThisThreadCapturing())) {
     unsigned int id = GenerateIdentifier();
-    auto cudaFunc = cudakernelCallback(id);
+    auto cudaFunc = cudaKernelCallback(id);
 
     parameterSetters[cudaFunc][id] = parameterSetter;
     VLOG(10) << "[KernelNodeLaunch] Launch kernel with cudaFunc = " << cudaFunc
              << " id = " << id;
   } else {
-    cudakernelCallback(0);
+    cudaKernelCallback(0);
   }
 }

paddle/phi/backends/gpu/cuda/cuda_graph.h

Lines changed: 1 addition & 1 deletion
@@ -150,7 +150,7 @@ class CUDAGraphNodeLauncher {
   // With the callbacks defined and the CUDA function obtained, the kernel can
   // be launched using the `KernelNodeLaunch` method.
   void KernelNodeLaunch(parameterSetter_t parameterSetter,
-                        gpuKernelCallback_t cudakernelCallback);
+                        gpuKernelCallback_t cudaKernelCallback);
 
   std::vector<cudaGraphExecuterSetter_t> GetParameterSettersForExecGraph(
       cudaGraph_t graph);
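The renamed parameter belongs to the capture-aware launch path: the header comment above says the kernel is launched through `KernelNodeLaunch` once the two callbacks exist. Below is a minimal caller sketch, not taken from this commit. The exact `parameterSetter_t` and `gpuKernelCallback_t` aliases are defined elsewhere in cuda_graph.h, and `demo_kernel`, `buf`, and `stream` are hypothetical; the only behavior carried over from the diff is that the callback receives an id, performs the launch, and returns the CUDA function handle under which the setter is stored.

// Hypothetical usage sketch (assumptions marked; not part of this commit).
#include <cuda_runtime.h>
#include "paddle/phi/backends/gpu/cuda/cuda_graph.h"

__global__ void demo_kernel(int *out, unsigned int id) {  // placeholder kernel
  out[threadIdx.x] = static_cast<int>(id);
}

void LaunchViaNodeLauncher(phi::backends::gpu::CUDAGraphNodeLauncher &launcher,
                           int *buf, cudaStream_t stream) {
  // Assumed shape of parameterSetter_t: a callable that refreshes the captured
  // node's kernel arguments before a replay. A variadic generic lambda
  // converts to whatever std::function alias the header actually declares.
  auto setter = [](auto &&...) { /* rewrite per-replay kernel arguments */ };
  auto callback = [=](unsigned int id) -> cudaFunction_t {
    demo_kernel<<<1, 32, 0, stream>>>(buf, id);  // the real launch
    cudaFunction_t func = nullptr;
    // One way to resolve the runtime handle used as the map key in the
    // cuda_graph.cc change above.
    cudaGetFuncBySymbol(&func, reinterpret_cast<const void *>(demo_kernel));
    return func;
  };
  launcher.KernelNodeLaunch(setter, callback);
}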

paddle/phi/backends/gpu/rocm/hip_graph.cc

Lines changed: 3 additions & 3 deletions
@@ -263,16 +263,16 @@ void CUDAGraph::PrintToDotFiles(const std::string &dirname,
 }
 
 void CUDAGraphNodeLauncher::KernelNodeLaunch(
-    parameterSetter_t parameterSetter, gpuKernelCallback_t cudakernelCallback) {
+    parameterSetter_t parameterSetter, gpuKernelCallback_t cudaKernelCallback) {
   if (UNLIKELY(phi::backends::gpu::CUDAGraph::IsThisThreadCapturing())) {
     unsigned int id = GenerateIdentifier();
-    auto cudaFunc = cudakernelCallback(id);
+    auto cudaFunc = cudaKernelCallback(id);
 
     parameterSetters[cudaFunc][id] = parameterSetter;
     VLOG(10) << "[KernelNodeLaunch] Launch kernel with cudaFunc = " << cudaFunc
              << " id = " << id;
   } else {
-    cudakernelCallback(0);
+    cudaKernelCallback(0);
   }
 }

paddle/phi/backends/gpu/rocm/hip_graph.h

Lines changed: 1 addition & 1 deletion
@@ -152,7 +152,7 @@ class CUDAGraphNodeLauncher {
   // With the callbacks defined and the CUDA function obtained, the kernel can
   // be launched using the `KernelNodeLaunch` method.
   void KernelNodeLaunch(parameterSetter_t parameterSetter,
-                        gpuKernelCallback_t cudakernelCallback);
+                        gpuKernelCallback_t cudaKernelCallback);
 
   std::vector<cudaGraphExecuterSetter_t> GetParameterSettersForExecGraph(
       hipGraph_t graph);

paddle/phi/kernels/funcs/sequence2batch.cc

Lines changed: 9 additions & 8 deletions
@@ -34,19 +34,20 @@ class CopyMatrixRowsFunctor<phi::CPUContext, T> {
                           "got the source tensor rank is %lu. "
                           "Please check the rank of the source tensor",
                           src_dims.size()));
-    PADDLE_ENFORCE_EQ(dst_dims.size(),
-                      2UL,
-                      common::errors::InvalidArgument(
-                          "The destination tensor must be a matrix with rank, "
-                          "but got the destination tensor rank is %lu. "
-                          "Please check the rank of the destination tensor",
-                          dst_dims.size()));
+    PADDLE_ENFORCE_EQ(
+        dst_dims.size(),
+        2UL,
+        common::errors::InvalidArgument(
+            "The destination tensor must be a matrix with rank 2, "
+            "but got the destination tensor rank is %lu. "
+            "Please check the rank of the destination tensor",
+            dst_dims.size()));
     PADDLE_ENFORCE_EQ(
         src_dims[1],
         dst_dims[1],
         common::errors::InvalidArgument(
             "The width of the source tensor and the destination tensor must be "
-            "same. But got %lu != %lu.Please check the rank of the source "
+            "same. But got %lu != %lu. Please check the rank of the source "
             "tensor",
             src_dims.size(),
             dst_dims.size()));
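For readers unfamiliar with the macro being reflowed: PADDLE_ENFORCE_EQ(a, b, error) raises the supplied error when a != b, and common::errors::InvalidArgument formats printf-style, which is why the %lu placeholders pair with the trailing size arguments. A rough stand-alone analogue is below; EnforceEq is a hypothetical simplification (the real macro also records file and line and supports richer error types).

// Hypothetical, simplified stand-in for PADDLE_ENFORCE_EQ + InvalidArgument.
#include <cstdio>
#include <stdexcept>
#include <vector>

template <typename A, typename B>
void EnforceEq(const A &a, const B &b, const char *fmt, unsigned long value) {
  if (!(a == b)) {
    char msg[256];
    std::snprintf(msg, sizeof(msg), fmt, value);  // printf-style formatting
    throw std::invalid_argument(msg);
  }
}

void CheckDstRank(const std::vector<long> &dst_dims) {
  EnforceEq(dst_dims.size(), 2UL,
            "The destination tensor must be a matrix with rank 2, "
            "but got the destination tensor rank is %lu.",
            static_cast<unsigned long>(dst_dims.size()));
}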

paddle/phi/kernels/funcs/sequence2batch.cu

Lines changed: 9 additions & 8 deletions
@@ -55,19 +55,20 @@ class CopyMatrixRowsFunctor<phi::GPUContext, T> {
                           "got the source tensor rank is %lu. "
                           "Please check the rank of the source tensor",
                           src_dims.size()));
-    PADDLE_ENFORCE_EQ(dst_dims.size(),
-                      2,
-                      common::errors::InvalidArgument(
-                          "The destination tensor must be a matrix with rank, "
-                          "but got the destination tensor rank is %lu. "
-                          "Please check the rank of the destination tensor",
-                          dst_dims.size()));
+    PADDLE_ENFORCE_EQ(
+        dst_dims.size(),
+        2,
+        common::errors::InvalidArgument(
+            "The destination tensor must be a matrix with rank 2, "
+            "but got the destination tensor rank is %lu. "
+            "Please check the rank of the destination tensor",
+            dst_dims.size()));
     PADDLE_ENFORCE_EQ(
         src_dims[1],
         dst_dims[1],
         common::errors::InvalidArgument(
             "The width of the source tensor and the destination tensor must be "
-            "same. But got %lu != %lu.Please check the rank of the source "
+            "same. But got %lu != %lu. Please check the rank of the source "
             "tensor",
             src_dims.size(),
             dst_dims.size()));

paddle/phi/kernels/fusion/gpu/block_multi_head_attention_kernel.cu

Lines changed: 7 additions & 7 deletions
@@ -350,9 +350,9 @@ void DispatchWithDtype(
           << " max_block_per_seq: " << max_block_per_seq;
   VLOG(3) << "fmha_out_dims: " << fmha_out->dims();
 
-  bool causual = true;
+  bool causal = true;
   if (mask) {
-    causual = false;
+    causal = false;
   }
 
   bool use_pre_cache = false;
@@ -516,7 +516,7 @@ void DispatchWithDtype(
   // qkv_buf.data<T>(), qkv_buf.numel(), "qkv_buf after",
   // qkv_buf.numel());
   VLOG(3) << "rope end";
-  VLOG(3) << "causual: " << causual;
+  VLOG(3) << "causal: " << causal;
   if (!use_pre_cache && sm >= 80) {
     qkv_transpose_split<T>(dev_ctx,
                            unpadding_q.data<T>(),
@@ -555,12 +555,12 @@ void DispatchWithDtype(
         cu_seqlens_q,
         cu_seqlens_k,
         paddle::none /*fixed_seed_offset*/,
-        causual ? paddle::none : mask,
+        causal ? paddle::none : mask,
         max_enc_len_this_time_data,
         max_enc_len_this_time_data,
         1.0f / sqrt(static_cast<float>(dim_head)),
         0.0,
-        causual,
+        causal,
         false,
         true /* is_test*/,
         "" /*rng_name*/,
@@ -620,7 +620,7 @@ void DispatchWithDtype(
         seq_lens_encoder,
         (sm < 80 && !use_pre_cache) ? paddle::none : mask,
         1.0f / sqrt(static_cast<float>(dim_head)),
-        (sm < 80 && !use_pre_cache) ? causual : false,
+        (sm < 80 && !use_pre_cache) ? causal : false,
        pre_cache_length,
        &qktv_out);
 #elif defined(PADDLE_WITH_HIP)
@@ -653,7 +653,7 @@ void DispatchWithDtype(
         paddle::none /*fixed_seed_offset*/,
         paddle::none /*mask*/,
         0.0,
-        is_precache_infer ? false : causual /*precache_infer_casual*/,
+        is_precache_infer ? false : causal /*precache_infer_causal*/,
         false,
         is_precache_infer /*is_test*/,
         "" /*rng_name*/,

paddle/phi/kernels/fusion/xpu/block_multi_head_attention_kernel.cc

Lines changed: 5 additions & 5 deletions
@@ -205,9 +205,9 @@ void BlockMultiheadAttentionXPUKernel(
           << " dim_head: " << dim_head
           << " max_block_per_seq: " << max_block_per_seq;
   VLOG(3) << "fmha_out_dims: " << fmha_out->dims();
-  bool causual = true;
+  bool causal = true;
   if (mask) {
-    causual = false;
+    causal = false;
   }
   bool use_pre_cache = false;
   int pre_cache_length = 0;
@@ -324,7 +324,7 @@ void BlockMultiheadAttentionXPUKernel(
                      &unpadding_v);
 
   VLOG(3) << "rope end";
-  VLOG(3) << "causual: " << causual;
+  VLOG(3) << "causal: " << causal;
   if (!use_pre_cache) {
     phi::FlashAttnUnpaddedKernel<T>(dev_ctx,
                                     unpadding_q,
@@ -333,12 +333,12 @@ void BlockMultiheadAttentionXPUKernel(
                                     cu_seqlens_q,
                                     cu_seqlens_k,
                                     paddle::none /*fixed_seed_offset*/,
-                                    causual ? paddle::none : mask,
+                                    causal ? paddle::none : mask,
                                     max_enc_len_this_time_data,
                                     max_enc_len_this_time_data,
                                     1.0f / sqrt(static_cast<float>(dim_head)),
                                     0.0,
-                                    causual,
+                                    causal,
                                     false,
                                     true /* is_test*/,
                                     "" /*rng_name*/,
