Skip to content

Commit fbdd6b0

Browse files
authored
[Iluvatar GPU] Optimize attention and moe performance (#3234)
1 parent 37569cc commit fbdd6b0

File tree

24 files changed

+1140
-1663
lines changed

24 files changed

+1140
-1663
lines changed

.github/workflows/ci_gcu.yml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,8 @@ concurrency:
1313

1414
jobs:
1515
CI_GCU:
16-
runs-on: [self-hosted, GCU-S60-8Card]
16+
runs-on:
17+
group: GCU
1718
steps:
1819
- name: Print current runner name
1920
run: |

.github/workflows/ci_iluvatar.yml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,8 @@ concurrency:
1111

1212
jobs:
1313
CI_ILUVATAR:
14-
runs-on: [self-hosted, IXUCA]
14+
runs-on:
15+
group: IXUCA
1516
steps:
1617
- name: Print current runner name
1718
run: |

custom_ops/gpu_ops/sample_kernels/rejection_top_p_sampling.cu

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,11 @@ std::vector<paddle::Tensor> TopPSamplingReject(const paddle::Tensor &probs,
2929

3030
// need_batch_random
3131
if (seed == -1) {
32+
#ifdef PADDLE_WITH_COREX
33+
auto dev_ctx = static_cast<const phi::CustomContext*>(paddle::experimental::DeviceContextPool::Instance().Get(probs.place()));
34+
#else
3235
phi::GPUContext* dev_ctx = static_cast<phi::GPUContext*>(phi::DeviceContextPool::Instance().Get(probs.place()));
36+
#endif
3337
auto gen_cuda = dev_ctx->GetGenerator();
3438
auto seed_offset = gen_cuda->IncrementOffset(32 * batch_size);
3539
philox_seed = seed_offset.first;

custom_ops/gpu_ops/sample_kernels/sampling.cuh

Lines changed: 69 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -212,9 +212,15 @@ __device__ __forceinline__ void DeviceSamplingFromProb(
212212
prob_greater_than_threshold[j] = pred(prob_vec[j]) ? prob_vec[j] : 0;
213213
valid[j] = pred(prob_vec[j]) && (i * BLOCK_THREADS + tx) * VEC_SIZE + j < d;
214214
}
215+
#ifdef PADDLE_WITH_COREX
216+
float aggregate_local =
217+
BlockReduce<float, BLOCK_THREADS, REDUCE_ALGORITHM>(temp_storage->block_prim.reduce)
218+
.Sum(prob_greater_than_threshold);
219+
#else
215220
float aggregate_local =
216221
BlockReduce<float, BLOCK_THREADS, REDUCE_ALGORITHM>(temp_storage->block_prim.reduce)
217222
.Sum<VEC_SIZE>(prob_greater_than_threshold);
223+
#endif
218224
if (tx == 0) {
219225
temp_storage->block_aggregate.value = aggregate_local;
220226
}
@@ -226,8 +232,13 @@ __device__ __forceinline__ void DeviceSamplingFromProb(
226232
DeterministicInclusiveSum<VEC_SIZE, BLOCK_THREADS, SCAN_ALGORITHM, REDUCE_ALGORITHM>(
227233
prob_greater_than_threshold, inclusive_cdf, temp_storage);
228234
} else {
235+
#ifdef PADDLE_WITH_COREX
236+
BlockScan<float, BLOCK_THREADS, SCAN_ALGORITHM>(temp_storage->block_prim.scan)
237+
.InclusiveSum(prob_greater_than_threshold, inclusive_cdf);
238+
#else
229239
BlockScan<float, BLOCK_THREADS, SCAN_ALGORITHM>(temp_storage->block_prim.scan)
230240
.InclusiveSum<VEC_SIZE>(prob_greater_than_threshold, inclusive_cdf);
241+
#endif
231242

232243
__syncthreads();
233244
}
@@ -239,11 +250,21 @@ __device__ __forceinline__ void DeviceSamplingFromProb(
239250

240251
bool greater_than_u_diff[VEC_SIZE];
241252
#ifdef SAMPLING_CUB_SUBTRACTLEFT_DEFINED
242-
BlockAdjacentDifference<bool, BLOCK_THREADS>(temp_storage->block_prim.adj_diff)
243-
.SubtractLeft<VEC_SIZE>(greater_than_u, greater_than_u_diff, BoolDiffOp());
253+
#ifdef PADDLE_WITH_COREX
254+
BlockAdjacentDifference<bool, BLOCK_THREADS>(temp_storage->block_prim.adj_diff)
255+
.SubtractLeft(greater_than_u, greater_than_u_diff, BoolDiffOp());
256+
#else
257+
BlockAdjacentDifference<bool, BLOCK_THREADS>(temp_storage->block_prim.adj_diff)
258+
.SubtractLeft<VEC_SIZE>(greater_than_u, greater_than_u_diff, BoolDiffOp());
259+
#endif
244260
#else
245-
BlockAdjacentDifference<bool, BLOCK_THREADS>(temp_storage->block_prim.adj_diff)
246-
.FlagHeads<VEC_SIZE>(greater_than_u_diff, greater_than_u, BoolDiffOp(), 0);
261+
#ifdef PADDLE_WITH_COREX
262+
BlockAdjacentDifference<bool, BLOCK_THREADS>(temp_storage->block_prim.adj_diff)
263+
.FlagHeads(greater_than_u_diff, greater_than_u, BoolDiffOp(), 0);
264+
#else
265+
BlockAdjacentDifference<bool, BLOCK_THREADS>(temp_storage->block_prim.adj_diff)
266+
.FlagHeads<VEC_SIZE>(greater_than_u_diff, greater_than_u, BoolDiffOp(), 0);
267+
#endif
247268
#endif
248269
__syncthreads();
249270

@@ -355,18 +376,30 @@ __global__ void TopKTopPSamplingFromProbKernel(DType* probs, IdType* output,
355376
(probs_vec[j] > pivot_1 && (i * BLOCK_THREADS + tx) * VEC_SIZE + j < d)};
356377
}
357378

379+
#ifdef PADDLE_WITH_COREX
380+
aggregate_gt_pivot_0 +=
381+
BlockReduce<ValueCount<float>, BLOCK_THREADS>(temp_storage.block_prim.reduce_value_count)
382+
.Sum(probs_gt_pivot_0);
383+
#else
358384
aggregate_gt_pivot_0 +=
359385
BlockReduce<ValueCount<float>, BLOCK_THREADS>(temp_storage.block_prim.reduce_value_count)
360386
.Sum<VEC_SIZE>(probs_gt_pivot_0);
387+
#endif
361388
if (tx == 0) {
362389
temp_storage.block_aggregate.pair = aggregate_gt_pivot_0;
363390
}
364391
__syncthreads();
365392
aggregate_gt_pivot_0 = temp_storage.block_aggregate.pair;
366393

394+
#ifdef PADDLE_WITH_COREX
395+
aggregate_gt_pivot_1 +=
396+
BlockReduce<ValueCount<float>, BLOCK_THREADS>(temp_storage.block_prim.reduce_value_count)
397+
.Sum(probs_gt_pivot_1);
398+
#else
367399
aggregate_gt_pivot_1 +=
368400
BlockReduce<ValueCount<float>, BLOCK_THREADS>(temp_storage.block_prim.reduce_value_count)
369401
.Sum<VEC_SIZE>(probs_gt_pivot_1);
402+
#endif
370403
if (tx == 0) {
371404
temp_storage.block_aggregate.pair = aggregate_gt_pivot_1;
372405
}
@@ -466,16 +499,26 @@ __global__ void TopPSamplingFromProbKernel(DType* probs, IdType* output,
466499
probs_gt_pivot_1[j] = (probs_vec[j] > pivot_1) ? probs_vec[j] : 0;
467500
}
468501

502+
#ifdef PADDLE_WITH_COREX
503+
aggregate_gt_pivot_0 += BlockReduce<float, BLOCK_THREADS>(temp_storage.block_prim.reduce)
504+
.Sum(probs_gt_pivot_0);
505+
#else
469506
aggregate_gt_pivot_0 += BlockReduce<float, BLOCK_THREADS>(temp_storage.block_prim.reduce)
470507
.Sum<VEC_SIZE>(probs_gt_pivot_0);
508+
#endif
471509
if (tx == 0) {
472510
temp_storage.block_aggregate.value = aggregate_gt_pivot_0;
473511
}
474512
__syncthreads();
475513
aggregate_gt_pivot_0 = temp_storage.block_aggregate.value;
476514

515+
#ifdef PADDLE_WITH_COREX
516+
aggregate_gt_pivot_1 += BlockReduce<float, BLOCK_THREADS>(temp_storage.block_prim.reduce)
517+
.Sum(probs_gt_pivot_1);
518+
#else
477519
aggregate_gt_pivot_1 += BlockReduce<float, BLOCK_THREADS>(temp_storage.block_prim.reduce)
478520
.Sum<VEC_SIZE>(probs_gt_pivot_1);
521+
#endif
479522
if (tx == 0) {
480523
temp_storage.block_aggregate.value = aggregate_gt_pivot_1;
481524
}
@@ -521,9 +564,15 @@ __device__ __forceinline__ float GetMaxValue(float* in_data, uint32_t row_idx, u
521564
for (uint32_t j = 0; j < VEC_SIZE; ++j) {
522565
in_data_[j] = in_data_vec[j];
523566
}
567+
#ifdef PADDLE_WITH_COREX
568+
max_val = max(
569+
max_val, BlockReduce<float, BLOCK_THREADS, REDUCE_ALGORITHM>(temp_storage.block_prim.reduce)
570+
.Reduce(in_data_, cub::Max()));
571+
#else
524572
max_val = max(
525573
max_val, BlockReduce<float, BLOCK_THREADS, REDUCE_ALGORITHM>(temp_storage.block_prim.reduce)
526574
.Reduce<VEC_SIZE>(in_data_, cub::Max()));
575+
#endif
527576
__syncthreads();
528577
}
529578
if (tx == 0) {
@@ -605,7 +654,11 @@ __global__ void TopKRenormProbKernel(DType* probs, DType* renormed_prob, IdType*
605654
const uint32_t bx = blockIdx.x, tx = threadIdx.x;
606655
const uint32_t row_idx = bx;
607656
const uint32_t k = top_k_arr[row_idx] == 0 ? d : top_k_arr[row_idx];
657+
#ifdef PADDLE_WITH_COREX
658+
double pivot = std::numeric_limits<float>::infinity(), normalizer = 1;
659+
#else
608660
double pivot = -cuda::std::numeric_limits<float>::infinity(), normalizer = 1;
661+
#endif
609662
vec_t<float, VEC_SIZE> probs_vec;
610663
if (k < d) {
611664
extern __shared__ __align__(alignof(RenormTempStorage<BLOCK_THREADS, REDUCE_ALGO>))
@@ -659,14 +712,26 @@ __global__ void TopKRenormProbKernel(DType* probs, DType* renormed_prob, IdType*
659712
}
660713
}
661714

715+
#ifdef PADDLE_WITH_COREX
716+
aggregate_gt_pivot_0 += BlockReduce<ValueCount<float>, BLOCK_THREADS, REDUCE_ALGORITHM>(
717+
temp_storage.block_prim.reduce_value_count)
718+
.Sum(probs_gt_pivot_0_pair);
719+
#else
662720
aggregate_gt_pivot_0 += BlockReduce<ValueCount<float>, BLOCK_THREADS, REDUCE_ALGORITHM>(
663721
temp_storage.block_prim.reduce_value_count)
664722
.Sum<VEC_SIZE>(probs_gt_pivot_0_pair);
723+
#endif
665724
__syncthreads();
666725

726+
#ifdef PADDLE_WITH_COREX
727+
aggregate_gt_pivot_1 += BlockReduce<ValueCount<float>, BLOCK_THREADS, REDUCE_ALGORITHM>(
728+
temp_storage.block_prim.reduce_value_count)
729+
.Sum(probs_gt_pivot_1_pair);
730+
#else
667731
aggregate_gt_pivot_1 += BlockReduce<ValueCount<float>, BLOCK_THREADS, REDUCE_ALGORITHM>(
668732
temp_storage.block_prim.reduce_value_count)
669733
.Sum<VEC_SIZE>(probs_gt_pivot_1_pair);
734+
#endif
670735
__syncthreads();
671736
}
672737
min_gt_low =

custom_ops/gpu_ops/sample_kernels/utils.cuh

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -258,9 +258,13 @@ inline std::pair<int, int> GetCudaComputeCapability() {
258258

259259
/******************* math *******************/
260260
__forceinline__ __device__ float ptx_rcp(float x) {
261+
#ifdef PADDLE_WITH_COREX
262+
return __ivcorex_rcpf(x);
263+
#else
261264
float y;
262265
asm volatile("rcp.approx.ftz.f32 %0, %1;" : "=f"(y) : "f"(x));
263266
return y;
267+
#endif
264268
}
265269

266270
template <typename T1, typename T2>

0 commit comments

Comments
 (0)