@@ -178,7 +178,7 @@ int amdgpu_cper_entry_fill_runtime_section(struct amdgpu_device *adev,
 					       sev, RUNTIME, NONSTD_SEC_LEN,
 					       NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
 
-	reg_count = min(reg_count, CPER_ACA_REG_COUNT);
+	reg_count = umin(reg_count, CPER_ACA_REG_COUNT);
 
 	section->hdr.valid_bits.err_info_cnt = 1;
 	section->hdr.valid_bits.err_context_cnt = 1;
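Note: the kernel's min() in <linux/minmax.h> type-checks its arguments and can fail the build when signedness differs between them; umin() compares both operands as unsigned values, which fits here since a register count cannot be negative. A minimal sketch of the difference, assuming a kernel build context (clamp_reg_count() is a hypothetical helper, not part of this patch):

	#include <linux/minmax.h>
	#include <linux/types.h>

	static u32 clamp_reg_count(u32 requested, int limit)
	{
		/* min(requested, limit) can trip the minmax type check
		 * (u32 vs. int); umin() converts both sides to unsigned
		 * first, so mixed arguments compare cleanly as long as
		 * the caller guarantees limit is non-negative.
		 */
		return umin(requested, limit);
	}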
@@ -382,6 +382,101 @@ int amdgpu_cper_generate_ce_records(struct amdgpu_device *adev,
 	return 0;
 }
 
+static bool amdgpu_cper_is_hdr(struct amdgpu_ring *ring, u64 pos)
+{
+	struct cper_hdr *chdr;
+
+	chdr = (struct cper_hdr *)&(ring->ring[pos]);
+	return strcmp(chdr->signature, "CPER") ? false : true;
+}
+
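Note on the signature check: every CPER record begins with the 4-byte ASCII signature "CPER", and amdgpu_cper_is_hdr() recognizes a record boundary by that signature alone. strcmp() only matches here if the byte following the signature in struct cper_hdr happens to be zero; a length-bounded compare is the more defensive pattern. A hedged standalone sketch (looks_like_cper() is hypothetical, not part of the patch):

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	/* The ring stores dwords, so index pos addresses byte offset
	 * pos * 4; a header is recognized purely by its leading
	 * "CPER" bytes, with the length made explicit.
	 */
	static bool looks_like_cper(const uint32_t *ring_buf, uint64_t pos)
	{
		return memcmp(&ring_buf[pos], "CPER", 4) == 0;
	}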
+static u32 amdgpu_cper_ring_get_ent_sz(struct amdgpu_ring *ring, u64 pos)
+{
+	struct cper_hdr *chdr;
+	u64 p;
+	u32 chunk, rec_len = 0;
+
+	chdr = (struct cper_hdr *)&(ring->ring[pos]);
+	chunk = ring->ring_size - (pos << 2);
+
+	if (!strcmp(chdr->signature, "CPER")) {
+		rec_len = chdr->record_length;
+		goto calc;
+	}
+
+	/* ring buffer is not full, no cper data after ring->wptr */
+	if (ring->count_dw)
+		goto calc;
+
+	for (p = pos + 1; p <= ring->buf_mask; p++) {
+		chdr = (struct cper_hdr *)&(ring->ring[p]);
+		if (!strcmp(chdr->signature, "CPER")) {
+			rec_len = (p - pos) << 2;
+			goto calc;
+		}
+	}
+
+calc:
+	if (!rec_len)
+		return chunk;
+	else
+		return umin(rec_len, chunk);
+}
+
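Note: amdgpu_cper_ring_get_ent_sz() returns how many contiguous bytes may be consumed at pos without crossing into the next record or past the end of the buffer. Positions are dword indices, so pos << 2 is the byte offset and chunk is the remaining tail of the ring in bytes. A hedged worked example of that arithmetic, with hypothetical sizes:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t ring_size = 8192;	/* bytes (2048 dwords), hypothetical */
		uint64_t pos = 2000;		/* dword index */

		/* tail of the ring past pos, in bytes */
		uint32_t chunk = ring_size - (uint32_t)(pos << 2);

		/* a 1024-byte record found at pos is capped at the
		 * 192-byte tail; the writer wraps to dword 0 for the
		 * remainder
		 */
		printf("chunk = %u bytes\n", (unsigned)chunk); /* 192 */
		return 0;
	}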
+void amdgpu_cper_ring_write(struct amdgpu_ring *ring,
+			    void *src, int count)
+{
+	u64 pos, wptr_old, rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
+	int rec_cnt_dw = count >> 2;	/* snapshot: count is consumed below */
+	u32 chunk, ent_sz;
+	u8 *s = (u8 *)src;
+
+	if (count >= ring->ring_size - 4) {
+		dev_err(ring->adev->dev,
+			"CPER data size(%d) is larger than ring size(%d)\n",
+			count, ring->ring_size - 4);
+
+		return;
+	}
+
+	wptr_old = ring->wptr;
+
+	while (count) {
+		ent_sz = amdgpu_cper_ring_get_ent_sz(ring, ring->wptr);
+		chunk = umin(ent_sz, count);
+
+		memcpy(&ring->ring[ring->wptr], s, chunk);
+
+		ring->wptr += (chunk >> 2);
+		ring->wptr &= ring->ptr_mask;
+		count -= chunk;
+		s += chunk;
+	}
+
+	/* the buffer has overflowed, adjust rptr to the next record */
+	if (((wptr_old < rptr) && (rptr <= ring->wptr)) ||
+	    ((ring->wptr < wptr_old) && (wptr_old < rptr)) ||
+	    ((rptr <= ring->wptr) && (ring->wptr < wptr_old))) {
+		pos = (ring->wptr + 1) & ring->ptr_mask;
+
+		do {
+			ent_sz = amdgpu_cper_ring_get_ent_sz(ring, pos);
+
+			rptr += (ent_sz >> 2);
+			rptr &= ring->ptr_mask;
+			*ring->rptr_cpu_addr = rptr;
+
+			pos = rptr;
+		} while (!amdgpu_cper_is_hdr(ring, rptr));
+	}
+
+	/* use the snapshot: count itself is zero after the copy loop */
+	if (ring->count_dw >= rec_cnt_dw)
+		ring->count_dw -= rec_cnt_dw;
+	else
+		ring->count_dw = 0;
+}
+
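Note: the three-way predicate after the copy loop detects whether the new data overran unread records, treating wptr and rptr as positions on a circle. Writing it as a standalone check may make the cases easier to see; a hedged sketch (overran_reader() is hypothetical, mirroring the patch's conditions):

	#include <stdbool.h>
	#include <stdint.h>

	/* True when the write advanced past rptr, for each way the
	 * three pointers can be ordered around the ring:
	 *   1. no wrap, and wptr moved from below rptr to at/above it
	 *   2. wptr wrapped, and rptr was ahead of the old wptr
	 *   3. wptr wrapped and still reached rptr after wrapping
	 */
	static bool overran_reader(uint64_t wptr_old, uint64_t wptr_new,
				   uint64_t rptr)
	{
		return ((wptr_old < rptr) && (rptr <= wptr_new)) ||
		       ((wptr_new < wptr_old) && (wptr_old < rptr)) ||
		       ((rptr <= wptr_new) && (wptr_new < wptr_old));
	}

When it fires, rptr is walked forward record by record, using the entry sizes, until it lands on a "CPER" header again, so readers resume at a record boundary rather than mid-record.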
 static u64 amdgpu_cper_ring_get_rptr(struct amdgpu_ring *ring)
 {
 	return *(ring->rptr_cpu_addr);
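A hedged usage sketch for the new writer: a caller that has finished building a record would push the full record_length bytes in one call (example_commit_record() is hypothetical; record_length covers the header plus all sections, per the CPER record format, and is the same field the patch reads in amdgpu_cper_ring_get_ent_sz()):

	static void example_commit_record(struct amdgpu_ring *ring,
					  struct cper_hdr *hdr)
	{
		/* one call copies the whole record; the writer wraps
		 * around the ring and reclaims overwritten records
		 * as needed
		 */
		amdgpu_cper_ring_write(ring, hdr, hdr->record_length);
	}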