@@ -451,11 +451,9 @@ struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
 {
 	struct mem_ctl_info *mci;
 	struct edac_mc_layer *layer;
-	u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
-	unsigned int idx, size, tot_dimms = 1, count = 1;
-	unsigned int tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
+	unsigned int idx, size, tot_dimms = 1;
+	unsigned int tot_csrows = 1, tot_channels = 1;
 	void *pvt, *ptr = NULL;
-	int i;
 	bool per_rank = false;
 
 	if (WARN_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0))
@@ -482,19 +480,10 @@ struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
 	 * stringent as what the compiler would provide if we could simply
 	 * hardcode everything into a single struct.
 	 */
-	mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
-	layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
-	for (i = 0; i < n_layers; i++) {
-		count *= layers[i].size;
-		edac_dbg(4, "errcount layer %d size %d\n", i, count);
-		ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
-		ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
-		tot_errcount += 2 * count;
-	}
-
-	edac_dbg(4, "allocating %d error counters\n", tot_errcount);
-	pvt = edac_align_ptr(&ptr, sz_pvt, 1);
-	size = ((unsigned long)pvt) + sz_pvt;
+	mci	= edac_align_ptr(&ptr, sizeof(*mci), 1);
+	layer	= edac_align_ptr(&ptr, sizeof(*layer), n_layers);
+	pvt	= edac_align_ptr(&ptr, sz_pvt, 1);
+	size	= ((unsigned long)pvt) + sz_pvt;
 
 	edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
 		 size,
@@ -513,10 +502,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
 	 * rather than an imaginary chunk of memory located at address 0.
 	 */
 	layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
-	for (i = 0; i < n_layers; i++) {
-		mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
-		mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
-	}
 	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
 
 	/* setup index and various internal pointers */
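For context: edac_mc_alloc() carves everything out of a single allocation. edac_align_ptr() is first run against a NULL base, so each call hands back an aligned offset and the running total becomes the size to allocate; once the block exists, the code above relocates those offsets into real pointers inside it. The hunks above simply drop the per-layer counter regions from the layout pass and their relocation afterwards. Below is a minimal standalone sketch of the same two-pass pattern, with an invented align_off() helper standing in for the kernel's edac_align_ptr():

#include <stdint.h>
#include <stdlib.h>

/* Reserve n elements of size sz at the next 'align'-aligned offset. */
static uintptr_t align_off(uintptr_t *total, size_t sz, size_t n, size_t align)
{
	uintptr_t off = (*total + align - 1) & ~(uintptr_t)(align - 1);

	*total = off + sz * n;
	return off;
}

struct blob {
	size_t nelems;
	double *payload;	/* points into the same allocation */
};

static struct blob *blob_alloc(size_t n)
{
	uintptr_t total = 0, p_off;
	struct blob *b;

	/* Pass 1: lay out regions as offsets; 'total' ends up as the size. */
	align_off(&total, sizeof(*b), 1, _Alignof(struct blob)); /* offset 0 */
	p_off = align_off(&total, sizeof(double), n, _Alignof(double));

	b = calloc(1, total);	/* one allocation for everything */
	if (!b)
		return NULL;

	/* Pass 2: turn the offsets into pointers inside the block. */
	b->nelems = n;
	b->payload = (double *)((char *)b + p_off);
	return b;
}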
@@ -949,48 +934,28 @@ static void edac_inc_ce_error(struct edac_raw_error_desc *e)
 {
 	int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
 	struct mem_ctl_info *mci = error_desc_to_mci(e);
-	int i, index = 0;
+	struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]);
 
 	mci->ce_mc += e->error_count;
 
-	if (pos[0] < 0) {
+	if (dimm)
+		dimm->ce_count += e->error_count;
+	else
 		mci->ce_noinfo_count += e->error_count;
-		return;
-	}
-
-	for (i = 0; i < mci->n_layers; i++) {
-		if (pos[i] < 0)
-			break;
-		index += pos[i];
-		mci->ce_per_layer[i][index] += e->error_count;
-
-		if (i < mci->n_layers - 1)
-			index *= mci->layers[i + 1].size;
-	}
 }
 
 static void edac_inc_ue_error(struct edac_raw_error_desc *e)
 {
 	int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
 	struct mem_ctl_info *mci = error_desc_to_mci(e);
-	int i, index = 0;
+	struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]);
 
 	mci->ue_mc += e->error_count;
 
-	if (pos[0] < 0) {
+	if (dimm)
+		dimm->ue_count += e->error_count;
+	else
 		mci->ue_noinfo_count += e->error_count;
-		return;
-	}
-
-	for (i = 0; i < mci->n_layers; i++) {
-		if (pos[i] < 0)
-			break;
-		index += pos[i];
-		mci->ue_per_layer[i][index] += e->error_count;
-
-		if (i < mci->n_layers - 1)
-			index *= mci->layers[i + 1].size;
-	}
 }
 
 static void edac_ce_error(struct edac_raw_error_desc *e)
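The deleted loops flattened the (top, mid, low) position into a linear index, scaling by each deeper layer's size, and bumped one counter array per layer along the way. The replacement attributes the error to the struct dimm_info that edac_get_dimm() resolves from the same position, falling back to the noinfo counter when the location is unknown. A rough illustration of that index arithmetic and of the per-DIMM scheme follows; the geometry and names (NLAYERS, layer_size, dimms[]) are invented for the sketch:

#define NLAYERS 3

static const int layer_size[NLAYERS] = { 2, 4, 2 };	/* example geometry */

/* Old scheme: walk the layers, scaling the index by each deeper layer. */
static int flatten(const int pos[NLAYERS])
{
	int i, index = 0;

	for (i = 0; i < NLAYERS; i++) {
		if (pos[i] < 0)
			break;
		index += pos[i];
		if (i < NLAYERS - 1)
			index *= layer_size[i + 1];
	}
	return index;
}

/* New scheme (sketch): one counter per DIMM, found via the same index. */
struct dimm { unsigned int ce_count; };

static struct dimm dimms[2 * 4 * 2];

static void count_ce(const int pos[NLAYERS], unsigned int errs)
{
	int i;

	/* A fully specified position is needed to attribute the error. */
	for (i = 0; i < NLAYERS; i++)
		if (pos[i] < 0)
			return;		/* caller bumps a "no info" counter */

	dimms[flatten(pos)].ce_count += errs;
}

The apparent design point: counters live on the DIMM itself as the single source of truth, rather than being duplicated across parallel per-layer arrays.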
@@ -1143,8 +1108,8 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
 
 	/*
 	 * Check if the event report is consistent and if the memory location is
-	 * known. If it is, the DIMM(s) label info will be filled and the
-	 * per-layer error counters will be incremented.
+	 * known. If it is, the DIMM(s) label info will be filled and the DIMM's
+	 * error counters will be incremented.
 	 */
 	for (i = 0; i < mci->n_layers; i++) {
 		if (pos[i] >= (int)mci->layers[i].size) {