4
4
*
5
5
* Written by Jacob Shin - AMD, Inc.
6
6
* Maintained by: Borislav Petkov <[email protected]>
7
- *
8
- * All MC4_MISCi registers are shared between cores on a node.
9
7
*/
10
8
#include <linux/interrupt.h>
11
9
#include <linux/notifier.h>
20
18
#include <linux/smp.h>
21
19
#include <linux/string.h>
22
20
23
- #include <asm/amd_nb.h>
24
21
#include <asm/traps.h>
25
22
#include <asm/apic.h>
26
23
#include <asm/mce.h>
@@ -221,6 +218,32 @@ static const struct smca_hwid smca_hwid_mcatypes[] = {
221
218
#define MAX_MCATYPE_NAME_LEN 30
222
219
static char buf_mcatype [MAX_MCATYPE_NAME_LEN ];
223
220
221
+ struct threshold_block {
222
+ /* This block's number within its bank. */
223
+ unsigned int block ;
224
+ /* MCA bank number that contains this block. */
225
+ unsigned int bank ;
226
+ /* CPU which controls this block's MCA bank. */
227
+ unsigned int cpu ;
228
+ /* MCA_MISC MSR address for this block. */
229
+ u32 address ;
230
+ /* Enable/Disable APIC interrupt. */
231
+ bool interrupt_enable ;
232
+ /* Bank can generate an interrupt. */
233
+ bool interrupt_capable ;
234
+ /* Value upon which threshold interrupt is generated. */
235
+ u16 threshold_limit ;
236
+ /* sysfs object */
237
+ struct kobject kobj ;
238
+ /* List of threshold blocks within this block's MCA bank. */
239
+ struct list_head miscj ;
240
+ };
241
+
242
+ struct threshold_bank {
243
+ struct kobject * kobj ;
244
+ struct threshold_block * blocks ;
245
+ };
246
+
224
247
static DEFINE_PER_CPU (struct threshold_bank * * , threshold_banks ) ;
225
248
226
249
/*
@@ -333,19 +356,6 @@ struct thresh_restart {
333
356
u16 old_limit ;
334
357
};
335
358
336
- static inline bool is_shared_bank (int bank )
337
- {
338
- /*
339
- * Scalable MCA provides for only one core to have access to the MSRs of
340
- * a shared bank.
341
- */
342
- if (mce_flags .smca )
343
- return false;
344
-
345
- /* Bank 4 is for northbridge reporting and is thus shared */
346
- return (bank == 4 );
347
- }
348
-
349
359
static const char * bank4_names (const struct threshold_block * b )
350
360
{
351
361
switch (b -> address ) {
@@ -381,15 +391,15 @@ static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
381
391
return msr_high_bits & BIT (28 );
382
392
}
383
393
384
- static int lvt_off_valid (struct threshold_block * b , int apic , u32 lo , u32 hi )
394
+ static bool lvt_off_valid (struct threshold_block * b , int apic , u32 lo , u32 hi )
385
395
{
386
396
int msr = (hi & MASK_LVTOFF_HI ) >> 20 ;
387
397
388
398
if (apic < 0 ) {
389
399
pr_err (FW_BUG "cpu %d, failed to setup threshold interrupt "
390
400
"for bank %d, block %d (MSR%08X=0x%x%08x)\n" , b -> cpu ,
391
401
b -> bank , b -> block , b -> address , hi , lo );
392
- return 0 ;
402
+ return false ;
393
403
}
394
404
395
405
if (apic != msr ) {
@@ -399,15 +409,15 @@ static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
399
409
* was set is reserved. Return early here:
400
410
*/
401
411
if (mce_flags .smca )
402
- return 0 ;
412
+ return false ;
403
413
404
414
pr_err (FW_BUG "cpu %d, invalid threshold interrupt offset %d "
405
415
"for bank %d, block %d (MSR%08X=0x%x%08x)\n" ,
406
416
b -> cpu , apic , b -> bank , b -> block , b -> address , hi , lo );
407
- return 0 ;
417
+ return false ;
408
418
}
409
419
410
- return 1 ;
420
+ return true ;
411
421
};
412
422
413
423
/* Reprogram MCx_MISC MSR behind this threshold bank. */
@@ -1198,62 +1208,17 @@ static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb
1198
1208
return err ;
1199
1209
}
1200
1210
1201
- static int __threshold_add_blocks (struct threshold_bank * b )
1202
- {
1203
- struct list_head * head = & b -> blocks -> miscj ;
1204
- struct threshold_block * pos = NULL ;
1205
- struct threshold_block * tmp = NULL ;
1206
- int err = 0 ;
1207
-
1208
- err = kobject_add (& b -> blocks -> kobj , b -> kobj , b -> blocks -> kobj .name );
1209
- if (err )
1210
- return err ;
1211
-
1212
- list_for_each_entry_safe (pos , tmp , head , miscj ) {
1213
-
1214
- err = kobject_add (& pos -> kobj , b -> kobj , pos -> kobj .name );
1215
- if (err ) {
1216
- list_for_each_entry_safe_reverse (pos , tmp , head , miscj )
1217
- kobject_del (& pos -> kobj );
1218
-
1219
- return err ;
1220
- }
1221
- }
1222
- return err ;
1223
- }
1224
-
1225
1211
static int threshold_create_bank (struct threshold_bank * * bp , unsigned int cpu ,
1226
1212
unsigned int bank )
1227
1213
{
1228
1214
struct device * dev = this_cpu_read (mce_device );
1229
- struct amd_northbridge * nb = NULL ;
1230
1215
struct threshold_bank * b = NULL ;
1231
1216
const char * name = get_name (cpu , bank , NULL );
1232
1217
int err = 0 ;
1233
1218
1234
1219
if (!dev )
1235
1220
return - ENODEV ;
1236
1221
1237
- if (is_shared_bank (bank )) {
1238
- nb = node_to_amd_nb (topology_amd_node_id (cpu ));
1239
-
1240
- /* threshold descriptor already initialized on this node? */
1241
- if (nb && nb -> bank4 ) {
1242
- /* yes, use it */
1243
- b = nb -> bank4 ;
1244
- err = kobject_add (b -> kobj , & dev -> kobj , name );
1245
- if (err )
1246
- goto out ;
1247
-
1248
- bp [bank ] = b ;
1249
- refcount_inc (& b -> cpus );
1250
-
1251
- err = __threshold_add_blocks (b );
1252
-
1253
- goto out ;
1254
- }
1255
- }
1256
-
1257
1222
b = kzalloc (sizeof (struct threshold_bank ), GFP_KERNEL );
1258
1223
if (!b ) {
1259
1224
err = - ENOMEM ;
@@ -1267,17 +1232,6 @@ static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu,
1267
1232
goto out_free ;
1268
1233
}
1269
1234
1270
- if (is_shared_bank (bank )) {
1271
- b -> shared = 1 ;
1272
- refcount_set (& b -> cpus , 1 );
1273
-
1274
- /* nb is already initialized, see above */
1275
- if (nb ) {
1276
- WARN_ON (nb -> bank4 );
1277
- nb -> bank4 = b ;
1278
- }
1279
- }
1280
-
1281
1235
err = allocate_threshold_blocks (cpu , b , bank , 0 , mca_msr_reg (bank , MCA_MISC ));
1282
1236
if (err )
1283
1237
goto out_kobj ;
@@ -1310,40 +1264,11 @@ static void deallocate_threshold_blocks(struct threshold_bank *bank)
1310
1264
kobject_put (& bank -> blocks -> kobj );
1311
1265
}
1312
1266
1313
- static void __threshold_remove_blocks (struct threshold_bank * b )
1314
- {
1315
- struct threshold_block * pos = NULL ;
1316
- struct threshold_block * tmp = NULL ;
1317
-
1318
- kobject_put (b -> kobj );
1319
-
1320
- list_for_each_entry_safe (pos , tmp , & b -> blocks -> miscj , miscj )
1321
- kobject_put (b -> kobj );
1322
- }
1323
-
1324
1267
static void threshold_remove_bank (struct threshold_bank * bank )
1325
1268
{
1326
- struct amd_northbridge * nb ;
1327
-
1328
1269
if (!bank -> blocks )
1329
1270
goto out_free ;
1330
1271
1331
- if (!bank -> shared )
1332
- goto out_dealloc ;
1333
-
1334
- if (!refcount_dec_and_test (& bank -> cpus )) {
1335
- __threshold_remove_blocks (bank );
1336
- return ;
1337
- } else {
1338
- /*
1339
- * The last CPU on this node using the shared bank is going
1340
- * away, remove that bank now.
1341
- */
1342
- nb = node_to_amd_nb (topology_amd_node_id (smp_processor_id ()));
1343
- nb -> bank4 = NULL ;
1344
- }
1345
-
1346
- out_dealloc :
1347
1272
deallocate_threshold_blocks (bank );
1348
1273
1349
1274
out_free :
0 commit comments