@@ -67,6 +67,18 @@ static struct uv_hub_nmi_s **uv_hub_nmi_list;
67
67
DEFINE_PER_CPU (struct uv_cpu_nmi_s , uv_cpu_nmi );
68
68
EXPORT_PER_CPU_SYMBOL_GPL (uv_cpu_nmi );
69
69
70
+ /* UV hubless values */
71
+ #define NMI_CONTROL_PORT 0x70
72
+ #define NMI_DUMMY_PORT 0x71
73
+ #define GPI_NMI_STS_GPP_D_0 0x164
74
+ #define GPI_NMI_ENA_GPP_D_0 0x174
75
+ #define STS_GPP_D_0_MASK 0x1
76
+ #define PAD_CFG_DW0_GPP_D_0 0x4c0
77
+ #define GPIROUTNMI (1ul << 17)
78
+ #define PCH_PCR_GPIO_1_BASE 0xfdae0000ul
79
+ #define PCH_PCR_GPIO_ADDRESS (offset ) (int *)((u64)(pch_base) | (u64)(offset))
80
+
81
+ static u64 * pch_base ;
70
82
static unsigned long nmi_mmr ;
71
83
static unsigned long nmi_mmr_clear ;
72
84
static unsigned long nmi_mmr_pending ;
@@ -144,6 +156,19 @@ module_param_named(wait_count, uv_nmi_wait_count, int, 0644);
144
156
static int uv_nmi_retry_count = 500 ;
145
157
module_param_named (retry_count , uv_nmi_retry_count , int , 0644 );
146
158
159
+ static bool uv_pch_intr_enable = true;
160
+ static bool uv_pch_intr_now_enabled ;
161
+ module_param_named (pch_intr_enable , uv_pch_intr_enable , bool , 0644 );
162
+
163
+ static int uv_nmi_debug ;
164
+ module_param_named (debug , uv_nmi_debug , int , 0644 );
165
+
166
+ #define nmi_debug (fmt , ...) \
167
+ do { \
168
+ if (uv_nmi_debug) \
169
+ pr_info(fmt, ##__VA_ARGS__); \
170
+ } while (0)
171
+
147
172
/*
148
173
* Valid NMI Actions:
149
174
* "dump" - dump process stack for each cpu
@@ -191,6 +216,77 @@ static inline void uv_local_mmr_clear_nmi(void)
191
216
uv_write_local_mmr (nmi_mmr_clear , nmi_mmr_pending );
192
217
}
193
218
219
+ /*
220
+ * UV hubless NMI handler functions
221
+ */
222
+ static inline void uv_reassert_nmi (void )
223
+ {
224
+ /* (from arch/x86/include/asm/mach_traps.h) */
225
+ outb (0x8f , NMI_CONTROL_PORT );
226
+ inb (NMI_DUMMY_PORT ); /* dummy read */
227
+ outb (0x0f , NMI_CONTROL_PORT );
228
+ inb (NMI_DUMMY_PORT ); /* dummy read */
229
+ }
230
+
231
/*
 * Read-modify-write a PCH GPIO register at "offset".  A nonzero mask
 * replaces the masked bits with "data"; a zero mask writes "data" only
 * when those status bits are already set (write-one-to-clear style).
 */
static void uv_init_hubless_pch_io(int offset, int mask, int data)
{
	int *addr = PCH_PCR_GPIO_ADDRESS(offset);
	int cur = readl(addr);

	if (mask) {
		/* OR the new data into the masked field */
		int upd = (cur & ~mask) | data;

		nmi_debug("UV:PCH: %p = %x & %x | %x (%x)\n",
			  addr, cur, ~mask, data, upd);
		writel(upd, addr);
	} else if (cur & data) {
		/* Clear the pending status bit */
		nmi_debug("UV:PCH: %p = %x\n", addr, data);
		writel(data, addr);
	}

	(void)readl(addr);	/* read back to flush the posted write */
}
249
+
250
+ static void uv_nmi_setup_hubless_intr (void )
251
+ {
252
+ uv_pch_intr_now_enabled = uv_pch_intr_enable ;
253
+
254
+ uv_init_hubless_pch_io (
255
+ PAD_CFG_DW0_GPP_D_0 , GPIROUTNMI ,
256
+ uv_pch_intr_now_enabled ? GPIROUTNMI : 0 );
257
+
258
+ nmi_debug ("UV:NMI: GPP_D_0 interrupt %s\n" ,
259
+ uv_pch_intr_now_enabled ? "enabled" : "disabled" );
260
+ }
261
+
262
+ static int uv_nmi_test_hubless (struct uv_hub_nmi_s * hub_nmi )
263
+ {
264
+ int * pstat = PCH_PCR_GPIO_ADDRESS (GPI_NMI_STS_GPP_D_0 );
265
+ int status = * pstat ;
266
+
267
+ hub_nmi -> nmi_value = status ;
268
+ atomic_inc (& hub_nmi -> read_mmr_count );
269
+
270
+ if (!(status & STS_GPP_D_0_MASK )) /* Not a UV external NMI */
271
+ return 0 ;
272
+
273
+ * pstat = STS_GPP_D_0_MASK ; /* Is a UV NMI: clear GPP_D_0 status */
274
+ (void )* pstat ; /* flush write */
275
+
276
+ return 1 ;
277
+ }
278
+
279
+ static int uv_test_nmi (struct uv_hub_nmi_s * hub_nmi )
280
+ {
281
+ if (hub_nmi -> hub_present )
282
+ return uv_nmi_test_mmr (hub_nmi );
283
+
284
+ if (hub_nmi -> pch_owner ) /* Only PCH owner can check status */
285
+ return uv_nmi_test_hubless (hub_nmi );
286
+
287
+ return -1 ;
288
+ }
289
+
194
290
/*
195
291
* If first cpu in on this hub, set hub_nmi "in_nmi" and "owner" values and
196
292
* return true. If first cpu in on the system, set global "in_nmi" flag.
@@ -214,6 +310,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
214
310
{
215
311
int cpu = smp_processor_id ();
216
312
int nmi = 0 ;
313
+ int nmi_detected = 0 ;
217
314
218
315
local64_inc (& uv_nmi_count );
219
316
this_cpu_inc (uv_cpu_nmi .queries );
@@ -224,20 +321,26 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
224
321
break ;
225
322
226
323
if (raw_spin_trylock (& hub_nmi -> nmi_lock )) {
324
+ nmi_detected = uv_test_nmi (hub_nmi );
227
325
228
- /* check hub MMR NMI flag */
229
- if (uv_nmi_test_mmr ( hub_nmi ) ) {
326
+ /* check flag for UV external NMI */
327
+ if (nmi_detected > 0 ) {
230
328
uv_set_in_nmi (cpu , hub_nmi );
231
329
nmi = 1 ;
232
330
break ;
233
331
}
234
332
235
- /* MMR NMI flag is clear */
333
+ /* A non-PCH node in a hubless system waits for NMI */
334
+ else if (nmi_detected < 0 )
335
+ goto slave_wait ;
336
+
337
+ /* MMR/PCH NMI flag is clear */
236
338
raw_spin_unlock (& hub_nmi -> nmi_lock );
237
339
238
340
} else {
239
- /* wait a moment for the hub nmi locker to set flag */
240
- cpu_relax ();
341
+
342
+ /* Wait a moment for the HUB NMI locker to set flag */
343
+ slave_wait : cpu_relax ();
241
344
udelay (uv_nmi_slave_delay );
242
345
243
346
/* re-check hub in_nmi flag */
@@ -246,13 +349,20 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
246
349
break ;
247
350
}
248
351
249
- /* check if this BMC missed setting the MMR NMI flag */
352
+ /*
353
+ * Check if this BMC missed setting the MMR NMI flag (or)
354
+ * UV hubless system where only PCH owner can check flag
355
+ */
250
356
if (!nmi ) {
251
357
nmi = atomic_read (& uv_in_nmi );
252
358
if (nmi )
253
359
uv_set_in_nmi (cpu , hub_nmi );
254
360
}
255
361
362
+ /* If we're holding the hub lock, release it now */
363
+ if (nmi_detected < 0 )
364
+ raw_spin_unlock (& hub_nmi -> nmi_lock );
365
+
256
366
} while (0 );
257
367
258
368
if (!nmi )
@@ -269,7 +379,10 @@ static inline void uv_clear_nmi(int cpu)
269
379
if (cpu == atomic_read (& hub_nmi -> cpu_owner )) {
270
380
atomic_set (& hub_nmi -> cpu_owner , -1 );
271
381
atomic_set (& hub_nmi -> in_nmi , 0 );
272
- uv_local_mmr_clear_nmi ();
382
+ if (hub_nmi -> hub_present )
383
+ uv_local_mmr_clear_nmi ();
384
+ else
385
+ uv_reassert_nmi ();
273
386
raw_spin_unlock (& hub_nmi -> nmi_lock );
274
387
}
275
388
}
@@ -297,11 +410,12 @@ static void uv_nmi_cleanup_mask(void)
297
410
}
298
411
}
299
412
300
- /* Loop waiting as cpus enter nmi handler */
413
+ /* Loop waiting as cpus enter NMI handler */
301
414
static int uv_nmi_wait_cpus (int first )
302
415
{
303
416
int i , j , k , n = num_online_cpus ();
304
417
int last_k = 0 , waiting = 0 ;
418
+ int cpu = smp_processor_id ();
305
419
306
420
if (first ) {
307
421
cpumask_copy (uv_nmi_cpu_mask , cpu_online_mask );
@@ -310,6 +424,12 @@ static int uv_nmi_wait_cpus(int first)
310
424
k = n - cpumask_weight (uv_nmi_cpu_mask );
311
425
}
312
426
427
+ /* PCH NMI causes only one cpu to respond */
428
+ if (first && uv_pch_intr_now_enabled ) {
429
+ cpumask_clear_cpu (cpu , uv_nmi_cpu_mask );
430
+ return n - k - 1 ;
431
+ }
432
+
313
433
udelay (uv_nmi_initial_delay );
314
434
for (i = 0 ; i < uv_nmi_retry_count ; i ++ ) {
315
435
int loop_delay = uv_nmi_loop_delay ;
@@ -358,7 +478,7 @@ static void uv_nmi_wait(int master)
358
478
break ;
359
479
360
480
/* if not all made it in, send IPI NMI to them */
361
- pr_alert ("UV: Sending NMI IPI to %d non-responding CPUs: %*pbl\n" ,
481
+ pr_alert ("UV: Sending NMI IPI to %d CPUs: %*pbl\n" ,
362
482
cpumask_weight (uv_nmi_cpu_mask ),
363
483
cpumask_pr_args (uv_nmi_cpu_mask ));
364
484
@@ -538,7 +658,7 @@ static inline int uv_nmi_kdb_reason(void)
538
658
#else /* !CONFIG_KGDB_KDB */
539
659
static inline int uv_nmi_kdb_reason (void )
540
660
{
541
- /* Insure user is expecting to attach gdb remote */
661
+ /* Ensure user is expecting to attach gdb remote */
542
662
if (uv_nmi_action_is ("kgdb" ))
543
663
return 0 ;
544
664
@@ -626,15 +746,18 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
626
746
/* Pause as all cpus enter the NMI handler */
627
747
uv_nmi_wait (master );
628
748
629
- /* Dump state of each cpu */
630
- if (uv_nmi_action_is ("ips" ) || uv_nmi_action_is ("dump" ))
749
+ /* Process actions other than "kdump": */
750
+ if (uv_nmi_action_is ("ips" ) || uv_nmi_action_is ("dump" )) {
631
751
uv_nmi_dump_state (cpu , regs , master );
632
-
633
- /* Call KGDB/KDB if enabled */
634
- else if (uv_nmi_action_is ("kdb" ) || uv_nmi_action_is ("kgdb" ))
752
+ } else if (uv_nmi_action_is ("kdb" ) || uv_nmi_action_is ("kgdb" )) {
635
753
uv_call_kgdb_kdb (cpu , regs , master );
754
+ } else {
755
+ if (master )
756
+ pr_alert ("UV: unknown NMI action: %s\n" , uv_nmi_action );
757
+ uv_nmi_sync_exit (master );
758
+ }
636
759
637
- /* Clear per_cpu "in nmi " flag */
760
+ /* Clear per_cpu "in_nmi " flag */
638
761
this_cpu_write (uv_cpu_nmi .state , UV_NMI_STATE_OUT );
639
762
640
763
/* Clear MMR NMI flag on each hub */
@@ -648,6 +771,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
648
771
atomic_set (& uv_nmi_cpu , -1 );
649
772
atomic_set (& uv_in_nmi , 0 );
650
773
atomic_set (& uv_nmi_kexec_failed , 0 );
774
+ atomic_set (& uv_nmi_slave_continue , SLAVE_CLEAR );
651
775
}
652
776
653
777
uv_nmi_touch_watchdogs ();
@@ -697,28 +821,53 @@ void uv_nmi_init(void)
697
821
apic_write (APIC_LVT1 , value );
698
822
}
699
823
700
- void uv_nmi_setup (void )
824
+ /* Setup HUB NMI info */
825
+ void __init uv_nmi_setup_common (bool hubbed )
701
826
{
702
827
int size = sizeof (void * ) * (1 << NODES_SHIFT );
703
- int cpu , nid ;
828
+ int cpu ;
704
829
705
- /* Setup hub nmi info */
706
- uv_nmi_setup_mmrs ();
707
830
uv_hub_nmi_list = kzalloc (size , GFP_KERNEL );
708
- pr_info ("UV: NMI hub list @ 0x%p (%d)\n" , uv_hub_nmi_list , size );
831
+ nmi_debug ("UV: NMI hub list @ 0x%p (%d)\n" , uv_hub_nmi_list , size );
709
832
BUG_ON (!uv_hub_nmi_list );
710
833
size = sizeof (struct uv_hub_nmi_s );
711
834
for_each_present_cpu (cpu ) {
712
- nid = cpu_to_node (cpu );
835
+ int nid = cpu_to_node (cpu );
713
836
if (uv_hub_nmi_list [nid ] == NULL ) {
714
837
uv_hub_nmi_list [nid ] = kzalloc_node (size ,
715
838
GFP_KERNEL , nid );
716
839
BUG_ON (!uv_hub_nmi_list [nid ]);
717
840
raw_spin_lock_init (& (uv_hub_nmi_list [nid ]-> nmi_lock ));
718
841
atomic_set (& uv_hub_nmi_list [nid ]-> cpu_owner , -1 );
842
+ uv_hub_nmi_list [nid ]-> hub_present = hubbed ;
843
+ uv_hub_nmi_list [nid ]-> pch_owner = (nid == 0 );
719
844
}
720
845
uv_hub_nmi_per (cpu ) = uv_hub_nmi_list [nid ];
721
846
}
722
847
BUG_ON (!alloc_cpumask_var (& uv_nmi_cpu_mask , GFP_KERNEL ));
848
+ }
849
+
850
+ /* Setup for UV Hub systems */
851
+ void __init uv_nmi_setup (void )
852
+ {
853
+ uv_nmi_setup_mmrs ();
854
+ uv_nmi_setup_common (true);
855
+ uv_register_nmi_notifier ();
856
+ pr_info ("UV: Hub NMI enabled\n" );
857
+ }
858
+
859
+ /* Setup for UV Hubless systems */
860
+ void __init uv_nmi_setup_hubless (void )
861
+ {
862
+ uv_nmi_setup_common (false);
863
+ pch_base = xlate_dev_mem_ptr (PCH_PCR_GPIO_1_BASE );
864
+ nmi_debug ("UV: PCH base:%p from 0x%lx, GPP_D_0\n" ,
865
+ pch_base , PCH_PCR_GPIO_1_BASE );
866
+ uv_init_hubless_pch_io (GPI_NMI_ENA_GPP_D_0 ,
867
+ STS_GPP_D_0_MASK , STS_GPP_D_0_MASK );
868
+ uv_nmi_setup_hubless_intr ();
869
+ /* Ensure NMI enabled in Processor Interface Reg: */
870
+ uv_reassert_nmi ();
723
871
uv_register_nmi_notifier ();
872
+ pr_info ("UV: Hubless NMI enabled\n" );
724
873
}
0 commit comments