#define ERR_INJ_ENABLE_REG              0x30

+#define RAS_DES_EVENT_COUNTER_DATA_REG  0xc
+
+#define RAS_DES_EVENT_COUNTER_CTRL_REG  0x8
+#define EVENT_COUNTER_GROUP_SELECT      GENMASK(27, 24)
+#define EVENT_COUNTER_EVENT_SELECT      GENMASK(23, 16)
+#define EVENT_COUNTER_LANE_SELECT       GENMASK(11, 8)
+#define EVENT_COUNTER_STATUS            BIT(7)
+#define EVENT_COUNTER_ENABLE            GENMASK(4, 2)
+#define PER_EVENT_ON                    0x3
+#define PER_EVENT_OFF                   0x1
+
#define DWC_DEBUGFS_BUF_MAX             128

/**
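For reference, the control-register fields above compose with the kernel's GENMASK()/FIELD_PREP()/FIELD_GET() helpers as plain shift-and-mask arithmetic. The standalone userspace sketch below shows the value the driver ends up programming into RAS_DES_EVENT_COUNTER_CTRL_REG when it selects and enables one event; the GENMASK/FIELD_* macros here are simplified stand-ins for the kernel helpers from <linux/bits.h> and <linux/bitfield.h>, and the group/event numbers (2/1 for "lcrc_err") come from the event_list table added later in this patch.

/*
 * Standalone sketch (userspace C, not kernel code): packing the RAS DES
 * event counter control register. The GENMASK()/FIELD_PREP()/FIELD_GET()
 * macros below are simplified stand-ins for the kernel helpers.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)           ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_SHIFT(mask)       (__builtin_ctz(mask))
#define FIELD_PREP(mask, val)   (((uint32_t)(val) << FIELD_SHIFT(mask)) & (mask))
#define FIELD_GET(mask, reg)    (((reg) & (mask)) >> FIELD_SHIFT(mask))

#define EVENT_COUNTER_GROUP_SELECT      GENMASK(27, 24)
#define EVENT_COUNTER_EVENT_SELECT      GENMASK(23, 16)
#define EVENT_COUNTER_LANE_SELECT       GENMASK(11, 8)
#define EVENT_COUNTER_ENABLE            GENMASK(4, 2)
#define PER_EVENT_ON                    0x3

int main(void)
{
        uint32_t ctrl = 0;

        /* "lcrc_err" is group 2, event 1 in the event_list added below. */
        ctrl |= FIELD_PREP(EVENT_COUNTER_GROUP_SELECT, 0x2);
        ctrl |= FIELD_PREP(EVENT_COUNTER_EVENT_SELECT, 0x1);
        ctrl |= FIELD_PREP(EVENT_COUNTER_LANE_SELECT, 0x0);
        ctrl |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_ON);

        printf("ctrl = 0x%08x\n", ctrl);        /* prints ctrl = 0x0201000c */
        printf("group = %u\n", (unsigned)FIELD_GET(EVENT_COUNTER_GROUP_SELECT, ctrl));
        return 0;
}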
@@ -115,6 +126,63 @@ static const u32 err_inj_type_mask[] = {
        EINJ5_TYPE,
};

+/**
+ * struct dwc_pcie_event_counter - Store details about each event counter
+ *                                 supported in DWC RAS DES
+ * @name: Name of the event counter
+ * @group_no: Group number that the event belongs to. The value can range
+ *            from 0 to 4
+ * @event_no: Event number of the particular event. The value ranges are:
+ *            Group 0: 0 - 10
+ *            Group 1: 5 - 13
+ *            Group 2: 0 - 7
+ *            Group 3: 0 - 5
+ *            Group 4: 0 - 1
+ */
+struct dwc_pcie_event_counter {
+        const char *name;
+        u32 group_no;
+        u32 event_no;
+};
+
+static const struct dwc_pcie_event_counter event_list[] = {
+        {"ebuf_overflow", 0x0, 0x0},
+        {"ebuf_underrun", 0x0, 0x1},
+        {"decode_err", 0x0, 0x2},
+        {"running_disparity_err", 0x0, 0x3},
+        {"skp_os_parity_err", 0x0, 0x4},
+        {"sync_header_err", 0x0, 0x5},
+        {"rx_valid_deassertion", 0x0, 0x6},
+        {"ctl_skp_os_parity_err", 0x0, 0x7},
+        {"retimer_parity_err_1st", 0x0, 0x8},
+        {"retimer_parity_err_2nd", 0x0, 0x9},
+        {"margin_crc_parity_err", 0x0, 0xA},
+        {"detect_ei_infer", 0x1, 0x5},
+        {"receiver_err", 0x1, 0x6},
+        {"rx_recovery_req", 0x1, 0x7},
+        {"n_fts_timeout", 0x1, 0x8},
+        {"framing_err", 0x1, 0x9},
+        {"deskew_err", 0x1, 0xa},
+        {"framing_err_in_l0", 0x1, 0xc},
+        {"deskew_uncompleted_err", 0x1, 0xd},
+        {"bad_tlp", 0x2, 0x0},
+        {"lcrc_err", 0x2, 0x1},
+        {"bad_dllp", 0x2, 0x2},
+        {"replay_num_rollover", 0x2, 0x3},
+        {"replay_timeout", 0x2, 0x4},
+        {"rx_nak_dllp", 0x2, 0x5},
+        {"tx_nak_dllp", 0x2, 0x6},
+        {"retry_tlp", 0x2, 0x7},
+        {"fc_timeout", 0x3, 0x0},
+        {"poisoned_tlp", 0x3, 0x1},
+        {"ecrc_error", 0x3, 0x2},
+        {"unsupported_request", 0x3, 0x3},
+        {"completer_abort", 0x3, 0x4},
+        {"completion_timeout", 0x3, 0x5},
+        {"ebuf_skp_add", 0x4, 0x0},
+        {"ebuf_skp_del", 0x4, 0x1},
+};
+
static ssize_t lane_detect_read(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
{
@@ -236,6 +304,145 @@ static ssize_t err_inj_write(struct file *file, const char __user *buf,
        return count;
}

+static void set_event_number(struct dwc_pcie_rasdes_priv *pdata,
+                             struct dw_pcie *pci, struct dwc_pcie_rasdes_info *rinfo)
+{
+        u32 val;
+
+        val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+        val &= ~EVENT_COUNTER_ENABLE;
+        val &= ~(EVENT_COUNTER_GROUP_SELECT | EVENT_COUNTER_EVENT_SELECT);
+        val |= FIELD_PREP(EVENT_COUNTER_GROUP_SELECT, event_list[pdata->idx].group_no);
+        val |= FIELD_PREP(EVENT_COUNTER_EVENT_SELECT, event_list[pdata->idx].event_no);
+        dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+}
+
+static ssize_t counter_enable_read(struct file *file, char __user *buf,
+                                   size_t count, loff_t *ppos)
+{
+        struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+        struct dw_pcie *pci = pdata->pci;
+        struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+        char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+        ssize_t pos;
+        u32 val;
+
+        mutex_lock(&rinfo->reg_event_lock);
+        set_event_number(pdata, pci, rinfo);
+        val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+        mutex_unlock(&rinfo->reg_event_lock);
+        val = FIELD_GET(EVENT_COUNTER_STATUS, val);
+        if (val)
+                pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Enabled\n");
+        else
+                pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Disabled\n");
+
+        return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t counter_enable_write(struct file *file, const char __user *buf,
+                                    size_t count, loff_t *ppos)
+{
+        struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+        struct dw_pcie *pci = pdata->pci;
+        struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+        u32 val, enable;
+
+        val = kstrtou32_from_user(buf, count, 0, &enable);
+        if (val)
+                return val;
+
+        mutex_lock(&rinfo->reg_event_lock);
+        set_event_number(pdata, pci, rinfo);
+        val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+        if (enable)
+                val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_ON);
+        else
+                val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_OFF);
+
+        dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+
+        /*
+         * While enabling the counter, always read the status back to check if
+         * it is enabled or not. Return error if it is not enabled to let the
+         * users know that the counter is not supported on the platform.
+         */
+        if (enable) {
+                val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset +
+                                        RAS_DES_EVENT_COUNTER_CTRL_REG);
+                if (!FIELD_GET(EVENT_COUNTER_STATUS, val)) {
+                        mutex_unlock(&rinfo->reg_event_lock);
+                        return -EOPNOTSUPP;
+                }
+        }
+
+        mutex_unlock(&rinfo->reg_event_lock);
+
+        return count;
+}
+
+static ssize_t counter_lane_read(struct file *file, char __user *buf,
+                                 size_t count, loff_t *ppos)
+{
+        struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+        struct dw_pcie *pci = pdata->pci;
+        struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+        char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+        ssize_t pos;
+        u32 val;
+
+        mutex_lock(&rinfo->reg_event_lock);
+        set_event_number(pdata, pci, rinfo);
+        val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+        mutex_unlock(&rinfo->reg_event_lock);
+        val = FIELD_GET(EVENT_COUNTER_LANE_SELECT, val);
+        pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane: %d\n", val);
+
+        return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t counter_lane_write(struct file *file, const char __user *buf,
+                                  size_t count, loff_t *ppos)
+{
+        struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+        struct dw_pcie *pci = pdata->pci;
+        struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+        u32 val, lane;
+
+        val = kstrtou32_from_user(buf, count, 0, &lane);
+        if (val)
+                return val;
+
+        mutex_lock(&rinfo->reg_event_lock);
+        set_event_number(pdata, pci, rinfo);
+        val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+        val &= ~(EVENT_COUNTER_LANE_SELECT);
+        val |= FIELD_PREP(EVENT_COUNTER_LANE_SELECT, lane);
+        dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+        mutex_unlock(&rinfo->reg_event_lock);
+
+        return count;
+}
+
+static ssize_t counter_value_read(struct file *file, char __user *buf,
+                                  size_t count, loff_t *ppos)
+{
+        struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+        struct dw_pcie *pci = pdata->pci;
+        struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+        char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+        ssize_t pos;
+        u32 val;
+
+        mutex_lock(&rinfo->reg_event_lock);
+        set_event_number(pdata, pci, rinfo);
+        val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_DATA_REG);
+        mutex_unlock(&rinfo->reg_event_lock);
+        pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter value: %d\n", val);
+
+        return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
#define dwc_debugfs_create(name)                                \
        debugfs_create_file(#name, 0644, rasdes_debug, pci,     \
                            &dbg_ ## name ## _fops)
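The enable path above establishes a small user-visible contract: writing a non-zero value to counter_enable either takes effect (the status bit reads back set) or the write fails with EOPNOTSUPP, and reading the file back reports "Counter Enabled" or "Counter Disabled". A minimal userspace sketch of that contract follows; the debugfs path is an assumption for illustration, since the per-controller directory name is created elsewhere in the driver and differs per platform.

/*
 * Userspace sketch of the counter_enable contract. The path below is an
 * assumption; substitute the real per-controller debugfs directory.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path =
                "/sys/kernel/debug/<controller>/rasdes_event_counter/lcrc_err/counter_enable";
        char status[32] = "";
        int fd;

        fd = open(path, O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /*
         * Writing "1" enables the counter; the driver reads the status bit
         * back and fails the write with EOPNOTSUPP if the event is not
         * implemented on this platform.
         */
        if (write(fd, "1", 1) < 0) {
                if (errno == EOPNOTSUPP)
                        fprintf(stderr, "lcrc_err counter not supported\n");
                else
                        perror("write");
                close(fd);
                return 1;
        }

        /* Read back the textual state ("Counter Enabled"/"Counter Disabled"). */
        lseek(fd, 0, SEEK_SET);
        if (read(fd, status, sizeof(status) - 1) > 0)
                printf("%s", status);

        close(fd);
        return 0;
}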
@@ -255,6 +462,23 @@ static const struct file_operations dwc_pcie_err_inj_ops = {
        .write = err_inj_write,
};

+static const struct file_operations dwc_pcie_counter_enable_ops = {
+        .open = simple_open,
+        .read = counter_enable_read,
+        .write = counter_enable_write,
+};
+
+static const struct file_operations dwc_pcie_counter_lane_ops = {
+        .open = simple_open,
+        .read = counter_lane_read,
+        .write = counter_lane_write,
+};
+
+static const struct file_operations dwc_pcie_counter_value_ops = {
+        .open = simple_open,
+        .read = counter_value_read,
+};
+
static void dwc_pcie_rasdes_debugfs_deinit(struct dw_pcie *pci)
{
        struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
@@ -265,6 +489,7 @@ static void dwc_pcie_rasdes_debugfs_deinit(struct dw_pcie *pci)
static int dwc_pcie_rasdes_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
{
        struct dentry *rasdes_debug, *rasdes_err_inj;
+        struct dentry *rasdes_event_counter, *rasdes_events;
        struct dwc_pcie_rasdes_info *rasdes_info;
        struct dwc_pcie_rasdes_priv *priv_tmp;
        struct device *dev = pci->dev;
@@ -288,6 +513,7 @@ static int dwc_pcie_rasdes_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
        /* Create subdirectories for Debug, Error Injection, Statistics. */
        rasdes_debug = debugfs_create_dir("rasdes_debug", dir);
        rasdes_err_inj = debugfs_create_dir("rasdes_err_inj", dir);
+        rasdes_event_counter = debugfs_create_dir("rasdes_event_counter", dir);

        mutex_init(&rasdes_info->reg_event_lock);
        rasdes_info->ras_cap_offset = ras_cap;
@@ -310,6 +536,28 @@ static int dwc_pcie_rasdes_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
                debugfs_create_file(err_inj_list[i].name, 0200, rasdes_err_inj, priv_tmp,
                                    &dwc_pcie_err_inj_ops);
        }
+
+        /* Create debugfs files for Statistical Counter subdirectory. */
+        for (i = 0; i < ARRAY_SIZE(event_list); i++) {
+                priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
+                if (!priv_tmp) {
+                        ret = -ENOMEM;
+                        goto err_deinit;
+                }
+
+                priv_tmp->idx = i;
+                priv_tmp->pci = pci;
+                rasdes_events = debugfs_create_dir(event_list[i].name, rasdes_event_counter);
+                if (event_list[i].group_no == 0 || event_list[i].group_no == 4) {
+                        debugfs_create_file("lane_select", 0644, rasdes_events,
+                                            priv_tmp, &dwc_pcie_counter_lane_ops);
+                }
+                debugfs_create_file("counter_value", 0444, rasdes_events, priv_tmp,
+                                    &dwc_pcie_counter_value_ops);
+                debugfs_create_file("counter_enable", 0644, rasdes_events, priv_tmp,
+                                    &dwc_pcie_counter_enable_ops);
+        }
+
        return 0;

err_deinit:
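The loop above creates one directory per event under rasdes_event_counter/, each holding counter_enable, counter_value and, for group 0 and group 4 events only, lane_select. As a usage illustration (paths again assumed, not defined in this patch), selecting lane 0 for the per-lane "ebuf_overflow" event and sampling its count could look like the sketch below.

/*
 * Userspace usage sketch for a per-lane (group 0) event. Paths are assumed
 * for illustration; "lane_select" only exists for group 0 and group 4 events.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static const char *base =
        "/sys/kernel/debug/<controller>/rasdes_event_counter/ebuf_overflow";

static int write_str(const char *file, const char *val)
{
        char path[256];
        int fd, ret;

        snprintf(path, sizeof(path), "%s/%s", base, file);
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        ret = write(fd, val, strlen(val)) < 0 ? -1 : 0;
        close(fd);
        return ret;
}

int main(void)
{
        char buf[64] = "";
        char path[256];
        int fd;

        if (write_str("lane_select", "0"))      /* count events on lane 0 */
                return 1;
        if (write_str("counter_enable", "1"))   /* may fail if unsupported */
                return 1;

        /* ... generate traffic or wait, then sample the counter ... */
        snprintf(path, sizeof(path), "%s/counter_value", base);
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return 1;
        if (read(fd, buf, sizeof(buf) - 1) > 0)
                printf("%s", buf);              /* e.g. "Counter value: 0" */
        close(fd);
        return 0;
}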