#define PCIE_DEVICEID_SHIFT	16

/* Application registers */
#define PID			0x000
/* RTL revision field of the PID register; PG1.0 silicon is identified by it */
#define RTL			GENMASK(15, 11)
#define RTL_SHIFT		11
#define AM6_PCI_PG1_RTL_VER	0x15

#define CMD_STATUS		0x004
#define LTSSM_EN_VAL		BIT(0)
#define OB_XLAT_EN_VAL		BIT(1)
/*
 * Note: no space before "(x)" — a space would turn this into an object-like
 * macro whose expansion starts with "(x)", breaking every call site.
 */
#define to_keystone_pcie(x)	dev_get_drvdata((x)->dev)

#define PCI_DEVICE_ID_TI_AM654X	0xb00c
107
114
struct ks_pcie_of_data {
108
115
enum dw_pcie_device_mode mode ;
109
116
const struct dw_pcie_host_ops * host_ops ;
@@ -245,8 +252,68 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
245
252
.irq_unmask = ks_pcie_msi_unmask ,
246
253
};
247
254
255
+ /**
256
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
257
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
258
+ * PCIe host controller driver information.
259
+ *
260
+ * Since modification of dbi_cs2 involves different clock domain, read the
261
+ * status back to ensure the transition is complete.
262
+ */
263
+ static void ks_pcie_set_dbi_mode (struct keystone_pcie * ks_pcie )
264
+ {
265
+ u32 val ;
266
+
267
+ val = ks_pcie_app_readl (ks_pcie , CMD_STATUS );
268
+ val |= DBI_CS2 ;
269
+ ks_pcie_app_writel (ks_pcie , CMD_STATUS , val );
270
+
271
+ do {
272
+ val = ks_pcie_app_readl (ks_pcie , CMD_STATUS );
273
+ } while (!(val & DBI_CS2 ));
274
+ }
275
+
276
+ /**
277
+ * ks_pcie_clear_dbi_mode() - Disable DBI mode
278
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
279
+ * PCIe host controller driver information.
280
+ *
281
+ * Since modification of dbi_cs2 involves different clock domain, read the
282
+ * status back to ensure the transition is complete.
283
+ */
284
+ static void ks_pcie_clear_dbi_mode (struct keystone_pcie * ks_pcie )
285
+ {
286
+ u32 val ;
287
+
288
+ val = ks_pcie_app_readl (ks_pcie , CMD_STATUS );
289
+ val &= ~DBI_CS2 ;
290
+ ks_pcie_app_writel (ks_pcie , CMD_STATUS , val );
291
+
292
+ do {
293
+ val = ks_pcie_app_readl (ks_pcie , CMD_STATUS );
294
+ } while (val & DBI_CS2 );
295
+ }
296
+
248
297
static int ks_pcie_msi_host_init (struct dw_pcie_rp * pp )
249
298
{
299
+ struct dw_pcie * pci = to_dw_pcie_from_pp (pp );
300
+ struct keystone_pcie * ks_pcie = to_keystone_pcie (pci );
301
+
302
+ /* Configure and set up BAR0 */
303
+ ks_pcie_set_dbi_mode (ks_pcie );
304
+
305
+ /* Enable BAR0 */
306
+ dw_pcie_writel_dbi (pci , PCI_BASE_ADDRESS_0 , 1 );
307
+ dw_pcie_writel_dbi (pci , PCI_BASE_ADDRESS_0 , SZ_4K - 1 );
308
+
309
+ ks_pcie_clear_dbi_mode (ks_pcie );
310
+
311
+ /*
312
+ * For BAR0, just setting bus address for inbound writes (MSI) should
313
+ * be sufficient. Use physical address to avoid any conflicts.
314
+ */
315
+ dw_pcie_writel_dbi (pci , PCI_BASE_ADDRESS_0 , ks_pcie -> app .start );
316
+
250
317
pp -> msi_irq_chip = & ks_pcie_msi_irq_chip ;
251
318
return dw_pcie_allocate_domains (pp );
252
319
}
@@ -340,59 +407,22 @@ static const struct irq_domain_ops ks_pcie_intx_irq_domain_ops = {
340
407
.xlate = irq_domain_xlate_onetwocell ,
341
408
};
342
409
343
- /**
344
- * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
345
- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
346
- * PCIe host controller driver information.
347
- *
348
- * Since modification of dbi_cs2 involves different clock domain, read the
349
- * status back to ensure the transition is complete.
350
- */
351
- static void ks_pcie_set_dbi_mode (struct keystone_pcie * ks_pcie )
352
- {
353
- u32 val ;
354
-
355
- val = ks_pcie_app_readl (ks_pcie , CMD_STATUS );
356
- val |= DBI_CS2 ;
357
- ks_pcie_app_writel (ks_pcie , CMD_STATUS , val );
358
-
359
- do {
360
- val = ks_pcie_app_readl (ks_pcie , CMD_STATUS );
361
- } while (!(val & DBI_CS2 ));
362
- }
363
-
364
- /**
365
- * ks_pcie_clear_dbi_mode() - Disable DBI mode
366
- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
367
- * PCIe host controller driver information.
368
- *
369
- * Since modification of dbi_cs2 involves different clock domain, read the
370
- * status back to ensure the transition is complete.
371
- */
372
- static void ks_pcie_clear_dbi_mode (struct keystone_pcie * ks_pcie )
373
- {
374
- u32 val ;
375
-
376
- val = ks_pcie_app_readl (ks_pcie , CMD_STATUS );
377
- val &= ~DBI_CS2 ;
378
- ks_pcie_app_writel (ks_pcie , CMD_STATUS , val );
379
-
380
- do {
381
- val = ks_pcie_app_readl (ks_pcie , CMD_STATUS );
382
- } while (val & DBI_CS2 );
383
- }
384
-
385
- static void ks_pcie_setup_rc_app_regs (struct keystone_pcie * ks_pcie )
410
+ static int ks_pcie_setup_rc_app_regs (struct keystone_pcie * ks_pcie )
386
411
{
387
412
u32 val ;
388
413
u32 num_viewport = ks_pcie -> num_viewport ;
389
414
struct dw_pcie * pci = ks_pcie -> pci ;
390
415
struct dw_pcie_rp * pp = & pci -> pp ;
391
- u64 start , end ;
416
+ struct resource_entry * entry ;
392
417
struct resource * mem ;
418
+ u64 start , end ;
393
419
int i ;
394
420
395
- mem = resource_list_first_type (& pp -> bridge -> windows , IORESOURCE_MEM )-> res ;
421
+ entry = resource_list_first_type (& pp -> bridge -> windows , IORESOURCE_MEM );
422
+ if (!entry )
423
+ return - ENODEV ;
424
+
425
+ mem = entry -> res ;
396
426
start = mem -> start ;
397
427
end = mem -> end ;
398
428
@@ -403,7 +433,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
403
433
ks_pcie_clear_dbi_mode (ks_pcie );
404
434
405
435
if (ks_pcie -> is_am6 )
406
- return ;
436
+ return 0 ;
407
437
408
438
val = ilog2 (OB_WIN_SIZE );
409
439
ks_pcie_app_writel (ks_pcie , OB_SIZE , val );
@@ -420,6 +450,8 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
420
450
val = ks_pcie_app_readl (ks_pcie , CMD_STATUS );
421
451
val |= OB_XLAT_EN_VAL ;
422
452
ks_pcie_app_writel (ks_pcie , CMD_STATUS , val );
453
+
454
+ return 0 ;
423
455
}
424
456
425
457
static void __iomem * ks_pcie_other_map_bus (struct pci_bus * bus ,
@@ -445,44 +477,10 @@ static struct pci_ops ks_child_pcie_ops = {
445
477
.write = pci_generic_config_write ,
446
478
};
447
479
448
- /**
449
- * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
450
- * @bus: A pointer to the PCI bus structure.
451
- *
452
- * This sets BAR0 to enable inbound access for MSI_IRQ register
453
- */
454
- static int ks_pcie_v3_65_add_bus (struct pci_bus * bus )
455
- {
456
- struct dw_pcie_rp * pp = bus -> sysdata ;
457
- struct dw_pcie * pci = to_dw_pcie_from_pp (pp );
458
- struct keystone_pcie * ks_pcie = to_keystone_pcie (pci );
459
-
460
- if (!pci_is_root_bus (bus ))
461
- return 0 ;
462
-
463
- /* Configure and set up BAR0 */
464
- ks_pcie_set_dbi_mode (ks_pcie );
465
-
466
- /* Enable BAR0 */
467
- dw_pcie_writel_dbi (pci , PCI_BASE_ADDRESS_0 , 1 );
468
- dw_pcie_writel_dbi (pci , PCI_BASE_ADDRESS_0 , SZ_4K - 1 );
469
-
470
- ks_pcie_clear_dbi_mode (ks_pcie );
471
-
472
- /*
473
- * For BAR0, just setting bus address for inbound writes (MSI) should
474
- * be sufficient. Use physical address to avoid any conflicts.
475
- */
476
- dw_pcie_writel_dbi (pci , PCI_BASE_ADDRESS_0 , ks_pcie -> app .start );
477
-
478
- return 0 ;
479
- }
480
-
481
480
static struct pci_ops ks_pcie_ops = {
482
481
.map_bus = dw_pcie_own_conf_map_bus ,
483
482
.read = pci_generic_config_read ,
484
483
.write = pci_generic_config_write ,
485
- .add_bus = ks_pcie_v3_65_add_bus ,
486
484
};
487
485
488
486
/**
@@ -525,7 +523,11 @@ static int ks_pcie_start_link(struct dw_pcie *pci)
525
523
static void ks_pcie_quirk (struct pci_dev * dev )
526
524
{
527
525
struct pci_bus * bus = dev -> bus ;
526
+ struct keystone_pcie * ks_pcie ;
527
+ struct device * bridge_dev ;
528
528
struct pci_dev * bridge ;
529
+ u32 val ;
530
+
529
531
static const struct pci_device_id rc_pci_devids [] = {
530
532
{ PCI_DEVICE (PCI_VENDOR_ID_TI , PCIE_RC_K2HK ),
531
533
.class = PCI_CLASS_BRIDGE_PCI_NORMAL , .class_mask = ~0 , },
@@ -537,6 +539,11 @@ static void ks_pcie_quirk(struct pci_dev *dev)
537
539
.class = PCI_CLASS_BRIDGE_PCI_NORMAL , .class_mask = ~0 , },
538
540
{ 0 , },
539
541
};
542
+ static const struct pci_device_id am6_pci_devids [] = {
543
+ { PCI_DEVICE (PCI_VENDOR_ID_TI , PCI_DEVICE_ID_TI_AM654X ),
544
+ .class = PCI_CLASS_BRIDGE_PCI << 8 , .class_mask = ~0 , },
545
+ { 0 , },
546
+ };
540
547
541
548
if (pci_is_root_bus (bus ))
542
549
bridge = dev ;
@@ -558,10 +565,36 @@ static void ks_pcie_quirk(struct pci_dev *dev)
558
565
*/
559
566
if (pci_match_id (rc_pci_devids , bridge )) {
560
567
if (pcie_get_readrq (dev ) > 256 ) {
561
- dev_info (& dev -> dev , "limiting MRRS to 256\n" );
568
+ dev_info (& dev -> dev , "limiting MRRS to 256 bytes \n" );
562
569
pcie_set_readrq (dev , 256 );
563
570
}
564
571
}
572
+
573
+ /*
574
+ * Memory transactions fail with PCI controller in AM654 PG1.0
575
+ * when MRRS is set to more than 128 bytes. Force the MRRS to
576
+ * 128 bytes in all downstream devices.
577
+ */
578
+ if (pci_match_id (am6_pci_devids , bridge )) {
579
+ bridge_dev = pci_get_host_bridge_device (dev );
580
+ if (!bridge_dev && !bridge_dev -> parent )
581
+ return ;
582
+
583
+ ks_pcie = dev_get_drvdata (bridge_dev -> parent );
584
+ if (!ks_pcie )
585
+ return ;
586
+
587
+ val = ks_pcie_app_readl (ks_pcie , PID );
588
+ val &= RTL ;
589
+ val >>= RTL_SHIFT ;
590
+ if (val != AM6_PCI_PG1_RTL_VER )
591
+ return ;
592
+
593
+ if (pcie_get_readrq (dev ) > 128 ) {
594
+ dev_info (& dev -> dev , "limiting MRRS to 128 bytes\n" );
595
+ pcie_set_readrq (dev , 128 );
596
+ }
597
+ }
565
598
}
566
599
DECLARE_PCI_FIXUP_ENABLE (PCI_ANY_ID , PCI_ANY_ID , ks_pcie_quirk );
567
600
@@ -814,7 +847,10 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
814
847
return ret ;
815
848
816
849
ks_pcie_stop_link (pci );
817
- ks_pcie_setup_rc_app_regs (ks_pcie );
850
+ ret = ks_pcie_setup_rc_app_regs (ks_pcie );
851
+ if (ret )
852
+ return ret ;
853
+
818
854
writew (PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8 ),
819
855
pci -> dbi_base + PCI_IO_BASE );
820
856
0 commit comments