4
4
// Author: Cyrille Pitchen <[email protected]>
5
5
6
6
#include <linux/kernel.h>
7
+ #include <linux/list_sort.h>
7
8
#include <linux/of_address.h>
8
9
#include <linux/of_pci.h>
9
10
#include <linux/platform_device.h>
10
11
#include <linux/pm_runtime.h>
11
12
12
13
#include "pcie-cadence.h"
13
14
15
/*
 * Largest inbound window each Root Port BAR can decode:
 * RP_BAR0 up to 128 * 2 GiB (256 GiB), RP_BAR1 up to 2 GiB, and the
 * "no BAR match" passthrough covers a 2^63-byte address space.
 */
static u64 bar_max_size[] = {
	[RP_BAR0] = _ULL(128 * SZ_2G),
	[RP_BAR1] = SZ_2G,
	[RP_NO_BAR] = _BITULL(63),
};
20
+
21
/*
 * Mask of valid bits in the LM_RC_BAR_CFG aperture field for each BAR:
 * 5 bits for RP_BAR0, 4 bits for RP_BAR1. RP_NO_BAR has no aperture
 * field, so it is intentionally absent (zero).
 */
static u8 bar_aperture_mask[] = {
	[RP_BAR0] = 0x1F,
	[RP_BAR1] = 0xF,
};
25
+
14
26
static void __iomem * cdns_pci_map_bus (struct pci_bus * bus , unsigned int devfn ,
15
27
int where )
16
28
{
@@ -101,6 +113,218 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
101
113
return 0 ;
102
114
}
103
115
116
/*
 * cdns_pcie_host_bar_ib_config() - program one Root Port inbound BAR
 * @rc: root complex state
 * @bar: which RP BAR (RP_BAR0, RP_BAR1 or RP_NO_BAR) to configure
 * @cpu_addr: CPU base address the inbound window maps to (1:1 with PCI addr)
 * @size: window size in bytes
 * @flags: IORESOURCE_* flags of the backing range (prefetchability matters)
 *
 * Claims @bar by clearing its entry in rc->avail_ib_bar, writes the inbound
 * address-translation registers and, for the real BARs (not RP_NO_BAR),
 * updates the LM BAR config register with the 32/64-bit, prefetchable and
 * aperture control bits.
 *
 * Return: 0 on success, -EBUSY if @bar was already consumed.
 */
static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
					enum cdns_pcie_rp_bar bar,
					u64 cpu_addr, u64 size,
					unsigned long flags)
{
	struct cdns_pcie *pcie = &rc->pcie;
	u32 addr0, addr1, aperture, value;

	/* Each inbound BAR can be programmed only once. */
	if (!rc->avail_ib_bar[bar])
		return -EBUSY;

	rc->avail_ib_bar[bar] = false;

	/*
	 * Aperture is expressed as a power-of-two exponent.
	 * NOTE(review): ilog2() rounds down, so a non-power-of-two @size
	 * yields a window smaller than requested — callers appear to pass
	 * power-of-two sizes; confirm.
	 */
	aperture = ilog2(size);
	/* Low address bits [7:0] carry the aperture exponent instead. */
	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), addr1);

	/* RP_NO_BAR has no LM BAR config bits to set up. */
	if (bar == RP_NO_BAR)
		return 0;

	/* Clear this BAR's control and aperture bits before setting them. */
	value = cdns_pcie_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG);
	value &= ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
		   LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
	/* Windows reaching beyond 4 GiB need the 64-bit BAR variants. */
	if (size + cpu_addr >= SZ_4G) {
		if (!(flags & IORESOURCE_PREFETCH))
			value |= LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
	} else {
		if (!(flags & IORESOURCE_PREFETCH))
			value |= LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
	}

	value |= LM_RC_BAR_CFG_APERTURE(bar, aperture);
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);

	return 0;
}
160
+
161
+ static enum cdns_pcie_rp_bar
162
+ cdns_pcie_host_find_min_bar (struct cdns_pcie_rc * rc , u64 size )
163
+ {
164
+ enum cdns_pcie_rp_bar bar , sel_bar ;
165
+
166
+ sel_bar = RP_BAR_UNDEFINED ;
167
+ for (bar = RP_BAR0 ; bar <= RP_NO_BAR ; bar ++ ) {
168
+ if (!rc -> avail_ib_bar [bar ])
169
+ continue ;
170
+
171
+ if (size <= bar_max_size [bar ]) {
172
+ if (sel_bar == RP_BAR_UNDEFINED ) {
173
+ sel_bar = bar ;
174
+ continue ;
175
+ }
176
+
177
+ if (bar_max_size [bar ] < bar_max_size [sel_bar ])
178
+ sel_bar = bar ;
179
+ }
180
+ }
181
+
182
+ return sel_bar ;
183
+ }
184
+
185
+ static enum cdns_pcie_rp_bar
186
+ cdns_pcie_host_find_max_bar (struct cdns_pcie_rc * rc , u64 size )
187
+ {
188
+ enum cdns_pcie_rp_bar bar , sel_bar ;
189
+
190
+ sel_bar = RP_BAR_UNDEFINED ;
191
+ for (bar = RP_BAR0 ; bar <= RP_NO_BAR ; bar ++ ) {
192
+ if (!rc -> avail_ib_bar [bar ])
193
+ continue ;
194
+
195
+ if (size >= bar_max_size [bar ]) {
196
+ if (sel_bar == RP_BAR_UNDEFINED ) {
197
+ sel_bar = bar ;
198
+ continue ;
199
+ }
200
+
201
+ if (bar_max_size [bar ] > bar_max_size [sel_bar ])
202
+ sel_bar = bar ;
203
+ }
204
+ }
205
+
206
+ return sel_bar ;
207
+ }
208
+
209
/*
 * cdns_pcie_host_bar_config() - map one dma-range entry through inbound BARs
 * @rc: root complex state
 * @entry: resource entry describing the dma-range to make reachable
 *
 * Requires a 1:1 CPU/PCI mapping (entry->offset == 0). Tries to fit the
 * range in a single BAR; when it cannot, splits it across several BARs,
 * largest pieces first.
 *
 * Return: 0 on success, -EINVAL for a non-identity mapping or when no
 * free BAR can hold (part of) the range, or the error from the inbound
 * BAR programming.
 */
static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
				     struct resource_entry *entry)
{
	u64 cpu_addr, pci_addr, size, winsize;
	struct cdns_pcie *pcie = &rc->pcie;
	struct device *dev = pcie->dev;
	enum cdns_pcie_rp_bar bar;
	unsigned long flags;
	int ret;

	cpu_addr = entry->res->start;
	pci_addr = entry->res->start - entry->offset;
	flags = entry->res->flags;
	size = resource_size(entry->res);

	/* Inbound BARs cannot translate: only identity mappings work. */
	if (entry->offset) {
		dev_err(dev, "PCI addr: %llx must be equal to CPU addr: %llx\n",
			pci_addr, cpu_addr);
		return -EINVAL;
	}

	while (size > 0) {
		/*
		 * Try to find a minimum BAR whose size is greater than
		 * or equal to the remaining resource_entry size. This will
		 * fail if the size of each of the available BARs is less than
		 * the remaining resource_entry size.
		 * If a minimum BAR is found, IB ATU will be configured and
		 * exited.
		 */
		bar = cdns_pcie_host_find_min_bar(rc, size);
		if (bar != RP_BAR_UNDEFINED) {
			ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr,
							   size, flags);
			if (ret)
				dev_err(dev, "IB BAR: %d config failed\n", bar);
			return ret;
		}

		/*
		 * If the control reaches here, it would mean the remaining
		 * resource_entry size cannot be fitted in a single BAR. So we
		 * find a maximum BAR whose size is less than or equal to the
		 * remaining resource_entry size and split the resource entry
		 * so that part of resource entry is fitted inside the maximum
		 * BAR. The remaining size would be fitted during the next
		 * iteration of the loop.
		 * If a maximum BAR is not found, there is no way we can fit
		 * this resource_entry, so we error out.
		 */
		bar = cdns_pcie_host_find_max_bar(rc, size);
		if (bar == RP_BAR_UNDEFINED) {
			dev_err(dev, "No free BAR to map cpu_addr %llx\n",
				cpu_addr);
			return -EINVAL;
		}

		/* Fill this BAR completely; the tail goes to the next one. */
		winsize = bar_max_size[bar];
		ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize,
						   flags);
		if (ret) {
			dev_err(dev, "IB BAR: %d config failed\n", bar);
			return ret;
		}

		size -= winsize;
		cpu_addr += winsize;
	}

	return 0;
}
280
+
281
+ static int cdns_pcie_host_dma_ranges_cmp (void * priv , struct list_head * a , struct list_head * b )
282
+ {
283
+ struct resource_entry * entry1 , * entry2 ;
284
+
285
+ entry1 = container_of (a , struct resource_entry , node );
286
+ entry2 = container_of (b , struct resource_entry , node );
287
+
288
+ return resource_size (entry2 -> res ) - resource_size (entry1 -> res );
289
+ }
290
+
291
+ static int cdns_pcie_host_map_dma_ranges (struct cdns_pcie_rc * rc )
292
+ {
293
+ struct cdns_pcie * pcie = & rc -> pcie ;
294
+ struct device * dev = pcie -> dev ;
295
+ struct device_node * np = dev -> of_node ;
296
+ struct pci_host_bridge * bridge ;
297
+ struct resource_entry * entry ;
298
+ u32 no_bar_nbits = 32 ;
299
+ int err ;
300
+
301
+ bridge = pci_host_bridge_from_priv (rc );
302
+ if (!bridge )
303
+ return - ENOMEM ;
304
+
305
+ if (list_empty (& bridge -> dma_ranges )) {
306
+ of_property_read_u32 (np , "cdns,no-bar-match-nbits" ,
307
+ & no_bar_nbits );
308
+ err = cdns_pcie_host_bar_ib_config (rc , RP_NO_BAR , 0x0 ,
309
+ (u64 )1 << no_bar_nbits , 0 );
310
+ if (err )
311
+ dev_err (dev , "IB BAR: %d config failed\n" , RP_NO_BAR );
312
+ return err ;
313
+ }
314
+
315
+ list_sort (NULL , & bridge -> dma_ranges , cdns_pcie_host_dma_ranges_cmp );
316
+
317
+ resource_list_for_each_entry (entry , & bridge -> dma_ranges ) {
318
+ err = cdns_pcie_host_bar_config (rc , entry );
319
+ if (err ) {
320
+ dev_err (dev , "Fail to configure IB using dma-ranges\n" );
321
+ return err ;
322
+ }
323
+ }
324
+
325
+ return 0 ;
326
+ }
327
+
104
328
static int cdns_pcie_host_init_address_translation (struct cdns_pcie_rc * rc )
105
329
{
106
330
struct cdns_pcie * pcie = & rc -> pcie ;
@@ -154,16 +378,9 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
154
378
r ++ ;
155
379
}
156
380
157
- /*
158
- * Set Root Port no BAR match Inbound Translation registers:
159
- * needed for MSI and DMA.
160
- * Root Port BAR0 and BAR1 are disabled, hence no need to set their
161
- * inbound translation registers.
162
- */
163
- addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS (rc -> no_bar_nbits );
164
- addr1 = 0 ;
165
- cdns_pcie_writel (pcie , CDNS_PCIE_AT_IB_RP_BAR_ADDR0 (RP_NO_BAR ), addr0 );
166
- cdns_pcie_writel (pcie , CDNS_PCIE_AT_IB_RP_BAR_ADDR1 (RP_NO_BAR ), addr1 );
381
+ err = cdns_pcie_host_map_dma_ranges (rc );
382
+ if (err )
383
+ return err ;
167
384
168
385
return 0 ;
169
386
}
@@ -173,10 +390,16 @@ static int cdns_pcie_host_init(struct device *dev,
173
390
struct cdns_pcie_rc * rc )
174
391
{
175
392
struct resource * bus_range = NULL ;
393
+ struct pci_host_bridge * bridge ;
176
394
int err ;
177
395
396
+ bridge = pci_host_bridge_from_priv (rc );
397
+ if (!bridge )
398
+ return - ENOMEM ;
399
+
178
400
/* Parse our PCI ranges and request their resources */
179
- err = pci_parse_request_of_pci_ranges (dev , resources , NULL , & bus_range );
401
+ err = pci_parse_request_of_pci_ranges (dev , resources ,
402
+ & bridge -> dma_ranges , & bus_range );
180
403
if (err )
181
404
return err ;
182
405
@@ -205,6 +428,7 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
205
428
struct device_node * np = dev -> of_node ;
206
429
struct pci_host_bridge * bridge ;
207
430
struct list_head resources ;
431
+ enum cdns_pcie_rp_bar bar ;
208
432
struct cdns_pcie * pcie ;
209
433
struct resource * res ;
210
434
int ret ;
@@ -216,9 +440,6 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
216
440
pcie = & rc -> pcie ;
217
441
pcie -> is_rc = true;
218
442
219
- rc -> no_bar_nbits = 32 ;
220
- of_property_read_u32 (np , "cdns,no-bar-match-nbits" , & rc -> no_bar_nbits );
221
-
222
443
rc -> vendor_id = 0xffff ;
223
444
of_property_read_u32 (np , "vendor-id" , & rc -> vendor_id );
224
445
@@ -248,6 +469,9 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
248
469
249
470
pcie -> mem_res = res ;
250
471
472
+ for (bar = RP_BAR0 ; bar <= RP_NO_BAR ; bar ++ )
473
+ rc -> avail_ib_bar [bar ] = true;
474
+
251
475
ret = cdns_pcie_host_init (dev , & resources , rc );
252
476
if (ret )
253
477
goto err_init ;
0 commit comments