// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include "pci.h"

/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access the iomap allocation table for @pdev. If the iomap table doesn't
 * exist and @pdev is managed, it will be allocated. All iomaps recorded
 * in the iomap table are automatically unmapped on driver detach.
 *
 * This function might sleep when the table is first allocated, but once
 * the table exists it can safely be called from any context and is
 * guaranteed to succeed.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
				   dev_to_node(&pdev->dev));
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);
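
/*
 * Illustrative usage sketch, not part of this file: once a BAR has been
 * mapped through one of the pcim_* helpers below, the table returned here
 * hands the cookie back by BAR index. "FOO_BAR_REGS" is a hypothetical
 * BAR number used only for illustration.
 *
 *	void __iomem * const *tbl = pcim_iomap_table(pdev);
 *
 *	if (!tbl || !tbl[FOO_BAR_REGS])
 *		return -ENOMEM;
 *	regs = tbl[FOO_BAR_REGS];
 */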

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver detach.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
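
/*
 * Illustrative usage sketch, not part of this file: a hypothetical
 * foo_probe() mapping all of BAR 0 (maxlen == 0) for the lifetime of the
 * binding. pcim_iomap() does not request the region, so the sketch
 * requests it separately; "foo" and foo_probe() are made-up names.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		void __iomem *regs;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		rc = pci_request_region(pdev, 0, "foo");
 *		if (rc)
 *			return rc;
 *
 *		regs = pcim_iomap(pdev, 0, 0);
 *		if (!regs)
 *			return -ENOMEM;
 *
 *		// mapping is unwound automatically on driver detach
 *		return 0;
 *	}
 */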

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);
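
/*
 * Illustrative usage sketch, not part of this file: a mapping that is only
 * needed during probe can be dropped early instead of waiting for driver
 * detach. "FOO_REV" and the choice of BAR 2 are made up for illustration.
 *
 *	void __iomem *cfg = pcim_iomap(pdev, 2, 0);
 *
 *	if (!cfg)
 *		return -ENOMEM;
 *	rev = readl(cfg + FOO_REV);
 *	pcim_iounmap(pdev, cfg);	// the table slot is cleared again
 */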

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
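
/*
 * Illustrative usage sketch, not part of this file: the common probe
 * pattern of requesting and mapping a set of BARs in one call, then
 * fetching the cookies from the table. BAR numbers and the "foo" name are
 * made up for illustration.
 *
 *	rc = pcim_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *
 *	rc = pcim_iomap_regions(pdev, BIT(0) | BIT(2), "foo");
 *	if (rc)
 *		return rc;	// the call has already undone its own work
 *
 *	mmio  = pcim_iomap_table(pdev)[0];
 *	dbell = pcim_iomap_table(pdev)[2];
 */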

/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name used when requesting regions
 *
 * Request all PCI BARs and iomap regions specified by @mask.
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
				   const char *name)
{
	int request_mask = ((1 << PCI_STD_NUM_BARS) - 1) & ~mask;
	int rc;

	rc = pci_request_selected_regions(pdev, request_mask, name);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, mask, name);
	if (rc)
		pci_release_selected_regions(pdev, request_mask);
	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);
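
/*
 * Illustrative usage sketch, not part of this file: claim every standard
 * BAR so nothing else can grab them, but only map BAR 0; the remaining
 * BARs stay requested but unmapped. The "foo" name is made up.
 *
 *	rc = pcim_iomap_regions_request_all(pdev, BIT(0), "foo");
 *	if (rc)
 *		return rc;
 *	regs = pcim_iomap_table(pdev)[0];
 */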

/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to unmap IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
 */
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
	void __iomem * const *iomap;
	int i;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return;

	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
		if (!(mask & (1 << i)))
			continue;

		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
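
/*
 * Illustrative usage sketch, not part of this file: undoing part of an
 * earlier pcim_iomap_regions() call by hand, e.g. when BAR 2 turns out to
 * be unusable after poking the hardware; BAR 0 stays mapped until detach.
 * foo_bar2_usable() is a hypothetical helper.
 *
 *	rc = pcim_iomap_regions(pdev, BIT(0) | BIT(2), "foo");
 *	if (rc)
 *		return rc;
 *	if (!foo_bar2_usable(pdev))
 *		pcim_iounmap_regions(pdev, BIT(2));
 */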