 #include "internals.h"
 
 static inline int msi_sysfs_create_group(struct device *dev);
-#define dev_to_msi_list(dev)	(&(dev)->msi.data->list)
 
 /**
  * msi_alloc_desc - Allocate an initialized msi_desc
@@ -41,7 +40,6 @@ static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
 	if (!desc)
 		return NULL;
 
-	INIT_LIST_HEAD(&desc->list);
 	desc->dev = dev;
 	desc->nvec_used = nvec;
 	if (affinity) {
@@ -60,6 +58,17 @@ static void msi_free_desc(struct msi_desc *desc)
 	kfree(desc);
 }
 
+static int msi_insert_desc(struct msi_device_data *md, struct msi_desc *desc, unsigned int index)
+{
+	int ret;
+
+	desc->msi_index = index;
+	ret = xa_insert(&md->__store, index, desc, GFP_KERNEL);
+	if (ret)
+		msi_free_desc(desc);
+	return ret;
+}
+
 /**
  * msi_add_msi_desc - Allocate and initialize a MSI descriptor
  * @dev:	Pointer to the device for which the descriptor is allocated
@@ -77,12 +86,9 @@ int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc)
 	if (!desc)
 		return -ENOMEM;
 
-	/* Copy the MSI index and type specific data to the new descriptor. */
-	desc->msi_index = init_desc->msi_index;
+	/* Copy type specific data to the new descriptor. */
 	desc->pci = init_desc->pci;
-
-	list_add_tail(&desc->list, &dev->msi.data->list);
-	return 0;
+	return msi_insert_desc(dev->msi.data, desc, init_desc->msi_index);
 }
 
 /**
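A note on the xa_insert() based insertion above: unlike list_add_tail(), xa_insert() fails with -EBUSY when the index is already occupied, so a duplicate msi_index is now reported instead of silently adding a second descriptor, and msi_insert_desc() frees the descriptor on any failure. A minimal kernel-style sketch of the same insert-or-free pattern (the demo_* names are illustrative, not part of this patch):

```c
#include <linux/slab.h>
#include <linux/xarray.h>

/* Illustrative only: a toy record stored at a caller-chosen index. */
struct demo_rec {
	unsigned int index;
};

static DEFINE_XARRAY(demo_store);

static int demo_insert(unsigned int index)
{
	struct demo_rec *rec = kzalloc(sizeof(*rec), GFP_KERNEL);
	int ret;

	if (!rec)
		return -ENOMEM;

	rec->index = index;
	/* -EBUSY if the index is already populated, -ENOMEM on allocation failure */
	ret = xa_insert(&demo_store, index, rec, GFP_KERNEL);
	if (ret)
		kfree(rec);	/* mirrors msi_insert_desc(): drop the record on failure */
	return ret;
}
```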
@@ -95,28 +101,41 @@ int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc)
  */
 static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsigned int ndesc)
 {
-	struct msi_desc *desc, *tmp;
-	LIST_HEAD(list);
-	unsigned int i;
+	unsigned int idx, last = index + ndesc - 1;
+	struct msi_desc *desc;
+	int ret;
 
 	lockdep_assert_held(&dev->msi.data->mutex);
 
-	for (i = 0; i < ndesc; i++) {
+	for (idx = index; idx <= last; idx++) {
 		desc = msi_alloc_desc(dev, 1, NULL);
 		if (!desc)
+			goto fail_mem;
+		ret = msi_insert_desc(dev->msi.data, desc, idx);
+		if (ret)
 			goto fail;
-		desc->msi_index = index + i;
-		list_add_tail(&desc->list, &list);
 	}
-	list_splice_tail(&list, &dev->msi.data->list);
 	return 0;
 
+fail_mem:
+	ret = -ENOMEM;
 fail:
-	list_for_each_entry_safe(desc, tmp, &list, list) {
-		list_del(&desc->list);
-		msi_free_desc(desc);
+	msi_free_msi_descs_range(dev, MSI_DESC_NOTASSOCIATED, index, last);
+	return ret;
+}
+
+static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
+{
+	switch (filter) {
+	case MSI_DESC_ALL:
+		return true;
+	case MSI_DESC_NOTASSOCIATED:
+		return !desc->irq;
+	case MSI_DESC_ASSOCIATED:
+		return !!desc->irq;
 	}
-	return -ENOMEM;
+	WARN_ON_ONCE(1);
+	return false;
 }
 
 /**
@@ -129,19 +148,17 @@ static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsi
 void msi_free_msi_descs_range(struct device *dev, enum msi_desc_filter filter,
 			      unsigned int first_index, unsigned int last_index)
 {
+	struct xarray *xa = &dev->msi.data->__store;
 	struct msi_desc *desc;
+	unsigned long idx;
 
 	lockdep_assert_held(&dev->msi.data->mutex);
 
-	msi_for_each_desc(desc, dev, filter) {
-		/*
-		 * Stupid for now to handle MSI device domain until the
-		 * storage is switched over to an xarray.
-		 */
-		if (desc->msi_index < first_index || desc->msi_index > last_index)
-			continue;
-		list_del(&desc->list);
-		msi_free_desc(desc);
+	xa_for_each_range(xa, idx, desc, first_index, last_index) {
+		if (msi_desc_match(desc, filter)) {
+			xa_erase(xa, idx);
+			msi_free_desc(desc);
+		}
 	}
 }
 
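The free path above now only walks the requested index window: xa_for_each_range() visits the present entries between first_index and last_index, and erasing the entry under the cursor is fine because the next step of the iterator re-walks from the following index. A hedged sketch of that sweep pattern, reusing the illustrative demo_* names from the previous sketch:

```c
#include <linux/slab.h>
#include <linux/xarray.h>

/*
 * Illustrative sweep over an index window, mirroring the shape of
 * msi_free_msi_descs_range(); demo_rec is the toy type from the
 * previous sketch.
 */
static void demo_release_range(struct xarray *xa, unsigned long first, unsigned long last)
{
	struct demo_rec *rec;
	unsigned long idx;

	xa_for_each_range(xa, idx, rec, first, last) {
		/* Erasing the current index is safe; iteration continues after it. */
		xa_erase(xa, idx);
		kfree(rec);
	}
}
```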
@@ -162,7 +179,8 @@ static void msi_device_data_release(struct device *dev, void *res)
 {
 	struct msi_device_data *md = res;
 
-	WARN_ON_ONCE(!list_empty(&md->list));
+	WARN_ON_ONCE(!xa_empty(&md->__store));
+	xa_destroy(&md->__store);
 	dev->msi.data = NULL;
 }
 
@@ -194,7 +212,7 @@ int msi_setup_device_data(struct device *dev)
 		return ret;
 	}
 
-	INIT_LIST_HEAD(&md->list);
+	xa_init(&md->__store);
 	mutex_init(&md->mutex);
 	dev->msi.data = md;
 	devres_add(dev, md);
@@ -217,34 +235,21 @@ EXPORT_SYMBOL_GPL(msi_lock_descs);
  */
 void msi_unlock_descs(struct device *dev)
 {
-	/* Clear the next pointer which was cached by the iterator */
-	dev->msi.data->__next = NULL;
+	/* Invalidate the index which was cached by the iterator */
+	dev->msi.data->__iter_idx = MSI_MAX_INDEX;
 	mutex_unlock(&dev->msi.data->mutex);
 }
 EXPORT_SYMBOL_GPL(msi_unlock_descs);
 
-static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
-{
-	switch (filter) {
-	case MSI_DESC_ALL:
-		return true;
-	case MSI_DESC_NOTASSOCIATED:
-		return !desc->irq;
-	case MSI_DESC_ASSOCIATED:
-		return !!desc->irq;
-	}
-	WARN_ON_ONCE(1);
-	return false;
-}
-
-static struct msi_desc *msi_find_first_desc(struct device *dev, enum msi_desc_filter filter)
+static struct msi_desc *msi_find_desc(struct msi_device_data *md, enum msi_desc_filter filter)
 {
 	struct msi_desc *desc;
 
-	list_for_each_entry(desc, dev_to_msi_list(dev), list) {
+	xa_for_each_start(&md->__store, md->__iter_idx, desc, md->__iter_idx) {
 		if (msi_desc_match(desc, filter))
 			return desc;
 	}
+	md->__iter_idx = MSI_MAX_INDEX;
 	return NULL;
 }
 
@@ -261,37 +266,24 @@ static struct msi_desc *msi_find_first_desc(struct device *dev, enum msi_desc_fi
  */
 struct msi_desc *msi_first_desc(struct device *dev, enum msi_desc_filter filter)
 {
-	struct msi_desc *desc;
+	struct msi_device_data *md = dev->msi.data;
 
-	if (WARN_ON_ONCE(!dev->msi.data))
+	if (WARN_ON_ONCE(!md))
 		return NULL;
 
-	lockdep_assert_held(&dev->msi.data->mutex);
+	lockdep_assert_held(&md->mutex);
 
-	desc = msi_find_first_desc(dev, filter);
-	dev->msi.data->__next = desc ? list_next_entry(desc, list) : NULL;
-	return desc;
+	md->__iter_idx = 0;
+	return msi_find_desc(md, filter);
 }
 EXPORT_SYMBOL_GPL(msi_first_desc);
 
-static struct msi_desc *__msi_next_desc(struct device *dev, enum msi_desc_filter filter,
-					struct msi_desc *from)
-{
-	struct msi_desc *desc = from;
-
-	list_for_each_entry_from(desc, dev_to_msi_list(dev), list) {
-		if (msi_desc_match(desc, filter))
-			return desc;
-	}
-	return NULL;
-}
-
 /**
  * msi_next_desc - Get the next MSI descriptor of a device
  * @dev:	Device to operate on
  *
  * The first invocation of msi_next_desc() has to be preceded by a
- * successful incovation of __msi_first_desc(). Consecutive invocations are
+ * successful invocation of __msi_first_desc(). Consecutive invocations are
  * only valid if the previous one was successful. All these operations have
 * to be done within the same MSI mutex held region.
 *
@@ -300,20 +292,18 @@ static struct msi_desc *__msi_next_desc(struct device *dev, enum msi_desc_filter
  */
 struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter)
 {
-	struct msi_device_data *data = dev->msi.data;
-	struct msi_desc *desc;
+	struct msi_device_data *md = dev->msi.data;
 
-	if (WARN_ON_ONCE(!data))
+	if (WARN_ON_ONCE(!md))
 		return NULL;
 
-	lockdep_assert_held(&data->mutex);
+	lockdep_assert_held(&md->mutex);
 
-	if (!data->__next)
+	if (md->__iter_idx >= (unsigned long)MSI_MAX_INDEX)
 		return NULL;
 
-	desc = __msi_next_desc(dev, filter, data->__next);
-	dev->msi.data->__next = desc ? list_next_entry(desc, list) : NULL;
-	return desc;
+	md->__iter_idx++;
+	return msi_find_desc(md, filter);
 }
 EXPORT_SYMBOL_GPL(msi_next_desc);
 
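The per-device __iter_idx now carries the iterator state that the cached __next pointer used to hold: msi_first_desc() resets it to 0, msi_find_desc() leaves it at the index of the descriptor it returned (or MSI_MAX_INDEX once the walk is exhausted), and msi_next_desc() advances it before searching again. The msi_for_each_desc() helper in <linux/msi.h> is built on this pair; a sketch of a caller using it under the descriptor mutex (demo_walk() is illustrative only, not part of this patch):

```c
#include <linux/device.h>
#include <linux/msi.h>

/*
 * Illustrative only: walk every descriptor that already has a Linux
 * interrupt number. msi_for_each_desc() wraps msi_first_desc() and
 * msi_next_desc() and must run with the descriptor mutex held.
 */
static void demo_walk(struct device *dev)
{
	struct msi_desc *desc;

	msi_lock_descs(dev);
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
		pr_info("index %u -> irq %u\n", desc->msi_index, desc->irq);
	msi_unlock_descs(dev);
}
```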
@@ -336,21 +326,18 @@ unsigned int msi_get_virq(struct device *dev, unsigned int index)
 	pcimsi = dev_is_pci(dev) ? to_pci_dev(dev)->msi_enabled : false;
 
 	msi_lock_descs(dev);
-	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
-		/* PCI-MSI has only one descriptor for multiple interrupts. */
-		if (pcimsi) {
-			if (index < desc->nvec_used)
-				ret = desc->irq + index;
-			break;
-		}
-
+	desc = xa_load(&dev->msi.data->__store, pcimsi ? 0 : index);
+	if (desc && desc->irq) {
 		/*
+		 * PCI-MSI has only one descriptor for multiple interrupts.
 		 * PCI-MSIX and platform MSI use a descriptor per
 		 * interrupt.
 		 */
-		if (desc->msi_index == index) {
+		if (pcimsi) {
+			if (index < desc->nvec_used)
+				ret = desc->irq + index;
+		} else {
 			ret = desc->irq;
-			break;
 		}
 	}
 	msi_unlock_descs(dev);
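With the xarray, the descriptor search in msi_get_virq() above collapses into a single xa_load(): PCI/MSI keeps one descriptor at index 0 covering all vectors, so the Linux interrupt number for vector index is desc->irq + index, while PCI/MSI-X and platform MSI have one descriptor per index. A sketch of a PCI driver consuming the lookup (the demo_* names and the request_irq() wiring are illustrative, not part of this patch):

```c
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/pci.h>

/*
 * Illustrative only: resolve MSI vector @index of a PCI device to a Linux
 * interrupt number and request it. msi_get_virq() returns 0 when the vector
 * has no interrupt associated.
 */
static int demo_request_vector(struct pci_dev *pdev, unsigned int index,
			       irq_handler_t handler, void *data)
{
	unsigned int virq = msi_get_virq(&pdev->dev, index);

	if (!virq)
		return -ENOENT;
	return request_irq(virq, handler, 0, "demo-msi", data);
}
```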
@@ -731,16 +718,13 @@ int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
 	int ret, virq;
 
 	msi_lock_descs(dev);
-	for (virq = virq_base; virq < virq_base + nvec; virq++) {
-		desc = msi_alloc_desc(dev, 1, NULL);
-		if (!desc) {
-			ret = -ENOMEM;
-			goto fail;
-		}
+	ret = msi_add_simple_msi_descs(dev, virq_base, nvec);
+	if (ret)
+		goto unlock;
 
-		desc->msi_index = virq;
+	for (virq = virq_base; virq < virq_base + nvec; virq++) {
+		desc = xa_load(&dev->msi.data->__store, virq);
 		desc->irq = virq;
-		list_add_tail(&desc->list, &dev->msi.data->list);
 
 		ops->set_desc(arg, desc);
 		ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
@@ -756,6 +740,7 @@ int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
 	for (--virq; virq >= virq_base; virq--)
 		irq_domain_free_irqs_common(domain, virq, 1);
 	msi_free_msi_descs_range(dev, MSI_DESC_ALL, virq_base, virq_base + nvec - 1);
+unlock:
 	msi_unlock_descs(dev);
 	return ret;
 }