@@ -75,6 +75,9 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
 			       unsigned int vid,
 			       enum macaccess_entry_type type)
 {
+	int ret;
+
+	spin_lock(&lan966x->mac_lock);
 	lan966x_mac_select(lan966x, mac, vid);
 
 	/* Issue a write command */
@@ -86,7 +89,10 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
 	       ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN),
 	       lan966x, ANA_MACACCESS);
 
-	return lan966x_mac_wait_for_completion(lan966x);
+	ret = lan966x_mac_wait_for_completion(lan966x);
+	spin_unlock(&lan966x->mac_lock);
+
+	return ret;
 }
 
 /* The mask of the front ports is encoded inside the mac parameter via a call
@@ -113,11 +119,13 @@ int lan966x_mac_learn(struct lan966x *lan966x, int port,
 	return __lan966x_mac_learn(lan966x, port, false, mac, vid, type);
 }
 
-int lan966x_mac_forget(struct lan966x *lan966x,
-		       const unsigned char mac[ETH_ALEN],
-		       unsigned int vid,
-		       enum macaccess_entry_type type)
+static int lan966x_mac_forget_locked(struct lan966x *lan966x,
+				     const unsigned char mac[ETH_ALEN],
+				     unsigned int vid,
+				     enum macaccess_entry_type type)
 {
+	lockdep_assert_held(&lan966x->mac_lock);
+
 	lan966x_mac_select(lan966x, mac, vid);
 
 	/* Issue a forget command */
@@ -128,6 +136,20 @@ int lan966x_mac_forget(struct lan966x *lan966x,
 	return lan966x_mac_wait_for_completion(lan966x);
 }
 
+int lan966x_mac_forget(struct lan966x *lan966x,
+		       const unsigned char mac[ETH_ALEN],
+		       unsigned int vid,
+		       enum macaccess_entry_type type)
+{
+	int ret;
+
+	spin_lock(&lan966x->mac_lock);
+	ret = lan966x_mac_forget_locked(lan966x, mac, vid, type);
+	spin_unlock(&lan966x->mac_lock);
+
+	return ret;
+}
+
 int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid)
 {
 	return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED);
@@ -161,7 +183,7 @@ static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *ma
 {
 	struct lan966x_mac_entry *mac_entry;
 
-	mac_entry = kzalloc(sizeof(*mac_entry), GFP_KERNEL);
+	mac_entry = kzalloc(sizeof(*mac_entry), GFP_ATOMIC);
 	if (!mac_entry)
 		return NULL;
 
@@ -179,7 +201,6 @@ static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
 	struct lan966x_mac_entry *res = NULL;
 	struct lan966x_mac_entry *mac_entry;
 
-	spin_lock(&lan966x->mac_lock);
 	list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
 		if (mac_entry->vid == vid &&
 		    ether_addr_equal(mac, mac_entry->mac) &&
@@ -188,7 +209,6 @@ static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
 			break;
 		}
 	}
-	spin_unlock(&lan966x->mac_lock);
 
 	return res;
 }
@@ -231,8 +251,11 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
 {
 	struct lan966x_mac_entry *mac_entry;
 
-	if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL))
+	spin_lock(&lan966x->mac_lock);
+	if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL)) {
+		spin_unlock(&lan966x->mac_lock);
 		return 0;
+	}
 
 	/* In case the entry already exists, don't add it again to SW,
 	 * just update HW, but we need to look in the actual HW because
@@ -241,21 +264,25 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
 	 * add the entry but without the extern_learn flag.
 	 */
 	mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port);
-	if (mac_entry)
-		return lan966x_mac_learn(lan966x, port->chip_port,
-					 addr, vid, ENTRYTYPE_LOCKED);
+	if (mac_entry) {
+		spin_unlock(&lan966x->mac_lock);
+		goto mac_learn;
+	}
 
 	mac_entry = lan966x_mac_alloc_entry(addr, vid, port->chip_port);
-	if (!mac_entry)
+	if (!mac_entry) {
+		spin_unlock(&lan966x->mac_lock);
 		return -ENOMEM;
+	}
 
-	spin_lock(&lan966x->mac_lock);
 	list_add_tail(&mac_entry->list, &lan966x->mac_entries);
 	spin_unlock(&lan966x->mac_lock);
 
-	lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
 	lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, port->dev);
 
+mac_learn:
+	lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
+
 	return 0;
 }
 
@@ -269,8 +296,9 @@ int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
 				 list) {
 		if (mac_entry->vid == vid &&
 		    ether_addr_equal(addr, mac_entry->mac)) {
-			lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
-					   ENTRYTYPE_LOCKED);
+			lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+						  mac_entry->vid,
+						  ENTRYTYPE_LOCKED);
 
 			list_del(&mac_entry->list);
 			kfree(mac_entry);
@@ -288,8 +316,8 @@ void lan966x_mac_purge_entries(struct lan966x *lan966x)
 	spin_lock(&lan966x->mac_lock);
 	list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
 				 list) {
-		lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
-				   ENTRYTYPE_LOCKED);
+		lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+					  mac_entry->vid, ENTRYTYPE_LOCKED);
 
 		list_del(&mac_entry->list);
 		kfree(mac_entry);
@@ -325,10 +353,13 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
 {
 	struct lan966x_mac_entry *mac_entry, *tmp;
 	unsigned char mac[ETH_ALEN] __aligned(2);
+	struct list_head mac_deleted_entries;
 	u32 dest_idx;
 	u32 column;
 	u16 vid;
 
+	INIT_LIST_HEAD(&mac_deleted_entries);
+
 	spin_lock(&lan966x->mac_lock);
 	list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) {
 		bool found = false;
@@ -362,20 +393,26 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
 		}
 
 		if (!found) {
-			/* Notify the bridge that the entry doesn't exist
-			 * anymore in the HW and remove the entry from the SW
-			 * list
-			 */
-			lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
-					      mac_entry->mac, mac_entry->vid,
-					      lan966x->ports[mac_entry->port_index]->dev);
-
 			list_del(&mac_entry->list);
-			kfree(mac_entry);
+			/* Move the entry from SW list to a tmp list such that
+			 * it would be deleted later
+			 */
+			list_add_tail(&mac_entry->list, &mac_deleted_entries);
 		}
 	}
 	spin_unlock(&lan966x->mac_lock);
 
+	list_for_each_entry_safe(mac_entry, tmp, &mac_deleted_entries, list) {
+		/* Notify the bridge that the entry doesn't exist
+		 * anymore in the HW
+		 */
+		lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+				      mac_entry->mac, mac_entry->vid,
+				      lan966x->ports[mac_entry->port_index]->dev);
+		list_del(&mac_entry->list);
+		kfree(mac_entry);
+	}
+
 	/* Now go to the list of columns and see if any entry was not in the SW
 	 * list, then that means that the entry is new so it needs to notify the
 	 * bridge.
@@ -396,13 +433,20 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
 		if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
 			continue;
 
+		spin_lock(&lan966x->mac_lock);
+		mac_entry = lan966x_mac_find_entry(lan966x, mac, vid, dest_idx);
+		if (mac_entry) {
+			spin_unlock(&lan966x->mac_lock);
+			continue;
+		}
+
 		mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
-		if (!mac_entry)
+		if (!mac_entry) {
+			spin_unlock(&lan966x->mac_lock);
 			return;
+		}
 
 		mac_entry->row = row;
-
-		spin_lock(&lan966x->mac_lock);
 		list_add_tail(&mac_entry->list, &lan966x->mac_entries);
 		spin_unlock(&lan966x->mac_lock);
 
@@ -424,6 +468,7 @@ irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
 	       lan966x, ANA_MACTINDX);
 
 	while (1) {
+		spin_lock(&lan966x->mac_lock);
 		lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT),
 			ANA_MACACCESS_MAC_TABLE_CMD,
 			lan966x, ANA_MACACCESS);
@@ -447,12 +492,15 @@ irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
 			stop = false;
 
 		if (column == LAN966X_MAC_COLUMNS - 1 &&
-		    index == 0 && stop)
+		    index == 0 && stop) {
+			spin_unlock(&lan966x->mac_lock);
 			break;
+		}
 
 		entry[column].mach = lan_rd(lan966x, ANA_MACHDATA);
 		entry[column].macl = lan_rd(lan966x, ANA_MACLDATA);
 		entry[column].maca = lan_rd(lan966x, ANA_MACACCESS);
+		spin_unlock(&lan966x->mac_lock);
 
 		/* Once all the columns are read process them */
 		if (column == LAN966X_MAC_COLUMNS - 1) {