@@ -50,6 +50,7 @@ static DEFINE_MUTEX(wfs_lock);
 static LIST_HEAD(deferred_sync);
 static unsigned int defer_sync_state_count = 1;
 static unsigned int defer_fw_devlink_count;
+static LIST_HEAD(deferred_fw_devlink);
 static DEFINE_MUTEX(defer_fw_devlink_lock);
 static bool fw_devlink_is_permissive(void);

@@ -754,11 +755,11 @@ static void __device_links_queue_sync_state(struct device *dev,
 	 */
 	dev->state_synced = true;

-	if (WARN_ON(!list_empty(&dev->links.defer_sync)))
+	if (WARN_ON(!list_empty(&dev->links.defer_hook)))
 		return;

 	get_device(dev);
-	list_add_tail(&dev->links.defer_sync, list);
+	list_add_tail(&dev->links.defer_hook, list);
 }

 /**
@@ -776,8 +777,8 @@ static void device_links_flush_sync_list(struct list_head *list,
 {
 	struct device *dev, *tmp;

-	list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
-		list_del_init(&dev->links.defer_sync);
+	list_for_each_entry_safe(dev, tmp, list, links.defer_hook) {
+		list_del_init(&dev->links.defer_hook);

 		if (dev != dont_lock_dev)
 			device_lock(dev);
@@ -815,12 +816,12 @@ void device_links_supplier_sync_state_resume(void)
 	if (defer_sync_state_count)
 		goto out;

-	list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
+	list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_hook) {
 		/*
 		 * Delete from deferred_sync list before queuing it to
-		 * sync_list because defer_sync is used for both lists.
+		 * sync_list because defer_hook is used for both lists.
 		 */
-		list_del_init(&dev->links.defer_sync);
+		list_del_init(&dev->links.defer_hook);
 		__device_links_queue_sync_state(dev, &sync_list);
 	}
 out:
@@ -838,8 +839,8 @@ late_initcall(sync_state_resume_initcall);

 static void __device_links_supplier_defer_sync(struct device *sup)
 {
-	if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
-		list_add_tail(&sup->links.defer_sync, &deferred_sync);
+	if (list_empty(&sup->links.defer_hook) && dev_has_sync_state(sup))
+		list_add_tail(&sup->links.defer_hook, &deferred_sync);
 }

 static void device_link_drop_managed(struct device_link *link)
@@ -1052,7 +1053,7 @@ void device_links_driver_cleanup(struct device *dev)
 		WRITE_ONCE(link->status, DL_STATE_DORMANT);
 	}

-	list_del_init(&dev->links.defer_sync);
+	list_del_init(&dev->links.defer_hook);
 	__device_links_no_driver(dev);

 	device_links_write_unlock();
@@ -1244,6 +1245,12 @@ static void fw_devlink_link_device(struct device *dev)
 		fw_ret = -EAGAIN;
 	} else {
 		fw_ret = -ENODEV;
+		/*
+		 * defer_hook is not used to add device to deferred_sync list
+		 * until device is bound. Since deferred fw devlink also blocks
+		 * probing, same list hook can be used for deferred_fw_devlink.
+		 */
+		list_add_tail(&dev->links.defer_hook, &deferred_fw_devlink);
 	}

 	if (fw_ret == -ENODEV)
@@ -1312,6 +1319,9 @@ void fw_devlink_pause(void)
  */
 void fw_devlink_resume(void)
 {
+	struct device *dev, *tmp;
+	LIST_HEAD(probe_list);
+
 	mutex_lock(&defer_fw_devlink_lock);
 	if (!defer_fw_devlink_count) {
 		WARN(true, "Unmatched fw_devlink pause/resume!");
@@ -1323,9 +1333,19 @@ void fw_devlink_resume(void)
 		goto out;

 	device_link_add_missing_supplier_links();
-	driver_deferred_probe_force_trigger();
+	list_splice_tail_init(&deferred_fw_devlink, &probe_list);
 out:
 	mutex_unlock(&defer_fw_devlink_lock);
+
+	/*
+	 * bus_probe_device() can cause new devices to get added and they'll
+	 * try to grab defer_fw_devlink_lock. So, this needs to be done outside
+	 * the defer_fw_devlink_lock.
+	 */
+	list_for_each_entry_safe(dev, tmp, &probe_list, links.defer_hook) {
+		list_del_init(&dev->links.defer_hook);
+		bus_probe_device(dev);
+	}
 }
 /* Device links support end. */

@@ -2172,7 +2192,7 @@ void device_initialize(struct device *dev)
 	INIT_LIST_HEAD(&dev->links.consumers);
 	INIT_LIST_HEAD(&dev->links.suppliers);
 	INIT_LIST_HEAD(&dev->links.needs_suppliers);
-	INIT_LIST_HEAD(&dev->links.defer_sync);
+	INIT_LIST_HEAD(&dev->links.defer_hook);
 	dev->links.status = DL_DEV_NO_DRIVER;
 }
 EXPORT_SYMBOL_GPL(device_initialize);
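
For reference, the fw_devlink_resume() change above follows a common driver-core pattern: splice the deferred devices onto a local list while holding the lock, then call back into the probing code only after dropping it, because bus_probe_device() can add devices that take the same lock. A minimal sketch of that pattern, not taken from the commit; pending_list, pending_lock and drain_pending() are hypothetical names, while defer_hook and bus_probe_device() are the ones used in the diff:

static LIST_HEAD(pending_list);
static DEFINE_MUTEX(pending_lock);

static void drain_pending(void)
{
	struct device *dev, *tmp;
	LIST_HEAD(local);

	/* Detach all deferred devices while holding the lock... */
	mutex_lock(&pending_lock);
	list_splice_tail_init(&pending_list, &local);
	mutex_unlock(&pending_lock);

	/*
	 * ...then probe them without it, since bus_probe_device() may add
	 * new devices that in turn try to take the same lock.
	 */
	list_for_each_entry_safe(dev, tmp, &local, links.defer_hook) {
		list_del_init(&dev->links.defer_hook);
		bus_probe_device(dev);
	}
}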