Skip to content

Commit 26e7770

Browse files
Saravana Kannan authored and gregkh committed
driver core: Allow device link operations inside sync_state()
Some sync_state() implementations might need to call APIs that in turn make calls to device link APIs. So, do the sync_state() callbacks without holding the device link lock.

Signed-off-by: Saravana Kannan <[email protected]>
Reviewed-by: Rafael J. Wysocki <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 71564a2 commit 26e7770

File tree

1 file changed

+71
-8
lines changed

1 file changed

+71
-8
lines changed

drivers/base/core.c

Lines changed: 71 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -695,7 +695,26 @@ int device_links_check_suppliers(struct device *dev)
695695
return ret;
696696
}
697697

698-
static void __device_links_supplier_sync_state(struct device *dev)
698+
/**
699+
* __device_links_queue_sync_state - Queue a device for sync_state() callback
700+
* @dev: Device to call sync_state() on
701+
* @list: List head to queue the @dev on
702+
*
703+
* Queues a device for a sync_state() callback when the device links write lock
704+
* isn't held. This allows the sync_state() execution flow to use device links
705+
* APIs. The caller must ensure this function is called with
706+
* device_links_write_lock() held.
707+
*
708+
* This function does a get_device() to make sure the device is not freed while
709+
* on this list.
710+
*
711+
* So the caller must also ensure that device_links_flush_sync_list() is called
712+
* as soon as the caller releases device_links_write_lock(). This is necessary
713+
* to make sure the sync_state() is called in a timely fashion and the
714+
* put_device() is called on this device.
715+
*/
716+
static void __device_links_queue_sync_state(struct device *dev,
717+
struct list_head *list)
699718
{
700719
struct device_link *link;
701720

@@ -709,12 +728,45 @@ static void __device_links_supplier_sync_state(struct device *dev)
709728
return;
710729
}
711730

712-
if (dev->bus->sync_state)
713-
dev->bus->sync_state(dev);
714-
else if (dev->driver && dev->driver->sync_state)
715-
dev->driver->sync_state(dev);
716-
731+
/*
732+
* Set the flag here to avoid adding the same device to a list more
733+
* than once. This can happen if new consumers get added to the device
734+
* and probed before the list is flushed.
735+
*/
717736
dev->state_synced = true;
737+
738+
if (WARN_ON(!list_empty(&dev->links.defer_sync)))
739+
return;
740+
741+
get_device(dev);
742+
list_add_tail(&dev->links.defer_sync, list);
743+
}
744+
745+
/**
746+
* device_links_flush_sync_list - Call sync_state() on a list of devices
747+
* @list: List of devices to call sync_state() on
748+
*
749+
* Calls sync_state() on all the devices that have been queued for it. This
750+
* function is used in conjunction with __device_links_queue_sync_state().
751+
*/
752+
static void device_links_flush_sync_list(struct list_head *list)
753+
{
754+
struct device *dev, *tmp;
755+
756+
list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
757+
list_del_init(&dev->links.defer_sync);
758+
759+
device_lock(dev);
760+
761+
if (dev->bus->sync_state)
762+
dev->bus->sync_state(dev);
763+
else if (dev->driver && dev->driver->sync_state)
764+
dev->driver->sync_state(dev);
765+
766+
device_unlock(dev);
767+
768+
put_device(dev);
769+
}
718770
}
719771

720772
void device_links_supplier_sync_state_pause(void)
@@ -727,6 +779,7 @@ void device_links_supplier_sync_state_pause(void)
727779
void device_links_supplier_sync_state_resume(void)
728780
{
729781
struct device *dev, *tmp;
782+
LIST_HEAD(sync_list);
730783

731784
device_links_write_lock();
732785
if (!defer_sync_state_count) {
@@ -738,11 +791,17 @@ void device_links_supplier_sync_state_resume(void)
738791
goto out;
739792

740793
list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
741-
__device_links_supplier_sync_state(dev);
794+
/*
795+
* Delete from deferred_sync list before queuing it to
796+
* sync_list because defer_sync is used for both lists.
797+
*/
742798
list_del_init(&dev->links.defer_sync);
799+
__device_links_queue_sync_state(dev, &sync_list);
743800
}
744801
out:
745802
device_links_write_unlock();
803+
804+
device_links_flush_sync_list(&sync_list);
746805
}
747806

748807
static int sync_state_resume_initcall(void)
@@ -772,6 +831,7 @@ static void __device_links_supplier_defer_sync(struct device *sup)
772831
void device_links_driver_bound(struct device *dev)
773832
{
774833
struct device_link *link;
834+
LIST_HEAD(sync_list);
775835

776836
/*
777837
* If a device probes successfully, it's expected to have created all
@@ -815,12 +875,15 @@ void device_links_driver_bound(struct device *dev)
815875
if (defer_sync_state_count)
816876
__device_links_supplier_defer_sync(link->supplier);
817877
else
818-
__device_links_supplier_sync_state(link->supplier);
878+
__device_links_queue_sync_state(link->supplier,
879+
&sync_list);
819880
}
820881

821882
dev->links.status = DL_DEV_DRIVER_BOUND;
822883

823884
device_links_write_unlock();
885+
886+
device_links_flush_sync_list(&sync_list);
824887
}
825888

826889
static void device_link_drop_managed(struct device_link *link)

0 commit comments

Comments (0)