@@ -1611,16 +1611,16 @@ static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
 {
 	return sprintf(buf, "%u\n", channel->target_cpu);
 }
-static ssize_t target_cpu_store(struct vmbus_channel *channel,
-				const char *buf, size_t count)
+
+int vmbus_channel_set_cpu(struct vmbus_channel *channel, u32 target_cpu)
 {
-	u32 target_cpu, origin_cpu;
-	ssize_t ret = count;
+	u32 origin_cpu;
+	int ret = 0;
 
-	if (vmbus_proto_version < VERSION_WIN10_V4_1)
-		return -EIO;
+	lockdep_assert_cpus_held();
+	lockdep_assert_held(&vmbus_connection.channel_mutex);
 
-	if (sscanf(buf, "%uu", &target_cpu) != 1)
+	if (vmbus_proto_version < VERSION_WIN10_V4_1)
 		return -EIO;
 
 	/* Validate target_cpu for the cpumask_test_cpu() operation below. */
@@ -1630,22 +1630,17 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
 	if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
 		return -EINVAL;
 
-	/* No CPUs should come up or down during this. */
-	cpus_read_lock();
-
-	if (!cpu_online(target_cpu)) {
-		cpus_read_unlock();
+	if (!cpu_online(target_cpu))
 		return -EINVAL;
-	}
 
 	/*
-	 * Synchronizes target_cpu_store() and channel closure:
+	 * Synchronizes vmbus_channel_set_cpu() and channel closure:
 	 *
 	 * { Initially: state = CHANNEL_OPENED }
 	 *
 	 * CPU1				CPU2
 	 *
-	 * [target_cpu_store()]		[vmbus_disconnect_ring()]
+	 * [vmbus_channel_set_cpu()]	[vmbus_disconnect_ring()]
 	 *
 	 * LOCK channel_mutex		LOCK channel_mutex
 	 * LOAD r1 = state		LOAD r2 = state
@@ -1660,25 +1655,24 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
 	 * Note. The host processes the channel messages "sequentially", in
 	 * the order in which they are received on a per-partition basis.
 	 */
-	mutex_lock(&vmbus_connection.channel_mutex);
 
 	/*
 	 * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
 	 * avoid sending the message and fail here for such channels.
 	 */
 	if (channel->state != CHANNEL_OPENED_STATE) {
 		ret = -EIO;
-		goto cpu_store_unlock;
+		goto end;
 	}
 
 	origin_cpu = channel->target_cpu;
 	if (target_cpu == origin_cpu)
-		goto cpu_store_unlock;
+		goto end;
 
 	if (vmbus_send_modifychannel(channel,
 				     hv_cpu_number_to_vp_number(target_cpu))) {
 		ret = -EIO;
-		goto cpu_store_unlock;
+		goto end;
 	}
 
 	/*
@@ -1708,10 +1702,26 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
 			  origin_cpu, target_cpu);
 	}
 
-cpu_store_unlock:
+end:
+	return ret;
+}
+
+static ssize_t target_cpu_store(struct vmbus_channel *channel,
+				const char *buf, size_t count)
+{
+	u32 target_cpu;
+	ssize_t ret;
+
+	if (sscanf(buf, "%uu", &target_cpu) != 1)
+		return -EIO;
+
+	cpus_read_lock();
+	mutex_lock(&vmbus_connection.channel_mutex);
+	ret = vmbus_channel_set_cpu(channel, target_cpu);
 	mutex_unlock(&vmbus_connection.channel_mutex);
 	cpus_read_unlock();
-	return ret;
+
+	return ret ?: count;
 }
 
 static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
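For illustration, here is a minimal sketch of how a caller inside drivers/hv might use the new vmbus_channel_set_cpu() helper. It only mirrors the locking sequence that the refactored target_cpu_store() above performs and that the lockdep assertions enforce; the wrapper name hv_example_retarget_channel() is hypothetical and not part of this patch.

/*
 * Hypothetical caller: vmbus_channel_set_cpu() must be invoked with the
 * CPU hotplug read lock and vmbus_connection.channel_mutex held, in that
 * order, exactly as target_cpu_store() does above.
 */
static int hv_example_retarget_channel(struct vmbus_channel *channel,
				       u32 target_cpu)
{
	int ret;

	/* No CPUs should come up or down during this. */
	cpus_read_lock();
	/* Serialize against channel closure (vmbus_disconnect_ring()). */
	mutex_lock(&vmbus_connection.channel_mutex);

	ret = vmbus_channel_set_cpu(channel, target_cpu);

	mutex_unlock(&vmbus_connection.channel_mutex);
	cpus_read_unlock();

	return ret;
}

Splitting the sysfs parsing out of target_cpu_store() this way leaves the helper returning 0 or a negative errno, which is why the store callback finishes with "return ret ?: count;".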
1717
1727
0 commit comments