
Commit 4827790

Merge branch '6.18/scsi-queue' into 6.18/scsi-fixes

Pull in outstanding SCSI fixes for 6.18.

Signed-off-by: Martin K. Petersen <[email protected]>
2 parents 3a86608 + 7c3321f commit 4827790

5 files changed, 56 insertions(+), 56 deletions(-)

Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml

Lines changed: 4 additions & 0 deletions
@@ -24,6 +24,10 @@ properties:
           - enum:
               - qcom,qcs8300-qmp-ufs-phy
           - const: qcom,sa8775p-qmp-ufs-phy
+      - items:
+          - enum:
+              - qcom,kaanapali-qmp-ufs-phy
+          - const: qcom,sm8750-qmp-ufs-phy
       - enum:
           - qcom,msm8996-qmp-ufs-phy
           - qcom,msm8998-qmp-ufs-phy

Documentation/devicetree/bindings/ufs/qcom,sm8650-ufshc.yaml

Lines changed: 2 additions & 0 deletions
@@ -15,6 +15,7 @@ select:
     compatible:
       contains:
         enum:
+          - qcom,kaanapali-ufshc
           - qcom,sm8650-ufshc
           - qcom,sm8750-ufshc
   required:
@@ -24,6 +25,7 @@ properties:
   compatible:
     items:
       - enum:
+          - qcom,kaanapali-ufshc
           - qcom,sm8650-ufshc
           - qcom,sm8750-ufshc
       - const: qcom,ufshc

drivers/scsi/libfc/fc_fcp.c

Lines changed: 1 addition & 1 deletion
@@ -503,7 +503,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		host_bcode = FC_ERROR;
 		goto err;
 	}
-	if (offset + len > fsp->data_len) {
+	if (size_add(offset, len) > fsp->data_len) {
 		/* this should never happen */
 		if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
 		    fc_frame_crc_check(fp))
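
The libfc change replaces an open-coded offset + len, which can wrap around on a corrupted or hostile frame header and slip past the bounds check, with the kernel's saturating size_add() helper from include/linux/overflow.h, which clamps the sum at SIZE_MAX so the comparison still trips. A minimal userspace sketch of the same idea follows; sat_size_add() here is a stand-in for illustration, not the kernel implementation.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Saturating addition: if a + b would wrap around SIZE_MAX, return
 * SIZE_MAX instead, so a later "sum > limit" bounds check still trips.
 */
static size_t sat_size_add(size_t a, size_t b)
{
	size_t sum = a + b;

	return (sum < a) ? SIZE_MAX : sum;	/* wrapped -> saturate */
}

int main(void)
{
	size_t data_len = 4096;
	size_t offset = SIZE_MAX - 8;	/* e.g. a garbage offset from a bad frame */
	size_t len = 64;

	/* Plain addition wraps to a tiny value and sails past the check. */
	printf("wrapping check rejects frame:   %d\n", offset + len > data_len);
	/* Saturating addition keeps the check effective. */
	printf("saturating check rejects frame: %d\n",
	       sat_size_add(offset, len) > data_len);
	return 0;
}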

drivers/scsi/qla4xxx/ql4_os.c

Lines changed: 4 additions & 4 deletions
@@ -4104,7 +4104,7 @@ void qla4xxx_srb_compl(struct kref *ref)
  * The mid-level driver tries to ensure that queuecommand never gets
  * invoked concurrently with itself or the interrupt handler (although
  * the interrupt handler may call this routine as part of request-
- * completion handling). Unfortunely, it sometimes calls the scheduler
+ * completion handling). Unfortunately, it sometimes calls the scheduler
  * in interrupt context which is a big NO! NO!.
  **/
 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
@@ -4647,7 +4647,7 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
 		cmd = scsi_host_find_tag(ha->host, index);
 		/*
 		 * We cannot just check if the index is valid,
-		 * becase if we are run from the scsi eh, then
+		 * because if we are run from the scsi eh, then
 		 * the scsi/block layer is going to prevent
 		 * the tag from being released.
 		 */
@@ -4952,7 +4952,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
 	/* Upon successful firmware/chip reset, re-initialize the adapter */
 	if (status == QLA_SUCCESS) {
 		/* For ISP-4xxx, force function 1 to always initialize
-		 * before function 3 to prevent both funcions from
+		 * before function 3 to prevent both functions from
 		 * stepping on top of the other */
 		if (is_qla40XX(ha) && (ha->mac_index == 3))
 			ssleep(6);
@@ -6914,7 +6914,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
 	struct ddb_entry *ddb_entry = NULL;
 
 	/* Create session object, with INVALID_ENTRY,
-	 * the targer_id would get set when we issue the login
+	 * the target_id would get set when we issue the login
 	 */
 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
 				       cmds_max, sizeof(struct ddb_entry),

drivers/scsi/storvsc_drv.c

Lines changed: 45 additions & 51 deletions
@@ -1406,14 +1406,19 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
 	}
 
 	/*
-	 * Our channel array is sparsley populated and we
+	 * Our channel array could be sparsley populated and we
 	 * initiated I/O on a processor/hw-q that does not
 	 * currently have a designated channel. Fix this.
 	 * The strategy is simple:
-	 * I. Ensure NUMA locality
-	 * II. Distribute evenly (best effort)
+	 * I. Prefer the channel associated with the current CPU
+	 * II. Ensure NUMA locality
+	 * III. Distribute evenly (best effort)
 	 */
 
+	/* Prefer the channel on the I/O issuing processor/hw-q */
+	if (cpumask_test_cpu(q_num, &stor_device->alloced_cpus))
+		return stor_device->stor_chns[q_num];
+
 	node_mask = cpumask_of_node(cpu_to_node(q_num));
 
 	num_channels = 0;
@@ -1469,59 +1474,48 @@ static int storvsc_do_io(struct hv_device *device,
 	/* See storvsc_change_target_cpu(). */
 	outgoing_channel = READ_ONCE(stor_device->stor_chns[q_num]);
 	if (outgoing_channel != NULL) {
-		if (outgoing_channel->target_cpu == q_num) {
-			/*
-			 * Ideally, we want to pick a different channel if
-			 * available on the same NUMA node.
-			 */
-			node_mask = cpumask_of_node(cpu_to_node(q_num));
-			for_each_cpu_wrap(tgt_cpu,
-				 &stor_device->alloced_cpus, q_num + 1) {
-				if (!cpumask_test_cpu(tgt_cpu, node_mask))
-					continue;
-				if (tgt_cpu == q_num)
-					continue;
-				channel = READ_ONCE(
-					stor_device->stor_chns[tgt_cpu]);
-				if (channel == NULL)
-					continue;
-				if (hv_get_avail_to_write_percent(
-							&channel->outbound)
-						> ring_avail_percent_lowater) {
-					outgoing_channel = channel;
-					goto found_channel;
-				}
-			}
+		if (hv_get_avail_to_write_percent(&outgoing_channel->outbound)
+				> ring_avail_percent_lowater)
+			goto found_channel;
 
-			/*
-			 * All the other channels on the same NUMA node are
-			 * busy. Try to use the channel on the current CPU
-			 */
-			if (hv_get_avail_to_write_percent(
-						&outgoing_channel->outbound)
-					> ring_avail_percent_lowater)
+		/*
+		 * Channel is busy, try to find a channel on the same NUMA node
+		 */
+		node_mask = cpumask_of_node(cpu_to_node(q_num));
+		for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,
+				  q_num + 1) {
+			if (!cpumask_test_cpu(tgt_cpu, node_mask))
+				continue;
+			channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);
+			if (!channel)
+				continue;
+			if (hv_get_avail_to_write_percent(&channel->outbound)
+					> ring_avail_percent_lowater) {
+				outgoing_channel = channel;
 				goto found_channel;
+			}
+		}
 
-			/*
-			 * If we reach here, all the channels on the current
-			 * NUMA node are busy. Try to find a channel in
-			 * other NUMA nodes
-			 */
-			for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
-				if (cpumask_test_cpu(tgt_cpu, node_mask))
-					continue;
-				channel = READ_ONCE(
-					stor_device->stor_chns[tgt_cpu]);
-				if (channel == NULL)
-					continue;
-				if (hv_get_avail_to_write_percent(
-						&channel->outbound)
-					> ring_avail_percent_lowater) {
-					outgoing_channel = channel;
-					goto found_channel;
-				}
+		/*
+		 * If we reach here, all the channels on the current
+		 * NUMA node are busy. Try to find a channel in
+		 * all NUMA nodes
+		 */
+		for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,
+				  q_num + 1) {
+			channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);
+			if (!channel)
+				continue;
+			if (hv_get_avail_to_write_percent(&channel->outbound)
+					> ring_avail_percent_lowater) {
+				outgoing_channel = channel;
+				goto found_channel;
 			}
 		}
+		/*
+		 * If we reach here, all the channels are busy. Use the
+		 * original channel found.
+		 */
 	} else {
 		spin_lock_irqsave(&stor_device->lock, flags);
 		outgoing_channel = stor_device->stor_chns[q_num];
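
Taken together, the two storvsc hunks flatten channel selection into a straight fallback chain: use the issuing CPU's own channel if its outbound ring has headroom, otherwise scan channels on the same NUMA node, then every allocated channel, and finally fall back to the channel originally looked up. Below is a condensed C sketch of that ordering only; pick_channel(), ring_has_room(), same_node(), channels[] and the toy topology are hypothetical illustration names, not the driver's code, and it covers only the case where the issuing CPU already has a channel.

#include <stdbool.h>

#define NUM_CPUS 8

struct channel { bool present; int avail_pct; };

static struct channel channels[NUM_CPUS];	/* hypothetical per-CPU channel table */
static int lowater = 10;			/* stand-in for ring_avail_percent_lowater */

static bool ring_has_room(const struct channel *c)
{
	return c->present && c->avail_pct > lowater;
}

static bool same_node(int a, int b)
{
	return (a / 4) == (b / 4);	/* toy NUMA topology: 4 CPUs per node */
}

/* Fallback chain mirroring the patched selection order in storvsc_do_io(). */
static const struct channel *pick_channel(int q_num)
{
	const struct channel *orig = &channels[q_num];
	int cpu;

	/* 1. The issuing CPU's channel, if it is not congested. */
	if (ring_has_room(orig))
		return orig;

	/* 2. Any uncongested channel on the same NUMA node. */
	for (cpu = 0; cpu < NUM_CPUS; cpu++)
		if (same_node(cpu, q_num) && ring_has_room(&channels[cpu]))
			return &channels[cpu];

	/* 3. Any uncongested channel on any node. */
	for (cpu = 0; cpu < NUM_CPUS; cpu++)
		if (ring_has_room(&channels[cpu]))
			return &channels[cpu];

	/* 4. Everything is busy: fall back to the original channel. */
	return orig;
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NUM_CPUS; cpu++) {
		channels[cpu].present = true;
		channels[cpu].avail_pct = (cpu == 2) ? 50 : 5;	/* only CPU 2 has headroom */
	}
	/* Same-node fallback (step 2) should pick CPU 2 for an I/O issued on CPU 0. */
	return pick_channel(0) == &channels[2] ? 0 : 1;
}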
