Commit 7cd4ecd

Merge tag 'drivers-5.10-2020-10-12' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
 "Here are the driver updates for 5.10. A few SCSI updates in here too,
  in coordination with Martin as they depend on core block changes for
  the shared tag bitmap.

  This contains:

   - NVMe pull requests via Christoph:
      - fix keep alive timer modification (Amit Engel)
      - order the PCI ID list more sensibly (Andy Shevchenko)
      - cleanup the open by controller helper (Chaitanya Kulkarni)
      - use an xarray for the CSE log lookup (Chaitanya Kulkarni)
      - support ZNS in nvmet passthrough mode (Chaitanya Kulkarni)
      - fix nvme_ns_report_zones (Christoph Hellwig)
      - add a sanity check to nvmet-fc (James Smart)
      - fix interrupt allocation when too many polled queues are specified (Jeffle Xu)
      - small nvmet-tcp optimization (Mark Wunderlich)
      - fix a controller refcount leak on init failure (Chaitanya Kulkarni)
      - misc cleanups (Chaitanya Kulkarni)
      - major refactoring of the scanning code (Christoph Hellwig)

   - MD updates via Song:
      - Bug fixes in bitmap code, from Zhao Heming
      - Fix a work queue check, from Guoqing Jiang
      - Fix raid5 oops with reshape, from Song Liu
      - Clean up unused code, from Jason Yan
      - Discard improvements, from Xiao Ni
      - raid5/6 page offset support, from Yufen Yu

   - Shared tag bitmap for SCSI/hisi_sas/null_blk (John, Kashyap, Hannes)

   - null_blk open/active zone limit support (Niklas)

   - Set of bcache updates (Coly, Dongsheng, Qinglang)"

* tag 'drivers-5.10-2020-10-12' of git://git.kernel.dk/linux-block: (78 commits)
  md/raid5: fix oops during stripe resizing
  md/bitmap: fix memory leak of temporary bitmap
  md: fix the checking of wrong work queue
  md/bitmap: md_bitmap_get_counter returns wrong blocks
  md/bitmap: md_bitmap_read_sb uses wrong bitmap blocks
  md/raid0: remove unused function is_io_in_chunk_boundary()
  nvme-core: remove extra condition for vwc
  nvme-core: remove extra variable
  nvme: remove nvme_identify_ns_list
  nvme: refactor nvme_validate_ns
  nvme: move nvme_validate_ns
  nvme: query namespace identifiers before adding the namespace
  nvme: revalidate zone bitmaps in nvme_update_ns_info
  nvme: remove nvme_update_formats
  nvme: update the known admin effects
  nvme: set the queue limits in nvme_update_ns_info
  nvme: remove the 0 lba_shift check in nvme_update_ns_info
  nvme: clean up the check for too large logic block sizes
  nvme: freeze the queue over ->lba_shift updates
  nvme: factor out a nvme_configure_metadata helper
  ...
2 parents: 79ec6d9 + 79cd166; commit: 7cd4ecd

58 files changed, +2091 -1269 lines changed

block/scsi_ioctl.c

Lines changed: 3 additions & 3 deletions
@@ -644,7 +644,7 @@ struct compat_cdrom_generic_command {
 	unsigned char	pad[3];
 	compat_int_t	quiet;
 	compat_int_t	timeout;
-	compat_caddr_t	reserved[1];
+	compat_caddr_t	unused;
 };
 #endif
 
@@ -666,7 +666,7 @@ static int scsi_get_cdrom_generic_arg(struct cdrom_generic_command *cgc,
 		.data_direction	= cgc32.data_direction,
 		.quiet		= cgc32.quiet,
 		.timeout	= cgc32.timeout,
-		.reserved[0]	= compat_ptr(cgc32.reserved[0]),
+		.unused		= compat_ptr(cgc32.unused),
 	};
 	memcpy(&cgc->cmd, &cgc32.cmd, CDROM_PACKET_SIZE);
 	return 0;
@@ -691,7 +691,7 @@ static int scsi_put_cdrom_generic_arg(const struct cdrom_generic_command *cgc,
 		.data_direction	= cgc->data_direction,
 		.quiet		= cgc->quiet,
 		.timeout	= cgc->timeout,
-		.reserved[0]	= (uintptr_t)(cgc->reserved[0]),
+		.unused		= (uintptr_t)(cgc->unused),
 	};
 	memcpy(&cgc32.cmd, &cgc->cmd, CDROM_PACKET_SIZE);
 
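The two helpers touched above copy a 32-bit compat layout to and from the native structure, and the renamed member follows the usual compat pattern: widen the compat_caddr_t with compat_ptr() on the way in, and cast the native pointer back through uintptr_t on the way out. A minimal sketch of that round trip, using hypothetical struct and field names rather than the kernel's (assumes <linux/compat.h>):

/* Illustrative only: mirrors the compat_ptr()/uintptr_t round trip above. */
struct native_arg {
	void __user *unused;		/* native (possibly 64-bit) user pointer */
};

struct compat_arg {
	compat_caddr_t unused;		/* 32-bit user address from a compat task */
};

static void get_compat_arg(struct native_arg *n, const struct compat_arg *c32)
{
	n->unused = compat_ptr(c32->unused);	/* widen the 32-bit value to a pointer */
}

static void put_compat_arg(struct compat_arg *c32, const struct native_arg *n)
{
	c32->unused = (uintptr_t)n->unused;	/* narrow back to 32 bits */
}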

crypto/async_tx/async_pq.c

Lines changed: 48 additions & 24 deletions
@@ -104,7 +104,7 @@ do_async_gen_syndrome(struct dma_chan *chan,
  * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
  */
 static void
-do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
+do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
 		     size_t len, struct async_submit_ctl *submit)
 {
 	void **srcs;
@@ -121,7 +121,8 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 			BUG_ON(i > disks - 3); /* P or Q can't be zero */
 			srcs[i] = (void*)raid6_empty_zero_page;
 		} else {
-			srcs[i] = page_address(blocks[i]) + offset;
+			srcs[i] = page_address(blocks[i]) + offsets[i];
+
 			if (i < disks - 2) {
 				stop = i;
 				if (start == -1)
@@ -138,10 +139,23 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	async_tx_sync_epilog(submit);
 }
 
+static inline bool
+is_dma_pq_aligned_offs(struct dma_device *dev, unsigned int *offs,
+		       int src_cnt, size_t len)
+{
+	int i;
+
+	for (i = 0; i < src_cnt; i++) {
+		if (!is_dma_pq_aligned(dev, offs[i], 0, len))
+			return false;
+	}
+	return true;
+}
+
 /**
  * async_gen_syndrome - asynchronously calculate a raid6 syndrome
  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
- * @offset: common offset into each block (src and dest) to start transaction
+ * @offsets: offset array into each block (src and dest) to start transaction
  * @disks: number of blocks (including missing P or Q, see below)
  * @len: length of operation in bytes
  * @submit: submission/completion modifiers
@@ -160,7 +174,7 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
  * path.
  */
 struct dma_async_tx_descriptor *
-async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
+async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
 		   size_t len, struct async_submit_ctl *submit)
 {
 	int src_cnt = disks - 2;
@@ -179,7 +193,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
 	    (src_cnt <= dma_maxpq(device, 0) ||
 	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
-	    is_dma_pq_aligned(device, offset, 0, len)) {
+	    is_dma_pq_aligned_offs(device, offsets, disks, len)) {
 		struct dma_async_tx_descriptor *tx;
 		enum dma_ctrl_flags dma_flags = 0;
 		unsigned char coefs[MAX_DISKS];
@@ -196,8 +210,8 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 		for (i = 0, j = 0; i < src_cnt; i++) {
 			if (blocks[i] == NULL)
 				continue;
-			unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
-						      len, DMA_TO_DEVICE);
+			unmap->addr[j] = dma_map_page(device->dev, blocks[i],
+						offsets[i], len, DMA_TO_DEVICE);
 			coefs[j] = raid6_gfexp[i];
 			unmap->to_cnt++;
 			j++;
@@ -210,7 +224,8 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 		unmap->bidi_cnt++;
 		if (P(blocks, disks))
 			unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
-							offset, len, DMA_BIDIRECTIONAL);
+							P(offsets, disks),
+							len, DMA_BIDIRECTIONAL);
 		else {
 			unmap->addr[j++] = 0;
 			dma_flags |= DMA_PREP_PQ_DISABLE_P;
@@ -219,7 +234,8 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 		unmap->bidi_cnt++;
 		if (Q(blocks, disks))
 			unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
-							offset, len, DMA_BIDIRECTIONAL);
+							Q(offsets, disks),
+							len, DMA_BIDIRECTIONAL);
 		else {
 			unmap->addr[j++] = 0;
 			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
@@ -240,13 +256,13 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 
 	if (!P(blocks, disks)) {
 		P(blocks, disks) = pq_scribble_page;
-		BUG_ON(len + offset > PAGE_SIZE);
+		P(offsets, disks) = 0;
 	}
 	if (!Q(blocks, disks)) {
 		Q(blocks, disks) = pq_scribble_page;
-		BUG_ON(len + offset > PAGE_SIZE);
+		Q(offsets, disks) = 0;
 	}
-	do_sync_gen_syndrome(blocks, offset, disks, len, submit);
+	do_sync_gen_syndrome(blocks, offsets, disks, len, submit);
 
 	return NULL;
 }
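With the hunks above, async_gen_syndrome() takes one offset per page instead of a single offset shared by every block. A minimal, hypothetical caller sketch (the wrapper and its setup are illustrative, not part of this commit; assumes <linux/async_tx.h>):

/* Illustrative wrapper: compute P/Q for a stripe whose pages each carry
 * their own intra-page byte offset. */
static struct dma_async_tx_descriptor *
gen_stripe_syndrome(struct page **blocks, unsigned int *offsets, int disks,
		    size_t len)
{
	struct async_submit_ctl submit;

	/* No dependent descriptor, no completion callback, no scribble buffer. */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, NULL);

	/* blocks[0..disks-3] are data, blocks[disks-2] is P, blocks[disks-1] is Q;
	 * offsets[] must provide one entry per block. */
	return async_gen_syndrome(blocks, offsets, disks, len, &submit);
}

The remaining hunks make the same conversion in async_syndrome_val(), which also grows an s_off parameter for the spare page used by the synchronous fallback.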
@@ -270,6 +286,7 @@ pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, si
  * @len: length of operation in bytes
  * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
  * @spare: temporary result buffer for the synchronous case
+ * @s_off: spare buffer page offset
  * @submit: submission / completion modifiers
  *
  * The same notes from async_gen_syndrome apply to the 'blocks',
@@ -278,9 +295,9 @@ pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, si
  * specified.
  */
 struct dma_async_tx_descriptor *
-async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
+async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks,
 		   size_t len, enum sum_check_flags *pqres, struct page *spare,
-		   struct async_submit_ctl *submit)
+		   unsigned int s_off, struct async_submit_ctl *submit)
 {
 	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
 	struct dma_device *device = chan ? chan->device : NULL;
@@ -295,7 +312,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	if (unmap && disks <= dma_maxpq(device, 0) &&
-	    is_dma_pq_aligned(device, offset, 0, len)) {
+	    is_dma_pq_aligned_offs(device, offsets, disks, len)) {
 		struct device *dev = device->dev;
 		dma_addr_t pq[2];
 		int i, j = 0, src_cnt = 0;
@@ -307,7 +324,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 		for (i = 0; i < disks-2; i++)
 			if (likely(blocks[i])) {
 				unmap->addr[j] = dma_map_page(dev, blocks[i],
-							      offset, len,
+							      offsets[i], len,
 							      DMA_TO_DEVICE);
 				coefs[j] = raid6_gfexp[i];
 				unmap->to_cnt++;
@@ -320,7 +337,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 			dma_flags |= DMA_PREP_PQ_DISABLE_P;
 		} else {
 			pq[0] = dma_map_page(dev, P(blocks, disks),
-					     offset, len,
+					     P(offsets, disks), len,
 					     DMA_TO_DEVICE);
 			unmap->addr[j++] = pq[0];
 			unmap->to_cnt++;
@@ -330,7 +347,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
 		} else {
 			pq[1] = dma_map_page(dev, Q(blocks, disks),
-					     offset, len,
+					     Q(offsets, disks), len,
 					     DMA_TO_DEVICE);
 			unmap->addr[j++] = pq[1];
 			unmap->to_cnt++;
@@ -355,7 +372,9 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 		async_tx_submit(chan, tx, submit);
 	} else {
 		struct page *p_src = P(blocks, disks);
+		unsigned int p_off = P(offsets, disks);
 		struct page *q_src = Q(blocks, disks);
+		unsigned int q_off = Q(offsets, disks);
 		enum async_tx_flags flags_orig = submit->flags;
 		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
 		void *scribble = submit->scribble;
@@ -381,27 +400,32 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 		if (p_src) {
 			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
 					  NULL, NULL, scribble);
-			tx = async_xor(spare, blocks, offset, disks-2, len, submit);
+			tx = async_xor_offs(spare, s_off,
+					blocks, offsets, disks-2, len, submit);
 			async_tx_quiesce(&tx);
-			p = page_address(p_src) + offset;
-			s = page_address(spare) + offset;
+			p = page_address(p_src) + p_off;
+			s = page_address(spare) + s_off;
 			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
 		}
 
 		if (q_src) {
 			P(blocks, disks) = NULL;
 			Q(blocks, disks) = spare;
+			Q(offsets, disks) = s_off;
 			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
-			tx = async_gen_syndrome(blocks, offset, disks, len, submit);
+			tx = async_gen_syndrome(blocks, offsets, disks,
					len, submit);
 			async_tx_quiesce(&tx);
-			q = page_address(q_src) + offset;
-			s = page_address(spare) + offset;
+			q = page_address(q_src) + q_off;
+			s = page_address(spare) + s_off;
 			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
 		}
 
 		/* restore P, Q and submit */
 		P(blocks, disks) = p_src;
+		P(offsets, disks) = p_off;
 		Q(blocks, disks) = q_src;
+		Q(offsets, disks) = q_off;
 
 		submit->cb_fn = cb_fn_orig;
 		submit->cb_param = cb_param_orig;
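And a similarly hypothetical sketch for the extended async_syndrome_val() signature, quiescing the descriptor before reading pqres (the wrapper and its error handling are illustrative, not from this commit):

/* Illustrative wrapper: verify that a stripe's P/Q pages still match its
 * data, using 'spare' at byte offset 's_off' for the synchronous fallback. */
static int check_stripe_syndrome(struct page **blocks, unsigned int *offsets,
				 int disks, size_t len,
				 struct page *spare, unsigned int s_off)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;
	enum sum_check_flags pqres = 0;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, NULL);
	tx = async_syndrome_val(blocks, offsets, disks, len, &pqres,
				spare, s_off, &submit);
	async_tx_quiesce(&tx);		/* wait so pqres is valid to read */

	/* SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set on mismatch. */
	return pqres ? -EILSEQ : 0;
}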
