@@ -182,6 +182,7 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
182
182
* @nents_first_chunk: Number of entries int the (preallocated) first
183
183
* scatterlist chunk, 0 means no such preallocated first chunk
184
184
* @free_fn: Free function
185
+ * @num_ents: Number of entries in the table
185
186
*
186
187
* Description:
187
188
* Free an sg table previously allocated and setup with
@@ -190,7 +191,8 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
190
191
*
191
192
**/
192
193
void __sg_free_table (struct sg_table * table , unsigned int max_ents ,
193
- unsigned int nents_first_chunk , sg_free_fn * free_fn )
194
+ unsigned int nents_first_chunk , sg_free_fn * free_fn ,
195
+ unsigned int num_ents )
194
196
{
195
197
struct scatterlist * sgl , * next ;
196
198
unsigned curr_max_ents = nents_first_chunk ?: max_ents ;
@@ -199,8 +201,8 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
199
201
return ;
200
202
201
203
sgl = table -> sgl ;
202
- while (table -> orig_nents ) {
203
- unsigned int alloc_size = table -> orig_nents ;
204
+ while (num_ents ) {
205
+ unsigned int alloc_size = num_ents ;
204
206
unsigned int sg_size ;
205
207
206
208
/*
@@ -218,7 +220,7 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
218
220
next = NULL ;
219
221
}
220
222
221
- table -> orig_nents -= sg_size ;
223
+ num_ents -= sg_size ;
222
224
if (nents_first_chunk )
223
225
nents_first_chunk = 0 ;
224
226
else
@@ -231,14 +233,28 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
231
233
}
232
234
EXPORT_SYMBOL (__sg_free_table );
233
235
236
+ /**
237
+ * sg_free_append_table - Free a previously allocated append sg table.
238
+ * @table: The mapped sg append table header
239
+ *
240
+ **/
241
+ void sg_free_append_table (struct sg_append_table * table )
242
+ {
243
+ __sg_free_table (& table -> sgt , SG_MAX_SINGLE_ALLOC , false, sg_kfree ,
244
+ table -> total_nents );
245
+ }
246
+ EXPORT_SYMBOL (sg_free_append_table );
247
+
248
+
234
249
/**
235
250
* sg_free_table - Free a previously allocated sg table
236
251
* @table: The mapped sg table header
237
252
*
238
253
**/
239
254
void sg_free_table (struct sg_table * table )
240
255
{
241
- __sg_free_table (table , SG_MAX_SINGLE_ALLOC , false, sg_kfree );
256
+ __sg_free_table (table , SG_MAX_SINGLE_ALLOC , false, sg_kfree ,
257
+ table -> orig_nents );
242
258
}
243
259
EXPORT_SYMBOL (sg_free_table );
244
260
@@ -359,13 +375,12 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
359
375
ret = __sg_alloc_table (table , nents , SG_MAX_SINGLE_ALLOC ,
360
376
NULL , 0 , gfp_mask , sg_kmalloc );
361
377
if (unlikely (ret ))
362
- __sg_free_table (table , SG_MAX_SINGLE_ALLOC , 0 , sg_kfree );
363
-
378
+ sg_free_table (table );
364
379
return ret ;
365
380
}
366
381
EXPORT_SYMBOL (sg_alloc_table );
367
382
368
- static struct scatterlist * get_next_sg (struct sg_table * table ,
383
+ static struct scatterlist * get_next_sg (struct sg_append_table * table ,
369
384
struct scatterlist * cur ,
370
385
unsigned long needed_sges ,
371
386
gfp_t gfp_mask )
@@ -386,80 +401,79 @@ static struct scatterlist *get_next_sg(struct sg_table *table,
386
401
return ERR_PTR (- ENOMEM );
387
402
sg_init_table (new_sg , alloc_size );
388
403
if (cur ) {
404
+ table -> total_nents += alloc_size - 1 ;
389
405
__sg_chain (next_sg , new_sg );
390
- table -> orig_nents += alloc_size - 1 ;
391
406
} else {
392
- table -> sgl = new_sg ;
393
- table -> orig_nents = alloc_size ;
394
- table -> nents = 0 ;
407
+ table -> sgt .sgl = new_sg ;
408
+ table -> total_nents = alloc_size ;
395
409
}
396
410
return new_sg ;
397
411
}
398
412
399
413
/**
400
- * sg_alloc_append_table_from_pages - Allocate and initialize an sg table from
401
- * an array of pages
402
- * @sgt: The sg table header to use
403
- * @pages: Pointer to an array of page pointers
404
- * @n_pages: Number of pages in the pages array
414
+ * sg_alloc_append_table_from_pages - Allocate and initialize an append sg
415
+ * table from an array of pages
416
+ * @sgt_append: The sg append table to use
417
+ * @pages: Pointer to an array of page pointers
418
+ * @n_pages: Number of pages in the pages array
405
419
* @offset: Offset from start of the first page to the start of a buffer
406
420
* @size: Number of valid bytes in the buffer (after offset)
407
421
* @max_segment: Maximum size of a scatterlist element in bytes
408
- * @prv: Last populated sge in sgt
409
422
* @left_pages: Left pages caller have to set after this call
410
423
* @gfp_mask: GFP allocation mask
411
424
*
412
425
* Description:
413
- * If @prv is NULL, allocate and initialize an sg table from a list of pages,
414
- * else reuse the scatterlist passed in at @prv.
415
- * Contiguous ranges of the pages are squashed into a single scatterlist
416
- * entry up to the maximum size specified in @max_segment. A user may
417
- * provide an offset at a start and a size of valid data in a buffer
418
- * specified by the page array.
426
+ * In the first call it allocates and initializes an sg table from a list of
427
+ * pages, else reuse the scatterlist from sgt_append. Contiguous ranges of
428
+ * the pages are squashed into a single scatterlist entry up to the maximum
429
+ * size specified in @max_segment. A user may provide an offset at a start
430
+ * and a size of valid data in a buffer specified by the page array. The
431
+ * returned sg table is released by sg_free_append_table().
419
432
*
420
433
* Returns:
421
- * Last SGE in sgt on success, PTR_ERR on otherwise.
422
- * The allocation in @sgt must be released by sg_free_table.
434
+ * 0 on success, negative error on failure
423
435
*
424
436
* Notes:
425
437
* If this function returns non-0 (eg failure), the caller must call
426
- * sg_free_table() to cleanup any leftover allocations.
438
+ * sg_free_append_table() to cleanup any leftover allocations.
439
+ *
440
+ * In the first call, sgt_append must be initialized.
427
441
*/
428
- struct scatterlist * sg_alloc_append_table_from_pages (struct sg_table * sgt ,
442
+ int sg_alloc_append_table_from_pages (struct sg_append_table * sgt_append ,
429
443
struct page * * pages , unsigned int n_pages , unsigned int offset ,
430
444
unsigned long size , unsigned int max_segment ,
431
- struct scatterlist * prv , unsigned int left_pages ,
432
- gfp_t gfp_mask )
445
+ unsigned int left_pages , gfp_t gfp_mask )
433
446
{
434
447
unsigned int chunks , cur_page , seg_len , i , prv_len = 0 ;
435
448
unsigned int added_nents = 0 ;
436
- struct scatterlist * s = prv ;
449
+ struct scatterlist * s = sgt_append -> prv ;
437
450
438
451
/*
439
452
* The algorithm below requires max_segment to be aligned to PAGE_SIZE
440
453
* otherwise it can overshoot.
441
454
*/
442
455
max_segment = ALIGN_DOWN (max_segment , PAGE_SIZE );
443
456
if (WARN_ON (max_segment < PAGE_SIZE ))
444
- return ERR_PTR ( - EINVAL ) ;
457
+ return - EINVAL ;
445
458
446
- if (IS_ENABLED (CONFIG_ARCH_NO_SG_CHAIN ) && prv )
447
- return ERR_PTR ( - EOPNOTSUPP ) ;
459
+ if (IS_ENABLED (CONFIG_ARCH_NO_SG_CHAIN ) && sgt_append -> prv )
460
+ return - EOPNOTSUPP ;
448
461
449
- if (prv ) {
450
- unsigned long paddr = (page_to_pfn (sg_page (prv )) * PAGE_SIZE +
451
- prv -> offset + prv -> length ) /
452
- PAGE_SIZE ;
462
+ if (sgt_append -> prv ) {
463
+ unsigned long paddr =
464
+ (page_to_pfn (sg_page (sgt_append -> prv )) * PAGE_SIZE +
465
+ sgt_append -> prv -> offset + sgt_append -> prv -> length ) /
466
+ PAGE_SIZE ;
453
467
454
468
if (WARN_ON (offset ))
455
- return ERR_PTR ( - EINVAL ) ;
469
+ return - EINVAL ;
456
470
457
471
/* Merge contiguous pages into the last SG */
458
- prv_len = prv -> length ;
472
+ prv_len = sgt_append -> prv -> length ;
459
473
while (n_pages && page_to_pfn (pages [0 ]) == paddr ) {
460
- if (prv -> length + PAGE_SIZE > max_segment )
474
+ if (sgt_append -> prv -> length + PAGE_SIZE > max_segment )
461
475
break ;
462
- prv -> length += PAGE_SIZE ;
476
+ sgt_append -> prv -> length += PAGE_SIZE ;
463
477
paddr ++ ;
464
478
pages ++ ;
465
479
n_pages -- ;
@@ -496,15 +510,16 @@ struct scatterlist *sg_alloc_append_table_from_pages(struct sg_table *sgt,
496
510
}
497
511
498
512
/* Pass how many chunks might be left */
499
- s = get_next_sg (sgt , s , chunks - i + left_pages , gfp_mask );
513
+ s = get_next_sg (sgt_append , s , chunks - i + left_pages ,
514
+ gfp_mask );
500
515
if (IS_ERR (s )) {
501
516
/*
502
517
* Adjust entry length to be as before function was
503
518
* called.
504
519
*/
505
- if (prv )
506
- prv -> length = prv_len ;
507
- return s ;
520
+ if (sgt_append -> prv )
521
+ sgt_append -> prv -> length = prv_len ;
522
+ return PTR_ERR ( s ) ;
508
523
}
509
524
chunk_size = ((j - cur_page ) << PAGE_SHIFT ) - offset ;
510
525
sg_set_page (s , pages [cur_page ],
@@ -514,11 +529,13 @@ struct scatterlist *sg_alloc_append_table_from_pages(struct sg_table *sgt,
514
529
offset = 0 ;
515
530
cur_page = j ;
516
531
}
517
- sgt -> nents += added_nents ;
532
+ sgt_append -> sgt .nents += added_nents ;
533
+ sgt_append -> sgt .orig_nents = sgt_append -> sgt .nents ;
534
+ sgt_append -> prv = s ;
518
535
out :
519
536
if (!left_pages )
520
537
sg_mark_end (s );
521
- return s ;
538
+ return 0 ;
522
539
}
523
540
EXPORT_SYMBOL (sg_alloc_append_table_from_pages );
524
541
@@ -550,8 +567,18 @@ int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
550
567
unsigned long size , unsigned int max_segment ,
551
568
gfp_t gfp_mask )
552
569
{
553
- return PTR_ERR_OR_ZERO (sg_alloc_append_table_from_pages (sgt , pages ,
554
- n_pages , offset , size , max_segment , NULL , 0 , gfp_mask ));
570
+ struct sg_append_table append = {};
571
+ int err ;
572
+
573
+ err = sg_alloc_append_table_from_pages (& append , pages , n_pages , offset ,
574
+ size , max_segment , 0 , gfp_mask );
575
+ if (err ) {
576
+ sg_free_append_table (& append );
577
+ return err ;
578
+ }
579
+ memcpy (sgt , & append .sgt , sizeof (* sgt ));
580
+ WARN_ON (append .total_nents != sgt -> orig_nents );
581
+ return 0 ;
555
582
}
556
583
EXPORT_SYMBOL (sg_alloc_table_from_pages_segment );
557
584
0 commit comments