Skip to content

Commit 5c075c5

Browse files
xp4ns3 and brauner
authored and committed
fs/aio: Stop allocating aio rings from HIGHMEM
There is no need to allocate aio rings from HIGHMEM because of very little memory needed here. Therefore, use GFP_USER flag in find_or_create_page() and get rid of kmap*() mappings. Cc: Al Viro <[email protected]> Cc: Ira Weiny <[email protected]> Suggested-by: Matthew Wilcox <[email protected]> Signed-off-by: Fabio M. De Francesco <[email protected]> Reviewed-by: Ira Weiny <[email protected]> Message-Id: <[email protected]> Signed-off-by: Christian Brauner <[email protected]>
1 parent b6334e2 commit 5c075c5

File tree

1 file changed

+9
-17
lines changed

1 file changed

+9
-17
lines changed

fs/aio.c

Lines changed: 9 additions & 17 deletions
Original file line number | Diff line number | Diff line change
@@ -530,7 +530,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
530530
for (i = 0; i < nr_pages; i++) {
531531
struct page *page;
532532
page = find_or_create_page(file->f_mapping,
533-
i, GFP_HIGHUSER | __GFP_ZERO);
533+
i, GFP_USER | __GFP_ZERO);
534534
if (!page)
535535
break;
536536
pr_debug("pid(%d) page[%d]->count=%d\n",
@@ -571,15 +571,14 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
571571
ctx->user_id = ctx->mmap_base;
572572
ctx->nr_events = nr_events; /* trusted copy */
573573

574-
ring = kmap_atomic(ctx->ring_pages[0]);
574+
ring = page_address(ctx->ring_pages[0]);
575575
ring->nr = nr_events; /* user copy */
576576
ring->id = ~0U;
577577
ring->head = ring->tail = 0;
578578
ring->magic = AIO_RING_MAGIC;
579579
ring->compat_features = AIO_RING_COMPAT_FEATURES;
580580
ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
581581
ring->header_length = sizeof(struct aio_ring);
582-
kunmap_atomic(ring);
583582
flush_dcache_page(ctx->ring_pages[0]);
584583

585584
return 0;
@@ -682,9 +681,8 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
682681
* we are protected from page migration
683682
* changes ring_pages by ->ring_lock.
684683
*/
685-
ring = kmap_atomic(ctx->ring_pages[0]);
684+
ring = page_address(ctx->ring_pages[0]);
686685
ring->id = ctx->id;
687-
kunmap_atomic(ring);
688686
return 0;
689687
}
690688

@@ -1025,9 +1023,8 @@ static void user_refill_reqs_available(struct kioctx *ctx)
10251023
* against ctx->completed_events below will make sure we do the
10261024
* safe/right thing.
10271025
*/
1028-
ring = kmap_atomic(ctx->ring_pages[0]);
1026+
ring = page_address(ctx->ring_pages[0]);
10291027
head = ring->head;
1030-
kunmap_atomic(ring);
10311028

10321029
refill_reqs_available(ctx, head, ctx->tail);
10331030
}
@@ -1133,12 +1130,11 @@ static void aio_complete(struct aio_kiocb *iocb)
11331130
if (++tail >= ctx->nr_events)
11341131
tail = 0;
11351132

1136-
ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1133+
ev_page = page_address(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
11371134
event = ev_page + pos % AIO_EVENTS_PER_PAGE;
11381135

11391136
*event = iocb->ki_res;
11401137

1141-
kunmap_atomic(ev_page);
11421138
flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
11431139

11441140
pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
@@ -1152,10 +1148,9 @@ static void aio_complete(struct aio_kiocb *iocb)
11521148

11531149
ctx->tail = tail;
11541150

1155-
ring = kmap_atomic(ctx->ring_pages[0]);
1151+
ring = page_address(ctx->ring_pages[0]);
11561152
head = ring->head;
11571153
ring->tail = tail;
1158-
kunmap_atomic(ring);
11591154
flush_dcache_page(ctx->ring_pages[0]);
11601155

11611156
ctx->completed_events++;
@@ -1215,10 +1210,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
12151210
mutex_lock(&ctx->ring_lock);
12161211

12171212
/* Access to ->ring_pages here is protected by ctx->ring_lock. */
1218-
ring = kmap_atomic(ctx->ring_pages[0]);
1213+
ring = page_address(ctx->ring_pages[0]);
12191214
head = ring->head;
12201215
tail = ring->tail;
1221-
kunmap_atomic(ring);
12221216

12231217
/*
12241218
* Ensure that once we've read the current tail pointer, that
@@ -1250,10 +1244,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
12501244
avail = min(avail, nr - ret);
12511245
avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
12521246

1253-
ev = kmap(page);
1247+
ev = page_address(page);
12541248
copy_ret = copy_to_user(event + ret, ev + pos,
12551249
sizeof(*ev) * avail);
1256-
kunmap(page);
12571250

12581251
if (unlikely(copy_ret)) {
12591252
ret = -EFAULT;
@@ -1265,9 +1258,8 @@ static long aio_read_events_ring(struct kioctx *ctx,
12651258
head %= ctx->nr_events;
12661259
}
12671260

1268-
ring = kmap_atomic(ctx->ring_pages[0]);
1261+
ring = page_address(ctx->ring_pages[0]);
12691262
ring->head = head;
1270-
kunmap_atomic(ring);
12711263
flush_dcache_page(ctx->ring_pages[0]);
12721264

12731265
pr_debug("%li h%u t%u\n", ret, head, tail);

0 commit comments

Comments (0)