@@ -530,7 +530,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page;
 		page = find_or_create_page(file->f_mapping,
-					   i, GFP_HIGHUSER | __GFP_ZERO);
+					   i, GFP_USER | __GFP_ZERO);
 		if (!page)
 			break;
 		pr_debug("pid(%d) page[%d]->count=%d\n",
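Note on the allocation change above: GFP_HIGHUSER may hand back a ZONE_HIGHMEM page, which on 32-bit kernels has no permanent kernel mapping, so every kernel-side access needs a temporary kmap_atomic()/kunmap_atomic() (or kmap()/kunmap()) pair. GFP_USER allocates from lowmem only, so the ring pages always sit in the kernel's linear mapping and page_address() yields a directly usable pointer. A minimal sketch of the two access patterns, not part of the patch (the function name is made up for illustration):

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>

static void highmem_vs_lowmem_sketch(void)
{
	struct page *page;
	void *addr;

	/* GFP_HIGHUSER: the page may live in highmem, so a short-lived
	 * mapping must be set up and torn down around each access. */
	page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
	if (page) {
		addr = kmap_atomic(page);
		/* ... access *addr; sleeping is forbidden here ... */
		kunmap_atomic(addr);
		__free_page(page);
	}

	/* GFP_USER: the page is guaranteed lowmem and permanently
	 * mapped, so its kernel virtual address is always valid. */
	page = alloc_page(GFP_USER | __GFP_ZERO);
	if (page) {
		addr = page_address(page);
		/* ... access *addr freely, no unmap needed ... */
		__free_page(page);
	}
}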
@@ -571,15 +571,14 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
 	ctx->user_id = ctx->mmap_base;
 	ctx->nr_events = nr_events; /* trusted copy */
 
-	ring = kmap_atomic(ctx->ring_pages[0]);
+	ring = page_address(ctx->ring_pages[0]);
 	ring->nr = nr_events;	/* user copy */
 	ring->id = ~0U;
 	ring->head = ring->tail = 0;
 	ring->magic = AIO_RING_MAGIC;
 	ring->compat_features = AIO_RING_COMPAT_FEATURES;
 	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
 	ring->header_length = sizeof(struct aio_ring);
-	kunmap_atomic(ring);
 	flush_dcache_page(ctx->ring_pages[0]);
 
 	return 0;
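For reference, the header that the lines above initialize occupies the start of the first ring page; abridged from the same file (fs/aio.c):

struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;	/* written by userland or by the kernel */
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */

	struct io_event	io_events[];
};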
@@ -682,9 +681,8 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
 					 * we are protected from page migration
 					 * changes ring_pages by ->ring_lock.
 					 */
-					ring = kmap_atomic(ctx->ring_pages[0]);
+					ring = page_address(ctx->ring_pages[0]);
 					ring->id = ctx->id;
-					kunmap_atomic(ring);
 					return 0;
 				}
 
@@ -1025,9 +1023,8 @@ static void user_refill_reqs_available(struct kioctx *ctx)
 	 * against ctx->completed_events below will make sure we do the
 	 * safe/right thing.
 	 */
-	ring = kmap_atomic(ctx->ring_pages[0]);
+	ring = page_address(ctx->ring_pages[0]);
 	head = ring->head;
-	kunmap_atomic(ring);
 
 	refill_reqs_available(ctx, head, ctx->tail);
 }
@@ -1133,12 +1130,11 @@ static void aio_complete(struct aio_kiocb *iocb)
 	if (++tail >= ctx->nr_events)
 		tail = 0;
 
-	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+	ev_page = page_address(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
 	*event = iocb->ki_res;
 
-	kunmap_atomic(ev_page);
 	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 
 	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
@@ -1152,10 +1148,9 @@ static void aio_complete(struct aio_kiocb *iocb)
 
 	ctx->tail = tail;
 
-	ring = kmap_atomic(ctx->ring_pages[0]);
+	ring = page_address(ctx->ring_pages[0]);
 	head = ring->head;
 	ring->tail = tail;
-	kunmap_atomic(ring);
 	flush_dcache_page(ctx->ring_pages[0]);
 
 	ctx->completed_events++;
@@ -1215,10 +1210,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
 	mutex_lock(&ctx->ring_lock);
 
 	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
-	ring = kmap_atomic(ctx->ring_pages[0]);
+	ring = page_address(ctx->ring_pages[0]);
 	head = ring->head;
 	tail = ring->tail;
-	kunmap_atomic(ring);
 
 	/*
 	 * Ensure that once we've read the current tail pointer, that
@@ -1250,10 +1244,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
 		avail = min(avail, nr - ret);
 		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
 
-		ev = kmap(page);
+		ev = page_address(page);
 		copy_ret = copy_to_user(event + ret, ev + pos,
 					sizeof(*ev) * avail);
-		kunmap(page);
 
 		if (unlikely(copy_ret)) {
 			ret = -EFAULT;
@@ -1265,9 +1258,8 @@ static long aio_read_events_ring(struct kioctx *ctx,
 		head %= ctx->nr_events;
 	}
 
-	ring = kmap_atomic(ctx->ring_pages[0]);
+	ring = page_address(ctx->ring_pages[0]);
 	ring->head = head;
-	kunmap_atomic(ring);
 	flush_dcache_page(ctx->ring_pages[0]);
 
 	pr_debug("%li h%u t%u\n", ret, head, tail);
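The whole patch reduces to one pattern: every kmap_atomic()/kunmap_atomic() and kmap()/kunmap() pair around the ring pages collapses to a single page_address() call, which is safe only because the GFP_USER change above guarantees the pages are lowmem. The flush_dcache_page() calls stay, since the ring is also mmap()ed into userspace and kernel-side writes must remain visible through that alias on architectures with aliasing data caches. A condensed before/after sketch (hypothetical helper; assumes it lives in fs/aio.c, where struct kioctx is defined):

static unsigned int ring_head_sketch(struct kioctx *ctx)
{
	struct aio_ring *ring;
	unsigned int head;

	/* Before: temporary atomic mapping; no sleeping allowed
	 * between kmap_atomic() and kunmap_atomic().
	 *
	 *	ring = kmap_atomic(ctx->ring_pages[0]);
	 *	head = ring->head;
	 *	kunmap_atomic(ring);
	 */

	/* After: direct access through the kernel linear map. */
	ring = page_address(ctx->ring_pages[0]);
	head = ring->head;

	return head;
}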