@@ -100,6 +100,7 @@ struct dmirror {
100
100
/*
 * One chunk of simulated device memory backing a dmirror device.
 */
struct dmirror_chunk {
	struct dev_pagemap pagemap;	/* ZONE_DEVICE range for this chunk */
	struct dmirror_device *mdevice;	/* owning mirror device */
	bool remove;			/* set (under mdevice->lock) while the
					 * chunk is being torn down so that
					 * dmirror_devmem_free() does not put
					 * its pages back on the free list */
};
104
105
105
106
/*
@@ -192,11 +193,15 @@ static int dmirror_fops_release(struct inode *inode, struct file *filp)
192
193
return 0 ;
193
194
}
194
195
196
/* Map a ZONE_DEVICE page back to the dmirror_chunk that provides it. */
static struct dmirror_chunk *dmirror_page_to_chunk(struct page *page)
{
	return container_of(page->pgmap, struct dmirror_chunk, pagemap);
}
200
+
195
201
static struct dmirror_device * dmirror_page_to_device (struct page * page )
196
202
197
203
{
198
- return container_of (page -> pgmap , struct dmirror_chunk ,
199
- pagemap )-> mdevice ;
204
+ return dmirror_page_to_chunk (page )-> mdevice ;
200
205
}
201
206
202
207
static int dmirror_do_fault (struct dmirror * dmirror , struct hmm_range * range )
@@ -1218,6 +1223,85 @@ static int dmirror_snapshot(struct dmirror *dmirror,
1218
1223
return ret ;
1219
1224
}
1220
1225
1226
+ static void dmirror_device_evict_chunk (struct dmirror_chunk * chunk )
1227
+ {
1228
+ unsigned long start_pfn = chunk -> pagemap .range .start >> PAGE_SHIFT ;
1229
+ unsigned long end_pfn = chunk -> pagemap .range .end >> PAGE_SHIFT ;
1230
+ unsigned long npages = end_pfn - start_pfn + 1 ;
1231
+ unsigned long i ;
1232
+ unsigned long * src_pfns ;
1233
+ unsigned long * dst_pfns ;
1234
+
1235
+ src_pfns = kcalloc (npages , sizeof (* src_pfns ), GFP_KERNEL );
1236
+ dst_pfns = kcalloc (npages , sizeof (* dst_pfns ), GFP_KERNEL );
1237
+
1238
+ migrate_device_range (src_pfns , start_pfn , npages );
1239
+ for (i = 0 ; i < npages ; i ++ ) {
1240
+ struct page * dpage , * spage ;
1241
+
1242
+ spage = migrate_pfn_to_page (src_pfns [i ]);
1243
+ if (!spage || !(src_pfns [i ] & MIGRATE_PFN_MIGRATE ))
1244
+ continue ;
1245
+
1246
+ if (WARN_ON (!is_device_private_page (spage ) &&
1247
+ !is_device_coherent_page (spage )))
1248
+ continue ;
1249
+ spage = BACKING_PAGE (spage );
1250
+ dpage = alloc_page (GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL );
1251
+ lock_page (dpage );
1252
+ copy_highpage (dpage , spage );
1253
+ dst_pfns [i ] = migrate_pfn (page_to_pfn (dpage ));
1254
+ if (src_pfns [i ] & MIGRATE_PFN_WRITE )
1255
+ dst_pfns [i ] |= MIGRATE_PFN_WRITE ;
1256
+ }
1257
+ migrate_device_pages (src_pfns , dst_pfns , npages );
1258
+ migrate_device_finalize (src_pfns , dst_pfns , npages );
1259
+ kfree (src_pfns );
1260
+ kfree (dst_pfns );
1261
+ }
1262
+
1263
+ /* Removes free pages from the free list so they can't be re-allocated */
1264
+ static void dmirror_remove_free_pages (struct dmirror_chunk * devmem )
1265
+ {
1266
+ struct dmirror_device * mdevice = devmem -> mdevice ;
1267
+ struct page * page ;
1268
+
1269
+ for (page = mdevice -> free_pages ; page ; page = page -> zone_device_data )
1270
+ if (dmirror_page_to_chunk (page ) == devmem )
1271
+ mdevice -> free_pages = page -> zone_device_data ;
1272
+ }
1273
+
1274
/*
 * Tear down and free every device-memory chunk owned by @mdevice.
 *
 * For each chunk: mark it for removal and pull its pages off the free
 * list (both under mdevice->lock, so a concurrent dmirror_devmem_free()
 * cannot re-queue pages of a dying chunk), migrate any still-in-use
 * pages back to system memory, then unmap the pagemap and release its
 * backing resources.  devmem_lock serializes against chunk allocation.
 */
static void dmirror_device_remove_chunks(struct dmirror_device *mdevice)
{
	unsigned int i;

	mutex_lock(&mdevice->devmem_lock);
	if (mdevice->devmem_chunks) {
		for (i = 0; i < mdevice->devmem_count; i++) {
			struct dmirror_chunk *devmem =
				mdevice->devmem_chunks[i];

			/*
			 * remove must be set under the spinlock so that
			 * dmirror_devmem_free() sees it and drops the
			 * chunk's pages instead of re-queueing them.
			 */
			spin_lock(&mdevice->lock);
			devmem->remove = true;
			dmirror_remove_free_pages(devmem);
			spin_unlock(&mdevice->lock);

			/* Migrate remaining in-use pages out, then unmap. */
			dmirror_device_evict_chunk(devmem);
			memunmap_pages(&devmem->pagemap);
			if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
				release_mem_region(devmem->pagemap.range.start,
						   range_len(&devmem->pagemap.range));
			kfree(devmem);
		}
		/* Reset allocator state so new chunks can be created. */
		mdevice->devmem_count = 0;
		mdevice->devmem_capacity = 0;
		mdevice->free_pages = NULL;
		kfree(mdevice->devmem_chunks);
		mdevice->devmem_chunks = NULL;
	}
	mutex_unlock(&mdevice->devmem_lock);
}
1304
+
1221
1305
static long dmirror_fops_unlocked_ioctl (struct file * filp ,
1222
1306
unsigned int command ,
1223
1307
unsigned long arg )
@@ -1272,6 +1356,11 @@ static long dmirror_fops_unlocked_ioctl(struct file *filp,
1272
1356
ret = dmirror_snapshot (dmirror , & cmd );
1273
1357
break ;
1274
1358
1359
+ case HMM_DMIRROR_RELEASE :
1360
+ dmirror_device_remove_chunks (dmirror -> mdevice );
1361
+ ret = 0 ;
1362
+ break ;
1363
+
1275
1364
default :
1276
1365
return - EINVAL ;
1277
1366
}
@@ -1326,9 +1415,13 @@ static void dmirror_devmem_free(struct page *page)
1326
1415
1327
1416
mdevice = dmirror_page_to_device (page );
1328
1417
spin_lock (& mdevice -> lock );
1329
- mdevice -> cfree ++ ;
1330
- page -> zone_device_data = mdevice -> free_pages ;
1331
- mdevice -> free_pages = page ;
1418
+
1419
+ /* Return page to our allocator if not freeing the chunk */
1420
+ if (!dmirror_page_to_chunk (page )-> remove ) {
1421
+ mdevice -> cfree ++ ;
1422
+ page -> zone_device_data = mdevice -> free_pages ;
1423
+ mdevice -> free_pages = page ;
1424
+ }
1332
1425
spin_unlock (& mdevice -> lock );
1333
1426
}
1334
1427
@@ -1408,22 +1501,7 @@ static int dmirror_device_init(struct dmirror_device *mdevice, int id)
1408
1501
1409
1502
/* Device teardown: release all device memory, then unregister the cdev. */
static void dmirror_device_remove(struct dmirror_device *mdevice)
{
	dmirror_device_remove_chunks(mdevice);
	cdev_device_del(&mdevice->cdevice, &mdevice->device);
}
1429
1507
0 commit comments