@@ -23,6 +23,9 @@ static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
23
23
static bool tid_rb_invalidate (struct mmu_interval_notifier * mni ,
24
24
const struct mmu_notifier_range * range ,
25
25
unsigned long cur_seq );
26
+ static bool tid_cover_invalidate (struct mmu_interval_notifier * mni ,
27
+ const struct mmu_notifier_range * range ,
28
+ unsigned long cur_seq );
26
29
static int program_rcvarray (struct hfi1_filedata * fd , struct tid_user_buf * ,
27
30
struct tid_group * grp ,
28
31
unsigned int start , u16 count ,
@@ -36,6 +39,9 @@ static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
36
39
static const struct mmu_interval_notifier_ops tid_mn_ops = {
37
40
.invalidate = tid_rb_invalidate ,
38
41
};
42
/*
 * Notifier ops for the single "cover" interval notifier registered over the
 * whole user buffer during hfi1_user_exp_rcv_setup(); its invalidate
 * callback (tid_cover_invalidate) bumps the interval sequence so setup can
 * detect an unmap that raced with page pinning.
 */
static const struct mmu_interval_notifier_ops tid_cover_ops = {
	.invalidate = tid_cover_invalidate,
};
39
45
40
46
/*
41
47
* Initialize context and file private data needed for Expected
@@ -254,6 +260,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
254
260
tididx = 0 , mapped , mapped_pages = 0 ;
255
261
u32 * tidlist = NULL ;
256
262
struct tid_user_buf * tidbuf ;
263
+ unsigned long mmu_seq = 0 ;
257
264
258
265
if (!PAGE_ALIGNED (tinfo -> vaddr ))
259
266
return - EINVAL ;
@@ -264,6 +271,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
264
271
if (!tidbuf )
265
272
return - ENOMEM ;
266
273
274
+ mutex_init (& tidbuf -> cover_mutex );
267
275
tidbuf -> vaddr = tinfo -> vaddr ;
268
276
tidbuf -> length = tinfo -> length ;
269
277
tidbuf -> psets = kcalloc (uctxt -> expected_count , sizeof (* tidbuf -> psets ),
@@ -273,6 +281,16 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
273
281
goto fail_release_mem ;
274
282
}
275
283
284
+ if (fd -> use_mn ) {
285
+ ret = mmu_interval_notifier_insert (
286
+ & tidbuf -> notifier , current -> mm ,
287
+ tidbuf -> vaddr , tidbuf -> npages * PAGE_SIZE ,
288
+ & tid_cover_ops );
289
+ if (ret )
290
+ goto fail_release_mem ;
291
+ mmu_seq = mmu_interval_read_begin (& tidbuf -> notifier );
292
+ }
293
+
276
294
pinned = pin_rcv_pages (fd , tidbuf );
277
295
if (pinned <= 0 ) {
278
296
ret = (pinned < 0 ) ? pinned : - ENOSPC ;
@@ -415,6 +433,20 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
415
433
unpin_rcv_pages (fd , tidbuf , NULL , mapped_pages , pinned - mapped_pages ,
416
434
false);
417
435
436
+ if (fd -> use_mn ) {
437
+ /* check for an invalidate during setup */
438
+ bool fail = false;
439
+
440
+ mutex_lock (& tidbuf -> cover_mutex );
441
+ fail = mmu_interval_read_retry (& tidbuf -> notifier , mmu_seq );
442
+ mutex_unlock (& tidbuf -> cover_mutex );
443
+
444
+ if (fail ) {
445
+ ret = - EBUSY ;
446
+ goto fail_unprogram ;
447
+ }
448
+ }
449
+
418
450
tinfo -> tidcnt = tididx ;
419
451
tinfo -> length = mapped_pages * PAGE_SIZE ;
420
452
@@ -424,6 +456,8 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
424
456
goto fail_unprogram ;
425
457
}
426
458
459
+ if (fd -> use_mn )
460
+ mmu_interval_notifier_remove (& tidbuf -> notifier );
427
461
kfree (tidbuf -> pages );
428
462
kfree (tidbuf -> psets );
429
463
kfree (tidbuf );
@@ -442,6 +476,8 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
442
476
fd -> tid_used -= pageset_count ;
443
477
spin_unlock (& fd -> tid_lock );
444
478
fail_unpin :
479
+ if (fd -> use_mn )
480
+ mmu_interval_notifier_remove (& tidbuf -> notifier );
445
481
if (pinned > 0 )
446
482
unpin_rcv_pages (fd , tidbuf , NULL , 0 , pinned , false);
447
483
fail_release_mem :
@@ -740,11 +776,6 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
740
776
& tid_mn_ops );
741
777
if (ret )
742
778
goto out_unmap ;
743
- /*
744
- * FIXME: This is in the wrong order, the notifier should be
745
- * established before the pages are pinned by pin_rcv_pages.
746
- */
747
- mmu_interval_read_begin (& node -> notifier );
748
779
}
749
780
fd -> entry_to_rb [node -> rcventry - uctxt -> expected_base ] = node ;
750
781
@@ -919,6 +950,23 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
919
950
return true;
920
951
}
921
952
953
+ static bool tid_cover_invalidate (struct mmu_interval_notifier * mni ,
954
+ const struct mmu_notifier_range * range ,
955
+ unsigned long cur_seq )
956
+ {
957
+ struct tid_user_buf * tidbuf =
958
+ container_of (mni , struct tid_user_buf , notifier );
959
+
960
+ /* take action only if unmapping */
961
+ if (range -> event == MMU_NOTIFY_UNMAP ) {
962
+ mutex_lock (& tidbuf -> cover_mutex );
963
+ mmu_interval_set_seq (mni , cur_seq );
964
+ mutex_unlock (& tidbuf -> cover_mutex );
965
+ }
966
+
967
+ return true;
968
+ }
969
+
922
970
static void cacheless_tid_rb_remove (struct hfi1_filedata * fdata ,
923
971
struct tid_rb_node * tnode )
924
972
{
0 commit comments