@@ -115,13 +115,14 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
 
 static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
 {
-	struct nvmet_ns *ns;
+	unsigned long nsid = 0;
+	struct nvmet_ns *cur;
+	unsigned long idx;
 
-	if (list_empty(&subsys->namespaces))
-		return 0;
+	xa_for_each(&subsys->namespaces, idx, cur)
+		nsid = cur->nsid;
 
-	ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
-	return ns->nsid;
+	return nsid;
 }
 
 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
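A note on the rewritten nvmet_max_nsid(): xa_for_each() visits present entries in ascending index order, so the value left in nsid after the walk belongs to the highest-numbered namespace. The same idiom in isolation, as a minimal sketch with invented names (not part of the patch):

	#include <linux/xarray.h>

	static unsigned long highest_index(struct xarray *xa)
	{
		unsigned long last = 0, idx;
		void *entry;

		/* Entries are visited in ascending index order. */
		xa_for_each(xa, idx, entry)
			last = idx;

		return last;
	}

This does trade the old O(1) list_last_entry() for a full walk, but only on the rarely taken namespace enable/disable paths.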
@@ -410,28 +411,13 @@ static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
 	cancel_delayed_work_sync(&ctrl->ka_work);
 }
 
-static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
-		__le32 nsid)
-{
-	struct nvmet_ns *ns;
-
-	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
-		if (ns->nsid == le32_to_cpu(nsid))
-			return ns;
-	}
-
-	return NULL;
-}
-
 struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
 {
 	struct nvmet_ns *ns;
 
-	rcu_read_lock();
-	ns = __nvmet_find_namespace(ctrl, nsid);
+	ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
 	if (ns)
 		percpu_ref_get(&ns->ref);
-	rcu_read_unlock();
 
 	return ns;
 }
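xa_load() takes the RCU read lock internally, which is why both the explicit rcu_read_lock()/rcu_read_unlock() pair and the O(n) __nvmet_find_namespace() walk can go: the lookup becomes a single lockless tree descent. A hedged sketch of the lookup-then-pin pattern, with hypothetical names:

	#include <linux/percpu-refcount.h>
	#include <linux/xarray.h>

	struct obj {
		struct percpu_ref ref;	/* illustrative object with a percpu ref */
	};

	static struct obj *obj_find_get(struct xarray *xa, unsigned long id)
	{
		struct obj *o;

		o = xa_load(xa, id);	/* lockless; RCU handled internally */
		if (o)
			percpu_ref_get(&o->ref);	/* pin before returning */
		return o;
	}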
@@ -586,24 +572,10 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 	if (ns->nsid > subsys->max_nsid)
 		subsys->max_nsid = ns->nsid;
 
-	/*
-	 * The namespaces list needs to be sorted to simplify the implementation
-	 * of the Identify Namespace List subcommand.
-	 */
-	if (list_empty(&subsys->namespaces)) {
-		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
-	} else {
-		struct nvmet_ns *old;
-
-		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link,
-				lockdep_is_held(&subsys->lock)) {
-			BUG_ON(ns->nsid == old->nsid);
-			if (ns->nsid < old->nsid)
-				break;
-		}
+	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
+	if (ret)
+		goto out_restore_subsys_maxnsid;
 
-		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
-	}
 	subsys->nr_namespaces++;
 
 	nvmet_ns_changed(subsys, ns->nsid);
@@ -612,6 +584,10 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 out_unlock:
 	mutex_unlock(&subsys->lock);
 	return ret;
+
+out_restore_subsys_maxnsid:
+	subsys->max_nsid = nvmet_max_nsid(subsys);
+	percpu_ref_exit(&ns->ref);
 out_dev_put:
 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
 		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
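xa_insert() also takes over duplicate detection from the old sorted-insert loop: it returns -EBUSY when the index is already occupied (subsuming the BUG_ON()) and -ENOMEM when node allocation fails, which is what the new out_restore_subsys_maxnsid unwind above handles. A sketch of the tentative-update-then-insert pattern, names invented:

	#include <linux/xarray.h>

	static int table_add(struct xarray *xa, unsigned long *max,
			unsigned long id, void *item)
	{
		unsigned long old_max = *max;
		int ret;

		if (id > *max)
			*max = id;	/* tentative, like subsys->max_nsid */

		ret = xa_insert(xa, id, item, GFP_KERNEL);
		if (ret)		/* -EBUSY: duplicate id, -ENOMEM */
			*max = old_max;	/* roll back on failure */

		return ret;
	}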
@@ -630,7 +606,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
 		goto out_unlock;
 
 	ns->enabled = false;
-	list_del_rcu(&ns->dev_link);
+	xa_erase(&ns->subsys->namespaces, ns->nsid);
 	if (ns->nsid == subsys->max_nsid)
 		subsys->max_nsid = nvmet_max_nsid(subsys);
 
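xa_erase() removes the entry under the xarray's internal spinlock and returns the old pointer (NULL if none was stored), so there is no RCU-deferred unlinking left to reason about here. A one-call sketch, names invented:

	#include <linux/xarray.h>

	/* Remove an entry by id; report whether it was actually present. */
	static bool table_remove(struct xarray *xa, unsigned long id)
	{
		return xa_erase(xa, id) != NULL;
	}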
@@ -681,7 +657,6 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
 	if (!ns)
 		return NULL;
 
-	INIT_LIST_HEAD(&ns->dev_link);
 	init_completion(&ns->disable_done);
 
 	ns->nsid = nsid;
@@ -1263,14 +1238,14 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
 		struct nvmet_req *req)
 {
 	struct nvmet_ns *ns;
+	unsigned long idx;
 
 	if (!req->p2p_client)
 		return;
 
 	ctrl->p2p_client = get_device(req->p2p_client);
 
-	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link,
-			lockdep_is_held(&ctrl->subsys->lock))
+	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
 }
 
@@ -1523,7 +1498,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 	kref_init(&subsys->ref);
 
 	mutex_init(&subsys->lock);
-	INIT_LIST_HEAD(&subsys->namespaces);
+	xa_init(&subsys->namespaces);
 	INIT_LIST_HEAD(&subsys->ctrls);
 	INIT_LIST_HEAD(&subsys->hosts);
 
@@ -1535,8 +1510,9 @@ static void nvmet_subsys_free(struct kref *ref)
 	struct nvmet_subsys *subsys =
 		container_of(ref, struct nvmet_subsys, ref);
 
-	WARN_ON_ONCE(!list_empty(&subsys->namespaces));
+	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
 
+	xa_destroy(&subsys->namespaces);
 	kfree(subsys->subsysnqn);
 	kfree_rcu(subsys->model, rcuhead);
 	kfree(subsys);
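The lifecycle bracket changes shape as well: xa_init() allocates nothing and cannot fail, while xa_destroy() frees only the xarray's internal nodes, never the stored objects, which is why the WARN_ON_ONCE(!xa_empty()) above remains a meaningful leak check. An illustrative pairing, assuming a made-up owner struct:

	#include <linux/xarray.h>

	struct owner {
		struct xarray table;	/* plays the role of subsys->namespaces */
	};

	static void owner_init(struct owner *o)
	{
		xa_init(&o->table);	/* no allocation, cannot fail */
	}

	static void owner_free(struct owner *o)
	{
		WARN_ON_ONCE(!xa_empty(&o->table));	/* entries must be gone */
		xa_destroy(&o->table);	/* frees internal nodes only */
	}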