@@ -88,6 +88,7 @@ static void nvmet_execute_get_supported_log_pages(struct nvmet_req *req)
 	logs->lids[NVME_LOG_FW_SLOT] = cpu_to_le32(NVME_LIDS_LSUPP);
 	logs->lids[NVME_LOG_CHANGED_NS] = cpu_to_le32(NVME_LIDS_LSUPP);
 	logs->lids[NVME_LOG_CMD_EFFECTS] = cpu_to_le32(NVME_LIDS_LSUPP);
+	logs->lids[NVME_LOG_ENDURANCE_GROUP] = cpu_to_le32(NVME_LIDS_LSUPP);
 	logs->lids[NVME_LOG_ANA] = cpu_to_le32(NVME_LIDS_LSUPP);
 	logs->lids[NVME_LOG_FEATURES] = cpu_to_le32(NVME_LIDS_LSUPP);
 	logs->lids[NVME_LOG_RESERVATION] = cpu_to_le32(NVME_LIDS_LSUPP);
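This hunk advertises the new log page by setting the LID-supported bit (NVME_LIDS_LSUPP, bit 0 of each 32-bit entry) for the Endurance Group Information log (LID 09h) in the Supported Log Pages response. A minimal host-side sketch of testing that bit, assuming the 4 KiB response has already been read into buf; the helper name is hypothetical:

	#include <stdbool.h>
	#include <stdint.h>

	#define NVME_LOG_ENDURANCE_GROUP	0x09
	#define NVME_LIDS_LSUPP			(1 << 0)

	/* Entry i of the Supported Log Pages response is a le32 for LID i. */
	static bool lid_supported(const uint8_t *buf, unsigned int lid)
	{
		uint32_t e = (uint32_t)buf[4 * lid] |
			     ((uint32_t)buf[4 * lid + 1] << 8) |
			     ((uint32_t)buf[4 * lid + 2] << 16) |
			     ((uint32_t)buf[4 * lid + 3] << 24);

		return e & NVME_LIDS_LSUPP;
	}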
@@ -303,6 +304,49 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
 	return struct_size(desc, nsids, count);
 }

+static void nvmet_execute_get_log_page_endgrp(struct nvmet_req *req)
+{
+	u64 host_reads, host_writes, data_units_read, data_units_written;
+	struct nvme_endurance_group_log *log;
+	u16 status;
+
+	/*
+	 * The target driver emulates each endurance group as its own
+	 * namespace, reusing the nsid as the endurance group identifier.
+	 */
+	req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
+				req->cmd->get_log_page.lsi));
+	status = nvmet_req_find_ns(req);
+	if (status)
+		goto out;
+
+	log = kzalloc(sizeof(*log), GFP_KERNEL);
+	if (!log) {
+		status = NVME_SC_INTERNAL;
+		goto out;
+	}
+
+	if (!req->ns->bdev)
+		goto copy;
+
+	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
+	data_units_read =
+		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
+	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
+	data_units_written =
+		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
+
+	put_unaligned_le64(host_reads, &log->hrc[0]);
+	put_unaligned_le64(data_units_read, &log->dur[0]);
+	put_unaligned_le64(host_writes, &log->hwc[0]);
+	put_unaligned_le64(data_units_written, &log->duw[0]);
+copy:
+	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+	kfree(log);
+out:
+	nvmet_req_complete(req, status);
+}
+
 static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
 {
 	struct nvme_ana_rsp_hdr hdr = { 0, };
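Two notes on the handler above. First, since each nvmet namespace doubles as an endurance group, the group ID arrives in the Log Specific Identifier field and is copied into the command's nsid so nvmet_req_find_ns() can resolve it; a namespace without a backing bdev jumps to the copy label and returns the zeroed log. Second, DIV_ROUND_UP(..., 1000) converts 512-byte sector counts into NVMe data units, which the spec defines as 1000 units of 512 bytes, rounded up so any nonzero traffic reports at least one unit. A worked sketch of the same arithmetic, assuming 512-byte sectors as in the kernel's part_stat accounting:

	#include <stdint.h>
	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		uint64_t sectors = 1500;	/* 1500 * 512 = 768,000 bytes */
		uint64_t units = DIV_ROUND_UP(sectors, 1000);

		/* Prints "1500 sectors -> 2 data units": 768,000 bytes
		 * exceeds one 512,000-byte data unit, so it rounds up. */
		printf("%llu sectors -> %llu data units\n",
		       (unsigned long long)sectors, (unsigned long long)units);
		return 0;
	}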
@@ -401,6 +445,8 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
 		return nvmet_execute_get_log_changed_ns(req);
 	case NVME_LOG_CMD_EFFECTS:
 		return nvmet_execute_get_log_cmd_effects_ns(req);
+	case NVME_LOG_ENDURANCE_GROUP:
+		return nvmet_execute_get_log_page_endgrp(req);
 	case NVME_LOG_ANA:
 		return nvmet_execute_get_log_page_ana(req);
 	case NVME_LOG_FEATURES:
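With the dispatch case wired up, a host can exercise the new path with a plain admin passthrough. A hypothetical user-space sketch, assuming the group ID travels in the Log Specific Identifier field (CDW11 bits 31:16 per the NVMe spec), which is exactly what the handler reads back out of lsi; error handling is trimmed:

	#include <fcntl.h>
	#include <linux/nvme_ioctl.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int read_endgrp_log(const char *dev, uint16_t endgid,
			    uint8_t *buf, uint32_t len /* multiple of 4 */)
	{
		uint32_t numd = len / 4 - 1;	/* zeroes-based dword count */
		struct nvme_admin_cmd cmd = {
			.opcode	  = 0x02,	/* Get Log Page */
			.nsid	  = 0xffffffff,	/* not namespace-scoped */
			.addr	  = (uint64_t)(uintptr_t)buf,
			.data_len = len,
			.cdw10	  = 0x09 | ((numd & 0xffff) << 16),
			.cdw11	  = (numd >> 16) | ((uint32_t)endgid << 16),
		};
		int fd = open(dev, O_RDONLY);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
		close(fd);
		return ret;
	}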
@@ -535,6 +581,13 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)

 	id->msdbd = ctrl->ops->msdbd;

+	/*
+	 * Endurance group identifier is 16 bits, so we can't let namespaces
+	 * overflow that since we reuse the nsid
+	 */
+	BUILD_BUG_ON(NVMET_MAX_NAMESPACES > USHRT_MAX);
+	id->endgidmax = cpu_to_le16(NVMET_MAX_NAMESPACES);
+
 	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
 	id->anatt = 10; /* random value */
 	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
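The BUILD_BUG_ON() turns the nsid-as-ENDGID scheme into a compile-time contract: ENDGID is a 16-bit field, so NVMET_MAX_NAMESPACES (1024 in nvmet.h at the time of writing) must never be raised past USHRT_MAX (65535). A minimal user-space equivalent of the guard, using C11 _Static_assert in place of the kernel macro:

	#include <limits.h>

	#define NVMET_MAX_NAMESPACES	1024	/* value from nvmet.h */

	/* Fails the build, like BUILD_BUG_ON, if nsids stop fitting in
	 * the 16-bit endurance group identifier. */
	_Static_assert(NVMET_MAX_NAMESPACES <= USHRT_MAX,
		       "endurance group IDs are 16 bits");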
@@ -628,6 +681,12 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 		    NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS |
 		    NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF;

+	/*
+	 * Since we don't know any better, every namespace is its own endurance
+	 * group.
+	 */
+	id->endgid = cpu_to_le16(req->ns->nsid);
+
 	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

 	id->lbaf[0].ds = req->ns->blksize_shift;
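On the host side this mapping surfaces as the ENDGID field of the Identify Namespace data structure (bytes 103:102, going by NVMe 1.4), which for this target simply echoes the nsid. A hypothetical decode from a raw 4 KiB Identify buffer:

	#include <stdint.h>

	/* ENDGID sits at bytes 103:102 of the Identify Namespace data. */
	static uint16_t id_ns_endgid(const uint8_t *id /* 4096 bytes */)
	{
		return (uint16_t)id[102] | ((uint16_t)id[103] << 8);
	}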
@@ -653,6 +712,39 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 	nvmet_req_complete(req, status);
 }

+static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req)
+{
+	u16 min_endgid = le16_to_cpu(req->cmd->identify.cnssid);
+	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmet_ns *ns;
+	unsigned long idx;
+	__le16 *list;
+	u16 status;
+	int i = 1;
+
+	list = kzalloc(buf_size, GFP_KERNEL);
+	if (!list) {
+		status = NVME_SC_INTERNAL;
+		goto out;
+	}
+
+	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+		if (ns->nsid <= min_endgid)
+			continue;
+
+		list[i++] = cpu_to_le16(ns->nsid);
+		if (i == buf_size / sizeof(__le16))
+			break;
+	}
+
+	list[0] = cpu_to_le16(i - 1);
+	status = nvmet_copy_to_sgl(req, 0, list, buf_size);
+	kfree(list);
+out:
+	nvmet_req_complete(req, status);
+}
+
 static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css)
 {
 	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
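The list format mirrors the other identify lists: entry 0 carries the number of identifiers returned, followed by ascending endurance group IDs strictly greater than the CNSSID the host passed in, with room for at most 2047 IDs in the 4 KiB buffer (hence the break at buf_size / sizeof(__le16)). A hypothetical host-side walk of that response:

	#include <stdint.h>
	#include <stdio.h>

	static void print_endgrp_list(const uint8_t *buf /* 4096 bytes */)
	{
		/* buf[1:0] is a le16 count, then up to 2047 le16 IDs. */
		uint16_t n = (uint16_t)buf[0] | ((uint16_t)buf[1] << 8);

		for (uint16_t i = 1; i <= n; i++) {
			uint16_t id = (uint16_t)buf[2 * i] |
				      ((uint16_t)buf[2 * i + 1] << 8);

			printf("endurance group %u\n", id);
		}
	}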
@@ -825,6 +917,9 @@ static void nvmet_execute_identify(struct nvmet_req *req)
 	case NVME_ID_CNS_NS_ACTIVE_LIST_CS:
 		nvmet_execute_identify_nslist(req, true);
 		return;
+	case NVME_ID_CNS_ENDGRP_LIST:
+		nvmet_execute_identify_endgrp_list(req);
+		return;
 	}

 	pr_debug("unhandled identify cns %d on qid %d\n",
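Fetching that list from user space is again a single Identify passthrough, with CNS 19h in CDW10 and the starting CNSSID in CDW11 bits 15:0 (field placement per the NVMe 2.0 spec); the sketch below is an assumption-laden illustration, not the driver's own test path:

	#include <fcntl.h>
	#include <linux/nvme_ioctl.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#define NVME_ID_CNS_ENDGRP_LIST	0x19

	int read_endgrp_list(const char *dev, uint16_t min_endgid,
			     uint8_t *buf /* 4096 bytes */)
	{
		struct nvme_admin_cmd cmd = {
			.opcode	  = 0x06,	/* Identify */
			.addr	  = (uint64_t)(uintptr_t)buf,
			.data_len = 4096,
			.cdw10	  = NVME_ID_CNS_ENDGRP_LIST,
			.cdw11	  = min_endgid,	/* CNSSID: return IDs above this */
		};
		int fd = open(dev, O_RDONLY);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
		close(fd);
		return ret;
	}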