@@ -1508,8 +1508,8 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
 	if (rc)
 		goto error;
 
-	device->raid_bypass_cnt = alloc_percpu(u64);
-	if (!device->raid_bypass_cnt) {
+	device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats);
+	if (!device->raid_io_stats) {
 		rc = -ENOMEM;
 		goto error;
 	}
@@ -2105,9 +2105,9 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
 			/* To prevent this from being freed later. */
 			new_device->raid_map = NULL;
 		}
-		if (new_device->raid_bypass_enabled && existing_device->raid_bypass_cnt == NULL) {
-			existing_device->raid_bypass_cnt = new_device->raid_bypass_cnt;
-			new_device->raid_bypass_cnt = NULL;
+		if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) {
+			existing_device->raid_io_stats = new_device->raid_io_stats;
+			new_device->raid_io_stats = NULL;
 		}
 		existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
 		existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
@@ -2131,7 +2131,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
 static inline void pqi_free_device(struct pqi_scsi_dev *device)
 {
 	if (device) {
-		free_percpu(device->raid_bypass_cnt);
+		free_percpu(device->raid_io_stats);
 		kfree(device->raid_map);
 		kfree(device);
 	}
@@ -5984,6 +5984,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
 			pqi_stream_data->next_lba = rmd.first_block +
 				rmd.block_cnt;
 			pqi_stream_data->last_accessed = jiffies;
+			per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++;
 			return true;
 		}
 
@@ -6016,7 +6017,6 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
 	u16 hw_queue;
 	struct pqi_queue_group *queue_group;
 	bool raid_bypassed;
-	u64 *raid_bypass_cnt;
 	u8 lun;
 
 	scmd->host_scribble = PQI_NO_COMPLETION;
@@ -6063,8 +6063,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
 			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
 			if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
 				raid_bypassed = true;
-				raid_bypass_cnt = per_cpu_ptr(device->raid_bypass_cnt, smp_processor_id());
-				(*raid_bypass_cnt)++;
+				per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++;
 			}
 		}
 		if (!raid_bypassed)
@@ -7363,7 +7362,6 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
 	unsigned long flags;
 	u64 raid_bypass_cnt;
 	int cpu;
-	u64 *per_cpu_bypass_cnt_ptr;
 
 	sdev = to_scsi_device(dev);
 	ctrl_info = shost_to_hba(sdev->host);
@@ -7381,10 +7379,9 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
 
 	raid_bypass_cnt = 0;
 
-	if (device->raid_bypass_cnt) {
+	if (device->raid_io_stats) {
 		for_each_online_cpu(cpu) {
-			per_cpu_bypass_cnt_ptr = per_cpu_ptr(device->raid_bypass_cnt, cpu);
-			raid_bypass_cnt += *per_cpu_bypass_cnt_ptr;
+			raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt;
 		}
 	}
 
@@ -7472,6 +7469,43 @@ static ssize_t pqi_numa_node_show(struct device *dev,
 	return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
 }
 
+static ssize_t pqi_write_stream_cnt_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct scsi_device *sdev;
+	struct pqi_scsi_dev *device;
+	unsigned long flags;
+	u64 write_stream_cnt;
+	int cpu;
+
+	sdev = to_scsi_device(dev);
+	ctrl_info = shost_to_hba(sdev->host);
+
+	if (pqi_ctrl_offline(ctrl_info))
+		return -ENODEV;
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	device = sdev->hostdata;
+	if (!device) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+		return -ENODEV;
+	}
+
+	write_stream_cnt = 0;
+
+	if (device->raid_io_stats) {
+		for_each_online_cpu(cpu) {
+			write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt;
+		}
+	}
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt);
+}
+
 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
@@ -7482,6 +7516,7 @@ static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
 static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
 	pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
+static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL);
 
 static struct attribute *pqi_sdev_attrs[] = {
 	&dev_attr_lunid.attr,
@@ -7493,6 +7528,7 @@ static struct attribute *pqi_sdev_attrs[] = {
 	&dev_attr_raid_bypass_cnt.attr,
 	&dev_attr_sas_ncq_prio_enable.attr,
 	&dev_attr_numa_node.attr,
+	&dev_attr_write_stream_cnt.attr,
 	NULL
 };
 
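Note: this diff presumes a companion header change that replaces the bare per-CPU u64 bypass counter with a small per-device statistics aggregate. A minimal sketch of what that definition would look like, inferred from the alloc_percpu() call and the two fields dereferenced in the hunks above (the actual layout belongs in drivers/scsi/smartpqi/smartpqi.h):

/* Per-CPU I/O statistics for one logical device; allocated with
 * alloc_percpu() so each CPU increments its own slot without locking.
 * Field names taken from the dereferences in this diff. */
struct pqi_raid_io_stats {
	u64	raid_bypass_cnt;	/* RAID-bypass submissions */
	u64	write_stream_cnt;	/* detected parity write streams */
};

Because each CPU owns its copy, the hot-path increments in pqi_scsi_queue_command() and pqi_is_parity_write_stream() are lock-free; the sysfs show functions then sum the per-CPU slots under the device-list lock, which is adequate for statistics even though individual increments are not atomic with the read. After the patch, the new counter can be read next to the existing one, e.g. via something like cat /sys/block/sdX/device/write_stream_cnt, which prints a hex value (for example 0x2a) per the "0x%llx" format above.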