@@ -300,11 +300,6 @@ struct tape_block {
 
 #define SDEB_XA_NOT_IN_USE XA_MARK_1
 
-static struct kmem_cache *queued_cmd_cache;
-
-#define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble)
-#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
-
 /* Zone types (zbcr05 table 25) */
 enum sdebug_z_type {
 	ZBC_ZTYPE_CNV	= 0x1,
@@ -460,17 +455,9 @@ struct sdebug_defer {
 	enum sdeb_defer_type defer_t;
 };
 
-
-struct sdebug_queued_cmd {
-	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
-	 * instance indicates this slot is in use.
-	 */
-	struct sdebug_defer sd_dp;
-	struct scsi_cmnd *scmd;
-};
-
 struct sdebug_scsi_cmd {
 	spinlock_t lock;
+	struct sdebug_defer sd_dp;
 };
 
 static atomic_t sdebug_cmnd_count;	/* number of incoming commands */
@@ -636,8 +623,6 @@ static int sdebug_add_store(void);
 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
 static void sdebug_erase_all_stores(bool apart_from_first);
 
-static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
-
 /*
  * The following are overflow arrays for cdbs that "hit" the same index in
  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
@@ -6333,10 +6318,10 @@ static u32 get_tag(struct scsi_cmnd *cmnd)
 /* Queued (deferred) command completions converge here. */
 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
 {
-	struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
+	struct sdebug_scsi_cmd *sdsc = container_of(sd_dp,
+						    typeof(*sdsc), sd_dp);
+	struct scsi_cmnd *scp = (struct scsi_cmnd *)sdsc - 1;
 	unsigned long flags;
-	struct scsi_cmnd *scp = sqcp->scmd;
-	struct sdebug_scsi_cmd *sdsc;
 	bool aborted;
 
 	if (sdebug_statistics) {
@@ -6347,27 +6332,23 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
 
 	if (!scp) {
 		pr_err("scmd=NULL\n");
-		goto out;
+		return;
 	}
 
-	sdsc = scsi_cmd_priv(scp);
 	spin_lock_irqsave(&sdsc->lock, flags);
 	aborted = sd_dp->aborted;
 	if (unlikely(aborted))
 		sd_dp->aborted = false;
-	ASSIGN_QUEUED_CMD(scp, NULL);
 
 	spin_unlock_irqrestore(&sdsc->lock, flags);
 
 	if (aborted) {
 		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
 		blk_abort_request(scsi_cmd_to_rq(scp));
-		goto out;
+		return;
 	}
 
 	scsi_done(scp); /* callback to mid level */
-out:
-	sdebug_free_queued_cmd(sqcp);
 }
 
 /* When high resolution timer goes off this function is called. */
@@ -6674,10 +6655,15 @@ static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
 	sdp->hostdata = NULL;
 }
 
-/* Returns true if we require the queued memory to be freed by the caller. */
-static bool stop_qc_helper(struct sdebug_defer *sd_dp,
-			   enum sdeb_defer_type defer_t)
+/* Returns true if it is safe to complete @cmnd. */
+static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
 {
+	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
+	struct sdebug_defer *sd_dp = &sdsc->sd_dp;
+	enum sdeb_defer_type defer_t = READ_ONCE(sd_dp->defer_t);
+
+	lockdep_assert_held(&sdsc->lock);
+
 	if (defer_t == SDEB_DEFER_HRT) {
 		int res = hrtimer_try_to_cancel(&sd_dp->hrt);
 
@@ -6702,28 +6688,6 @@ static bool stop_qc_helper(struct sdebug_defer *sd_dp,
 	return false;
 }
 
-
-static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
-{
-	enum sdeb_defer_type l_defer_t;
-	struct sdebug_defer *sd_dp;
-	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
-	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
-
-	lockdep_assert_held(&sdsc->lock);
-
-	if (!sqcp)
-		return false;
-	sd_dp = &sqcp->sd_dp;
-	l_defer_t = READ_ONCE(sd_dp->defer_t);
-	ASSIGN_QUEUED_CMD(cmnd, NULL);
-
-	if (stop_qc_helper(sd_dp, l_defer_t))
-		sdebug_free_queued_cmd(sqcp);
-
-	return true;
-}
-
 /*
  * Called from scsi_debug_abort() only, which is for timed-out cmd.
  */
@@ -7106,33 +7070,6 @@ static bool inject_on_this_cmd(void)
 
 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
 
-
-void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
-{
-	if (sqcp)
-		kmem_cache_free(queued_cmd_cache, sqcp);
-}
-
-static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
-{
-	struct sdebug_queued_cmd *sqcp;
-	struct sdebug_defer *sd_dp;
-
-	sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
-	if (!sqcp)
-		return NULL;
-
-	sd_dp = &sqcp->sd_dp;
-
-	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
-	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
-	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
-
-	sqcp->scmd = scmd;
-
-	return sqcp;
-}
-
 /* Complete the processing of the thread that queued a SCSI command to this
  * driver. It either completes the command by calling cmnd_done() or
  * schedules a hr timer or work queue then returns 0. Returns
@@ -7149,7 +7086,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
 	unsigned long flags;
 	u64 ns_from_boot = 0;
-	struct sdebug_queued_cmd *sqcp;
 	struct scsi_device *sdp;
 	struct sdebug_defer *sd_dp;
 
@@ -7181,12 +7117,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 		}
 	}
 
-	sqcp = sdebug_alloc_queued_cmd(cmnd);
-	if (!sqcp) {
-		pr_err("%s no alloc\n", __func__);
-		return SCSI_MLQUEUE_HOST_BUSY;
-	}
-	sd_dp = &sqcp->sd_dp;
+	sd_dp = &sdsc->sd_dp;
 
 	if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
 		ns_from_boot = ktime_get_boottime_ns();
@@ -7234,7 +7165,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 
 		if (kt <= d) {	/* elapsed duration >= kt */
 			/* call scsi_done() from this thread */
-			sdebug_free_queued_cmd(sqcp);
 			scsi_done(cmnd);
 			return 0;
 		}
@@ -7247,13 +7177,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 		if (polled) {
 			spin_lock_irqsave(&sdsc->lock, flags);
 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
-			ASSIGN_QUEUED_CMD(cmnd, sqcp);
 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
 			spin_unlock_irqrestore(&sdsc->lock, flags);
 		} else {
 			/* schedule the invocation of scsi_done() for a later time */
 			spin_lock_irqsave(&sdsc->lock, flags);
-			ASSIGN_QUEUED_CMD(cmnd, sqcp);
 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
 			/*
@@ -7277,13 +7205,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 			sd_dp->issuing_cpu = raw_smp_processor_id();
 		if (polled) {
 			spin_lock_irqsave(&sdsc->lock, flags);
-			ASSIGN_QUEUED_CMD(cmnd, sqcp);
 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
 			spin_unlock_irqrestore(&sdsc->lock, flags);
 		} else {
 			spin_lock_irqsave(&sdsc->lock, flags);
-			ASSIGN_QUEUED_CMD(cmnd, sqcp);
 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
 			schedule_work(&sd_dp->ew.work);
 			spin_unlock_irqrestore(&sdsc->lock, flags);
@@ -8650,12 +8576,6 @@ static int __init scsi_debug_init(void)
 	hosts_to_add = sdebug_add_host;
 	sdebug_add_host = 0;
 
-	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
-	if (!queued_cmd_cache) {
-		ret = -ENOMEM;
-		goto driver_unreg;
-	}
-
 	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
 	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
 		pr_info("%s: failed to create initial debugfs directory\n", __func__);
@@ -8682,8 +8602,6 @@ static int __init scsi_debug_init(void)
 
 	return 0;
 
-driver_unreg:
-	driver_unregister(&sdebug_driverfs_driver);
 bus_unreg:
 	bus_unregister(&pseudo_lld_bus);
 dev_unreg:
@@ -8699,7 +8617,6 @@ static void __exit scsi_debug_exit(void)
 
 	for (; k; k--)
 		sdebug_do_remove_host(true);
-	kmem_cache_destroy(queued_cmd_cache);
 	driver_unregister(&sdebug_driverfs_driver);
 	bus_unregister(&pseudo_lld_bus);
 	root_device_unregister(pseudo_primary);
@@ -9083,7 +9000,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
 	struct sdebug_defer *sd_dp;
 	u32 unique_tag = blk_mq_unique_tag(rq);
 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
-	struct sdebug_queued_cmd *sqcp;
 	unsigned long flags;
 	int queue_num = data->queue_num;
 	ktime_t time;
@@ -9099,13 +9015,7 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
 	time = ktime_get_boottime();
 
 	spin_lock_irqsave(&sdsc->lock, flags);
-	sqcp = TO_QUEUED_CMD(cmd);
-	if (!sqcp) {
-		spin_unlock_irqrestore(&sdsc->lock, flags);
-		return true;
-	}
-
-	sd_dp = &sqcp->sd_dp;
+	sd_dp = &sdsc->sd_dp;
 	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
 		spin_unlock_irqrestore(&sdsc->lock, flags);
 		return true;
@@ -9115,8 +9025,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
 		spin_unlock_irqrestore(&sdsc->lock, flags);
 		return true;
 	}
-
-	ASSIGN_QUEUED_CMD(cmd, NULL);
 	spin_unlock_irqrestore(&sdsc->lock, flags);
 
 	if (sdebug_statistics) {
@@ -9125,8 +9033,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
 			atomic_inc(&sdebug_miss_cpus);
 	}
 
-	sdebug_free_queued_cmd(sqcp);
-
 	scsi_done(cmd); /* callback to mid level */
 	(*data->num_entries)++;
 	return true;
@@ -9441,8 +9347,12 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 {
 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
+	struct sdebug_defer *sd_dp = &sdsc->sd_dp;
 
 	spin_lock_init(&sdsc->lock);
+	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
+	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
 
 	return 0;
 }
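
The change above relies on the SCSI mid-layer's per-command private area: the mid-layer reserves cmd_size bytes of driver data directly behind each struct scsi_cmnd, so scsi_cmd_priv(cmd) is simply cmd + 1, and the new code in sdebug_q_cmd_complete() recovers the command with the inverse step, (struct scsi_cmnd *)sdsc - 1. The standalone sketch below is illustrative only; it uses simplified stand-in structs and a plain malloc() in place of the kernel's blk-mq allocation path, just to show that round trip.

/* Illustration only: stand-in structs, not the kernel definitions. */
#include <assert.h>
#include <stdlib.h>

struct scsi_cmnd { int dummy; };		/* stand-in for the real struct */
struct sdebug_scsi_cmd { int lock; };		/* stand-in for the driver's priv data */

/* Same idea as the kernel helper: the private area starts right after the command. */
static void *scsi_cmd_priv(struct scsi_cmnd *cmd)
{
	return cmd + 1;
}

int main(void)
{
	/* One allocation holds the command plus its private area. */
	struct scsi_cmnd *cmd = malloc(sizeof(*cmd) + sizeof(struct sdebug_scsi_cmd));
	struct sdebug_scsi_cmd *sdsc;
	struct scsi_cmnd *scp;

	if (!cmd)
		return 1;

	sdsc = scsi_cmd_priv(cmd);

	/* Inverse mapping used by sdebug_q_cmd_complete() after this patch. */
	scp = (struct scsi_cmnd *)sdsc - 1;

	assert(scp == cmd);
	free(cmd);
	return 0;
}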