@@ -41,7 +41,7 @@
 #include "blk-mq-sched.h"
 #include "blk-rq-qos.h"
 
-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
@@ -567,80 +567,29 @@ void blk_mq_end_request(struct request *rq, blk_status_t error)
 }
 EXPORT_SYMBOL(blk_mq_end_request);
 
-/*
- * Softirq action handler - move entries to local list and loop over them
- * while passing them to the queue registered handler.
- */
-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
+static void blk_complete_reqs(struct llist_head *list)
 {
-        struct list_head *cpu_list, local_list;
-
-        local_irq_disable();
-        cpu_list = this_cpu_ptr(&blk_cpu_done);
-        list_replace_init(cpu_list, &local_list);
-        local_irq_enable();
-
-        while (!list_empty(&local_list)) {
-                struct request *rq;
+        struct llist_node *entry = llist_reverse_order(llist_del_all(list));
+        struct request *rq, *next;
 
-                rq = list_entry(local_list.next, struct request, ipi_list);
-                list_del_init(&rq->ipi_list);
+        llist_for_each_entry_safe(rq, next, entry, ipi_list)
                 rq->q->mq_ops->complete(rq);
-        }
 }
 
-static void blk_mq_trigger_softirq(struct request *rq)
+static __latent_entropy void blk_done_softirq(struct softirq_action *h)
 {
-        struct list_head *list;
-        unsigned long flags;
-
-        local_irq_save(flags);
-        list = this_cpu_ptr(&blk_cpu_done);
-        list_add_tail(&rq->ipi_list, list);
-
-        /*
-         * If the list only contains our just added request, signal a raise of
-         * the softirq. If there are already entries there, someone already
-         * raised the irq but it hasn't run yet.
-         */
-        if (list->next == &rq->ipi_list)
-                raise_softirq_irqoff(BLOCK_SOFTIRQ);
-        local_irq_restore(flags);
+        blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
 }
 
 static int blk_softirq_cpu_dead(unsigned int cpu)
 {
-        /*
-         * If a CPU goes away, splice its entries to the current CPU
-         * and trigger a run of the softirq
-         */
-        local_irq_disable();
-        list_splice_init(&per_cpu(blk_cpu_done, cpu),
-                         this_cpu_ptr(&blk_cpu_done));
-        raise_softirq_irqoff(BLOCK_SOFTIRQ);
-        local_irq_enable();
-
+        blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
         return 0;
 }
 
-
 static void __blk_mq_complete_request_remote(void *data)
 {
-        struct request *rq = data;
-
-        /*
-         * For most of single queue controllers, there is only one irq vector
-         * for handling I/O completion, and the only irq's affinity is set
-         * to all possible CPUs. On most of ARCHs, this affinity means the irq
-         * is handled on one specific CPU.
-         *
-         * So complete I/O requests in softirq context in case of single queue
-         * devices to avoid degrading I/O performance due to irqsoff latency.
-         */
-        if (rq->q->nr_hw_queues == 1)
-                blk_mq_trigger_softirq(rq);
-        else
-                rq->q->mq_ops->complete(rq);
+        __raise_softirq_irqoff(BLOCK_SOFTIRQ);
 }
 
 static inline bool blk_mq_complete_need_ipi(struct request *rq)
@@ -669,6 +618,30 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
         return cpu_online(rq->mq_ctx->cpu);
 }
 
+static void blk_mq_complete_send_ipi(struct request *rq)
+{
+        struct llist_head *list;
+        unsigned int cpu;
+
+        cpu = rq->mq_ctx->cpu;
+        list = &per_cpu(blk_cpu_done, cpu);
+        if (llist_add(&rq->ipi_list, list)) {
+                INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
+                smp_call_function_single_async(cpu, &rq->csd);
+        }
+}
+
+static void blk_mq_raise_softirq(struct request *rq)
+{
+        struct llist_head *list;
+
+        preempt_disable();
+        list = this_cpu_ptr(&blk_cpu_done);
+        if (llist_add(&rq->ipi_list, list))
+                raise_softirq(BLOCK_SOFTIRQ);
+        preempt_enable();
+}
+
 bool blk_mq_complete_request_remote(struct request *rq)
 {
         WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
@@ -681,15 +654,15 @@ bool blk_mq_complete_request_remote(struct request *rq)
                 return false;
 
         if (blk_mq_complete_need_ipi(rq)) {
-                INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
-                smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
-        } else {
-                if (rq->q->nr_hw_queues > 1)
-                        return false;
-                blk_mq_trigger_softirq(rq);
+                blk_mq_complete_send_ipi(rq);
+                return true;
         }
 
-        return true;
+        if (rq->q->nr_hw_queues == 1) {
+                blk_mq_raise_softirq(rq);
+                return true;
+        }
+        return false;
 }
 EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
 
@@ -3957,7 +3930,7 @@ static int __init blk_mq_init(void)
         int i;
 
         for_each_possible_cpu(i)
-                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+                init_llist_head(&per_cpu(blk_cpu_done, i));
         open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
 
         cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
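
The conversion hinges on two properties of the lock-less list API: llist_add() reports whether the list was empty before the push, so only the first completion queued on a CPU has to kick the consumer (raise the softirq or send the IPI), and llist_del_all() detaches the whole chain with a single atomic exchange, after which llist_reverse_order() restores completion order for blk_complete_reqs(). Below is a minimal userspace sketch of that pattern built on C11 atomics; the model_* helpers and the struct node/struct lhead types are illustrative stand-ins, not the kernel's <linux/llist.h> implementation.

/*
 * Userspace model (not kernel code) of the lock-less list pattern used by
 * this patch.  All model_* names are hypothetical stand-ins for llist_add(),
 * llist_del_all() and llist_reverse_order().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
        struct node *next;
        int tag;                        /* stands in for a struct request */
};

struct lhead {
        _Atomic(struct node *) first;
};

/* Push one node; return true only if the list was empty beforehand. */
static bool model_llist_add(struct node *n, struct lhead *h)
{
        struct node *first = atomic_load(&h->first);

        do {
                n->next = first;
        } while (!atomic_compare_exchange_weak(&h->first, &first, n));

        return first == NULL;
}

/* Detach the entire chain with a single atomic exchange. */
static struct node *model_llist_del_all(struct lhead *h)
{
        return atomic_exchange(&h->first, (struct node *)NULL);
}

/* Pushes build a LIFO chain; reverse it to walk in completion order. */
static struct node *model_llist_reverse(struct node *n)
{
        struct node *rev = NULL;

        while (n) {
                struct node *next = n->next;

                n->next = rev;
                rev = n;
                n = next;
        }
        return rev;
}

int main(void)
{
        struct lhead done = { NULL };
        struct node a = { .next = NULL, .tag = 1 };
        struct node b = { .next = NULL, .tag = 2 };

        /*
         * Producer side, as in blk_mq_complete_send_ipi() and
         * blk_mq_raise_softirq(): only the add that finds the list empty
         * kicks the consumer; later adds ride on the pending notification.
         */
        printf("kick consumer? %d\n", model_llist_add(&a, &done));     /* 1 */
        printf("kick consumer? %d\n", model_llist_add(&b, &done));     /* 0 */

        /* Consumer side, as in blk_complete_reqs(). */
        for (struct node *n = model_llist_reverse(model_llist_del_all(&done));
             n; n = n->next)
                printf("complete request %d\n", n->tag);               /* 1, 2 */
        return 0;
}

Because a push is one compare-and-swap and a drain is one exchange, the per-CPU completion list no longer needs the local_irq_disable()/local_irq_save() sections the old code wrapped around it, and blk_softirq_cpu_dead() can complete a dead CPU's pending requests directly instead of splicing them onto the current CPU and re-raising the softirq.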