 #include "blk-mq-sched.h"
 #include "blk-rq-qos.h"
 
-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
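
The per-CPU completion list changes from a doubly linked struct list_head to a struct llist_head from <linux/llist.h>: a single-pointer, lock-free list head that can be appended to from hard-irq context without disabling interrupts. For this to work, rq->ipi_list must correspondingly be a struct llist_node (changed outside the hunks shown here). A minimal sketch of the shape involved; the demo_* names are illustrative and not part of the patch:

#include <linux/llist.h>
#include <linux/percpu.h>

/* Illustrative only: mirrors the layout the patch relies on. */
struct demo_request {
        struct llist_node ipi_list;     /* was a struct list_head */
        /* ... remaining request fields ... */
};

/* struct llist_head is just { struct llist_node *first; }. */
static DEFINE_PER_CPU(struct llist_head, demo_cpu_done);
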
@@ -567,80 +567,29 @@ void blk_mq_end_request(struct request *rq, blk_status_t error)
 }
 EXPORT_SYMBOL(blk_mq_end_request);
 
-/*
- * Softirq action handler - move entries to local list and loop over them
- * while passing them to the queue registered handler.
- */
-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
+static void blk_complete_reqs(struct llist_head *list)
 {
-        struct list_head *cpu_list, local_list;
-
-        local_irq_disable();
-        cpu_list = this_cpu_ptr(&blk_cpu_done);
-        list_replace_init(cpu_list, &local_list);
-        local_irq_enable();
-
-        while (!list_empty(&local_list)) {
-                struct request *rq;
+        struct llist_node *entry = llist_reverse_order(llist_del_all(list));
+        struct request *rq, *next;
 
-                rq = list_entry(local_list.next, struct request, ipi_list);
-                list_del_init(&rq->ipi_list);
+        llist_for_each_entry_safe(rq, next, entry, ipi_list)
                 rq->q->mq_ops->complete(rq);
-        }
 }
 
-static void blk_mq_trigger_softirq(struct request *rq)
+static __latent_entropy void blk_done_softirq(struct softirq_action *h)
 {
-        struct list_head *list;
-        unsigned long flags;
-
-        local_irq_save(flags);
-        list = this_cpu_ptr(&blk_cpu_done);
-        list_add_tail(&rq->ipi_list, list);
-
-        /*
-         * If the list only contains our just added request, signal a raise of
-         * the softirq. If there are already entries there, someone already
-         * raised the irq but it hasn't run yet.
-         */
-        if (list->next == &rq->ipi_list)
-                raise_softirq_irqoff(BLOCK_SOFTIRQ);
-        local_irq_restore(flags);
+        blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
 }
 
 static int blk_softirq_cpu_dead(unsigned int cpu)
 {
-        /*
-         * If a CPU goes away, splice its entries to the current CPU
-         * and trigger a run of the softirq
-         */
-        local_irq_disable();
-        list_splice_init(&per_cpu(blk_cpu_done, cpu),
-                         this_cpu_ptr(&blk_cpu_done));
-        raise_softirq_irqoff(BLOCK_SOFTIRQ);
-        local_irq_enable();
-
+        blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
         return 0;
 }
 
-
 static void __blk_mq_complete_request_remote(void *data)
 {
-        struct request *rq = data;
-
-        /*
-         * For most of single queue controllers, there is only one irq vector
-         * for handling I/O completion, and the only irq's affinity is set
-         * to all possible CPUs. On most of ARCHs, this affinity means the irq
-         * is handled on one specific CPU.
-         *
-         * So complete I/O requests in softirq context in case of single queue
-         * devices to avoid degrading I/O performance due to irqsoff latency.
-         */
-        if (rq->q->nr_hw_queues == 1)
-                blk_mq_trigger_softirq(rq);
-        else
-                rq->q->mq_ops->complete(rq);
+        __raise_softirq_irqoff(BLOCK_SOFTIRQ);
 }
 
 static inline bool blk_mq_complete_need_ipi(struct request *rq)
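
On the consumer side, blk_complete_reqs() drains a per-CPU list with a single atomic llist_del_all(), reverses the chain (llist_add() pushes at the head, so entries come back LIFO) and walks it with the _safe iterator because each request may be freed or reused by its completion handler. A sketch of the same pattern on the illustrative demo_* types above, with a hypothetical demo_complete_one() standing in for rq->q->mq_ops->complete(rq):

static void demo_complete_one(struct demo_request *rq)
{
        /* driver completion would run here */
}

static void demo_complete_reqs(struct llist_head *list)
{
        /* Detach the whole chain atomically, then restore FIFO order. */
        struct llist_node *entry = llist_reverse_order(llist_del_all(list));
        struct demo_request *rq, *next;

        /* _safe: the next pointer is read before rq can be recycled. */
        llist_for_each_entry_safe(rq, next, entry, ipi_list)
                demo_complete_one(rq);
}
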
@@ -669,6 +618,30 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
         return cpu_online(rq->mq_ctx->cpu);
 }
 
+static void blk_mq_complete_send_ipi(struct request *rq)
+{
+        struct llist_head *list;
+        unsigned int cpu;
+
+        cpu = rq->mq_ctx->cpu;
+        list = &per_cpu(blk_cpu_done, cpu);
+        if (llist_add(&rq->ipi_list, list)) {
+                INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
+                smp_call_function_single_async(cpu, &rq->csd);
+        }
+}
+
+static void blk_mq_raise_softirq(struct request *rq)
+{
+        struct llist_head *list;
+
+        preempt_disable();
+        list = this_cpu_ptr(&blk_cpu_done);
+        if (llist_add(&rq->ipi_list, list))
+                raise_softirq(BLOCK_SOFTIRQ);
+        preempt_enable();
+}
+
 bool blk_mq_complete_request_remote(struct request *rq)
 {
         WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
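
Both new producers rely on the return value of llist_add(), which is true only when the list was empty before the add: the first request queued on an idle CPU sends the IPI or raises the softirq, and later requests simply ride on the kick that is already pending. This replaces the old list->next == &rq->ipi_list check and the local_irq_save()/restore() around it. A sketch of the shared pattern, again with illustrative demo_* names and a hypothetical demo_kick_cpu():

static void demo_kick_cpu(unsigned int cpu)
{
        /* e.g. smp_call_function_single_async() or raise_softirq() */
}

static void demo_queue_completion(struct demo_request *rq, unsigned int cpu)
{
        struct llist_head *list = &per_cpu(demo_cpu_done, cpu);

        /* Only the producer that finds the list empty kicks the consumer. */
        if (llist_add(&rq->ipi_list, list))
                demo_kick_cpu(cpu);
}
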
@@ -681,15 +654,15 @@ bool blk_mq_complete_request_remote(struct request *rq)
                 return false;
 
         if (blk_mq_complete_need_ipi(rq)) {
-                INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
-                smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
-        } else {
-                if (rq->q->nr_hw_queues > 1)
-                        return false;
-                blk_mq_trigger_softirq(rq);
+                blk_mq_complete_send_ipi(rq);
+                return true;
         }
 
-        return true;
+        if (rq->q->nr_hw_queues == 1) {
+                blk_mq_raise_softirq(rq);
+                return true;
+        }
+        return false;
 }
 EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
 
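
blk_mq_complete_request_remote() now has three explicit outcomes: queue the request on the submitting CPU and send an IPI, queue it locally and raise BLOCK_SOFTIRQ for single-hw-queue devices, or return false so the caller completes the request inline. For context, the non-remote caller elsewhere in blk-mq.c (not part of this diff) looks roughly like:

void blk_mq_complete_request(struct request *rq)
{
        if (!blk_mq_complete_request_remote(rq))
                rq->q->mq_ops->complete(rq);
}
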
@@ -3957,7 +3930,7 @@ static int __init blk_mq_init(void)
         int i;
 
         for_each_possible_cpu(i)
-                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+                init_llist_head(&per_cpu(blk_cpu_done, i));
         open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
 
         cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
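
init_llist_head() only needs to clear the head's single first pointer, matching the one-pointer layout sketched earlier, whereas INIT_LIST_HEAD() had to point both prev and next back at the head.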