@@ -17,6 +17,9 @@
 #include <errno.h>
 #include <ksched.h>
 #include <zephyr/sys/printk.h>
+#include <zephyr/logging/log.h>
+
+LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 
 static inline void flag_clear(uint32_t *flagp,
 			      uint32_t bit)
@@ -599,6 +602,50 @@ bool k_work_cancel_sync(struct k_work *work,
 	return pending;
 }
 
+#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+static void work_timeout_handler(struct _timeout *record)
+{
+	struct k_work_q *queue = CONTAINER_OF(record, struct k_work_q, work_timeout_record);
+	const char *name;
+	struct k_work *work;
+	k_work_handler_t handler;
+
+	name = k_thread_name_get(queue->thread_id);
+
+	K_SPINLOCK(&lock) {
+		work = queue->work;
+		handler = work->handler;
+	}
+
+	if (name != NULL) {
+		LOG_ERR("queue %s blocked by work %p with handler %p", name, work, handler);
+	} else {
+		LOG_ERR("queue %p blocked by work %p with handler %p", queue, work, handler);
+	}
+
+	k_thread_abort(queue->thread_id);
+}
+
+static void work_timeout_start_locked(struct k_work_q *queue, struct k_work *work)
+{
+	if (K_TIMEOUT_EQ(queue->work_timeout, K_FOREVER)) {
+		return;
+	}
+
+	queue->work = work;
+	z_add_timeout(&queue->work_timeout_record, work_timeout_handler, queue->work_timeout);
+}
+
+static void work_timeout_stop_locked(struct k_work_q *queue)
+{
+	if (K_TIMEOUT_EQ(queue->work_timeout, K_FOREVER)) {
+		return;
+	}
+
+	z_abort_timeout(&queue->work_timeout_record);
+}
+#endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
+
 /* Loop executed by a work queue thread.
  *
  * @param workq_ptr pointer to the work queue structure
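
For illustration, a minimal sketch of the failure mode this watchdog catches, using a hypothetical application handler (stuck_handler is not part of this change): a handler that blocks past the queue's configured budget causes work_timeout_handler() above to log the queue, the offending work item and its handler, then abort the queue thread.

#include <zephyr/kernel.h>

/* Hypothetical offender: blocks far longer than any reasonable
 * work_timeout_ms, so the queue's work timeout expires while this
 * item is still running and the queue thread is aborted.
 */
static void stuck_handler(struct k_work *work)
{
	ARG_UNUSED(work);
	k_sleep(K_SECONDS(60));
}
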
@@ -678,6 +725,10 @@ static void work_queue_main(void *workq_ptr, void *p2, void *p3)
 			continue;
 		}
 
+#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+		work_timeout_start_locked(queue, work);
+#endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
+
 		k_spin_unlock(&lock, key);
 
 		__ASSERT_NO_MSG(handler != NULL);
@@ -690,6 +741,10 @@ static void work_queue_main(void *workq_ptr, void *p2, void *p3)
 		 */
 		key = k_spin_lock(&lock);
 
+#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+		work_timeout_stop_locked(queue);
+#endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
+
 		flag_clear(&work->flags, K_WORK_RUNNING_BIT);
 		if (flag_test(&work->flags, K_WORK_FLUSHING_BIT)) {
 			finalize_flush_locked(work);
@@ -736,6 +791,14 @@ void k_work_queue_run(struct k_work_q *queue, const struct k_work_queue_config *
 		k_thread_name_set(_current, cfg->name);
 	}
 
+#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+	if ((cfg != NULL) && (cfg->work_timeout_ms)) {
+		queue->work_timeout = K_MSEC(cfg->work_timeout_ms);
+	} else {
+		queue->work_timeout = K_FOREVER;
+	}
+#endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
+
 	sys_slist_init(&queue->pending);
 	z_waitq_init(&queue->notifyq);
 	z_waitq_init(&queue->drainq);
@@ -784,6 +847,14 @@ void k_work_queue_start(struct k_work_q *queue,
 		queue->thread.base.user_options |= K_ESSENTIAL;
 	}
 
+#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+	if ((cfg != NULL) && (cfg->work_timeout_ms)) {
+		queue->work_timeout = K_MSEC(cfg->work_timeout_ms);
+	} else {
+		queue->work_timeout = K_FOREVER;
+	}
+#endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
+
 	k_thread_start(&queue->thread);
 	queue->thread_id = &queue->thread;
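
A minimal usage sketch, assuming CONFIG_WORKQUEUE_WORK_TIMEOUT=y in prj.conf (the queue name, stack size, and priority below are illustrative): the new work_timeout_ms field rides along in the existing k_work_queue_config, and leaving it at 0 keeps the timeout at K_FOREVER, i.e. disabled, per the else branches above.

#include <zephyr/kernel.h>

K_THREAD_STACK_DEFINE(my_wq_stack, 1024);
static struct k_work_q my_wq;

static void start_my_wq(void)
{
	static const struct k_work_queue_config cfg = {
		.name = "my_wq",
		/* Abort the queue thread if a single work item runs
		 * longer than 100 ms.
		 */
		.work_timeout_ms = 100,
	};

	k_work_queue_init(&my_wq);
	k_work_queue_start(&my_wq, my_wq_stack,
			   K_THREAD_STACK_SIZEOF(my_wq_stack),
			   K_PRIO_PREEMPT(8), &cfg);
}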