@@ -459,6 +459,390 @@ void sys_trace_k_condvar_broadcast_exit(struct k_condvar *condvar, int ret)
 	);
 }

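+/*
+ * k_work tracing hooks. As with the other hooks in this file, object
+ * pointers are narrowed to 32-bit CTF event fields through
+ * (uint32_t)(uintptr_t), and k_timeout_t arguments are recorded as
+ * microseconds via k_ticks_to_us_floor32().
+ */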
+/* Work Queue */
+void sys_trace_k_work_init(struct k_work *work)
+{
+	ctf_top_work_init(
+		(uint32_t)(uintptr_t)work
+		);
+}
+
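+/*
+ * The *_enter/*_exit pairs below bracket the corresponding k_work_*()
+ * call; the *_exit variants additionally record the call's return value.
+ */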
+void sys_trace_k_work_submit_to_queue_enter(struct k_work_q *queue, struct k_work *work)
+{
+	ctf_top_work_submit_to_queue_enter(
+		(uint32_t)(uintptr_t)queue,
+		(uint32_t)(uintptr_t)work
+		);
+}
+
+void sys_trace_k_work_submit_to_queue_exit(struct k_work_q *queue, struct k_work *work, int ret)
+{
+	ctf_top_work_submit_to_queue_exit(
+		(uint32_t)(uintptr_t)queue,
+		(uint32_t)(uintptr_t)work,
+		(int32_t)ret
+		);
+}
+
+void sys_trace_k_work_submit_enter(struct k_work *work)
+{
+	ctf_top_work_submit_enter(
+		(uint32_t)(uintptr_t)work
+		);
+}
+
+void sys_trace_k_work_submit_exit(struct k_work *work, int ret)
+{
+	ctf_top_work_submit_exit(
+		(uint32_t)(uintptr_t)work,
+		(int32_t)ret
+		);
+}
+
+void sys_trace_k_work_flush_enter(struct k_work *work)
+{
+	ctf_top_work_flush_enter(
+		(uint32_t)(uintptr_t)work
+		);
+}
+
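+/* Emitted when the flush has to wait for the work item to complete. */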
+void sys_trace_k_work_flush_blocking(struct k_work *work, k_timeout_t timeout)
+{
+	ctf_top_work_flush_blocking(
+		(uint32_t)(uintptr_t)work,
+		k_ticks_to_us_floor32((uint32_t)timeout.ticks)
+		);
+}
+
+void sys_trace_k_work_flush_exit(struct k_work *work, int ret)
+{
+	ctf_top_work_flush_exit(
+		(uint32_t)(uintptr_t)work,
+		(int32_t)ret
+		);
+}
+
+void sys_trace_k_work_cancel_enter(struct k_work *work)
+{
+	ctf_top_work_cancel_enter(
+		(uint32_t)(uintptr_t)work
+		);
+}
+
+void sys_trace_k_work_cancel_exit(struct k_work *work, int ret)
+{
+	ctf_top_work_cancel_exit(
+		(uint32_t)(uintptr_t)work,
+		(int32_t)ret
+		);
+}
+
+void sys_trace_k_work_cancel_sync_enter(struct k_work *work, struct k_work_sync *sync)
+{
+	ctf_top_work_cancel_sync_enter(
+		(uint32_t)(uintptr_t)work,
+		(uint32_t)(uintptr_t)sync
+		);
+}
+
+void sys_trace_k_work_cancel_sync_blocking(struct k_work *work, struct k_work_sync *sync)
+{
+	ctf_top_work_cancel_sync_blocking(
+		(uint32_t)(uintptr_t)work,
+		(uint32_t)(uintptr_t)sync
+		);
+}
+
+void sys_trace_k_work_cancel_sync_exit(struct k_work *work, struct k_work_sync *sync, int ret)
+{
+	ctf_top_work_cancel_sync_exit(
+		(uint32_t)(uintptr_t)work,
+		(uint32_t)(uintptr_t)sync,
+		(int32_t)ret
+		);
+}
+
+/* Work Queue Management */
+void sys_trace_k_work_queue_init(struct k_work_q *queue)
+{
+	ctf_top_work_queue_init(
+		(uint32_t)(uintptr_t)queue
+		);
+}
+
+void sys_trace_k_work_queue_start_enter(struct k_work_q *queue)
+{
+	ctf_top_work_queue_start_enter(
+		(uint32_t)(uintptr_t)queue
+		);
+}
+
+void sys_trace_k_work_queue_start_exit(struct k_work_q *queue)
+{
+	ctf_top_work_queue_start_exit(
+		(uint32_t)(uintptr_t)queue
+		);
+}
+
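+/*
+ * k_work_queue_stop() hooks: the same timeout is recorded at enter,
+ * while blocking, and at exit, alongside the final return value.
+ */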
+void sys_trace_k_work_queue_stop_enter(struct k_work_q *queue, k_timeout_t timeout)
+{
+	ctf_top_work_queue_stop_enter(
+		(uint32_t)(uintptr_t)queue,
+		k_ticks_to_us_floor32((uint32_t)timeout.ticks)
+		);
+}
+
+void sys_trace_k_work_queue_stop_blocking(struct k_work_q *queue, k_timeout_t timeout)
+{
+	ctf_top_work_queue_stop_blocking(
+		(uint32_t)(uintptr_t)queue,
+		k_ticks_to_us_floor32((uint32_t)timeout.ticks)
+		);
+}
+
+void sys_trace_k_work_queue_stop_exit(struct k_work_q *queue, k_timeout_t timeout, int ret)
+{
+	ctf_top_work_queue_stop_exit(
+		(uint32_t)(uintptr_t)queue,
+		k_ticks_to_us_floor32((uint32_t)timeout.ticks),
+		(int32_t)ret
+		);
+}
+
+void sys_trace_k_work_queue_drain_enter(struct k_work_q *queue)
+{
+	ctf_top_work_queue_drain_enter(
+		(uint32_t)(uintptr_t)queue
+		);
+}
+
+void sys_trace_k_work_queue_drain_exit(struct k_work_q *queue, int ret)
+{
+	ctf_top_work_queue_drain_exit(
+		(uint32_t)(uintptr_t)queue,
+		(int32_t)ret
+		);
+}
+
+void sys_trace_k_work_queue_unplug_enter(struct k_work_q *queue)
+{
+	ctf_top_work_queue_unplug_enter(
+		(uint32_t)(uintptr_t)queue
+		);
+}
+
+void sys_trace_k_work_queue_unplug_exit(struct k_work_q *queue, int ret)
+{
+	ctf_top_work_queue_unplug_exit(
+		(uint32_t)(uintptr_t)queue,
+		(int32_t)ret
+		);
+}
+
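+/*
+ * Delayable-work hooks mirror the plain k_work hooks above, with the
+ * schedule/reschedule delay recorded in microseconds.
+ */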
+/* Delayable Work */
+void sys_trace_k_work_delayable_init(struct k_work_delayable *dwork)
+{
+	ctf_top_work_delayable_init(
+		(uint32_t)(uintptr_t)dwork
+		);
+}
+
+void sys_trace_k_work_schedule_for_queue_enter(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay)
+{
+	ctf_top_work_schedule_for_queue_enter(
+		(uint32_t)(uintptr_t)queue,
+		(uint32_t)(uintptr_t)dwork,
+		k_ticks_to_us_floor32((uint32_t)delay.ticks)
+		);
+}
+
+void sys_trace_k_work_schedule_for_queue_exit(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay, int ret)
+{
+	ctf_top_work_schedule_for_queue_exit(
+		(uint32_t)(uintptr_t)queue,
+		(uint32_t)(uintptr_t)dwork,
+		k_ticks_to_us_floor32((uint32_t)delay.ticks),
+		(int32_t)ret
+		);
+}
+
+void sys_trace_k_work_schedule_enter(struct k_work_delayable *dwork, k_timeout_t delay)
+{
+	ctf_top_work_schedule_enter(
+		(uint32_t)(uintptr_t)dwork,
+		k_ticks_to_us_floor32((uint32_t)delay.ticks)
+		);
+}
+
+void sys_trace_k_work_schedule_exit(struct k_work_delayable *dwork, k_timeout_t delay, int ret)
+{
+	ctf_top_work_schedule_exit(
+		(uint32_t)(uintptr_t)dwork,
+		k_ticks_to_us_floor32((uint32_t)delay.ticks),
+		(int32_t)ret
+		);
+}
+
+void sys_trace_k_work_reschedule_for_queue_enter(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay)
+{
+	ctf_top_work_reschedule_for_queue_enter(
+		(uint32_t)(uintptr_t)queue,
+		(uint32_t)(uintptr_t)dwork,
+		k_ticks_to_us_floor32((uint32_t)delay.ticks)
+		);
+}
+
+void sys_trace_k_work_reschedule_for_queue_exit(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay, int ret)
+{
+	ctf_top_work_reschedule_for_queue_exit(
+		(uint32_t)(uintptr_t)queue,
+		(uint32_t)(uintptr_t)dwork,
+		k_ticks_to_us_floor32((uint32_t)delay.ticks),
+		(int32_t)ret
+		);
+}
+
+void sys_trace_k_work_reschedule_enter(struct k_work_delayable *dwork, k_timeout_t delay)
+{
+	ctf_top_work_reschedule_enter(
+		(uint32_t)(uintptr_t)dwork,
+		k_ticks_to_us_floor32((uint32_t)delay.ticks)
+		);
+}
+
+void sys_trace_k_work_reschedule_exit(struct k_work_delayable *dwork, k_timeout_t delay, int ret)
+{
+	ctf_top_work_reschedule_exit(
+		(uint32_t)(uintptr_t)dwork,
+		k_ticks_to_us_floor32((uint32_t)delay.ticks),
+		(int32_t)ret
+		);
+}
+
+void sys_trace_k_work_flush_delayable_enter(struct k_work_delayable *dwork, struct k_work_sync *sync)
+{
+	ctf_top_work_flush_delayable_enter(
+		(uint32_t)(uintptr_t)dwork,
+		(uint32_t)(uintptr_t)sync
+		);
+}
+
+void sys_trace_k_work_flush_delayable_exit(struct k_work_delayable *dwork, struct k_work_sync *sync, int ret)
+{
+	ctf_top_work_flush_delayable_exit(
+		(uint32_t)(uintptr_t)dwork,
+		(uint32_t)(uintptr_t)sync,
+		(int32_t)ret
+		);
+}
+
+void sys_trace_k_work_cancel_delayable_enter(struct k_work_delayable *dwork)
+{
+	ctf_top_work_cancel_delayable_enter(
+		(uint32_t)(uintptr_t)dwork
+		);
+}
+
+void sys_trace_k_work_cancel_delayable_exit(struct k_work_delayable *dwork, int ret)
+{
+	ctf_top_work_cancel_delayable_exit(
+		(uint32_t)(uintptr_t)dwork,
+		(int32_t)ret
+		);
+}
+
+void sys_trace_k_work_cancel_delayable_sync_enter(struct k_work_delayable *dwork, struct k_work_sync *sync)
+{
+	ctf_top_work_cancel_delayable_sync_enter(
+		(uint32_t)(uintptr_t)dwork,
+		(uint32_t)(uintptr_t)sync
+		);
+}
+
+void sys_trace_k_work_cancel_delayable_sync_exit(struct k_work_delayable *dwork, struct k_work_sync *sync, int ret)
+{
+	ctf_top_work_cancel_delayable_sync_exit(
+		(uint32_t)(uintptr_t)dwork,
+		(uint32_t)(uintptr_t)sync,
+		(int32_t)ret
+		);
+}
+
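+/*
+ * Triggered (poll) work hooks; the submit paths also record the poll
+ * timeout in microseconds.
+ */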
+/* Poll Work */
+void sys_trace_k_work_poll_init_enter(struct k_work_poll *work)
+{
+	ctf_top_work_poll_init_enter(
+		(uint32_t)(uintptr_t)work
+		);
+}
+
+void sys_trace_k_work_poll_init_exit(struct k_work_poll *work)
+{
+	ctf_top_work_poll_init_exit(
+		(uint32_t)(uintptr_t)work
+		);
+}
+
+void sys_trace_k_work_poll_submit_to_queue_enter(struct k_work_q *work_q, struct k_work_poll *work, k_timeout_t timeout)
+{
+	ctf_top_work_poll_submit_to_queue_enter(
+		(uint32_t)(uintptr_t)work_q,
+		(uint32_t)(uintptr_t)work,
+		k_ticks_to_us_floor32((uint32_t)timeout.ticks)
+		);
+}
+
+void sys_trace_k_work_poll_submit_to_queue_blocking(struct k_work_q *work_q, struct k_work_poll *work, k_timeout_t timeout)
+{
+	ctf_top_work_poll_submit_to_queue_blocking(
+		(uint32_t)(uintptr_t)work_q,
+		(uint32_t)(uintptr_t)work,
+		k_ticks_to_us_floor32((uint32_t)timeout.ticks)
+		);
+}
+
+void sys_trace_k_work_poll_submit_to_queue_exit(struct k_work_q *work_q, struct k_work_poll *work, k_timeout_t timeout, int ret)
+{
+	ctf_top_work_poll_submit_to_queue_exit(
+		(uint32_t)(uintptr_t)work_q,
+		(uint32_t)(uintptr_t)work,
+		k_ticks_to_us_floor32((uint32_t)timeout.ticks),
+		(int32_t)ret
+		);
+}
+
+void sys_trace_k_work_poll_submit_enter(struct k_work_poll *work, k_timeout_t timeout)
+{
+	ctf_top_work_poll_submit_enter(
+		(uint32_t)(uintptr_t)work,
+		k_ticks_to_us_floor32((uint32_t)timeout.ticks)
+		);
+}
+
+void sys_trace_k_work_poll_submit_exit(struct k_work_poll *work, k_timeout_t timeout, int ret)
+{
+	ctf_top_work_poll_submit_exit(
+		(uint32_t)(uintptr_t)work,
+		k_ticks_to_us_floor32((uint32_t)timeout.ticks),
+		(int32_t)ret
+		);
+}
+
+void sys_trace_k_work_poll_cancel_enter(struct k_work_poll *work)
+{
+	ctf_top_work_poll_cancel_enter(
+		(uint32_t)(uintptr_t)work
+		);
+}
+
+void sys_trace_k_work_poll_cancel_exit(struct k_work_poll *work, int ret)
+{
+	ctf_top_work_poll_cancel_exit(
+		(uint32_t)(uintptr_t)work,
+		(int32_t)ret
+		);
+}
+

 /* Semaphore */
 void sys_trace_k_sem_init(struct k_sem *sem, int ret)