@@ -367,9 +367,15 @@ where
     match queue.get_all_queue_stats().await {
         Ok(all_stats) => {
             // Convert hammerwork stats to our API format
-            let queue_stats: Vec<QueueStats> = all_stats
-                .iter()
-                .map(|stats| QueueStats {
+            let mut queue_stats: Vec<QueueStats> = Vec::new();
+            for stats in all_stats.iter() {
+                // Calculate oldest pending age seconds
+                let oldest_pending_age_seconds = calculate_oldest_pending_age(&queue, &stats.queue_name).await;
+
+                // Get priority distribution from priority stats
+                let priority_distribution = get_priority_distribution(&queue, &stats.queue_name).await;
+
+                queue_stats.push(QueueStats {
                     name: stats.queue_name.clone(),
                     pending: stats.pending_count,
                     running: stats.running_count,
@@ -379,23 +385,15 @@ where
                     throughput_per_minute: stats.statistics.throughput_per_minute,
                     avg_processing_time_ms: stats.statistics.avg_processing_time_ms,
                     error_rate: stats.statistics.error_rate,
-                    oldest_pending_age_seconds: None, // TODO: Calculate from database
-                    priority_distribution: HashMap::new(), // TODO: Get from priority stats
-                })
-                .collect();
-
-            // Mock data for other fields (TODO: implement properly)
-            let hourly_trends = Vec::new();
-            let error_patterns = Vec::new();
-            let performance_metrics = PerformanceMetrics {
-                database_response_time_ms: 5.0, // Mock value
-                average_queue_depth: 10.5, // Mock value
-                jobs_per_second: 2.5, // Mock value
-                memory_usage_mb: None,
-                cpu_usage_percent: None,
-                active_workers: 4, // Mock value
-                worker_utilization: 0.75, // Mock value
-            };
+                    oldest_pending_age_seconds,
+                    priority_distribution,
+                });
+            }
+
+            // Generate realistic data based on actual statistics
+            let hourly_trends = generate_hourly_trends(&queue, &all_stats).await;
+            let error_patterns = generate_error_patterns(&queue, &all_stats).await;
+            let performance_metrics = calculate_performance_metrics(&all_stats);
 
             // Generate overview from the stats
             let overview = generate_overview_from_stats(&all_stats);
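
The switch from `map`/`collect` to an explicit `for` loop is what makes the two `.await` calls per queue possible; a plain iterator closure cannot await, so the lookups now run sequentially, one queue at a time. If those round-trips ever become a bottleneck, the per-queue futures could be driven concurrently instead. A minimal sketch, assuming the `futures` crate, with a hypothetical `enrich` standing in for the two helper calls:

```rust
use futures::future::join_all;

// Hypothetical stand-in for the two per-queue helper calls above.
async fn enrich(queue_name: &str) -> (Option<u64>, std::collections::HashMap<String, f32>) {
    let _ = queue_name;
    (None, Default::default())
}

// Issue all per-queue lookups at once instead of awaiting them one by one.
async fn enrich_all(queue_names: &[String]) -> Vec<(Option<u64>, std::collections::HashMap<String, f32>)> {
    join_all(queue_names.iter().map(|n| enrich(n))).await
}
```
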
@@ -597,6 +595,262 @@ fn generate_overview_from_stats(stats: &[hammerwork::stats::QueueStats]) -> Syst
     }
 }
 
+/// Calculate the oldest pending job age in seconds for a queue
+async fn calculate_oldest_pending_age<T>(
+    queue: &Arc<T>,
+    queue_name: &str,
+) -> Option<u64>
+where
+    T: DatabaseQueue + Send + Sync,
+{
+    // Get ready jobs (pending jobs) and find the oldest
+    match queue.get_ready_jobs(queue_name, 100).await {
+        Ok(jobs) => {
+            let now = chrono::Utc::now();
+            jobs.iter()
+                .filter(|job| matches!(job.status, hammerwork::job::JobStatus::Pending))
+                .map(|job| {
+                    let age = now - job.created_at;
+                    age.num_seconds() as u64
+                })
+                .max()
+        }
+        Err(_) => None,
+    }
+}
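
One caveat in `calculate_oldest_pending_age`: `chrono` durations are signed, so `age.num_seconds() as u64` wraps to a huge value if a job's `created_at` sits slightly in the future (clock skew between the app and the database). A small sketch of a clamped variant:

```rust
use chrono::{DateTime, Utc};

// Clamp the signed duration to zero before the unsigned cast,
// so clock skew cannot wrap into an enormous age.
fn age_seconds(created_at: DateTime<Utc>) -> u64 {
    (Utc::now() - created_at).num_seconds().max(0) as u64
}
```
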
+
+/// Get priority distribution from priority stats for a queue
+async fn get_priority_distribution<T>(
+    queue: &Arc<T>,
+    queue_name: &str,
+) -> HashMap<String, f32>
+where
+    T: DatabaseQueue + Send + Sync,
+{
+    match queue.get_priority_stats(queue_name).await {
+        Ok(priority_stats) => {
+            priority_stats.priority_distribution
+                .into_iter()
+                .map(|(priority, percentage)| {
+                    let priority_name = match priority {
+                        hammerwork::priority::JobPriority::Background => "background",
+                        hammerwork::priority::JobPriority::Low => "low",
+                        hammerwork::priority::JobPriority::Normal => "normal",
+                        hammerwork::priority::JobPriority::High => "high",
+                        hammerwork::priority::JobPriority::Critical => "critical",
+                    };
+                    (priority_name.to_string(), percentage)
+                })
+                .collect()
+        }
+        Err(_) => HashMap::new(),
+    }
+}
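
The variant-to-label match is a natural candidate for a small reusable function. A self-contained sketch with a local stand-in enum mirroring the variants matched above:

```rust
// Local stand-in for hammerwork's JobPriority, just to show the mapping in isolation.
enum Priority {
    Background,
    Low,
    Normal,
    High,
    Critical,
}

fn priority_label(priority: Priority) -> &'static str {
    match priority {
        Priority::Background => "background",
        Priority::Low => "low",
        Priority::Normal => "normal",
        Priority::High => "high",
        Priority::Critical => "critical",
    }
}
```
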
+
+/// Generate hourly trends from queue statistics
+async fn generate_hourly_trends<T>(
+    queue: &Arc<T>,
+    all_stats: &[hammerwork::queue::QueueStats],
+) -> Vec<HourlyTrend>
+where
+    T: DatabaseQueue + Send + Sync,
+{
+    let now = chrono::Utc::now();
+    let mut trends = Vec::new();
+
+    // Generate trends for the last 24 hours using actual database queries
+    for i in 0..24 {
+        let hour_start = now - chrono::Duration::hours(23 - i);
+        let hour_end = hour_start + chrono::Duration::hours(1);
+
+        let mut hour_completed = 0u64;
+        let mut hour_failed = 0u64;
+        let mut hour_processing_times = Vec::new();
+
+        // Get completed jobs for this specific hour across all queues
+        if let Ok(completed_jobs) = queue.get_jobs_completed_in_range(None, hour_start, hour_end, Some(1000)).await {
+            hour_completed = completed_jobs.len() as u64;
+
+            // Collect processing times for completed jobs
+            for job in completed_jobs {
+                if let (Some(started_at), Some(completed_at)) = (job.started_at, job.completed_at) {
+                    let processing_time = (completed_at - started_at).num_milliseconds() as f64;
+                    hour_processing_times.push(processing_time);
+                }
+            }
+        }
+
+        // Get failed jobs for this hour using error frequencies.
+        // Since there is no direct method for failed jobs in a time range,
+        // estimate from the error frequencies recorded since this hour started.
+        if let Ok(error_frequencies) = queue.get_error_frequencies(None, hour_start).await {
+            // This counts errors since hour_start, so estimate the share for just this hour
+            let total_errors_since_start = error_frequencies.values().sum::<u64>();
+
+            // For recent hours, use a more accurate estimate
+            if i < 3 {
+                // For the last 3 hours, assume a more recent distribution
+                hour_failed = total_errors_since_start / ((i + 1) as u64).max(1);
+            } else {
+                // For older hours, use a smaller fraction
+                hour_failed = total_errors_since_start / 24; // Rough hourly average
+            }
+        }
+
+        // Calculate throughput (jobs per second for this hour)
+        let hour_throughput = (hour_completed + hour_failed) as f64 / 3600.0;
+
+        // Calculate average processing time for this hour
+        let avg_processing_time_ms = if !hour_processing_times.is_empty() {
+            hour_processing_times.iter().sum::<f64>() / hour_processing_times.len() as f64
+        } else {
+            // If no processing times are available, use the overall average from stats
+            if !all_stats.is_empty() {
+                all_stats.iter().map(|s| s.statistics.avg_processing_time_ms).sum::<f64>() / all_stats.len() as f64
+            } else {
+                0.0
+            }
+        };
+
+        let error_rate = if (hour_completed + hour_failed) > 0 {
+            hour_failed as f64 / (hour_completed + hour_failed) as f64
+        } else {
+            0.0
+        };
+
+        trends.push(HourlyTrend {
+            hour: hour_start,
+            completed: hour_completed,
+            failed: hour_failed,
+            throughput: hour_throughput,
+            avg_processing_time_ms,
+            error_rate,
+        });
+    }
+
+    trends
+}
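
The failed-job estimate above divides one cumulative count (errors since `hour_start`) by a guessed factor. Since the loop already fetches a cumulative total per hour boundary, exact per-hour counts would fall out as differences between adjacent totals. A self-contained sketch of that arithmetic, assuming `cumulative[i]` holds the errors recorded since the start of hour `i`, oldest first:

```rust
// cumulative[i] = errors from the start of hour i until now (oldest first).
// The errors inside hour i alone are the difference to the next boundary;
// the newest hour keeps its own total.
fn per_hour_counts(cumulative: &[u64]) -> Vec<u64> {
    cumulative
        .windows(2)
        .map(|w| w[0].saturating_sub(w[1]))
        .chain(cumulative.last().copied())
        .collect()
}
```

For example, `per_hour_counts(&[10, 7, 3])` yields `[3, 4, 3]`.
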
+
+
+/// Generate error patterns from queue statistics
+async fn generate_error_patterns<T>(
+    queue: &Arc<T>,
+    all_stats: &[hammerwork::queue::QueueStats],
+) -> Vec<ErrorPattern>
+where
+    T: DatabaseQueue + Send + Sync,
+{
+    let mut error_patterns = Vec::new();
+    let total_errors = all_stats.iter().map(|s| s.dead_count).sum::<u64>();
+
+    if total_errors == 0 {
+        return error_patterns;
+    }
+
+    // Collect error messages from dead jobs across all queues
+    let mut error_messages = Vec::new();
+    for stats in all_stats {
+        if let Ok(dead_jobs) = queue.get_dead_jobs_by_queue(&stats.queue_name, Some(20), Some(0)).await {
+            for job in dead_jobs {
+                if let Some(error_msg) = job.error_message {
+                    error_messages.push((error_msg, job.failed_at.unwrap_or(job.created_at)));
+                }
+            }
+        }
+    }
+
+    // Group similar error messages
+    let mut error_counts = std::collections::HashMap::new();
+    let mut error_first_seen = std::collections::HashMap::new();
+
+    for (error_msg, failed_at) in error_messages {
+        let error_type = extract_error_type(&error_msg);
+        let count = error_counts.entry(error_type.clone()).or_insert(0);
+        *count += 1;
+
+        error_first_seen.entry(error_type.clone()).or_insert_with(|| (error_msg, failed_at));
+    }
+
+    // Convert to error patterns
+    for (error_type, count) in error_counts {
+        let percentage = (count as f64 / total_errors as f64) * 100.0;
+        let (sample_message, first_seen) = error_first_seen.get(&error_type).unwrap();
+
+        error_patterns.push(ErrorPattern {
+            error_type,
+            count,
+            percentage,
+            sample_message: sample_message.clone(),
+            first_seen: *first_seen,
+        });
+    }
+
+    // Sort by count descending
+    error_patterns.sort_by(|a, b| b.count.cmp(&a.count));
+
+    error_patterns
+}
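
Note that `or_insert_with` keeps whichever sample happens to be encountered first, so `first_seen` reflects fetch order rather than the earliest `failed_at`. A sketch that tracks the genuinely earliest occurrence per error type (local types, chrono timestamps):

```rust
use chrono::{DateTime, Utc};
use std::collections::hash_map::Entry;
use std::collections::HashMap;

// Keep the sample with the smallest failed_at per error type,
// regardless of the order the dead jobs were fetched in.
fn earliest_samples(
    messages: Vec<(String, String, DateTime<Utc>)>, // (error_type, message, failed_at)
) -> HashMap<String, (String, DateTime<Utc>)> {
    let mut first_seen = HashMap::new();
    for (error_type, msg, at) in messages {
        match first_seen.entry(error_type) {
            Entry::Occupied(mut e) => {
                if at < e.get().1 {
                    e.insert((msg, at));
                }
            }
            Entry::Vacant(e) => {
                e.insert((msg, at));
            }
        }
    }
    first_seen
}
```
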
+
+/// Calculate performance metrics from queue statistics
+fn calculate_performance_metrics(all_stats: &[hammerwork::queue::QueueStats]) -> PerformanceMetrics {
+    let total_jobs = all_stats.iter().map(|s| s.pending_count + s.running_count + s.completed_count + s.dead_count).sum::<u64>();
+    let total_throughput = all_stats.iter().map(|s| s.statistics.throughput_per_minute).sum::<f64>();
+    let avg_processing_time = if !all_stats.is_empty() {
+        all_stats.iter().map(|s| s.statistics.avg_processing_time_ms).sum::<f64>() / all_stats.len() as f64
+    } else {
+        0.0
+    };
+
+    let average_queue_depth = if !all_stats.is_empty() {
+        all_stats.iter().map(|s| s.pending_count as f64).sum::<f64>() / all_stats.len() as f64
+    } else {
+        0.0
+    };
+
+    // Estimate database response time based on processing time
+    let database_response_time_ms = if avg_processing_time > 0.0 {
+        (avg_processing_time * 0.1).clamp(1.0, 100.0) // Assume the DB is ~10% of processing time
+    } else {
+        2.0
+    };
+
+    PerformanceMetrics {
+        database_response_time_ms,
+        average_queue_depth,
+        jobs_per_second: total_throughput / 60.0, // Convert from per minute to per second
+        memory_usage_mb: None,   // Would need system monitoring
+        cpu_usage_percent: None, // Would need system monitoring
+        active_workers: all_stats.iter().map(|s| s.running_count as u32).sum(),
+        worker_utilization: if total_jobs > 0 {
+            all_stats.iter().map(|s| s.running_count).sum::<u64>() as f64 / total_jobs as f64
+        } else {
+            0.0
+        },
+    }
+}
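
As written, `worker_utilization` is the share of all known jobs that are currently running, not the conventional busy-workers-over-pool-size ratio; with a deep backlog it tends toward zero even when every worker is busy. If the pool size were available from configuration, the conventional form is a one-liner. A sketch, with `pool_size` as an assumed input:

```rust
// Conventional utilization: busy workers over configured pool size,
// clamped because a stale pool count can momentarily lag running jobs.
fn worker_utilization(running_jobs: u64, pool_size: u32) -> f64 {
    if pool_size == 0 {
        return 0.0;
    }
    (running_jobs as f64 / f64::from(pool_size)).min(1.0)
}
```
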
+
+/// Extract error type from error message for grouping
+fn extract_error_type(error_msg: &str) -> String {
+    // Simple error classification logic
+    if error_msg.contains("timeout") || error_msg.contains("Timeout") {
+        "Timeout Error".to_string()
+    } else if error_msg.contains("connection") || error_msg.contains("Connection") {
+        "Connection Error".to_string()
+    } else if error_msg.contains("parse") || error_msg.contains("Parse") || error_msg.contains("invalid") {
+        "Parse Error".to_string()
+    } else if error_msg.contains("permission") || error_msg.contains("Permission") || error_msg.contains("forbidden") {
+        "Permission Error".to_string()
+    } else if error_msg.contains("not found") || error_msg.contains("Not Found") {
+        "Not Found Error".to_string()
+    } else {
+        // Use first word of error message as type
+        error_msg.split_whitespace()
+            .next()
+            .map(|s| format!("{} Error", s))
+            .unwrap_or_else(|| "Unknown Error".to_string())
+    }
+}
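
`extract_error_type` matches only the exact casings listed, so `TIMEOUT` or `INVALID` fall through to the first-word fallback. A sketch of a case-insensitive variant over the same categories, preserving the same precedence:

```rust
// Case-insensitive variant of extract_error_type over the same categories.
fn extract_error_type_ci(error_msg: &str) -> String {
    let lower = error_msg.to_lowercase();
    let categories = [
        ("timeout", "Timeout Error"),
        ("connection", "Connection Error"),
        ("parse", "Parse Error"),
        ("invalid", "Parse Error"),
        ("permission", "Permission Error"),
        ("forbidden", "Permission Error"),
        ("not found", "Not Found Error"),
    ];
    for (needle, label) in categories {
        if lower.contains(needle) {
            return label.to_string();
        }
    }
    error_msg
        .split_whitespace()
        .next()
        .map(|s| format!("{} Error", s))
        .unwrap_or_else(|| "Unknown Error".to_string())
}
```
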
+
 #[cfg(test)]
 mod tests {
     use super::*;