//! Comprehensive status command implementation for Redis Enterprise
//!
//! Provides a single command to view cluster, nodes, databases, and shards status,
//! similar to `rladmin status extra all`.
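//!
//! # Example (illustrative)
//!
//! A hedged sketch of how the CLI layer might call into this module. The
//! `conn_mgr` value and the `OutputFormat::Json` variant name are assumptions
//! about the surrounding crate, not part of this file:
//!
//! ```ignore
//! // Request only nodes and databases; get_status falls back to "all" when
//! // no section is enabled.
//! let sections = StatusSections { nodes: true, databases: true, ..Default::default() };
//! get_status(&conn_mgr, Some("production"), sections, OutputFormat::Json, None).await?;
//! ```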

use crate::cli::OutputFormat;
use crate::connection::ConnectionManager;
use crate::error::Result as CliResult;
use anyhow::Context;
use redis_enterprise::bdb::BdbHandler;
use redis_enterprise::cluster::ClusterHandler;
use redis_enterprise::nodes::NodeHandler;
use redis_enterprise::shards::ShardHandler;
use serde::{Deserialize, Serialize};
use serde_json::{Value, json};

use super::utils::*;

/// Comprehensive cluster status information
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClusterStatus {
    /// Cluster information
    pub cluster: Value,
    /// List of nodes
    pub nodes: Value,
    /// List of databases
    pub databases: Value,
    /// List of shards
    pub shards: Value,
    /// Summary statistics
    pub summary: StatusSummary,
}

/// Summary statistics for cluster health
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StatusSummary {
    /// Total number of nodes
    pub total_nodes: usize,
    /// Number of healthy nodes
    pub healthy_nodes: usize,
    /// Total number of databases
    pub total_databases: usize,
    /// Number of active databases
    pub active_databases: usize,
    /// Total number of shards
    pub total_shards: usize,
    /// Cluster health status
    pub cluster_health: String,
}

/// Sections to display in status output
#[derive(Debug, Clone, Default)]
pub struct StatusSections {
    /// Show cluster information
    pub cluster: bool,
    /// Show nodes information
    pub nodes: bool,
    /// Show databases information
    pub databases: bool,
    /// Show shards information
    pub shards: bool,
}

impl StatusSections {
    /// Create sections showing all information
    pub fn all() -> Self {
        Self {
            cluster: true,
            nodes: true,
            databases: true,
            shards: true,
        }
    }

    /// Check if any section is enabled
    pub fn any_enabled(&self) -> bool {
        self.cluster || self.nodes || self.databases || self.shards
    }
}

/// Get comprehensive cluster status
pub async fn get_status(
    conn_mgr: &ConnectionManager,
    profile_name: Option<&str>,
    sections: StatusSections,
    output_format: OutputFormat,
    query: Option<&str>,
) -> CliResult<()> {
    let client = conn_mgr.create_enterprise_client(profile_name).await?;

    // Use provided sections, or default to all if none specified
    let sections = if sections.any_enabled() {
        sections
    } else {
        StatusSections::all()
    };

    // Collect cluster info
    let cluster_result = if sections.cluster {
        ClusterHandler::new(client.clone())
            .info()
            .await
            .map(|v| serde_json::to_value(v).unwrap_or(json!({})))
            .context("Failed to get cluster info")?
    } else {
        json!({})
    };

    // Collect nodes
    let nodes_result = if sections.nodes {
        NodeHandler::new(client.clone())
            .list()
            .await
            .map(|v| serde_json::to_value(v).unwrap_or(json!([])))
            .context("Failed to list nodes")?
    } else {
        json!([])
    };

    // Collect databases
    let databases_result = if sections.databases {
        BdbHandler::new(client.clone())
            .list()
            .await
            .map(|v| serde_json::to_value(v).unwrap_or(json!([])))
            .context("Failed to list databases")?
    } else {
        json!([])
    };

    // Collect shards
    let shards_result = if sections.shards {
        ShardHandler::new(client.clone())
            .list()
            .await
            .map(|v| serde_json::to_value(v).unwrap_or(json!([])))
            .context("Failed to list shards")?
    } else {
        json!([])
    };

    // Calculate summary statistics
    let summary = calculate_summary(&nodes_result, &databases_result, &shards_result);

    // Build comprehensive status
    let status = ClusterStatus {
        cluster: cluster_result,
        nodes: nodes_result,
        databases: databases_result,
        shards: shards_result,
        summary,
    };

    let status_json = serde_json::to_value(status).context("Failed to serialize cluster status")?;

    // Apply query if provided
    let data = handle_output(status_json, output_format, query)?;

    // Format and display
    print_formatted_output(data, output_format)?;

    Ok(())
}

/// Calculate summary statistics from collected data
fn calculate_summary(nodes: &Value, databases: &Value, shards: &Value) -> StatusSummary {
    let empty_vec = vec![];
    let nodes_array = nodes.as_array().unwrap_or(&empty_vec);
    let databases_array = databases.as_array().unwrap_or(&empty_vec);
    let shards_array = shards.as_array().unwrap_or(&empty_vec);

    let total_nodes = nodes_array.len();
    let healthy_nodes = nodes_array
        .iter()
        .filter(|n| {
            n.get("status")
                .and_then(|s| s.as_str())
                .map(|s| s == "active" || s == "ok")
                .unwrap_or(false)
        })
        .count();

    let total_databases = databases_array.len();
    let active_databases = databases_array
        .iter()
        .filter(|db| {
            db.get("status")
                .and_then(|s| s.as_str())
                .map(|s| s == "active")
                .unwrap_or(false)
        })
        .count();

    let total_shards = shards_array.len();

    // Determine cluster health
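    // "healthy"  => every node reports "active"/"ok" and every database reports "active"
    // "critical" => no healthy nodes at all, or no active databases at all
    // "degraded" => anything in between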
    let cluster_health = if healthy_nodes == total_nodes && active_databases == total_databases {
        "healthy".to_string()
    } else if healthy_nodes == 0 || active_databases == 0 {
        "critical".to_string()
    } else {
        "degraded".to_string()
    };

    StatusSummary {
        total_nodes,
        healthy_nodes,
        total_databases,
        active_databases,
        total_shards,
        cluster_health,
    }
}
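
// Illustrative unit tests for the pure logic in this module. They exercise only
// `calculate_summary` and `StatusSections`, which are defined above, so they make
// no assumptions about the REST handlers or the connection layer.
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;

    #[test]
    fn summary_is_healthy_when_all_nodes_and_databases_are_ok() {
        let nodes = json!([{"status": "active"}, {"status": "ok"}]);
        let databases = json!([{"status": "active"}]);
        let shards = json!([{}, {}, {}]);

        let summary = calculate_summary(&nodes, &databases, &shards);

        assert_eq!(summary.total_nodes, 2);
        assert_eq!(summary.healthy_nodes, 2);
        assert_eq!(summary.total_databases, 1);
        assert_eq!(summary.active_databases, 1);
        assert_eq!(summary.total_shards, 3);
        assert_eq!(summary.cluster_health, "healthy");
    }

    #[test]
    fn summary_is_degraded_when_only_some_nodes_are_healthy() {
        let nodes = json!([{"status": "active"}, {"status": "down"}]);
        let databases = json!([{"status": "active"}]);

        let summary = calculate_summary(&nodes, &databases, &json!([]));

        assert_eq!(summary.healthy_nodes, 1);
        assert_eq!(summary.cluster_health, "degraded");
    }

    #[test]
    fn summary_is_critical_when_no_databases_are_active() {
        let nodes = json!([{"status": "active"}]);
        let databases = json!([{"status": "pending"}]);

        let summary = calculate_summary(&nodes, &databases, &json!([]));

        assert_eq!(summary.cluster_health, "critical");
    }

    #[test]
    fn sections_default_to_nothing_until_explicitly_enabled() {
        assert!(!StatusSections::default().any_enabled());
        assert!(StatusSections::all().any_enabled());
    }
}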