|
| 1 | +use std::{ |
| 2 | + collections::HashMap, |
| 3 | + fs::{self, remove_file}, |
| 4 | + io::{BufRead, BufReader}, |
| 5 | + iter, |
| 6 | + process::{Child, Command, Stdio}, |
| 7 | + sync::Mutex, |
| 8 | +}; |
| 9 | + |
| 10 | +use lazy_static::lazy_static; |
| 11 | +use std::sync::Arc; |
| 12 | + |
| 13 | +use rand::{random, Rng}; |
| 14 | + |
| 15 | +use swss_common::*; |
| 16 | + |
lazy_static! {
    // Process-wide singleton holding the shared config_db redis instance.
    // Populated lazily by Redis::start_config_db(); every caller receives a
    // clone of the same Arc, so the server lives until the last handle drops.
    static ref CONFIG_DB: Mutex<Option<Arc<Redis>>> = Mutex::new(None);
}
| 20 | + |
/// A redis-server child process listening on a unix socket, for tests.
/// The server is stopped and reaped when this struct is dropped.
pub struct Redis {
    // The spawned `timeout`-wrapped redis-server process.
    pub proc: Child,
    // Path of the unix socket the server accepts connections on.
    pub sock: String,
}
| 25 | + |
impl Redis {
    /// Start a Redis instance with a random unix socket. Multiple instances can be started.
    /// It is mutually exclusive with start_config_db().
    pub fn start() -> Self {
        sonic_db_config_init_for_test();
        Redis::start_with_sock(random_unix_sock())
    }

    /// Start a Redis with config_db. Only one instance can be started at a time.
    /// It is mutually exclusive with start().
    ///
    /// Returns a shared handle: the first caller spawns the server and stores
    /// it in the CONFIG_DB singleton; later callers get a clone of that Arc.
    pub fn start_config_db() -> Arc<Redis> {
        CONFIG_DB
            .lock()
            .unwrap()
            .get_or_insert_with(|| {
                let sock_str = random_unix_sock();
                // Register this socket with SonicDBConfig before spawning the
                // server, so db lookups resolve to the new instance.
                config_db_config_init_for_test(&sock_str);
                Arc::new(Redis::start_with_sock(sock_str))
            })
            .clone()
    }

    /// Spawn `redis-server` bound only to `sock` and block until the server
    /// reports on stdout that it is ready to accept connections.
    ///
    /// Panics if the process exits (stdout hits EOF) before reporting ready.
    fn start_with_sock(sock: String) -> Self {
        // `timeout --signal=KILL 15s` acts as a watchdog: if a test hangs or
        // leaks the server, it is force-killed after 15 seconds.
        #[rustfmt::skip]
        #[allow(clippy::zombie_processes)]
        let mut child = Command::new("timeout")
            .args([
                "--signal=KILL",
                "15s",
                "redis-server",
                "--appendonly", "no",
                "--save", "",
                // Emit keyspace-event notifications (A = all classes, K = keyspace, E = keyevent).
                "--notify-keyspace-events", "AKE",
                // Port 0 disables TCP; the unix socket is the only listener.
                "--port", "0",
                "--unixsocket", &sock,
            ])
            .stdout(Stdio::piped())
            .spawn()
            .unwrap();
        let mut stdout = BufReader::new(child.stdout.take().unwrap());
        let mut buf = String::new();
        // Scan stdout line by line, reusing one buffer, until the readiness
        // banner appears. EOF before that means the server died at startup.
        loop {
            buf.clear();
            if stdout.read_line(&mut buf).unwrap() == 0 {
                panic!("Redis didn't start");
            }
            // Some redis version capitalize "Ready", others have lowercase "ready" :P
            if buf.contains("eady to accept connections") {
                break Self { proc: child, sock };
            }
        }
    }

    /// Open a new DbConnector (db id 0, no timeout) to this instance's socket.
    pub fn db_connector(&self) -> DbConnector {
        DbConnector::new_unix(0, &self.sock, 0).unwrap()
    }
}
| 83 | + |
| 84 | +impl Drop for Redis { |
| 85 | + fn drop(&mut self) { |
| 86 | + Command::new("kill") |
| 87 | + .args(["-s", "TERM", &self.proc.id().to_string()]) |
| 88 | + .status() |
| 89 | + .unwrap(); |
| 90 | + self.proc.wait().unwrap(); |
| 91 | + } |
| 92 | +} |
| 93 | + |
/// Runs the wrapped closure when dropped — deferred cleanup, akin to Go's
/// `defer`. Construct with `Defer(Some(closure))`.
pub struct Defer<F: FnOnce()>(Option<F>);

impl<F: FnOnce()> Drop for Defer<F> {
    fn drop(&mut self) {
        // Use `if let` instead of `unwrap()`: panicking inside `drop` while
        // the thread is already unwinding causes a double-panic abort. A
        // `Defer(None)` (disarmed) value is simply a no-op.
        if let Some(f) = self.0.take() {
            f();
        }
    }
}
| 101 | + |
/// Minimal SonicDBConfig database map used by sonic_db_config_init_for_test():
/// one database with id 0, ":" separator, served by the "redis" instance.
const DB_CONFIG_JSON: &str = r#"
    {
        "DATABASES": {
            "db name doesn't matter": {
                "id": 0,
                "separator": ":",
                "instance": "redis"
            }
        }
    }
    "#;
| 113 | + |
/// SonicDBConfig template for config_db tests. `{port}` and `{path}` are
/// placeholders substituted by config_db_config_init_for_test() before the
/// file is written; all databases map onto the single "redis" instance.
const CONFIG_DB_REDIS_CONFIG_JSON: &str = r#"
    {
        "INSTANCES": {
            "redis": {
                "hostname": "127.0.0.1",
                "port": {port},
                "unix_socket_path": "{path}",
                "persistence_for_warm_boot": "yes"
            }
        },
        "DATABASES": {
            "APPL_DB": {
                "id": 0,
                "separator": ":",
                "instance": "redis"
            },
            "CONFIG_DB": {
                "id": 1,
                "separator": "|",
                "instance": "redis"
            },
            "STATE_DB": {
                "id": 2,
                "separator": "|",
                "instance": "redis"
            },
            "DPU_STATE_DB": {
                "id": 3,
                "separator": "|",
                "instance": "redis"
            },
            "DPU_APPL_DB": {
                "id": 4,
                "separator": ":",
                "instance": "redis"
            }
        }
    }
"#;
| 153 | + |
/// Global SonicDBConfig: includes the per-db config twice — once unnamed and
/// once under container "dpu0" (presumably to exercise per-container/DPU db
/// lookups — TODO confirm against SonicDBConfig global-init behavior).
const DB_GLOBAL_CONFIG_JSON: &str = r#"
    {
        "INCLUDES" : [
            {
                "include" : "db_config_test.json"
            },
            {
                "container_name" : "dpu0",
                "include" : "db_config_test.json"

            }
        ]
    }
"#;
| 168 | + |
// One-shot guards for the two mutually-exclusive SonicDBConfig init paths;
// each flag flips to true after its initializer has run once.
static SONIC_DB_INITIALIZED: Mutex<bool> = Mutex::new(false);
// NOTE(review): "CONIFG" is a typo for "CONFIG"; left as-is because renaming
// would have to touch every use site at once.
static CONIFG_DB_INITIALIZED: Mutex<bool> = Mutex::new(false);
| 171 | + |
/// One-time SonicDBConfig initialization for plain (non-config_db) tests.
/// Panics if the config_db path was already initialized — the two modes are
/// mutually exclusive.
pub fn sonic_db_config_init_for_test() {
    // HACK
    // We need to do our own locking here because locking is not correctly implemented in
    // swss::SonicDBConfig :/
    //
    // Lock order is CONFIG then SONIC; any sibling initializer must acquire
    // in the same order to avoid an ABBA deadlock. Both guards stay held for
    // the whole function.
    let config_db_init = CONIFG_DB_INITIALIZED.lock().unwrap();
    // config_db and sonic_db are mutually exclusive because the lock in swss::SonicDBConfig
    assert!(!*config_db_init);

    let mut sonic_db_init = SONIC_DB_INITIALIZED.lock().unwrap();
    if !*sonic_db_init {
        // Write the config files, initialize SonicDBConfig from them, then
        // delete them — they are only read during the initialize calls.
        fs::write("/tmp/db_config_test.json", DB_CONFIG_JSON).unwrap();
        fs::write("/tmp/db_global_config_test.json", DB_GLOBAL_CONFIG_JSON).unwrap();
        sonic_db_config_initialize("/tmp/db_config_test.json").unwrap();
        sonic_db_config_initialize_global("/tmp/db_global_config_test.json").unwrap();
        fs::remove_file("/tmp/db_config_test.json").unwrap();
        fs::remove_file("/tmp/db_global_config_test.json").unwrap();
        *sonic_db_init = true;
    }
}
| 191 | + |
| 192 | +fn config_db_config_init_for_test(sock_str: &str) { |
| 193 | + // HACK |
| 194 | + // We need to do our own locking here because locking is not correctly implemented in |
| 195 | + // swss::SonicDBConfig :/ |
| 196 | + let sonic_db_init = SONIC_DB_INITIALIZED.lock().unwrap(); |
| 197 | + // config_db and sonic_db are mutually exclusive because the lock in swss::SonicDBConfig |
| 198 | + assert!(!*sonic_db_init); |
| 199 | + |
| 200 | + let mut config_db_init = CONIFG_DB_INITIALIZED.lock().unwrap(); |
| 201 | + if !*config_db_init { |
| 202 | + let port = random_port(); |
| 203 | + let db_config_json = CONFIG_DB_REDIS_CONFIG_JSON |
| 204 | + .replace("{port}", &port.to_string()) |
| 205 | + .replace("{path}", sock_str); |
| 206 | + fs::write("/tmp/db_config_test.json", db_config_json).unwrap(); |
| 207 | + fs::write("/tmp/db_global_config_test.json", DB_GLOBAL_CONFIG_JSON).unwrap(); |
| 208 | + sonic_db_config_initialize("/tmp/db_config_test.json").unwrap(); |
| 209 | + sonic_db_config_initialize_global("/tmp/db_global_config_test.json").unwrap(); |
| 210 | + fs::remove_file("/tmp/db_config_test.json").unwrap(); |
| 211 | + fs::remove_file("/tmp/db_global_config_test.json").unwrap(); |
| 212 | + *config_db_init = true; |
| 213 | + } |
| 214 | +} |
| 215 | + |
| 216 | +pub fn random_string() -> String { |
| 217 | + format!("{:0X}", random::<u64>()) |
| 218 | +} |
| 219 | + |
| 220 | +pub fn random_cxx_string() -> CxxString { |
| 221 | + CxxString::new(random_string()) |
| 222 | +} |
| 223 | + |
| 224 | +pub fn random_fvs() -> FieldValues { |
| 225 | + let mut field_values = HashMap::new(); |
| 226 | + for _ in 0..rand::thread_rng().gen_range(100..1000) { |
| 227 | + field_values.insert(random_string(), random_cxx_string()); |
| 228 | + } |
| 229 | + field_values |
| 230 | +} |
| 231 | + |
| 232 | +pub fn random_kfv() -> KeyOpFieldValues { |
| 233 | + let key = random_string(); |
| 234 | + let operation = if random() { KeyOperation::Set } else { KeyOperation::Del }; |
| 235 | + let field_values = if operation == KeyOperation::Set { |
| 236 | + // We need at least one field-value pair, otherwise swss::BinarySerializer infers that |
| 237 | + // the operation is DEL even if the .operation field is SET |
| 238 | + random_fvs() |
| 239 | + } else { |
| 240 | + HashMap::new() |
| 241 | + }; |
| 242 | + |
| 243 | + KeyOpFieldValues { |
| 244 | + key, |
| 245 | + operation, |
| 246 | + field_values, |
| 247 | + } |
| 248 | +} |
| 249 | + |
| 250 | +pub fn random_kfvs() -> Vec<KeyOpFieldValues> { |
| 251 | + iter::repeat_with(random_kfv).take(100).collect() |
| 252 | +} |
| 253 | + |
| 254 | +pub fn random_unix_sock() -> String { |
| 255 | + format!("/tmp/swss-common-testing-{}.sock", random_string()) |
| 256 | +} |
| 257 | + |
| 258 | +pub fn random_port() -> u16 { |
| 259 | + let mut rng = rand::thread_rng(); |
| 260 | + rng.gen_range(1000..65535) |
| 261 | +} |
| 262 | + |
| 263 | +// zmq doesn't clean up its own ipc sockets, so we include a deferred operation for that |
| 264 | +pub fn random_zmq_endpoint() -> (String, impl Drop) { |
| 265 | + let sock = random_unix_sock(); |
| 266 | + let endpoint = format!("ipc://{sock}"); |
| 267 | + (endpoint, Defer(Some(|| remove_file(sock).unwrap()))) |
| 268 | +} |
0 commit comments