"""Debug Info.

This host service module implements the backend support for
collecting host debug artifacts. Depending on the log level
and board type input, the relevant log files, DB snapshots,
counters, record files, and various command outputs are collected
and aggregated under a specified artifact directory, and the directory is
compressed to a *.tar.gz in the host.

As part of the SONiC supported common debug commands, below is the list of
files: core, log, db, counter files, routing.txt and version.txt.
"""

from datetime import datetime
import json
import logging
import os
import shutil
import subprocess
import time

from host_modules import host_service
# Import SONiC debug commands for SONiC platform.
from utils.sonic_debug_cmds import *

MOD_NAME = "debug_info"
# Volatile (tmpfs) location where artifacts are staged and archived.
ARTIFACT_DIR = "/tmp/dump"
NONVOLATILE_PARTITION = "/var/log/"
NONVOLATILE_ARTIFACT_DIR = "/var/log/dump"
# Free space (bytes) that must remain available beyond the artifact size.
NONVOLATILE_STORAGE_REQUIRED = 5 * 10**8
# Flag file marking that artifacts were already saved to persistent storage.
NONVOLATILE_TMP_FLAG = "/tmp/nonvolatile_saved"
# Prefix containers use when referring to the artifact path.
ARTIFACT_DIR_CONTAINER = "/var/dump"
ARTIFACT_DIR_HOST = "host"
CORE_DIR = "core"
DB_ARTIFACT_DIR = ARTIFACT_DIR_HOST + "/db"
ARTIFACT_LEVEL_ALERT = "alert"
ARTIFACT_LEVEL_CRITICAL = "critical"
ARTIFACT_LEVEL_ALL = "all"
LOG_LEVEL_KEY = "level"
PERSISTENT_STORAGE_KEY = "use_persistent_storage"

STATE_DB_SEPARATOR = "|"
DEBUG_INFO_FLAG = "debug_info"

log_dir = "/var/log"
os.makedirs(log_dir, exist_ok=True)

log_file = os.path.join(log_dir, "debug_info.log")
logging.basicConfig(
    filename=log_file,
    filemode='a',  # append mode
    format='%(asctime)s - %(levelname)s - %(message)s',
    level=logging.DEBUG
)

logger = logging.getLogger(__name__)


class DebugInfo(host_service.HostModule):
    """DBus endpoint that collects debug artifacts."""

    def __init__(self, mod_name):
        # Cache board type / hostname once; collect() retries lazily if the
        # initial lookup failed or returned the fallback value.
        self._board_type = DebugInfo.get_board_type()
        self._hostname = DebugInfo.get_hostname()
        super(DebugInfo, self).__init__(mod_name)

    @staticmethod
    def _run_command(cmd: str, timeout: int = 20):
        """Run a shell command and return (rc, stdout, stderr).

        On timeout the process is killed and rc 1 is returned with a
        "command timeout" message in both output fields.
        """
        proc = subprocess.Popen(
            cmd,
            shell=True,
            text=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True)
        try:
            stdout, stderr = proc.communicate(timeout=timeout)
        except subprocess.TimeoutExpired:
            proc.kill()
            return 1, "command timeout", "command timeout"
        return proc.returncode, stdout, stderr

    @staticmethod
    def get_board_type() -> str:
        """Return the platform/board identifier, or "" on command failure."""
        rc, stdout, err = DebugInfo._run_command(BOARD_TYPE_CMD)
        board_type = ""
        if rc != 0:
            logger.warning("fail to execute command '%s': %s",
                           BOARD_TYPE_CMD, err)
        else:
            board_type = stdout.strip()
        return board_type

    @staticmethod
    def get_hostname() -> str:
        """Return the device hostname, falling back to "switch" on failure."""
        cmd = "hostname"
        rc, stdout, err = DebugInfo._run_command(cmd)
        hostname = "switch"
        if rc != 0:
            logger.warning("fail to execute command '%s': %s", cmd, err)
        else:
            hostname = stdout.strip()
        return hostname

    @staticmethod
    def _collect_counter_artifacts(directory: str, prefix: str,
                                   board_type: str) -> None:
        """Dump counter data into a timestamped <prefix>counter_* directory.

        Individual command failures are logged and skipped so the overall
        collection continues.
        """
        counter_artifact_dir = os.path.join(
            directory,
            datetime.now().strftime(prefix + "counter_%Y%m%d_%H%M%S"))
        os.makedirs(counter_artifact_dir, exist_ok=True)

        for cmd in COUNTER_CMDS:
            rc, _, err = DebugInfo._run_command(
                cmd.format(counter_artifact_dir), timeout=60)
            if rc != 0:
                # Continue the artifact collection in case of error.
                logger.warning("fail to execute command '%s': %s", cmd, err)

    @staticmethod
    def _collect_teamdctl_data(artifact_dir_host):
        """Dump `teamdctl <trunk> state dump` output for every PortChannel.

        Trunk names are discovered from the CONFIG_DB PORTCHANNEL keys; one
        teamdctl_<trunk>.txt file is written per trunk. Collection is
        best-effort: failures are logged (or, for an invalid output path,
        silently abandoned) and the rest of the collection continues.
        """
        try:
            redis_result = subprocess.run(
                REDIS_LIST_PORTCHANNEL_CMD, shell=True, capture_output=True,
                text=True, check=True)
            trunks = redis_result.stdout.strip().split('\n')
            for trunk in trunks:
                try:
                    trk = trunk.split('|')[1]
                except IndexError:
                    # No trunk is found in the DB or the trunk table format is
                    # incorrect.
                    continue
                teamdctl_cmd = TEAMD_CTL_CMD.format(trk)
                teamdctl_result = subprocess.run(
                    teamdctl_cmd, shell=True, capture_output=True, text=True)
                if teamdctl_result.returncode == 0:
                    filepath = os.path.join(artifact_dir_host,
                                            f'teamdctl_{trk}.txt')
                    try:
                        with open(filepath, 'w') as f:
                            f.write(teamdctl_result.stdout)
                    # If the filepath is invalid, then just return silently.
                    # If the filepath is valid, the file will be created.
                    except FileNotFoundError:
                        return
                else:
                    logger.warning(
                        f"Error running teamdctl for {trk}: {teamdctl_result.stderr}")
        except subprocess.CalledProcessError as e:
            logger.warning(f"Error running Redis command: {e}")

    @staticmethod
    def _save_persistent_storage(artifact_name: str) -> None:
        """Best-effort copy of the artifact tarball to persistent storage.

        Runs at most once per boot (guarded by NONVOLATILE_TMP_FLAG) and only
        when enough free space remains on the persistent partition. Failures
        are logged but never propagated to the caller.
        """
        if os.path.isfile(NONVOLATILE_TMP_FLAG):
            logger.warning(
                "%s already exists, skipping saving artifacts to "
                "persistent storage", NONVOLATILE_TMP_FLAG)
            return
        try:
            with open(NONVOLATILE_TMP_FLAG, "w+"):
                pass
        except OSError as e:
            logger.warning("error creating flag in tmp: %s. Error: %s",
                           NONVOLATILE_TMP_FLAG, str(e))
            return

        host_artifact_name = ARTIFACT_DIR + "/" + artifact_name
        # Clear any stale dump directory before measuring free space.
        shutil.rmtree(NONVOLATILE_ARTIFACT_DIR, ignore_errors=True)
        try:
            artifact_size = os.path.getsize(host_artifact_name)
        except OSError:
            logger.warning("path %s did not exist", host_artifact_name)
            return

        _, _, free = shutil.disk_usage(NONVOLATILE_PARTITION)
        if free < NONVOLATILE_STORAGE_REQUIRED + artifact_size:
            logger.warning(
                "free space remaining on %s is less than %d: %d. Not saving "
                "artifacts to persistent storage", NONVOLATILE_PARTITION,
                NONVOLATILE_STORAGE_REQUIRED + artifact_size, free)
            return

        os.makedirs(NONVOLATILE_ARTIFACT_DIR, exist_ok=True)

        cmd = (
            f"cp {host_artifact_name} {NONVOLATILE_ARTIFACT_DIR}/{artifact_name}")

        rc, _, err = DebugInfo._run_command(cmd)
        if rc != 0:
            # Report success overall if saving to persistent storage fails,
            # saving to persistent storage is best-effort.
            logger.warning("fail to execute command '%s': %s", cmd, err)

    @staticmethod
    def collect_artifacts(req: str, timestamp: str, board_type: str,
                          hostname: str):
        """Collect all artifacts for a given board type.

        Currently only host-level and DB artifacts are collected.
        Component-level (e.g., gnmi/orch) artifact collection is not
        supported.

        This method can also be called by the CLI.

        Args:
          req: a single JSON string that contains the log level, optionally
            with a persistent-storage flag to indicate if the artifacts
            should be stored in persistent storage, in addition to volatile
            storage.
          timestamp: a timestamp string that is used in the artifact name.
          board_type: a string representation of the board type.
          hostname: the hostname of the device, used to name the output
            directory.

        Returns:
          A (return code, string) tuple; the string is the output artifact
          path in the host on success, or an error message on failure.
        """
        try:
            request = json.loads(req)
        except json.JSONDecodeError:
            return 1, "invalid input: " + req
        log_level = request.get(LOG_LEVEL_KEY, ARTIFACT_LEVEL_ALERT)
        use_persistent_storage = request.get(PERSISTENT_STORAGE_KEY, False)

        dir_name = hostname + "_" + timestamp
        artifact_dir_host = os.path.join(ARTIFACT_DIR, dir_name,
                                         ARTIFACT_DIR_HOST)
        db_artifact_dir = os.path.join(ARTIFACT_DIR, dir_name,
                                       DB_ARTIFACT_DIR)

        os.makedirs(artifact_dir_host, exist_ok=True)

        # Collect counter artifacts at the beginning of the collection.
        if log_level in (ARTIFACT_LEVEL_CRITICAL, ARTIFACT_LEVEL_ALL):
            DebugInfo._collect_counter_artifacts(artifact_dir_host, "pre_",
                                                 board_type)

        for cmd in COMMON_CMDS:
            rc, _, err = DebugInfo._run_command(cmd.format(artifact_dir_host))
            if rc != 0:
                # Continue the artifact collection in case of error.
                logger.warning("fail to execute command '%s': %s", cmd, err)

        # create host/core dir if it does not exist
        os.makedirs(artifact_dir_host + "/" + CORE_DIR, exist_ok=True)

        DebugInfo._collect_teamdctl_data(artifact_dir_host)

        if log_level in (ARTIFACT_LEVEL_CRITICAL, ARTIFACT_LEVEL_ALL):
            os.makedirs(db_artifact_dir, exist_ok=True)
            for cmd in DB_CMDS:
                rc, _, err = DebugInfo._run_command(
                    cmd.format(db_artifact_dir), timeout=60)
                if rc != 0:
                    # Continue the artifact collection in case of error.
                    logger.warning("fail to execute command '%s': %s",
                                   cmd, err)

        # Collect counter artifacts at the end of the collection.
        if log_level in (ARTIFACT_LEVEL_CRITICAL, ARTIFACT_LEVEL_ALL):
            DebugInfo._collect_counter_artifacts(artifact_dir_host, "post_",
                                                 board_type)

        artifact_name = dir_name + ".tar.gz"
        host_artifact_name = ARTIFACT_DIR + "/" + artifact_name

        cmd = ("tar -C " + ARTIFACT_DIR + " -zcvf " + host_artifact_name +
               " " + dir_name)

        rc, _, err = DebugInfo._run_command(cmd, timeout=60)
        # Remove the staging directory whether or not tar succeeded.
        shutil.rmtree(os.path.join(ARTIFACT_DIR, dir_name),
                      ignore_errors=True)
        if rc != 0:
            return rc, "fail to execute command '" + cmd + "': " + err

        if use_persistent_storage:
            DebugInfo._save_persistent_storage(artifact_name)

        return 0, host_artifact_name

    @host_service.method(
        host_service.bus_name(MOD_NAME), in_signature="as",
        out_signature="is")
    def collect(self, options):
        """DBus entrypoint to collect debug artifacts from host."""
        # Converts single string input into a one-element list.
        if isinstance(options, str):
            options = [options]
        try:
            json.loads(options[0])
        except json.JSONDecodeError:
            return 1, "invalid input: " + options[0]

        # Retry lookups that failed (or fell back to the default) at init.
        if not self._board_type:
            self._board_type = self.get_board_type()
        if self._hostname == "switch":
            self._hostname = self.get_hostname()

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S%f")
        try:
            rc, artifact_path = self.collect_artifacts(
                options[0], timestamp, self._board_type, self._hostname)
        except Exception as error:
            return 1, "Artifact collection failed: " + str(error)
        return rc, artifact_path

    @host_service.method(
        host_service.bus_name(MOD_NAME), in_signature="as",
        out_signature="is")
    def check(self, options):
        """Always ready because artifact collection is synchronous."""
        return 0, "Artifact ready"

    @host_service.method(
        host_service.bus_name(MOD_NAME), in_signature="as",
        out_signature="is")
    def ack(self, options):
        """Delete a collected artifact, translating container paths to host.

        Bug fix: the original stripped ARTIFACT_DIR ("/tmp/dump") although,
        per its own comment, the input may carry the container prefix
        ARTIFACT_DIR_CONTAINER ("/var/dump"); container paths were mangled
        into "/tmp/dump/var/dump/..." and could never be deleted. Now the
        container prefix is translated and host paths pass through unchanged.
        """
        if isinstance(options, str):
            options = [options]
        # The artifact name in container has a different prefix. Convert it
        # to the host prefix.
        if options[0].startswith(ARTIFACT_DIR_CONTAINER):
            artifact = ARTIFACT_DIR + options[0].removeprefix(
                ARTIFACT_DIR_CONTAINER)
        else:
            artifact = options[0]
        try:
            os.remove(artifact)
        except FileNotFoundError:
            return 1, "Artifact file not found: " + str(artifact)
        except PermissionError:
            return 1, "Artifact file permission denied: " + str(artifact)
        except OSError as error:
            return 1, "Failed to delete artifact file with error: " + str(error)
        return 0, ""


def register():
    """Return class name."""
    return DebugInfo, MOD_NAME
sys.version_info >= (3, 3):
    from unittest import mock
else:
    import mock

# Make the host_modules sources importable as top-level modules so the
# production code's absolute imports resolve when run from the repo tree.
test_path = os.path.dirname(os.path.abspath(__file__))
sonic_host_service_path = os.path.dirname(test_path)
host_modules_path = os.path.join(sonic_host_service_path, "host_modules")
sys.path.insert(0, sonic_host_service_path)

imp.load_source("host_service", host_modules_path + "/host_service.py")
imp.load_source("debug_info", host_modules_path + "/debug_info.py")

from debug_info import *

class TestDebugInfo:

    @classmethod
    def setup_class(cls):
        # Patch super() so the DBus base-class initialization is skipped.
        with mock.patch("debug_info.super"):
            cls.debug_info_module = DebugInfo(MOD_NAME)

    def setup_method(self):
        # Fresh temporary directory per test; removed in teardown_method.
        self.tmpdir = tempfile.TemporaryDirectory()
        self.base_path = self.tmpdir.name

    def teardown_method(self):
        self.tmpdir.cleanup()

    def test_run_command_success(self):
        with mock.patch("debug_info.subprocess.Popen") as mp:
            proc = mock.Mock()
            proc.communicate.return_value = ("out", "err")
            proc.returncode = 0
            mp.return_value = proc
            rc, out, err = self.debug_info_module._run_command("echo test")
            assert rc == 0
            assert out == "out"
            assert err == "err"

    def test_run_command_timeout(self):
        with mock.patch("debug_info.subprocess.Popen") as mp:
            proc = mock.Mock()
            proc.communicate.side_effect = subprocess.TimeoutExpired(cmd="sleep 5", timeout=1)
            proc.kill = mock.Mock()
            mp.return_value = proc
            rc, out, err = self.debug_info_module._run_command("sleep 5", timeout=1)
            assert rc == 1
            assert "timeout" in out

    def test_get_board_type_failure(self):
        with mock.patch("debug_info.DebugInfo._run_command", return_value=(1, "", "err")) as mock_run, \
                mock.patch("debug_info.logger.warning") as mock_warn:
            bt = self.debug_info_module.get_board_type()
            mock_run.assert_called_once_with(BOARD_TYPE_CMD)
            mock_warn.assert_called_with("fail to execute command '%s': %s", BOARD_TYPE_CMD, "err")
            assert bt == ""

    def test_get_board_type_success(self):
        with mock.patch("debug_info.DebugInfo._run_command", return_value=(0, "x86_64-kvm_x86_64-r0", "")):
            bt = self.debug_info_module.get_board_type()
            assert bt == "x86_64-kvm_x86_64-r0"

    def test_get_hostname_failure(self):
        with mock.patch("debug_info.DebugInfo._run_command", return_value=(1, "", "err")):
            hn = self.debug_info_module.get_hostname()
            assert hn == "switch"

    def test_get_hostname_success(self):
        with mock.patch("debug_info.DebugInfo._run_command", return_value=(0, "sonic-host", "")):
            hn = self.debug_info_module.get_hostname()
            assert hn == "sonic-host"

    def test_collect_invalid_json(self):
        rc, msg = self.debug_info_module.collect("not a json")
        assert rc == 1
        assert "invalid input" in msg

    def test_collect_success(self):
        rc, msg = self.debug_info_module.collect("{}")
        assert rc == 0
        assert msg.startswith(ARTIFACT_DIR)

    def test_collect_artifact_throw_exception(self):
        with mock.patch("debug_info.DebugInfo.collect_artifacts", side_effect=Exception("bad")):
            rc, msg = self.debug_info_module.collect("[]")
            assert rc == 1
            assert "Artifact collection failed" in msg

    def test_check_always_ready(self):
        rc, msg = self.debug_info_module.check("anything")
        assert rc == 0
        assert "ready" in msg.lower()

    def test_ack_failure(self):
        with mock.patch("debug_info.os.remove", side_effect=OSError("fail")):
            rc, msg = self.debug_info_module.ack("/tmp/foo")
            assert rc == 1
            assert "Failed to delete" in msg

    def test_register(self):
        cls, modname = register()
        assert cls is DebugInfo
        assert modname == "debug_info"

    def test_collect_counter_artifacts_with_errors(self, tmp_path):
        fake_dir = tmp_path
        # Both counter commands fail; the helper must swallow the errors.
        with mock.patch("debug_info.COUNTER_CMDS", ["badcmd {0}"]), \
                mock.patch("debug_info.DebugInfo._run_command", side_effect=[(1, "", "err"), (1, "", "err")]):
            DebugInfo._collect_counter_artifacts(str(fake_dir), "ut_", "x86_64")

    def test_save_persistent_storage_flag_exists(self):
        fake_flag = os.path.join(self.base_path, "nv.tmp")
        with open(fake_flag, "w") as f:
            f.write("")
        with mock.patch("debug_info.NONVOLATILE_TMP_FLAG", fake_flag):
            DebugInfo._save_persistent_storage("artifact")
        assert os.path.exists(fake_flag)

    def test_save_persistent_oserror_creating_flag(self):
        # Parent directory does not exist, so flag creation raises OSError.
        bad_flag = os.path.join(self.base_path, "bad", "nv.tmp")
        with mock.patch("debug_info.NONVOLATILE_TMP_FLAG", bad_flag):
            DebugInfo._save_persistent_storage("artifact")

    def test_save_persistent_path_not_exists(self):
        fake_flag = os.path.join(self.base_path, "nv.tmp")
        with mock.patch("debug_info.NONVOLATILE_TMP_FLAG", fake_flag), \
                mock.patch("debug_info.ARTIFACT_DIR", self.base_path), \
                mock.patch("os.path.getsize", side_effect=OSError):
            DebugInfo._save_persistent_storage("artifact.tar")

    def test_save_persistent_not_enough_space(self):
        fake_flag = os.path.join(self.base_path, "nv.tmp")
        artifact = os.path.join(self.base_path, "artifact.tar")
        with open(artifact, "w") as f:
            f.write("data")

        # disk_usage reports zero free space, so the copy must be skipped.
        with mock.patch("debug_info.NONVOLATILE_TMP_FLAG", fake_flag), \
                mock.patch("debug_info.ARTIFACT_DIR", self.base_path), \
                mock.patch("os.path.getsize", return_value=100), \
                mock.patch("shutil.disk_usage", return_value=(0, 0, 0)):
            DebugInfo._save_persistent_storage("artifact.tar")
        assert os.path.exists(fake_flag)

    def test_save_persistent_storage_success(self):
        fake_flag = os.path.join(self.base_path, "nv.tmp")
        artifact = os.path.join(self.base_path, "artifact.tar")
        with open(artifact, "w") as f:
            f.write("data")

        with mock.patch("debug_info.NONVOLATILE_TMP_FLAG", fake_flag), \
                mock.patch("debug_info.ARTIFACT_DIR", self.base_path), \
                mock.patch("os.path.getsize", return_value=100), \
                mock.patch("shutil.disk_usage", return_value=(0, 1000, 1000)), \
                mock.patch("debug_info.DebugInfo._run_command", return_value=(0, "", "")):
            DebugInfo._save_persistent_storage("artifact.tar")
        assert os.path.exists(fake_flag)

    def test_collect_teamdctl_redis_command_fails(self):
        with mock.patch("debug_info.subprocess.run",
                        side_effect=subprocess.CalledProcessError(1, "redis")):
            DebugInfo._collect_teamdctl_data(self.base_path)

    def test_collect_teamdctl_empty_trunks(self):
        fake_result = mock.Mock()
        fake_result.stdout = ""
        with mock.patch("debug_info.subprocess.run", return_value=fake_result):
            DebugInfo._collect_teamdctl_data(self.base_path)
        assert len(os.listdir(self.base_path)) == 0

    def test_teamdctl_valid_trunk_success(self):
        redis_result = mock.Mock(stdout="id|PortChannel01\n")
        teamdctl_result = mock.Mock(returncode=0, stdout="teamdctl output")

        # Dispatch on the command text: redis listing vs teamdctl dump.
        def fake_run(cmd, **kwargs):
            if "redis" in cmd:
                return redis_result
            return teamdctl_result

        with mock.patch("debug_info.subprocess.run", side_effect=fake_run):
            DebugInfo._collect_teamdctl_data(self.base_path)

        filepath = os.path.join(self.base_path, "teamdctl_PortChannel01.txt")
        assert os.path.exists(filepath)
        with open(filepath) as f:
            assert "teamdctl output" in f.read()

    def test_teamdctl_valid_trunk_failure(self):
        redis_result = mock.Mock(stdout="id|PortChannel02\n")
        teamdctl_result = mock.Mock(returncode=1, stderr="some error")

        def fake_run(cmd, **kwargs):
            if "redis" in cmd:
                return redis_result
            return teamdctl_result

        with mock.patch("debug_info.subprocess.run", side_effect=fake_run):
            DebugInfo._collect_teamdctl_data(self.base_path)
        # File should not exist
        filepath = os.path.join(self.base_path, "teamdctl_PortChannel02.txt")
        assert not os.path.exists(filepath)

    def test_teamdctl_file_write_failure(self):
        redis_result = mock.Mock(stdout="id|PortChannel03\n")
        teamdctl_result = mock.Mock(returncode=0, stdout="teamdctl data")

        def fake_open(*args, **kwargs):
            raise FileNotFoundError

        def fake_run(cmd, **kwargs):
            if "redis" in cmd:
                return redis_result
            return teamdctl_result

        with mock.patch("debug_info.subprocess.run", side_effect=fake_run), \
mock.patch("builtins.open", side_effect=fake_open): + DebugInfo._collect_teamdctl_data(self.base_path) + diff --git a/utils/platform_info.py b/utils/platform_info.py new file mode 100644 index 00000000..662a5565 --- /dev/null +++ b/utils/platform_info.py @@ -0,0 +1,76 @@ +import csv +import logging +import os +import yaml + +logger = logging.getLogger(__name__) + +debian_name = 'Debian GNU/Linux' + +debian_version_path = '/etc/os-release' +sonic_version_paths = [ + '/etc/sonic/sonic_version.yml', +] + +# Use a global to cache platform info, which does not change at runtime +_platform = None + +def _read_yaml_file(path): + with open(path, 'r') as f: + try: + data = yaml.safe_load(f) + return data + except yaml.YAMLError as e: + logger.error(f"Error parsing {path}: {e}") + return {} + +def _read_os_version_file(path): + with open(path, 'r') as f: + reader = csv.reader(f, delimiter='=', quotechar='"') + data = dict(reader) + return data + +def _read_platform_info(): + global _platform + if _platform is not None: + return _platform + + platform = { + 'os': None, + 'asic_type': None, + 'is_debian': False, + 'is_sonic': False, + } + + if os.path.isfile(debian_version_path): + debian_data = _read_os_version_file(debian_version_path) + os_name = debian_data.get('NAME') + platform['os'] = os_name + platform['is_debian'] = debian_name in os_name + else: + logger.debug('OS version file not found') + + for path in sonic_version_paths: + if os.path.isfile(path): + sonic_version_data = _read_yaml_file(path) + platform['asic_type'] = sonic_version_data.get('asic_type') + platform['is_sonic'] = True + break + if platform.get('asic_type') is None: + logger.debug('SONiC version file not found') + + logger.info(f'Platform info: {platform}') + # Note: the value of platform is deterministic and simple assignment is threadsafe + _platform = platform + +# Populate the global _platform variable when the module is loaded +_read_platform_info() + +def get_platform_info(): + 'Return 
information about the platform, including OS name and switch ASIC type' + if _platform is None: + _read_platform_info() + return _platform + +if __name__ == "__main__": + print(get_platform_info()) diff --git a/utils/sonic_debug_cmds.py b/utils/sonic_debug_cmds.py new file mode 100644 index 00000000..b3be872d --- /dev/null +++ b/utils/sonic_debug_cmds.py @@ -0,0 +1,39 @@ +"""SONiC-supported commands for debug_info.py. + +Switches running SONiC support a different set of commands to collect +debug artifacts. This module contains those commands. +""" + +BOARD_TYPE_CMD = "show platform summary | grep Platform | awk '{print $2}'" +REDIS_LIST_PORTCHANNEL_CMD = ('docker exec -i database redis-cli -h localhost ' + '-n 4 --raw KEYS \'*PORTCHANNEL\\|PortChannel*\'') +TEAMD_CTL_CMD = 'docker exec -i teamd teamdctl {} state dump' + +COMMON_CMDS = [ + "cp -r /var/log {}", + "[ -d /var/core ] && cp -r /var/core {}", + "show version > {}/version.txt", + "ip -6 route > {}/routing.txt", + "ip neigh >> {}/routing.txt", + "ip route >> {}/routing.txt", + "netstat -tplnaW | grep telemetry >> {}/routing.txt", + "ip link >> {}/routing.txt", +] + +COUNTER_CMDS = [ + "top -b -n 1 -w 500 > {}/top.txt", + ('docker exec -i database ' + 'redis-dump -H 127.0.0.1 -p 6379 -d 2 -y > {}/counter_db.json'), +] + +DB_CMDS = [ + ('docker exec -i database ' + 'redis-dump -H 127.0.0.1 -p 6379 -d 0 -y > {}/appl_db.json'), + ('docker exec -i database ' + 'redis-dump -H 127.0.0.1 -p 6379 -d 1 -y > {}/asic_db.json'), + ('docker exec -i database ' + 'redis-dump -H 127.0.0.1 -p 6379 -d 4 -y > {}/config_db.json'), + ('docker exec -i database ' + 'redis-cli -n 1 hgetall VIDTORID > {}/vidtorid.txt'), +] +