diff --git a/callback_plugins/fail_if_no_hosts.py b/callback_plugins/fail_if_no_hosts.py new file mode 100644 index 0000000..8b60c17 --- /dev/null +++ b/callback_plugins/fail_if_no_hosts.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python + +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +from ansible.plugins.callback import CallbackBase + + +class CallbackModule(CallbackBase): + CALLBACK_VERSION = 2.0 + CALLBACK_NAME = 'fail_if_no_hosts' + + def __init__(self, display=None): + super(CallbackModule, self).__init__(display) + + def v2_playbook_on_stats(self, stats): + if len(stats.processed.keys()) == 0: + sys.exit(10) diff --git a/callback_plugins/validation_json.py b/callback_plugins/validation_json.py new file mode 100644 index 0000000..8290dd9 --- /dev/null +++ b/callback_plugins/validation_json.py @@ -0,0 +1,203 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import datetime +import json +import time +import os +import os.path + +from functools import partial + +from ansible.module_utils.six.moves import reduce +from ansible.parsing.ajson import AnsibleJSONEncoder +from ansible.plugins.callback import CallbackBase + +DOCUMENTATION = ''' + callback: json + short_description: Ansible screen output as JSON file + version_added: "1.0" + description: + - This callback converts all events into a JSON file + stored in /var/log/validations + type: stdout + requirements: None +''' + +VALIDATIONS_LOG_DIR = "/var/log/validations" + + +def current_time(): + return '%sZ' % datetime.datetime.utcnow().isoformat() + + +def secondsToStr(t): + def rediv(ll, b): + return list(divmod(ll[0], b)) + ll[1:] + + return "%d:%02d:%02d.%03d" % tuple( + reduce(rediv, [[ + t * 1000, + ], 1000, 60, 60])) + + +class CallbackModule(CallbackBase): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'validation_json' + + def __init__(self, display=None): + super(CallbackModule, self).__init__(display) + self.results = [] + self.simple_results = [] + self.env = {} + self.t0 = None + self.current_time = current_time() + + def _new_play(self, play): + return { + 'play': { + 'host': play.get_name(), + 'validation_id': self.env['playbook_name'], + 'validation_path': self.env['playbook_path'], + 'id': str(play._uuid), + 'duration': { + 'start': current_time() + } + }, + 'tasks': [] + } + + def _new_task(self, task): + return { + 'task': { + 'name': task.get_name(), + 'id': str(task._uuid), + 'duration': { + 'start': current_time() + } + }, + 'hosts': {} + } + + def _val_task(self, task_name): + return { + 'task': { + 'name': task_name, + 'hosts': {} + } + } + + def _val_task_host(self, task_name): + return { + 'task': { + 'name': task_name, + 'hosts': {} + } + } + + def v2_playbook_on_start(self, playbook): + self.t0 = time.time() + pl = 
playbook._file_name + validation_id = os.path.splitext(os.path.basename(pl))[0] + self.env = { + "playbook_name": validation_id, + "playbook_path": playbook._basedir + } + + def v2_playbook_on_play_start(self, play): + self.results.append(self._new_play(play)) + + def v2_playbook_on_task_start(self, task, is_conditional): + self.results[-1]['tasks'].append(self._new_task(task)) + + def v2_playbook_on_handler_task_start(self, task): + self.results[-1]['tasks'].append(self._new_task(task)) + + def v2_playbook_on_stats(self, stats): + """Display info about playbook statistics""" + + hosts = sorted(stats.processed.keys()) + + summary = {} + for h in hosts: + s = stats.summarize(h) + summary[h] = s + + output = { + 'plays': self.results, + 'stats': summary, + 'validation_output': self.simple_results + } + log_file = "{}/{}_{}_{}.json".format( + VALIDATIONS_LOG_DIR, + (os.getenv('ANSIBLE_UUID') if os.getenv('ANSIBLE_UUID') else + self.results[0].get('play').get('id')), + self.env['playbook_name'], + self.current_time) + + with open(log_file, 'w') as js: + js.write(json.dumps(output, + cls=AnsibleJSONEncoder, + indent=4, + sort_keys=True)) + + def _record_task_result(self, on_info, result, **kwargs): + """This function is used as a partial to add + failed/skipped info in a single method + """ + host = result._host + task = result._task + task_result = result._result.copy() + task_result.update(on_info) + task_result['action'] = task.action + self.results[-1]['tasks'][-1]['hosts'][host.name] = task_result + + if 'failed' in task_result.keys(): + self.simple_results.append(self._val_task(task.name)) + self.simple_results[-1]['task']['status'] = "FAILED" + self.simple_results[-1]['task']['hosts'][host.name] = task_result + if 'warnings' in task_result.keys(): + self.simple_results.append(self._val_task(task.name)) + self.simple_results[-1]['task']['status'] = "WARNING" + 
self.simple_results[-1]['task']['hosts'][host.name] = task_result + + end_time = current_time() + time_elapsed = secondsToStr(time.time() - self.t0) + self.results[-1]['tasks'][-1]['task']['duration']['end'] = end_time + self.results[-1]['play']['duration']['end'] = end_time + self.results[-1]['play']['duration']['time_elapsed'] = time_elapsed + + def __getattribute__(self, name): + """Return ``_record_task_result`` partial with a dict + containing skipped/failed if necessary + """ + if name not in ('v2_runner_on_ok', 'v2_runner_on_failed', + 'v2_runner_on_unreachable', 'v2_runner_on_skipped'): + return object.__getattribute__(self, name) + + on = name.rsplit('_', 1)[1] + + on_info = {} + if on in ('failed', 'skipped'): + on_info[on] = True + + return partial(self._record_task_result, on_info) diff --git a/callback_plugins/validation_output.py b/callback_plugins/validation_output.py new file mode 100644 index 0000000..072c9a6 --- /dev/null +++ b/callback_plugins/validation_output.py @@ -0,0 +1,207 @@ +#!/usr/bin/env python + +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import pprint + +from ansible import constants as C +from ansible.plugins.callback import CallbackBase + + +FAILURE_TEMPLATE = """\ +Task '{}' failed: +Host: {} +Message: {} +""" + +WARNING_TEMPLATE = """\ +Task '{}' succeeded, but had some warnings: +Host: {} +Warnings: {} +""" + +DEBUG_TEMPLATE = """\ +Task: Debug +Host: {} +{} +""" + + +def indent(text): + '''Indent the given text by four spaces.''' + return ''.join(' {}\n'.format(line) for line in text.splitlines()) + + +# TODO(shadower): test with async settings +class CallbackModule(CallbackBase): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'validation_output' + + def __init__(self, display=None): + super(CallbackModule, self).__init__(display) + + def print_failure_message(self, host_name, task_name, results, + abridged_result): + '''Print a human-readable error info from Ansible result dictionary.''' + + def is_script(results): + return ('rc' in results and 'invocation' in results + and 'script' in results._task_fields['action'] + and '_raw_params' in results._task_fields['args']) + + display_full_results = False + if 'rc' in results and 'cmd' in results: + command = results['cmd'] + # The command can be either a list or a string. + # Concat if it's a list: + if type(command) == list: + command = " ".join(results['cmd']) + message = "Command `{}` exited with code: {}".format( + command, results['rc']) + # There may be an optional message attached to the command. 
+ # Display it: + if 'msg' in results: + message = message + ": " + results['msg'] + elif is_script(results): + script_name = results['invocation']['module_args']['_raw_params'] + message = "Script `{}` exited with code: {}".format( + script_name, results['rc']) + elif 'msg' in results: + message = results['msg'] + else: + message = "Unknown error" + display_full_results = True + + self._display.display( + FAILURE_TEMPLATE.format(task_name, host_name, message), + color=C.COLOR_ERROR) + + stdout = results.get('module_stdout', results.get('stdout', '')) + if stdout: + print('stdout:') + self._display.display(indent(stdout), color=C.COLOR_ERROR) + stderr = results.get('module_stderr', results.get('stderr', '')) + if stderr: + print('stderr:') + self._display.display(indent(stderr), color=C.COLOR_ERROR) + if display_full_results: + print( + "Could not get an error message. Here is the Ansible output:") + pprint.pprint(abridged_result, indent=4) + warnings = results.get('warnings', []) + if warnings: + print("Warnings:") + for warning in warnings: + self._display.display("* %s " % warning, color=C.COLOR_WARN) + print("") + + def v2_playbook_on_play_start(self, play): + pass # No need to notify that a play started + + def v2_playbook_on_task_start(self, task, is_conditional): + pass # No need to notify that a task started + + def v2_runner_on_ok(self, result, **kwargs): + host_name = result._host + task_name = result._task.get_name() + task_fields = result._task_fields + results = result._result # A dict of the module name etc. 
+ self._dump_results(results) + warnings = results.get('warnings', []) + # Print only tasks that produced some warnings: + if warnings: + for warning in warnings: + warn_msg = "{}\n".format(warning) + self._display.display(WARNING_TEMPLATE.format(task_name, + host_name, + warn_msg), + color=C.COLOR_WARN) + + if 'debug' in task_fields['action']: + output = "" + + if 'var' in task_fields['args']: + variable = task_fields['args']['var'] + value = results[variable] + output = "{}: {}".format(variable, str(value)) + elif 'msg' in task_fields['args']: + output = "Message: {}".format( + task_fields['args']['msg']) + + self._display.display(DEBUG_TEMPLATE.format(host_name, output), + color=C.COLOR_OK) + + def v2_runner_on_failed(self, result, **kwargs): + host_name = result._host + task_name = result._task.get_name() + + result_dict = result._result # A dict of the module name etc. + abridged_result = self._dump_results(result_dict) + + if 'results' in result_dict: + # The task is a list of items under `results` + for item in result_dict['results']: + if item.get('failed', False): + self.print_failure_message(host_name, task_name, + item, item) + else: + # The task is a "normal" module invocation + self.print_failure_message(host_name, task_name, result_dict, + abridged_result) + + def v2_runner_on_skipped(self, result, **kwargs): + pass # No need to print skipped tasks + + def v2_runner_on_unreachable(self, result, **kwargs): + host_name = result._host + task_name = result._task.get_name() + results = {'msg': 'The host is unreachable.'} + self.print_failure_message(host_name, task_name, results, results) + + def v2_playbook_on_stats(self, stats): + def failed(host): + _failures = stats.summarize(host).get('failures', 0) > 0 + _unreachable = stats.summarize(host).get('unreachable', 0) > 0 + return (_failures or _unreachable) + + hosts = sorted(stats.processed.keys()) + failed_hosts = [host for host in hosts if failed(host)] + + if hosts: + if failed_hosts: + if 
len(failed_hosts) == len(hosts): + print("Failure! The validation failed for all hosts:") + for failed_host in failed_hosts: + self._display.display("* %s" % failed_host, + color=C.COLOR_ERROR) + else: + print("Failure! The validation failed for hosts:") + for failed_host in failed_hosts: + self._display.display("* %s" % failed_host, + color=C.COLOR_ERROR) + print("and passed for hosts:") + for host in [h for h in hosts if h not in failed_hosts]: + self._display.display("* %s" % host, + color=C.COLOR_OK) + else: + print("Success! The validation passed for all hosts:") + for host in hosts: + self._display.display("* %s" % host, + color=C.COLOR_OK) + else: + print("Warning! The validation did not run on any host.") diff --git a/validations_libs/ansible.py b/validations_libs/ansible.py index 38b81fc..4b678c9 100644 --- a/validations_libs/ansible.py +++ b/validations_libs/ansible.py @@ -20,12 +20,10 @@ import six import sys import tempfile -import uuid import yaml from six.moves import configparser from validations_libs import constants -from validations_libs import utils LOG = logging.getLogger(__name__ + ".ansible") @@ -40,8 +38,9 @@ class Ansible(object): - def __init__(self): + def __init__(self, uuid=None): self.log = logging.getLogger(__name__ + ".Ansible") + self.uuid = uuid def _playbook_check(self, play, playbook_dir=None): """Check if playbook exist""" @@ -216,6 +215,9 @@ def _ansible_env_var(self, output_callback, ssh_user, workdir, connection, env['ANSIBLE_TRANSPORT'] = connection env['ANSIBLE_CACHE_PLUGIN_TIMEOUT'] = 7200 + if self.uuid: + env['ANSIBLE_UUID'] = self.uuid + if connection == 'local': env['ANSIBLE_PYTHON_INTERPRETER'] = sys.executable @@ -268,7 +270,7 @@ def run(self, playbook, inventory, workdir, playbook_dir=None, gathering_policy='smart', extra_env_variables=None, parallel_run=False, callback_whitelist=None, ansible_cfg=None, - ansible_timeout=30): + ansible_timeout=30, ansible_artifact_path=None): if not playbook_dir: playbook_dir = 
workdir @@ -284,9 +286,8 @@ def run(self, playbook, inventory, workdir, playbook_dir=None, ) ) - ansible_fact_path = self._creates_ansible_fact_dir() + # ansible_fact_path = self._creates_ansible_fact_dir() extravars = self._get_extra_vars(extra_vars) - callback_whitelist = self._callback_whitelist(callback_whitelist, output_callback) @@ -295,36 +296,34 @@ def run(self, playbook, inventory, workdir, playbook_dir=None, connection, gathering_policy, module_path, key, extra_env_variables, ansible_timeout, callback_whitelist) - - command_path = None - - with utils.TempDirs(dir_path=constants.VALIDATION_RUN_LOG_PATH, - chdir=False,) as ansible_artifact_path: - if 'ANSIBLE_CONFIG' not in env and not ansible_cfg: - ansible_cfg = os.path.join(ansible_artifact_path, 'ansible.cfg') - config = configparser.ConfigParser() - config.add_section('defaults') - config.set('defaults', 'internal_poll_interval', '0.05') - with open(ansible_cfg, 'w') as f: - config.write(f) - env['ANSIBLE_CONFIG'] = ansible_cfg - elif 'ANSIBLE_CONFIG' not in env and ansible_cfg: - env['ANSIBLE_CONFIG'] = ansible_cfg - - r_opts = { - 'private_data_dir': workdir, - 'project_dir': playbook_dir, - 'inventory': self._inventory(inventory, ansible_artifact_path), - 'envvars': self._encode_envvars(env=env), - 'playbook': playbook, - 'verbosity': verbosity, - 'quiet': quiet, - 'extravars': extravars, - 'fact_cache': ansible_fact_path, - 'fact_cache_type': 'jsonfile', - 'artifact_dir': ansible_artifact_path, - 'rotate_artifacts': 256 - } + if not ansible_artifact_path: + ansible_artifact_path = constants.VALIDATION_ANSIBLE_ARTIFACT_PATH + if 'ANSIBLE_CONFIG' not in env and not ansible_cfg: + ansible_cfg = os.path.join(ansible_artifact_path, 'ansible.cfg') + config = configparser.ConfigParser() + config.add_section('defaults') + config.set('defaults', 'internal_poll_interval', '0.05') + with open(ansible_cfg, 'w') as f: + config.write(f) + env['ANSIBLE_CONFIG'] = ansible_cfg + elif 'ANSIBLE_CONFIG' not in env and 
ansible_cfg: + env['ANSIBLE_CONFIG'] = ansible_cfg + + r_opts = { + 'private_data_dir': workdir, + 'project_dir': playbook_dir, + 'inventory': self._inventory(inventory, ansible_artifact_path), + 'envvars': self._encode_envvars(env=env), + 'playbook': playbook, + 'verbosity': verbosity, + 'quiet': quiet, + 'extravars': extravars, + 'fact_cache': ansible_artifact_path, + 'fact_cache_type': 'jsonfile', + 'artifact_dir': workdir, + 'rotate_artifacts': 256, + 'ident': '' + } if skip_tags: r_opts['skip_tags'] = skip_tags @@ -351,4 +350,4 @@ def run(self, playbook, inventory, workdir, playbook_dir=None, runner = ansible_runner.Runner(config=runner_config) status, rc = runner.run() - return runner.stdout.name, playbook, rc, status + return playbook, rc, status diff --git a/validations_libs/constants.py b/validations_libs/constants.py index be27233..765f61d 100644 --- a/validations_libs/constants.py +++ b/validations_libs/constants.py @@ -23,3 +23,5 @@ 'post'] VALIDATION_RUN_LOG_PATH = '/var/lib/validations/logs' +VALIDATIONS_LOG_BASEDIR = '/var/logs/validations/' +VALIDATION_ANSIBLE_ARTIFACT_PATH = '/var/lib/validations/artifacts/' diff --git a/validations_libs/run.py b/validations_libs/run.py index e7391f8..c175e1d 100644 --- a/validations_libs/run.py +++ b/validations_libs/run.py @@ -15,7 +15,6 @@ import logging import os -import six from validations_libs.ansible import Ansible as v_ansible from validations_libs import constants @@ -32,7 +31,7 @@ def __init__(self): def run_validations(self, playbook=[], inventory='localhost', group=None, extra_vars=None, validations_dir=None, validation_name=None, extra_env_vars=None, - ansible_cfg=None, quiet=True): + ansible_cfg=None, quiet=True, workdir=None): self.log = logging.getLogger(__name__ + ".run_validations") @@ -63,33 +62,35 @@ def run_validations(self, playbook=[], inventory='localhost', raise("Please, use '--group' argument instead of " "'--validation' to run validation(s) by their " "name(s)." 
- ) + ) else: raise RuntimeError("No validations found") - run_ansible = v_ansible() self.log.debug('Running the validations with Ansible') results = [] - with v_utils.TempDirs(chdir=False) as tmp: - for playbook in playbooks: - stdout_file, _playbook, _rc, _status = run_ansible.run( - workdir=tmp, - playbook=playbook, - playbook_dir=(validations_dir if - validations_dir else - constants.ANSIBLE_VALIDATION_DIR), - parallel_run=True, - inventory=inventory, - output_callback='validation_json', - quiet=quiet, - extra_vars=extra_vars, - extra_env_variables=extra_env_vars, - ansible_cfg=ansible_cfg, - gathering_policy='explicit') + for playbook in playbooks: + validation_uuid, artifacts_dir = v_utils.create_artifacts_dir( + prefix=os.path.basename(playbook)) + run_ansible = v_ansible(validation_uuid) + _playbook, _rc, _status = run_ansible.run( + workdir=artifacts_dir, + playbook=playbook, + playbook_dir=(validations_dir if + validations_dir else + constants.ANSIBLE_VALIDATION_DIR), + parallel_run=True, + inventory=inventory, + output_callback='validation_json', + quiet=quiet, + extra_vars=extra_vars, + extra_env_variables=extra_env_vars, + ansible_cfg=ansible_cfg, + gathering_policy='explicit', + ansible_artifact_path=artifacts_dir) results.append({'validation': { - 'playbook': _playbook, - 'rc_code': _rc, - 'status': _status, - 'stdout_file': stdout_file - }}) + 'playbook': _playbook, + 'rc_code': _rc, + 'status': _status, + 'validation_id': validation_uuid + }}) return results diff --git a/validations_libs/tests/fakes.py b/validations_libs/tests/fakes.py index d9708f9..481306c 100644 --- a/validations_libs/tests/fakes.py +++ b/validations_libs/tests/fakes.py @@ -13,8 +13,6 @@ # under the License. 
# -from unittest import mock - VALIDATIONS_LIST = [{ 'description': 'My Validation One Description', 'groups': ['prep', 'pre-deployment'], diff --git a/validations_libs/tests/test_ansible.py b/validations_libs/tests/test_ansible.py index 02a4009..89225f4 100644 --- a/validations_libs/tests/test_ansible.py +++ b/validations_libs/tests/test_ansible.py @@ -19,7 +19,6 @@ from ansible_runner import Runner from validations_libs.ansible import Ansible from validations_libs.tests import fakes -from validations_libs import utils class TestAnsible(TestCase): @@ -44,7 +43,7 @@ def test_check_no_playbook(self, mock_dump_artifact, mock_exists): ) mock_exists.assert_called_with('/tmp/non-existing.yaml') - @mock.patch('tempfile.mkdtemp', return_value='/tmp/') + @mock.patch('six.moves.builtins.open') @mock.patch('os.path.exists', return_value=True) @mock.patch('os.makedirs') @mock.patch.object( @@ -55,32 +54,29 @@ def test_check_no_playbook(self, mock_dump_artifact, mock_exists): ) @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, return_value="/foo/inventory.yaml") - @mock.patch('ansible_runner.runner.Runner.stdout', autospec=True, - return_value="/tmp/foo.yaml") - def test_ansible_runner_error(self, mock_stdout, mock_dump_artifact, + @mock.patch('ansible_runner.runner_config.RunnerConfig') + def test_ansible_runner_error(self, mock_config, mock_dump_artifact, mock_run, mock_mkdirs, mock_exists, - mock_mkdtemp): + mock_open): - stdout_file, _playbook, _rc, _status = self.run.run('existing.yaml', - 'localhost,', - '/tmp') + _playbook, _rc, _status = self.run.run('existing.yaml', + 'localhost,', + '/tmp') self.assertEquals((_playbook, _rc, _status), ('existing.yaml', 1, 'failed')) - @mock.patch('tempfile.mkdtemp', return_value='/tmp/') + @mock.patch('six.moves.builtins.open') @mock.patch('os.path.exists', return_value=True) @mock.patch('os.makedirs') @mock.patch.object(Runner, 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0) - ) + 
return_value=fakes.fake_ansible_runner_run_return(rc=0)) @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, return_value="/foo/inventory.yaml") - @mock.patch('ansible_runner.runner.Runner.stdout', autospec=True, - return_value="/tmp/foo.yaml") - def test_run_success_default(self, mock_stdout, mock_dump_artifact, + @mock.patch('ansible_runner.runner_config.RunnerConfig') + def test_run_success_default(self, mock_config, mock_dump_artifact, mock_run, mock_mkdirs, mock_exists, - mock_mkstemp): - stdout_file, _playbook, _rc, _status = self.run.run( + mock_open): + _playbook, _rc, _status = self.run.run( playbook='existing.yaml', inventory='localhost,', workdir='/tmp' @@ -88,21 +84,19 @@ def test_run_success_default(self, mock_stdout, mock_dump_artifact, self.assertEquals((_playbook, _rc, _status), ('existing.yaml', 0, 'successful')) - @mock.patch('tempfile.mkdtemp', return_value='/tmp/') + @mock.patch('six.moves.builtins.open') @mock.patch('os.path.exists', return_value=True) @mock.patch('os.makedirs') @mock.patch.object(Runner, 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0) - ) + return_value=fakes.fake_ansible_runner_run_return(rc=0)) @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, return_value="/foo/inventory.yaml") - @mock.patch('ansible_runner.runner.Runner.stdout', autospec=True, - return_value="/tmp/foo.yaml") - def test_run_success_gathering_policy(self, mock_stdout, + @mock.patch('ansible_runner.runner_config.RunnerConfig') + def test_run_success_gathering_policy(self, mock_config, mock_dump_artifact, mock_run, mock_mkdirs, mock_exists, - mock_mkstemp): - stdout_file, _playbook, _rc, _status = self.run.run( + mock_open): + _playbook, _rc, _status = self.run.run( playbook='existing.yaml', inventory='localhost,', workdir='/tmp', @@ -112,21 +106,19 @@ def test_run_success_gathering_policy(self, mock_stdout, self.assertEquals((_playbook, _rc, _status), ('existing.yaml', 0, 'successful')) - 
@mock.patch('tempfile.mkdtemp', return_value='/tmp/') @mock.patch('os.path.exists', return_value=True) @mock.patch('os.makedirs') @mock.patch.object(Runner, 'run', - return_value=fakes.fake_ansible_runner_run_return(rc=0) - ) + return_value=fakes.fake_ansible_runner_run_return(rc=0)) @mock.patch('ansible_runner.utils.dump_artifact', autospec=True, return_value="/foo/inventory.yaml") - @mock.patch('ansible_runner.runner.Runner.stdout', autospec=True, - return_value="/tmp/foo.yaml") - def test_run_success_local(self, mock_stdout, + @mock.patch('six.moves.builtins.open') + @mock.patch('ansible_runner.runner_config.RunnerConfig') + def test_run_success_local(self, mock_config, mock_open, mock_dump_artifact, mock_run, - mock_mkdirs, mock_exists, - mock_mkstemp): - stdout_file, _playbook, _rc, _status = self.run.run( + mock_mkdirs, mock_exists + ): + _playbook, _rc, _status = self.run.run( playbook='existing.yaml', inventory='localhost,', workdir='/tmp', diff --git a/validations_libs/tests/test_validations_run.py b/validations_libs/tests/test_validations_run.py index 847d24f..02ef9bc 100644 --- a/validations_libs/tests/test_validations_run.py +++ b/validations_libs/tests/test_validations_run.py @@ -27,7 +27,9 @@ def setUp(self): @mock.patch('validations_libs.utils.parse_all_validations_on_disk') @mock.patch('validations_libs.ansible.Ansible.run') - def test_validation_run_success(self, mock_ansible_run, + @mock.patch('validations_libs.utils.create_artifacts_dir', + return_value=('1234', '/tmp/')) + def test_validation_run_success(self, mock_tmp, mock_ansible_run, mock_validation_dir): mock_validation_dir.return_value = [{ 'description': 'My Validation One Description', @@ -35,14 +37,17 @@ def test_validation_run_success(self, mock_ansible_run, 'id': 'foo', 'name': 'My Validition One Name', 'parameters': {}}] - mock_ansible_run.return_value = ('/tmp/validation/stdout.log', - 'foo.yaml', 0, 'successful') + mock_ansible_run.return_value = ('foo.yaml', 0, 'successful') 
expected_run_return = [ {'validation': {'playbook': 'foo.yaml', 'rc_code': 0, 'status': 'successful', - 'stdout_file': '/tmp/validation/stdout.log'}}] + 'validation_id': '1234'}}, + {'validation': {'playbook': 'foo.yaml', + 'rc_code': 0, + 'status': 'successful', + 'validation_id': '1234'}}] playbook = ['fake.yaml'] inventory = 'tmp/inventory.yaml' @@ -55,7 +60,9 @@ def test_validation_run_success(self, mock_ansible_run, @mock.patch('validations_libs.utils.parse_all_validations_on_disk') @mock.patch('validations_libs.ansible.Ansible.run') - def test_validation_run_failed(self, mock_ansible_run, + @mock.patch('validations_libs.utils.create_artifacts_dir', + return_value=('1234', '/tmp/')) + def test_validation_run_failed(self, mock_tmp, mock_ansible_run, mock_validation_dir): mock_validation_dir.return_value = [{ 'description': 'My Validation One Description', @@ -63,14 +70,17 @@ def test_validation_run_failed(self, mock_ansible_run, 'id': 'foo', 'name': 'My Validition One Name', 'parameters': {}}] - mock_ansible_run.return_value = ('/tmp/validation/stdout.log', - 'foo.yaml', 0, 'failed') + mock_ansible_run.return_value = ('foo.yaml', 0, 'failed') expected_run_return = [ {'validation': {'playbook': 'foo.yaml', 'rc_code': 0, 'status': 'failed', - 'stdout_file': '/tmp/validation/stdout.log'}}] + 'validation_id': '1234'}}, + {'validation': {'playbook': 'foo.yaml', + 'rc_code': 0, + 'status': 'failed', + 'validation_id': '1234'}}] playbook = ['fake.yaml'] inventory = 'tmp/inventory.yaml' diff --git a/validations_libs/utils.py b/validations_libs/utils.py index 9b39dc0..6723c8d 100644 --- a/validations_libs/utils.py +++ b/validations_libs/utils.py @@ -12,8 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. 
# +import datetime import glob -import json import logging import os import six @@ -22,6 +22,7 @@ import yaml from validations_libs import constants +from uuid import uuid4 RED = "\033[1;31m" GREEN = "\033[0;32m" @@ -125,6 +126,24 @@ def clean(self): LOG.info("Temporary directory [ %s ] cleaned up" % self.dir) +def current_time(): + return '%sZ' % datetime.datetime.utcnow().isoformat() + + +def create_artifacts_dir(dir_path=None, prefix=None): + dir_path = (dir_path if dir_path else + constants.VALIDATION_ANSIBLE_ARTIFACT_PATH) + validation_uuid = str(uuid4()) + log_dir = "{}/{}_{}_{}".format(dir_path, validation_uuid, + (prefix if prefix else ''), current_time()) + try: + os.makedirs(log_dir) + return validation_uuid, log_dir + except OSError: + LOG.exception("Error while creating Ansible artifacts log file." + "Please check the access rights for {}".format(log_dir)) + + def parse_all_validations_on_disk(path, groups=None): results = [] validations_abspath = glob.glob("{path}/*.yaml".format(path=path)) @@ -224,11 +243,11 @@ def get_validation_group_name_list(): return results -def get_new_validations_logs_on_disk(): +def get_new_validations_logs_on_disk(validations_logs_dir): """Return a list of new log execution filenames """ files = [] - for root, dirs, filenames in os.walk(constants.VALIDATIONS_LOG_BASEDIR): + for root, dirs, filenames in os.walk(validations_logs_dir): files = [ f for f in filenames if not f.startswith('processed') and os.path.splitext(f)[1] == '.json'