Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
102 changes: 101 additions & 1 deletion tests/workflows/test_iteration_status_emails_workflow.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,14 @@ def workflow_content(workflow_raw):
return yaml.safe_load(workflow_raw)


@pytest.fixture(scope='module')
def jobs(workflow_content):
    """
    Module-scoped fixture for jobs configuration.

    Returns the workflow's ``jobs`` mapping, or an empty dict when the
    workflow has no ``jobs`` key.
    """
    # NOTE(review): this fixture does not appear to be referenced by any test
    # in the visible portion of this module — either adopt it in the
    # edge-case tests or remove it. TODO confirm against the full module.
    return workflow_content.get('jobs', {})
Comment on lines +57 to +62
Copy link

Copilot AI Feb 8, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The new jobs fixture is not referenced anywhere in this module. Either remove it to avoid dead test code, or switch the edge-case tests to accept jobs and use it (matching the pattern in other workflow test modules).

Copilot uses AI. Check for mistakes.


@pytest.fixture(scope='module')
def dashboard_path():
"""
Expand Down Expand Up @@ -292,7 +300,7 @@ def test_setup_documentation_has_secrets_section(self):
assert 'SMTP_USERNAME' in content or 'email' in content.lower()


class TestSecurityBestPractices:
class TestWorkflowSecurity:
"""Tests for security considerations."""

def test_no_hardcoded_credentials(self, workflow_raw):
Expand Down Expand Up @@ -331,5 +339,97 @@ def test_uses_secure_connection(self, workflow_content):
assert with_config['secure'] is True or with_config['secure'] == 'true'


class TestEdgeCases:
"""Test edge cases and error handling in the workflow."""

def test_no_tabs_in_yaml(self, workflow_raw):
"""Test that workflow uses spaces, not tabs."""
assert '\t' not in workflow_raw, "YAML should use spaces, not tabs"

def test_consistent_indentation(self, workflow_raw):
"""Test that indentation is consistent (a multiple of 2 spaces)."""
lines = workflow_raw.split('\n')
for i, line in enumerate(lines, 1):
if line.strip() and not line.strip().startswith('#'):
leading_spaces = len(line) - len(line.lstrip(' '))
if leading_spaces > 0:
assert leading_spaces % 2 == 0, \
f"Line {i} has indentation that is not a multiple of 2 spaces"

def test_no_duplicate_job_names(self, workflow_content):
"""Test that there are no duplicate job names."""
job_names = list(workflow_content['jobs'].keys())
Copy link

Copilot AI Feb 8, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

test_no_duplicate_job_names indexes workflow_content['jobs'], which will raise a KeyError (test error) if the YAML is missing the jobs key. Prefer asserting jobs exists first (or using the jobs fixture / workflow_content.get('jobs')) so the test fails with a clear assertion message instead of erroring.

Suggested change
job_names = list(workflow_content['jobs'].keys())
jobs = workflow_content.get('jobs')
assert jobs is not None, "Workflow must define jobs"
job_names = list(jobs.keys())

Copilot uses AI. Check for mistakes.
assert len(job_names) == len(set(job_names)), "Duplicate job names found"

def test_no_duplicate_step_ids(self, workflow_content):
"""Test that step IDs are unique within each job."""
jobs = workflow_content.get('jobs', {})
for job_name, job_config in jobs.items():
steps = job_config.get('steps', [])
step_ids = [s.get('id') for s in steps if 'id' in s]
assert len(step_ids) == len(set(step_ids)), \
f"Duplicate step IDs in job '{job_name}'"
Comment on lines +364 to +371
Copy link

Copilot AI Feb 8, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

test_no_duplicate_step_ids assumes every step is a dict; if the YAML contains an empty step (None) or a non-mapping value, if 'id' in s will raise. Consider asserting isinstance(step, dict) (or filtering to dicts) before inspecting keys so the test fails cleanly on malformed steps.

Copilot uses AI. Check for mistakes.

def test_no_empty_steps(self, workflow_content):
"""Test that there are no empty steps."""
jobs = workflow_content.get('jobs', {})
for job_name, job_config in jobs.items():
steps = job_config.get('steps', [])
for i, step in enumerate(steps):
assert len(step) > 0, f"Step {i} in job '{job_name}' is empty"
assert 'uses' in step or 'run' in step, \
f"Step {i} in job '{job_name}' missing 'uses' or 'run'"

def test_yaml_is_parseable(self, workflow_content):
"""Test that YAML parses into a non-empty workflow with required keys."""
assert workflow_content, "YAML should parse into a non-empty workflow configuration"
# Note: 'on' can be parsed as True in YAML
required_keys = {"name", "jobs"}
missing = required_keys.difference(workflow_content.keys())
assert not missing, f"Workflow is missing required top-level keys: {', '.join(sorted(missing))}"
# Verify trigger configuration exists (can be 'on' or True)
has_triggers = 'on' in workflow_content or True in workflow_content.keys()
Copy link

Copilot AI Feb 8, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

has_triggers = 'on' in workflow_content or True in workflow_content.keys() can produce false positives because True == 1 in Python (so a numeric key 1 would satisfy it). Use the same approach as the other trigger tests (workflow_content.get('on') or workflow_content.get(True)) or check boolean keys with identity (e.g., any(k is True for k in keys)).

Suggested change
has_triggers = 'on' in workflow_content or True in workflow_content.keys()
has_triggers = (
'on' in workflow_content
or any(k is True for k in workflow_content.keys())
)

Copilot uses AI. Check for mistakes.
assert has_triggers, "Workflow must have trigger configuration ('on' key)"

def test_workflow_handles_missing_dashboard(self, workflow_content):
"""Test that parse step checks for dashboard file and has proper error handling."""
# Verify the parse step checks if dashboard file exists
jobs = workflow_content.get('jobs', {})
parse_job = jobs.get('parse-and-notify', {})
steps = parse_job.get('steps', [])

# Find the parse dashboard step by id or name
parse_step = None
for step in steps:
# Check by id first (more reliable), then by name
if step.get('id') == 'parse':
parse_step = step
break
if 'Parse dashboard' in step.get('name', ''):
parse_step = step
break

assert parse_step is not None, "Workflow should have a dashboard parsing step"

# Verify the parse step script checks for file existence
run_script = parse_step.get('run', '')
assert '! -f' in run_script, \
"Parse step should check if dashboard file exists using '! -f' pattern"
assert 'exit 1' in run_script, \
"Parse step should exit with error code when dashboard file is missing"

# Verify there's an error handling step
error_step = None
for step in steps:
if step.get('if') == 'failure()':
error_step = step
break

assert error_step is not None, "Workflow should have a failure handler step"
error_run = error_step.get('run', '').lower()
assert 'dashboard' in error_run or 'dashboard file' in error_run, \
"Error handler should mention dashboard file issues"


if __name__ == '__main__':
    # Allow running this test module directly (outside a pytest invocation),
    # with verbose per-test output.
    pytest.main([__file__, '-v'])
Loading