Skip to content

Commit a2eeb24

Browse files
jeremyederclaude
andcommitted
feat: add report header with repository metadata
Add prominent report header showing repository context and assessment metadata to all report formats (HTML, Markdown, JSON). Changes: - Create AssessmentMetadata model to capture execution context - AgentReady version from package metadata - Assessment timestamp (ISO 8601 and human-readable) - Executed by (username@hostname) - CLI command used - Working directory - Update Assessment model with optional metadata field - Implement metadata collection in Scanner service - Get version from importlib.metadata - Reconstruct command from sys.argv - Capture user and hostname from environment - Update all reporters to display metadata - HTML: Two-column header (repo info + meta info) - Markdown: Prominent header with all metadata fields - JSON: Metadata object at top level - Add comprehensive unit tests (4 new tests, all passing) - All 37 tests passing (34 unit + 3 integration) Acceptance criteria met: ✅ User can identify repository assessed (name, path, branch, commit) ✅ Timestamp shows when assessment was run ✅ Git context visible in all reports ✅ AgentReady version tracked for reproducibility ✅ Execution context captured (user@host, command, cwd) 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
1 parent 90b74b8 commit a2eeb24

File tree

9 files changed

+338
-18
lines changed

9 files changed

+338
-18
lines changed

src/agentready/cli/main.py

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,12 @@
66

77
import click
88

9+
try:
10+
from importlib.metadata import version as get_version
11+
except ImportError:
12+
# Python 3.7 compatibility
13+
from importlib_metadata import version as get_version
14+
915
from ..assessors.code_quality import (
1016
CyclomaticComplexityAssessor,
1117
TypeAnnotationsAssessor,
@@ -29,6 +35,19 @@
2935
from .bootstrap import bootstrap
3036
from .demo import demo
3137
from .learn import learn
38+
from .repomix import repomix_generate
39+
40+
41+
def get_agentready_version() -> str:
42+
"""Get AgentReady version from package metadata.
43+
44+
Returns:
45+
Version string (e.g., "1.0.0") or "unknown" if not installed
46+
"""
47+
try:
48+
return get_version("agentready")
49+
except Exception:
50+
return "unknown"
3251

3352

3453
def create_all_assessors():
@@ -152,7 +171,8 @@ def run_assessment(repository_path, verbose, output_dir, config_path):
152171

153172
# Run scan
154173
try:
155-
assessment = scanner.scan(assessors, verbose=verbose)
174+
version = get_agentready_version()
175+
assessment = scanner.scan(assessors, verbose=verbose, version=version)
156176
except Exception as e:
157177
click.echo(f"Error during assessment: {str(e)}", err=True)
158178
if verbose:
@@ -286,11 +306,13 @@ def generate_config():
286306
cli.add_command(bootstrap)
287307
cli.add_command(demo)
288308
cli.add_command(learn)
309+
cli.add_command(repomix_generate)
289310

290311

291312
def show_version():
292313
"""Show version information."""
293-
click.echo("AgentReady Repository Scorer v1.0.0")
314+
version = get_agentready_version()
315+
click.echo(f"AgentReady Repository Scorer v{version}")
294316
click.echo("Research Report: bundled")
295317

296318

src/agentready/models/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,10 +6,12 @@
66
from agentready.models.config import Config
77
from agentready.models.discovered_skill import DiscoveredSkill
88
from agentready.models.finding import Finding
9+
from agentready.models.metadata import AssessmentMetadata
910
from agentready.models.repository import Repository
1011

1112
__all__ = [
1213
"Assessment",
14+
"AssessmentMetadata",
1315
"Attribute",
1416
"Citation",
1517
"Config",

src/agentready/models/assessment.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
from .config import Config
77
from .discovered_skill import DiscoveredSkill
88
from .finding import Finding
9+
from .metadata import AssessmentMetadata
910
from .repository import Repository
1011

1112

@@ -25,6 +26,7 @@ class Assessment:
2526
config: Custom configuration used (if any)
2627
duration_seconds: Time taken for assessment
2728
discovered_skills: Patterns extracted from this assessment (optional)
29+
metadata: Execution context (version, user, command, timestamp)
2830
"""
2931

3032
repository: Repository
@@ -38,6 +40,7 @@ class Assessment:
3840
config: Config | None
3941
duration_seconds: float
4042
discovered_skills: list[DiscoveredSkill] = field(default_factory=list)
43+
metadata: AssessmentMetadata | None = None
4144

4245
VALID_LEVELS = {"Platinum", "Gold", "Silver", "Bronze", "Needs Improvement"}
4346

@@ -70,6 +73,7 @@ def __post_init__(self):
7073
def to_dict(self) -> dict:
7174
"""Convert to dictionary for JSON serialization."""
7275
return {
76+
"metadata": self.metadata.to_dict() if self.metadata else None,
7377
"repository": self.repository.to_dict(),
7478
"timestamp": self.timestamp.isoformat(),
7579
"overall_score": self.overall_score,

src/agentready/models/metadata.py

Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
"""Assessment metadata model for execution context and reproducibility."""
2+
3+
import getpass
4+
import os
5+
import socket
6+
from dataclasses import dataclass
7+
from datetime import datetime
8+
9+
10+
@dataclass
11+
class AssessmentMetadata:
12+
"""Metadata about the assessment execution context.
13+
14+
Captures who ran the assessment, when, with what version, and what command.
15+
Critical for reproducibility, debugging, and multi-repository workflows.
16+
17+
Attributes:
18+
agentready_version: Version of AgentReady used (e.g., "1.0.0")
19+
assessment_timestamp: ISO 8601 timestamp of when assessment started
20+
assessment_timestamp_human: Human-readable timestamp (e.g., "November 21, 2025 at 2:11 AM")
21+
executed_by: Username and hostname (e.g., "jeder@macbook")
22+
command: Full CLI command executed (e.g., "agentready assess . --verbose")
23+
working_directory: Absolute path of current working directory when executed
24+
"""
25+
26+
agentready_version: str
27+
assessment_timestamp: str # ISO 8601 format
28+
assessment_timestamp_human: str
29+
executed_by: str
30+
command: str
31+
working_directory: str
32+
33+
def to_dict(self) -> dict:
34+
"""Convert to dictionary for JSON serialization."""
35+
return {
36+
"agentready_version": self.agentready_version,
37+
"assessment_timestamp": self.assessment_timestamp,
38+
"assessment_timestamp_human": self.assessment_timestamp_human,
39+
"executed_by": self.executed_by,
40+
"command": self.command,
41+
"working_directory": self.working_directory,
42+
}
43+
44+
@classmethod
45+
def create(
46+
cls, version: str, timestamp: datetime, command: str
47+
) -> "AssessmentMetadata":
48+
"""Create metadata from execution context.
49+
50+
Args:
51+
version: AgentReady version string
52+
timestamp: Assessment start time
53+
command: CLI command executed
54+
55+
Returns:
56+
AssessmentMetadata instance
57+
"""
58+
# Get username and hostname
59+
try:
60+
username = getpass.getuser()
61+
except Exception:
62+
username = "unknown"
63+
64+
try:
65+
hostname = socket.gethostname().split(".")[0] # Short hostname
66+
except Exception:
67+
hostname = "unknown"
68+
69+
executed_by = f"{username}@{hostname}"
70+
71+
# Format timestamps
72+
iso_timestamp = timestamp.isoformat()
73+
human_timestamp = timestamp.strftime("%B %d, %Y at %-I:%M %p")
74+
75+
# Get current working directory
76+
try:
77+
working_dir = os.getcwd()
78+
except Exception:
79+
working_dir = "unknown"
80+
81+
return cls(
82+
agentready_version=version,
83+
assessment_timestamp=iso_timestamp,
84+
assessment_timestamp_human=human_timestamp,
85+
executed_by=executed_by,
86+
command=command,
87+
working_directory=working_dir,
88+
)

src/agentready/reporters/html.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,7 @@ def generate(self, assessment: Assessment, output_path: Path) -> Path:
5757
"findings": assessment.findings,
5858
"duration_seconds": assessment.duration_seconds,
5959
"config": assessment.config,
60+
"metadata": assessment.metadata,
6061
# Embed assessment JSON for JavaScript
6162
"assessment_json": json.dumps(assessment.to_dict()),
6263
}

src/agentready/reporters/markdown.py

Lines changed: 24 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -62,17 +62,32 @@ def generate(self, assessment: Assessment, output_path: Path) -> Path:
6262
return output_path
6363

6464
def _generate_header(self, assessment: Assessment) -> str:
65-
"""Generate report header with repository info."""
66-
# Get git remote URL if available, otherwise use repo name
67-
repo_display = assessment.repository.url if assessment.repository.url else assessment.repository.name
68-
69-
return f"""# 🤖 AgentReady Assessment Report
65+
"""Generate report header with repository info and metadata."""
66+
header = "# 🤖 AgentReady Assessment Report\n\n"
67+
68+
# Repository information
69+
header += f"**Repository**: {assessment.repository.name}\n"
70+
header += f"**Path**: `{assessment.repository.path}`\n"
71+
header += f"**Branch**: `{assessment.repository.branch}` | **Commit**: `{assessment.repository.commit_hash[:8]}`\n"
72+
73+
# Assessment metadata (if available)
74+
if assessment.metadata:
75+
header += (
76+
f"**Assessed**: {assessment.metadata.assessment_timestamp_human}\n"
77+
)
78+
header += (
79+
f"**AgentReady Version**: {assessment.metadata.agentready_version}\n"
80+
)
81+
header += f"**Run by**: {assessment.metadata.executed_by}\n"
82+
else:
83+
# Fallback to timestamp if metadata not available
84+
header += (
85+
f"**Assessed**: {assessment.timestamp.strftime('%B %d, %Y at %H:%M')}\n"
86+
)
7087

71-
| Repository | Branch | Commit | Score | Level | Date |
72-
|------------|--------|--------|-------|-------|------|
73-
| **{repo_display}** | {assessment.repository.branch} | `{assessment.repository.commit_hash[:8]}` | **{assessment.overall_score:.1f}/100** | **{assessment.certification_level}** | {assessment.timestamp.strftime('%Y-%m-%d %H:%M')} |
88+
header += "\n---"
7489

75-
---"""
90+
return header
7691

7792
def _generate_summary(self, assessment: Assessment) -> str:
7893
"""Generate summary section with key metrics."""

src/agentready/services/scanner.py

Lines changed: 24 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
"""Scanner service orchestrating the assessment workflow."""
22

3+
import sys
34
import time
45
from datetime import datetime
56
from pathlib import Path
@@ -9,6 +10,7 @@
910
from ..models.assessment import Assessment
1011
from ..models.config import Config
1112
from ..models.finding import Finding
13+
from ..models.metadata import AssessmentMetadata
1214
from ..models.repository import Repository
1315
from .language_detector import LanguageDetector
1416
from .scorer import Scorer
@@ -63,12 +65,20 @@ def _validate_repository(self):
6365
if not (self.repository_path / ".git").exists():
6466
raise ValueError(f"Not a git repository: {self.repository_path}")
6567

66-
def scan(self, assessors: list, verbose: bool = False) -> Assessment:
68+
def scan(
69+
self,
70+
assessors: list,
71+
verbose: bool = False,
72+
version: str = "unknown",
73+
command: str | None = None,
74+
) -> Assessment:
6775
"""Execute full assessment workflow.
6876
6977
Args:
7078
assessors: List of assessor instances to run
7179
verbose: Enable detailed progress logging
80+
version: AgentReady version string
81+
command: CLI command executed (reconstructed from sys.argv if None)
7282
7383
Returns:
7484
Complete Assessment with findings and scores
@@ -81,6 +91,7 @@ def scan(self, assessors: list, verbose: bool = False) -> Assessment:
8191
5. Return Assessment
8292
"""
8393
start_time = time.time()
94+
timestamp = datetime.now()
8495

8596
if verbose:
8697
print(f"Scanning repository: {self.repository_path.name}")
@@ -107,6 +118,15 @@ def scan(self, assessors: list, verbose: bool = False) -> Assessment:
107118

108119
duration = time.time() - start_time
109120

121+
# Create metadata
122+
if command is None:
123+
# Reconstruct command from sys.argv
124+
command = " ".join(sys.argv)
125+
126+
metadata = AssessmentMetadata.create(
127+
version=version, timestamp=timestamp, command=command
128+
)
129+
110130
if verbose:
111131
print(f"\nAssessment complete in {duration:.1f}s")
112132
print(f"Overall Score: {overall_score}/100 ({certification_level})")
@@ -116,7 +136,7 @@ def scan(self, assessors: list, verbose: bool = False) -> Assessment:
116136

117137
return Assessment(
118138
repository=repository,
119-
timestamp=datetime.now(),
139+
timestamp=timestamp,
120140
overall_score=overall_score,
121141
certification_level=certification_level,
122142
attributes_assessed=assessed,
@@ -125,6 +145,7 @@ def scan(self, assessors: list, verbose: bool = False) -> Assessment:
125145
findings=findings,
126146
config=self.config,
127147
duration_seconds=round(duration, 1),
148+
metadata=metadata,
128149
)
129150

130151
def _build_repository_model(self, verbose: bool = False) -> Repository:
@@ -202,7 +223,7 @@ def _execute_assessor(
202223
)
203224
except Exception as e:
204225
if verbose:
205-
print(f"error (applicability check failed)")
226+
print("error (applicability check failed)")
206227
return Finding.error(
207228
assessor.attribute, reason=f"Applicability check failed: {str(e)}"
208229
)

0 commit comments

Comments
 (0)