
Commit e7b96e9

[llvm-advisor] add core API endpoints for compilation data
- implement units API for compilation unit listing and details
- add file content API with multiple format support
- add artifacts API for build output analysis
1 parent af475f3
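
As a quick orientation before the diff, here is a minimal client sketch exercising the artifacts endpoints added below. The host and port are assumptions (they depend on how the advisor server is launched), and only the /api/artifacts routes are confirmed by this diff; the units and file-content routes named in the commit message are not shown here. The exact envelope that APIResponse.success wraps around each payload is also not part of this diff, so the sketch just prints the raw JSON.

import json
import urllib.request

# Assumed base URL; the real host/port depend on how the advisor server is run.
BASE = "http://localhost:8000"

def get_json(path: str):
    # Fetch and decode one JSON payload from the advisor API.
    with urllib.request.urlopen(f"{BASE}{path}") as resp:
        return json.loads(resp.read().decode("utf-8"))

# List every artifact type found across all compilation units.
print(get_json("/api/artifacts"))

# Aggregate one file type (here, diagnostics) across all units.
print(get_json("/api/artifacts/diagnostics"))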

File tree

5 files changed: +1170 −0 lines changed

Lines changed: 153 additions & 0 deletions
@@ -0,0 +1,153 @@
# ===----------------------------------------------------------------------===//
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# ===----------------------------------------------------------------------===//

import os
import sys
from typing import Dict, Any
from pathlib import Path

# Add parent directories to path for imports
current_dir = Path(__file__).parent
tools_dir = current_dir.parent.parent
sys.path.insert(0, str(tools_dir))

from .base import BaseEndpoint, APIResponse
from common.models import FileType


class ArtifactsEndpoint(BaseEndpoint):
    """GET /api/artifacts/{file_type} - Get aggregated data for a file type across all units"""

    def handle(self, path_parts: list, query_params: Dict[str, list]) -> Dict[str, Any]:
        if len(path_parts) < 3:
            return APIResponse.invalid_request("File type required")

        file_type_str = path_parts[2]

        # Validate file type
        try:
            file_type = FileType(file_type_str)
        except ValueError:
            return APIResponse.invalid_request(f"Invalid file type: {file_type_str}")

        parsed_data = self.get_parsed_data()

        # Aggregate data from all units for this file type
        aggregated_data = {
            "file_type": file_type.value,
            "units": {},
            "global_summary": {
                "total_files": 0,
                "total_errors": 0,
                "units_with_type": 0,
            },
        }

        for unit_name, unit_data in parsed_data.items():
            if file_type in unit_data:
                unit_files = []
                error_count = 0
                unit_summary_stats = {}

                for parsed_file in unit_data[file_type]:
                    has_error = "error" in parsed_file.metadata
                    if has_error:
                        error_count += 1

                    file_summary = {
                        "file_name": os.path.basename(parsed_file.file_path),
                        "file_path": parsed_file.file_path,
                        "file_size_bytes": parsed_file.metadata.get("file_size", 0),
                        "has_error": has_error,
                        "metadata": parsed_file.metadata,
                    }

                    # Include relevant summary data based on file type
                    if (
                        isinstance(parsed_file.data, dict)
                        and "summary" in parsed_file.data
                    ):
                        file_summary["summary"] = parsed_file.data["summary"]

                        # Aggregate numeric summary stats
                        for key, value in parsed_file.data["summary"].items():
                            if isinstance(value, (int, float)):
                                unit_summary_stats[key] = (
                                    unit_summary_stats.get(key, 0) + value
                                )

                    elif isinstance(parsed_file.data, list):
                        file_summary["item_count"] = len(parsed_file.data)
                        unit_summary_stats["total_items"] = unit_summary_stats.get(
                            "total_items", 0
                        ) + len(parsed_file.data)

                    unit_files.append(file_summary)

                aggregated_data["units"][unit_name] = {
                    "files": unit_files,
                    "count": len(unit_files),
                    "errors": error_count,
                    "summary_stats": unit_summary_stats,
                }

                aggregated_data["global_summary"]["total_files"] += len(unit_files)
                aggregated_data["global_summary"]["total_errors"] += error_count
                aggregated_data["global_summary"]["units_with_type"] += 1

        # Add file type specific aggregations
        if file_type_str == "diagnostics":
            aggregated_data["global_summary"]["total_diagnostics"] = sum(
                unit["summary_stats"].get("total_diagnostics", 0)
                for unit in aggregated_data["units"].values()
            )
        elif file_type_str == "remarks":
            aggregated_data["global_summary"]["total_remarks"] = sum(
                unit["summary_stats"].get("total_remarks", 0)
                for unit in aggregated_data["units"].values()
            )
        elif file_type_str in ["time-trace", "runtime-trace"]:
            aggregated_data["global_summary"]["total_events"] = sum(
                unit["summary_stats"].get("total_events", 0)
                for unit in aggregated_data["units"].values()
            )

        return APIResponse.success(aggregated_data)
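
Note: the path_parts[2] lookup in the handler above assumes the server's router splits the request path on "/", so that GET /api/artifacts/remarks arrives as ["api", "artifacts", "remarks"]. That dispatch step is not part of this diff; a sketch of the assumed behavior:

path = "/api/artifacts/remarks"           # incoming request path
path_parts = path.strip("/").split("/")   # -> ["api", "artifacts", "remarks"]
assert len(path_parts) >= 3               # the guard checked by the handler above
file_type_str = path_parts[2]             # "remarks"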

class ArtifactTypesEndpoint(BaseEndpoint):
    """GET /api/artifacts - List all available artifact types with counts"""

    def handle(self, path_parts: list, query_params: Dict[str, list]) -> Dict[str, Any]:
        parsed_data = self.get_parsed_data()

        # Count files by type across all units
        type_counts = {}

        for unit_name, unit_data in parsed_data.items():
            for file_type, parsed_files in unit_data.items():
                type_name = file_type.value
                if type_name not in type_counts:
                    type_counts[type_name] = {
                        "total_files": 0,
                        "total_errors": 0,
                        "units": [],
                    }

                error_count = sum(1 for f in parsed_files if "error" in f.metadata)
                type_counts[type_name]["total_files"] += len(parsed_files)
                type_counts[type_name]["total_errors"] += error_count
                type_counts[type_name]["units"].append(unit_name)

        response_data = {
            "supported_types": [ft.value for ft in FileType],
            "available_types": type_counts,
            "total_types_found": len(type_counts),
        }

        return APIResponse.success(response_data)
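
For reference, a payload built by ArtifactTypesEndpoint above would take roughly the following shape. The unit names and counts are invented for illustration, the full FileType enum is not shown in this diff, and APIResponse.success may add an envelope around this dict:

response_data = {
    # Every FileType value; only the four types referenced above are confirmed here.
    "supported_types": ["diagnostics", "remarks", "time-trace", "runtime-trace"],
    "available_types": {
        "remarks": {
            "total_files": 12,   # remark files found across all units
            "total_errors": 1,   # files whose metadata carried an "error" key
            "units": ["unit_a", "unit_b"],
        },
    },
    "total_types_found": 1,
}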
