diff --git a/codegen-examples/examples/codebase_analysis_api/ANALYSIS_VIEW_MOCKUP.md b/codegen-examples/examples/codebase_analysis_api/ANALYSIS_VIEW_MOCKUP.md new file mode 100644 index 000000000..387a31b4f --- /dev/null +++ b/codegen-examples/examples/codebase_analysis_api/ANALYSIS_VIEW_MOCKUP.md @@ -0,0 +1,414 @@ +# Codebase Analysis View Mockup + +This document demonstrates how the analysis results from the Codebase Analysis API would be presented to users in different formats. + +## 1. Overall Statistics + +``` +------ Overall Statistics ------ +Total Files: 324 + - Python: 187 (57.7%) + - JavaScript: 98 (30.2%) + - TypeScript: 39 (12.0%) +Total Lines of Code: 45,892 +Total Classes: 156 +Total Functions: 843 +Total Symbols: 2,187 +Average Cyclomatic Complexity: 4.8 +``` + +## 2. Important Codefiles and Entry Points + +``` +------ Important Codefiles and Entry Points ------ +• src/core/processor.py - class Processor - method process_data (complexity: 12) +• src/app.py - function main (complexity: 8) +• src/server/api.py - class APIServer - method start (complexity: 6) +• src/cli/commands.py - function run_command (complexity: 15) +``` + +## 3. Project Structure + +``` +my-project/ +├── src/ (42 files) +│ ├── core/ (17 files) +│ │ ├── processor.py (3 classes, 17 functions, 488 lines) +│ │ │ ├── class Processor +│ │ │ │ ├── method __init__ (lines 7-26) +│ │ │ │ ├── method process_data (lines 32-78) +│ │ │ │ └── method validate_input (lines 80-95) +│ │ │ └── function initialize_processor (lines 98-120) +│ │ └── transformer.py (2 classes, 8 functions, 256 lines) +│ │ ├── class Transformer +│ │ │ ├── method __init__ (lines 5-18) +│ │ │ └── method transform_output (lines 20-45) +│ │ └── function apply_transformation (lines 48-72) +│ └── utils/ (25 files) +│ └── validator.py (1 class, 5 functions, 142 lines) +│ ├── class Validator +│ │ ├── method __init__ (lines 8-15) +│ │ └── method validate (lines 17-42) +│ └── function is_valid (lines 45-60) +└── tests/ (18 files) + └── test_processor.py (1 class, 6 functions, 189 lines) + └── class TestProcessor + ├── method setUp (lines 10-22) + ├── method test_process_data (lines 24-56) + └── method test_validate_input (lines 58-75) +``` + +## 4. Code Quality Issues + +``` +------ Code Quality Issues ------ + +Unused Imports: 134 (7.1% of 1,876 total imports) +• src/core/processor.py - import os (line 3) +• src/utils/helpers.py - import json (line 5) +• src/api/models.py - import datetime (line 8) +... + +Unused Functions: 45 (5.3% of 843 total functions) +• src/utils/validator.py - function validate_email (line 78) +• src/core/transformer.py - function transform_legacy_format (line 112) +• src/data/converters.py - function convert_to_csv (line 45) +... + +Unused Classes: 12 (7.7% of 156 total classes) +• src/models/legacy.py - class LegacyUserModel (line 56) +• src/utils/formatters.py - class JSONFormatter (line 23) +• src/api/deprecated.py - class OldAPIClient (line 15) +... + +High Complexity Functions: 28 (3.3% of 843 total functions) +• src/core/processor.py - function process_complex_data (complexity: 32, rank: E) +• src/api/handlers.py - function handle_user_request (complexity: 28, rank: D) +• src/utils/parser.py - function parse_config_file (complexity: 24, rank: D) +... + +Duplicate Code Blocks: 18 +• src/models/user.py (lines 45-60) and src/models/account.py (lines 32-47) +• src/utils/formatters.py (lines 78-95) and src/utils/exporters.py (lines 120-137) +... 
+ +Function Call Issues: 72 +• src/api/client.py - function make_request - missing required parameter 'timeout' +• src/core/processor.py - method process_data - incorrect parameter type for 'data' +... + +Parameter Analysis Issues: 11 +• src/api/handlers.py - function process_request - parameter 'user_id' should be int, not str +• src/utils/validator.py - function validate_input - parameter 'options' missing default value +... + +Interface Implementation Issues: 6 +• src/api/providers/aws.py - class AWSProvider - missing required method 'disconnect' +• src/data/storage/sql.py - class SQLStorage - incorrect signature for method 'query' +... +``` + +## 5. Visualization Options + +The API provides several visualization options that can be selected and customized: + +### Call Hierarchy Visualization + +``` +[Call Graph] +process_data +├── validate_input +│ └── is_valid +│ └── check_format +├── transform_data +│ ├── apply_transformation +│ └── normalize_output +└── save_result + ├── format_output + └── write_to_database + └── execute_query +``` + +### Symbol Hierarchy Visualization + +``` +[Symbol Tree] +Classes +├── Processor +│ ├── __init__ +│ ├── process_data +│ └── validate_input +├── Transformer +│ ├── __init__ +│ └── transform_output +└── Validator + ├── __init__ + └── validate + +Functions +├── initialize_processor +├── apply_transformation +└── is_valid +``` + +### Module Dependency Visualization + +``` +[Module Dependencies] +src/core +├── imports from: src/utils, src/data +└── imported by: src/api, src/cli + +src/utils +├── imports from: src/data +└── imported by: src/core, src/api, src/cli + +src/data +├── imports from: none +└── imported by: src/core, src/utils, src/api + +src/api +├── imports from: src/core, src/utils, src/data +└── imported by: src/cli + +src/cli +├── imports from: src/core, src/utils, src/api +└── imported by: none +``` + +### Inheritance Hierarchy Visualization + +``` +[Inheritance Hierarchy] +BaseModel +├── UserModel +│ └── AdminUserModel +├── ProductModel +└── OrderModel + ├── StandardOrderModel + └── SubscriptionOrderModel + +BaseController +├── UserController +├── ProductController +└── OrderController +``` + +## 6. Essential Data Context Preview + +The Essential Data Context Preview provides a high-level dashboard view of the codebase with the most critical information: + +``` +CODEBASE HEALTH DASHBOARD + +Repository: organization/project +Last Updated: 2023-05-15 + +OVERVIEW: +- Files: 324 (Python: 57.7%, JavaScript: 30.2%, TypeScript: 12.0%) +- Lines of Code: 45,892 +- Complexity Score: B (4.8 avg) +- Maintainability Index: 72 (Good) + +CRITICAL ISSUES: +- 3 functions with complexity > 25 (E/F rank) +- 12 unused classes +- 6 interface implementation issues + +ENTRY POINTS: +- src/app.py - function main +- src/core/processor.py - class Processor - method process_data + +TOP DEPENDENCIES: +1. src/core → src/utils (45 imports) +2. src/api → src/core (32 imports) +3. src/cli → src/api (28 imports) +``` + +## 7. Full Specific Issues View + +The Full Specific Issues View provides detailed information about a specific file or component: + +``` +FILE ANALYSIS: src/core/processor.py + +OVERVIEW: +- 488 lines (412 code, 76 comments) +- 3 classes, 17 functions +- Avg. 
Complexity: 7.2 (Rank: B) +- Maintainability Index: 68 (Rank: B) + +CLASSES: +- Processor + - Methods: 8 + - Complexity: 6.5 avg + - Issues: method process_data has complexity 12 (Rank: C) +- DataHandler + - Methods: 5 + - Complexity: 4.2 avg + - Issues: None +- ConfigManager + - Methods: 4 + - Complexity: 3.8 avg + - Issues: None + +FUNCTIONS: +- initialize_processor + - Complexity: 5 (Rank: A) + - Issues: None + +ISSUES: +- Unused imports: os (line 3) +- High complexity: process_data method (complexity: 12) +- Parameter issue: process_data - parameter 'data' incorrect type + +DEPENDENCIES: +- Imports from: src/utils/validator.py, src/data/storage.py +- Imported by: src/api/handlers.py, src/cli/commands.py + +CALL HIERARCHY: +process_data +├── validate_input +│ └── is_valid +├── transform_data +└── save_result +``` + +## 8. API Response Format + +The API returns JSON responses that can be used to generate any of the views above: + +```json +{ + "overall_statistics": { + "total_files": 324, + "files_by_language": { + "python": 187, + "javascript": 98, + "typescript": 39 + }, + "total_lines_of_code": 45892, + "total_classes": 156, + "total_functions": 843, + "total_symbols": 2187, + "average_cyclomatic_complexity": 4.8 + }, + "important_files": [ + { + "type": "function", + "name": "main", + "filepath": "src/app.py", + "complexity": 8 + }, + { + "type": "class", + "name": "Processor", + "filepath": "src/core/processor.py", + "methods": 8 + } + ], + "project_structure": { + "name": "my-project", + "type": "directory", + "children": [ + { + "name": "src", + "type": "directory", + "children": [ + { + "name": "core", + "type": "directory", + "children": [ + { + "name": "processor.py", + "type": "file", + "language": "python", + "symbols": 20, + "classes": 3, + "functions": 17, + "lines": 488, + "details": [ + { + "name": "Processor", + "type": "class", + "methods": 8, + "attributes": 12, + "line": 15, + "methods_details": [ + { + "name": "__init__", + "line": 16, + "parameters": 3 + }, + { + "name": "process_data", + "line": 32, + "parameters": 4 + } + ] + } + ] + } + ] + } + ] + } + ] + }, + "code_quality_issues": { + "unused_imports": { + "count": 134, + "items": [ + { + "filepath": "src/core/processor.py", + "import": "os", + "line": 3 + } + ] + }, + "unused_functions": { + "count": 45, + "items": [ + { + "filepath": "src/utils/validator.py", + "name": "validate_email", + "line": 78 + } + ] + }, + "unused_classes": { + "count": 12, + "items": [ + { + "filepath": "src/models/legacy.py", + "name": "LegacyUserModel", + "line": 56 + } + ] + }, + "high_complexity_functions": { + "count": 28, + "items": [ + { + "filepath": "src/core/processor.py", + "name": "process_complex_data", + "complexity": 32, + "rank": "E", + "line": 120 + } + ] + } + }, + "visualization_options": [ + "call_graph", + "dependency_graph", + "symbol_tree", + "module_dependency", + "inheritance_hierarchy" + ], + "analysis_time": 12.45 +} +``` + + + diff --git a/codegen-examples/examples/codebase_analysis_api/README.md b/codegen-examples/examples/codebase_analysis_api/README.md new file mode 100644 index 000000000..9823c2eea --- /dev/null +++ b/codegen-examples/examples/codebase_analysis_api/README.md @@ -0,0 +1,60 @@ +# Codebase Analysis API + +This example demonstrates how to create a comprehensive API for codebase analysis using the Codegen SDK. The API provides detailed insights into your codebase structure, quality, and dependencies. 
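+
+For a quick feel for the API, the snippet below requests an analysis from a locally running server and prints a couple of headline metrics. It is a minimal sketch: it assumes the server is already running on the default `http://localhost:8000` and that `requests` is installed.
+
+```python
+import requests
+
+# Ask the running API server to analyze a public GitHub repository.
+# The path and query parameters mirror the GET /analyze/{repo_url} endpoint.
+response = requests.get(
+    "http://localhost:8000/analyze/github.com/username/repo",
+    params={"language": "auto", "max_depth": 3},
+    timeout=600,  # analysis of a large repository can take a while
+)
+response.raise_for_status()
+report = response.json()
+
+print(f"Files analyzed: {report['overall_statistics']['total_files']}")
+print(f"Analysis took {report['analysis_time']}s")
+```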
+ +## Features + +- **Comprehensive Analysis**: Get detailed metrics about your codebase including file statistics, symbol analysis, complexity metrics, and more. +- **Visualization Support**: Generate visual representations of your codebase structure, dependencies, and call graphs. +- **Issue Detection**: Identify potential issues like unused code, circular dependencies, and high complexity functions. +- **Language-Specific Analysis**: Get specialized insights for Python and TypeScript codebases. + +## Usage + +### Running the API Server + +```bash +python api.py +``` + +This will start a FastAPI server on port 8000 by default. + +### API Endpoints + +- `GET /analyze/{repo_url}`: Analyze a GitHub repository +- `POST /analyze/local`: Analyze a local codebase (requires uploading a zip file) +- `GET /visualize/{repo_url}/{visualization_type}`: Generate a visualization of the codebase + +## Example Request + +```bash +curl -X GET "http://localhost:8000/analyze/github.com/username/repo" +``` + +## Example Response + +The API returns a comprehensive JSON response with detailed analysis of the codebase, including: + +- Overall statistics (file count, language breakdown, etc.) +- Important entry points and main files +- Project structure with detailed information +- Code quality issues (unused imports, functions, classes, etc.) +- Visualization options + +## Configuration + +You can configure the API by setting environment variables: + +- `PORT`: The port to run the server on (default: 8000) +- `HOST`: The host to bind to (default: 0.0.0.0) +- `MAX_REPO_SIZE`: Maximum repository size in MB (default: 100) +- `ANALYSIS_TIMEOUT`: Maximum time in seconds for analysis (default: 300) + +## Dependencies + +- Codegen SDK +- FastAPI +- Uvicorn +- NetworkX +- Plotly + diff --git a/codegen-examples/examples/codebase_analysis_api/api.py b/codegen-examples/examples/codebase_analysis_api/api.py new file mode 100644 index 000000000..6f5030d4b --- /dev/null +++ b/codegen-examples/examples/codebase_analysis_api/api.py @@ -0,0 +1,769 @@ +import os +from typing import Dict, Any, List, Optional +from enum import Enum +from fastapi import FastAPI, File, UploadFile, HTTPException, BackgroundTasks, Query +from fastapi.responses import JSONResponse +from pydantic import BaseModel, Field +import uvicorn +import tempfile +import shutil +import zipfile +import networkx as nx +from pathlib import Path +import time +import re +import math +from collections import Counter, defaultdict + +import codegen +from codegen import Codebase +from codegen.sdk.core.statements.for_loop_statement import ForLoopStatement +from codegen.sdk.core.statements.if_block_statement import IfBlockStatement +from codegen.sdk.core.statements.try_catch_statement import TryCatchStatement +from codegen.sdk.core.statements.while_statement import WhileStatement +from codegen.sdk.core.expressions.binary_expression import BinaryExpression +from codegen.sdk.core.expressions.unary_expression import UnaryExpression +from codegen.sdk.core.expressions.comparison_expression import ComparisonExpression + +# Configuration +PORT = int(os.getenv("PORT", "8000")) +HOST = os.getenv("HOST", "0.0.0.0") +MAX_REPO_SIZE = int(os.getenv("MAX_REPO_SIZE", "100")) * 1024 * 1024 # in bytes +ANALYSIS_TIMEOUT = int(os.getenv("ANALYSIS_TIMEOUT", "300")) # in seconds + +app = FastAPI( + title="Codebase Analysis API", + description="A comprehensive API for analyzing codebases using Codegen SDK", + version="1.0.0", +) + +# Models +class VisualizationType(str, Enum): + CALL_GRAPH = 
"call_graph" + DEPENDENCY_GRAPH = "dependency_graph" + SYMBOL_TREE = "symbol_tree" + MODULE_DEPENDENCY = "module_dependency" + INHERITANCE_HIERARCHY = "inheritance_hierarchy" + +class LanguageType(str, Enum): + PYTHON = "python" + TYPESCRIPT = "typescript" + AUTO = "auto" + +class AnalysisRequest(BaseModel): + repo_path: str + language: LanguageType = LanguageType.AUTO + include_visualizations: bool = False + max_depth: int = Field(default=3, ge=1, le=10) + +class AnalysisResponse(BaseModel): + overall_statistics: Dict[str, Any] + important_files: List[Dict[str, Any]] + project_structure: Dict[str, Any] + code_quality_issues: Dict[str, Any] + visualization_options: List[str] + analysis_time: float + +# Cache for analysis results +analysis_cache = {} + +# Helper functions +def calculate_cyclomatic_complexity(function): + """Calculate cyclomatic complexity for a function""" + + def analyze_statement(statement): + complexity = 0 + + if isinstance(statement, IfBlockStatement): + complexity += 1 + if hasattr(statement, "elif_statements"): + complexity += len(statement.elif_statements) + + elif isinstance(statement, (ForLoopStatement, WhileStatement)): + complexity += 1 + + elif isinstance(statement, TryCatchStatement): + complexity += len(getattr(statement, "except_blocks", [])) + + if hasattr(statement, "condition") and isinstance(statement.condition, str): + complexity += statement.condition.count(" and ") + statement.condition.count(" or ") + + if hasattr(statement, "nested_code_blocks"): + for block in statement.nested_code_blocks: + complexity += analyze_block(block) + + return complexity + + def analyze_block(block): + if not block or not hasattr(block, "statements"): + return 0 + return sum(analyze_statement(stmt) for stmt in block.statements) + + return 1 + analyze_block(function.code_block) if hasattr(function, "code_block") else 1 + +def cc_rank(complexity): + """Convert cyclomatic complexity to a letter grade""" + if complexity < 0: + raise ValueError("Complexity must be a non-negative value") + + ranks = [ + (1, 5, "A"), + (6, 10, "B"), + (11, 20, "C"), + (21, 30, "D"), + (31, 40, "E"), + (41, float("inf"), "F"), + ] + for low, high, rank in ranks: + if low <= complexity <= high: + return rank + return "F" + +def get_operators_and_operands(function): + """Extract operators and operands from a function""" + operators = [] + operands = [] + + for statement in function.code_block.statements: + for call in statement.function_calls: + operators.append(call.name) + for arg in call.args: + operands.append(arg.source) + + if hasattr(statement, "expressions"): + for expr in statement.expressions: + if isinstance(expr, BinaryExpression): + operators.extend([op.source for op in expr.operators]) + operands.extend([elem.source for elem in expr.elements]) + elif isinstance(expr, UnaryExpression): + operators.append(expr.ts_node.type) + operands.append(expr.argument.source) + elif isinstance(expr, ComparisonExpression): + operators.extend([op.source for op in expr.operators]) + operands.extend([elem.source for elem in expr.elements]) + + if hasattr(statement, "expression"): + expr = statement.expression + if isinstance(expr, BinaryExpression): + operators.extend([op.source for op in expr.operators]) + operands.extend([elem.source for elem in expr.elements]) + elif isinstance(expr, UnaryExpression): + operators.append(expr.ts_node.type) + operands.append(expr.argument.source) + elif isinstance(expr, ComparisonExpression): + operators.extend([op.source for op in expr.operators]) + 
                operands.extend([elem.source for elem in expr.elements])
+
+    return operators, operands
+
+def calculate_halstead_volume(operators, operands):
+    """Calculate Halstead volume for a function"""
+    n1 = len(set(operators))
+    n2 = len(set(operands))
+
+    N1 = len(operators)
+    N2 = len(operands)
+
+    N = N1 + N2
+    n = n1 + n2
+
+    if n > 0:
+        volume = N * math.log2(n)
+        return volume, N1, N2, n1, n2
+    return 0, N1, N2, n1, n2
+
+def count_lines(source: str):
+    """Count different types of lines in source code"""
+    if not source.strip():
+        return 0, 0, 0, 0
+
+    lines = [line.strip() for line in source.splitlines()]
+    loc = len(lines)
+    sloc = len([line for line in lines if line])
+
+    in_multiline = False
+    comments = 0
+    code_lines = []
+
+    i = 0
+    while i < len(lines):
+        line = lines[i]
+        code_part = line
+        if not in_multiline and "#" in line:
+            comment_start = line.find("#")
+            # Heuristic: ignore a "#" that appears to sit inside a string literal.
+            if not re.search(r'["\'].*#.*["\']', line[:comment_start]):
+                code_part = line[:comment_start].strip()
+                if line[comment_start:].strip():
+                    comments += 1
+
+        # An odd number of triple-quote delimiters opens or closes a multiline string.
+        if ('"""' in line or "'''" in line) and (line.count('"""') % 2 == 1 or line.count("'''") % 2 == 1):
+            if in_multiline:
+                in_multiline = False
+                comments += 1
+            else:
+                in_multiline = True
+                comments += 1
+                if line.strip().startswith('"""') or line.strip().startswith("'''"):
+                    code_part = ""
+        elif in_multiline:
+            comments += 1
+            code_part = ""
+        elif line.strip().startswith("#"):
+            comments += 1
+            code_part = ""
+
+        if code_part.strip():
+            code_lines.append(code_part)
+
+        i += 1
+
+    lloc = 0
+    continued_line = False
+    for line in code_lines:
+        if continued_line:
+            if not any(line.rstrip().endswith(c) for c in ("\\", ",", "{", "[", "(")):
+                continued_line = False
+            continue
+
+        lloc += len([stmt for stmt in line.split(";") if stmt.strip()])
+
+        if any(line.rstrip().endswith(c) for c in ("\\", ",", "{", "[", "(")):
+            continued_line = True
+
+    return loc, lloc, sloc, comments
+
+def calculate_maintainability_index(halstead_volume: float, cyclomatic_complexity: float, loc: int) -> int:
+    """Calculate the normalized maintainability index for a given function"""
+    if loc <= 0:
+        return 100
+
+    try:
+        raw_mi = 171 - 5.2 * math.log(max(1, halstead_volume)) - 0.23 * cyclomatic_complexity - 16.2 * math.log(max(1, loc))
+        normalized_mi = max(0, min(100, raw_mi * 100 / 171))
+        return int(normalized_mi)
+    except (ValueError, TypeError):
+        return 0
+
+def get_maintainability_rank(mi_score: int) -> str:
+    """Convert maintainability index to a letter grade"""
+    if mi_score >= 85:
+        return "A"
+    elif mi_score >= 65:
+        return "B"
+    elif mi_score >= 40:
+        return "C"
+    elif mi_score >= 20:
+        return "D"
+    else:
+        return "F"
+
+def find_entry_points(codebase: Codebase) -> List[Dict[str, Any]]:
+    """Find potential entry points in the codebase"""
+    entry_points = []
+
+    # Look for main functions, app initializations, etc.
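+    # NOTE: everything below is a name-based heuristic. A symbol is flagged as a
+    # *likely* entry point when its name matches a common convention; this does
+    # not guarantee that it is actually the first thing executed at runtime.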
+ for func in codebase.functions: + if func.name in ["main", "app", "init", "start", "run"]: + entry_points.append({ + "type": "function", + "name": func.name, + "filepath": func.filepath, + "complexity": calculate_cyclomatic_complexity(func) + }) + + # Look for classes that might be entry points + for cls in codebase.classes: + if any(name in cls.name.lower() for name in ["app", "service", "controller", "main"]): + entry_points.append({ + "type": "class", + "name": cls.name, + "filepath": cls.filepath, + "methods": len(cls.methods) + }) + + # Look for files that might be entry points + for file in codebase.files: + if any(name in file.name.lower() for name in ["main", "app", "index", "server"]): + entry_points.append({ + "type": "file", + "name": file.name, + "filepath": file.filepath, + "symbols": len(file.symbols) + }) + + return entry_points + +def find_unused_imports(codebase: Codebase) -> List[Dict[str, Any]]: + """Find unused imports in the codebase""" + unused_imports = [] + + for file in codebase.files: + for import_stmt in file.import_statements: + for imp in import_stmt.imports: + if not imp.symbol_usages: + unused_imports.append({ + "filepath": file.filepath, + "import": imp.name, + "line": import_stmt.start_point[0] + 1 + }) + + return unused_imports + +def find_unused_functions(codebase: Codebase) -> List[Dict[str, Any]]: + """Find unused functions in the codebase""" + unused_functions = [] + + for func in codebase.functions: + if not func.call_sites and not func.name.startswith("_"): + unused_functions.append({ + "filepath": func.filepath, + "name": func.name, + "line": func.start_point[0] + 1 + }) + + return unused_functions + +def find_unused_classes(codebase: Codebase) -> List[Dict[str, Any]]: + """Find unused classes in the codebase""" + unused_classes = [] + + for cls in codebase.classes: + if not cls.symbol_usages and not cls.name.startswith("_"): + unused_classes.append({ + "filepath": cls.filepath, + "name": cls.name, + "line": cls.start_point[0] + 1 + }) + + return unused_classes + +def find_high_complexity_functions(codebase: Codebase, threshold: int = 10) -> List[Dict[str, Any]]: + """Find functions with high cyclomatic complexity""" + complex_functions = [] + + for func in codebase.functions: + complexity = calculate_cyclomatic_complexity(func) + if complexity > threshold: + complex_functions.append({ + "filepath": func.filepath, + "name": func.name, + "complexity": complexity, + "rank": cc_rank(complexity), + "line": func.start_point[0] + 1 + }) + + # Sort by complexity (highest first) + complex_functions.sort(key=lambda x: x["complexity"], reverse=True) + return complex_functions + +def build_project_tree(codebase: Codebase, max_depth: int = 3) -> Dict[str, Any]: + """Build a hierarchical representation of the project structure""" + root = {"name": Path(codebase.repo_path).name, "type": "directory", "children": []} + + # Group files by directory + directories = defaultdict(list) + for file in codebase.files: + rel_path = Path(file.filepath) + parent_dir = str(rel_path.parent) + directories[parent_dir].append(file) + + # Build directory tree + def add_directory(path, parent, current_depth=0): + if current_depth > max_depth: + return + + dir_name = Path(path).name or path + dir_node = {"name": dir_name, "type": "directory", "children": []} + parent["children"].append(dir_node) + + # Add files in this directory + for file in directories.get(path, []): + file_node = { + "name": file.name, + "type": "file", + "language": file.language, + "symbols": 
len(file.symbols), + "classes": len(file.classes), + "functions": len(file.functions), + "lines": len(file.source.splitlines()) if file.source else 0 + } + + # Add detailed information about symbols if not too deep + if current_depth < max_depth - 1: + file_node["details"] = [] + + # Add classes + for cls in file.classes: + cls_node = { + "name": cls.name, + "type": "class", + "methods": len(cls.methods), + "attributes": len(cls.properties), + "line": cls.start_point[0] + 1 + } + + # Add methods if not too deep + if current_depth < max_depth - 2: + cls_node["methods_details"] = [] + for method in cls.methods: + method_node = { + "name": method.name, + "line": method.start_point[0] + 1, + "parameters": len(method.parameters) if hasattr(method, "parameters") else 0 + } + cls_node["methods_details"].append(method_node) + + file_node["details"].append(cls_node) + + # Add functions + for func in file.functions: + if not any(cls.methods for cls in file.classes if func in cls.methods): + func_node = { + "name": func.name, + "type": "function", + "line": func.start_point[0] + 1, + "parameters": len(func.parameters) if hasattr(func, "parameters") else 0 + } + file_node["details"].append(func_node) + + dir_node["children"].append(file_node) + + # Add subdirectories + subdirs = set() + for d in directories.keys(): + if d.startswith(path + "/") and d != path: + subdir = d.split("/")[len(path.split("/"))] + subdirs.add(path + "/" + subdir if path else subdir) + + for subdir in sorted(subdirs): + add_directory(subdir, dir_node, current_depth + 1) + + # Start with the root directory + add_directory("", root) + return root + +def build_dependency_graph(codebase: Codebase) -> Dict[str, Any]: + """Build a dependency graph of the codebase""" + graph = nx.DiGraph() + + # Add nodes for each file + for file in codebase.files: + graph.add_node(file.filepath, type="file", name=file.name) + + # Add edges for imports + for file in codebase.files: + for import_stmt in file.import_statements: + for imp in import_stmt.imports: + if imp.symbol_definition and imp.symbol_definition.filepath: + graph.add_edge(file.filepath, imp.symbol_definition.filepath) + + # Convert to a serializable format + nodes = [] + for node in graph.nodes: + nodes.append({ + "id": node, + "name": graph.nodes[node].get("name", Path(node).name), + "type": graph.nodes[node].get("type", "file") + }) + + edges = [] + for source, target in graph.edges: + edges.append({ + "source": source, + "target": target + }) + + return { + "nodes": nodes, + "edges": edges, + "stats": { + "node_count": len(nodes), + "edge_count": len(edges), + "density": nx.density(graph) if len(nodes) > 1 else 0, + "connected_components": nx.number_connected_components(graph.to_undirected()) if len(nodes) > 0 else 0 + } + } + +def build_call_graph(codebase: Codebase) -> Dict[str, Any]: + """Build a call graph of the codebase""" + graph = nx.DiGraph() + + # Add nodes for each function + for func in codebase.functions: + graph.add_node(f"{func.filepath}:{func.name}", type="function", name=func.name, filepath=func.filepath) + + # Add edges for function calls + for func in codebase.functions: + if hasattr(func, "code_block") and func.code_block: + for stmt in func.code_block.statements: + for call in stmt.function_calls: + if call.symbol_definition: + target_func = call.symbol_definition + graph.add_edge( + f"{func.filepath}:{func.name}", + f"{target_func.filepath}:{target_func.name}" + ) + + # Convert to a serializable format + nodes = [] + for node in graph.nodes: + nodes.append({ + 
"id": node, + "name": graph.nodes[node].get("name", node.split(":")[-1]), + "filepath": graph.nodes[node].get("filepath", node.split(":")[0]), + "type": graph.nodes[node].get("type", "function") + }) + + edges = [] + for source, target in graph.edges: + edges.append({ + "source": source, + "target": target + }) + + return { + "nodes": nodes, + "edges": edges, + "stats": { + "node_count": len(nodes), + "edge_count": len(edges), + "density": nx.density(graph) if len(nodes) > 1 else 0, + "connected_components": nx.number_connected_components(graph.to_undirected()) if len(nodes) > 0 else 0 + } + } + +def analyze_codebase(codebase_path: str, language: LanguageType = LanguageType.AUTO, max_depth: int = 3) -> Dict[str, Any]: + """Analyze a codebase and return comprehensive metrics""" + start_time = time.time() + + # Initialize codebase + codebase = Codebase(codebase_path) + + # Collect overall statistics + file_count = len(codebase.files) + files_by_language = Counter(file.language for file in codebase.files if file.language) + total_lines = sum(len(file.source.splitlines()) if file.source else 0 for file in codebase.files) + class_count = len(codebase.classes) + function_count = len(codebase.functions) + symbol_count = sum(len(file.symbols) for file in codebase.files) + + # Calculate average complexity + complexities = [calculate_cyclomatic_complexity(func) for func in codebase.functions if hasattr(func, "code_block")] + avg_complexity = sum(complexities) / len(complexities) if complexities else 0 + + # Find important files and entry points + entry_points = find_entry_points(codebase) + + # Build project structure tree + project_structure = build_project_tree(codebase, max_depth) + + # Find code quality issues + unused_imports = find_unused_imports(codebase) + unused_functions = find_unused_functions(codebase) + unused_classes = find_unused_classes(codebase) + high_complexity_functions = find_high_complexity_functions(codebase) + + # Build dependency graphs + dependency_graph = build_dependency_graph(codebase) + call_graph = build_call_graph(codebase) + + # Detect circular dependencies + circular_deps = [] + try: + cycles = list(nx.simple_cycles(nx.DiGraph(dependency_graph["edges"]))) + for cycle in cycles: + if len(cycle) > 1: + circular_deps.append({ + "files": cycle, + "length": len(cycle) + }) + except nx.NetworkXNoCycle: + pass + + # Compile results + analysis_time = time.time() - start_time + + return { + "overall_statistics": { + "total_files": file_count, + "files_by_language": dict(files_by_language), + "total_lines_of_code": total_lines, + "total_classes": class_count, + "total_functions": function_count, + "total_symbols": symbol_count, + "average_cyclomatic_complexity": round(avg_complexity, 2) + }, + "important_files": entry_points, + "project_structure": project_structure, + "code_quality_issues": { + "unused_imports": { + "count": len(unused_imports), + "items": unused_imports[:10] # Limit to avoid huge responses + }, + "unused_functions": { + "count": len(unused_functions), + "items": unused_functions[:10] + }, + "unused_classes": { + "count": len(unused_classes), + "items": unused_classes[:10] + }, + "high_complexity_functions": { + "count": len(high_complexity_functions), + "items": high_complexity_functions[:10] + }, + "circular_dependencies": { + "count": len(circular_deps), + "items": circular_deps[:10] + } + }, + "dependency_graph": dependency_graph, + "call_graph": call_graph, + "visualization_options": [v.value for v in VisualizationType], + "analysis_time": 
round(analysis_time, 2) + } + +@app.get("/") +async def root(): + return {"message": "Welcome to the Codebase Analysis API", "version": "1.0.0"} + +@app.get("/analyze/{repo_url:path}") +async def analyze_repo( + repo_url: str, + language: LanguageType = LanguageType.AUTO, + include_visualizations: bool = False, + max_depth: int = Query(default=3, ge=1, le=10), + background_tasks: BackgroundTasks = None +): + """Analyze a GitHub repository""" + # Check cache + cache_key = f"{repo_url}:{language}:{max_depth}" + if cache_key in analysis_cache: + return analysis_cache[cache_key] + + # Clone repository to temporary directory + with tempfile.TemporaryDirectory() as temp_dir: + try: + # Clone repository + repo_name = repo_url.split("/")[-1] + clone_dir = os.path.join(temp_dir, repo_name) + + # Use subprocess to clone + import subprocess + result = subprocess.run( + ["git", "clone", f"https://github.com/{repo_url}", clone_dir, "--depth", "1"], + capture_output=True, + text=True, + timeout=300 + ) + + if result.returncode != 0: + raise HTTPException(status_code=400, detail=f"Failed to clone repository: {result.stderr}") + + # Analyze codebase + analysis_result = analyze_codebase(clone_dir, language, max_depth) + + # Remove large data if not requested + if not include_visualizations: + if "dependency_graph" in analysis_result: + del analysis_result["dependency_graph"] + if "call_graph" in analysis_result: + del analysis_result["call_graph"] + + # Cache result + analysis_cache[cache_key] = analysis_result + + return analysis_result + + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error analyzing repository: {str(e)}") + +@app.post("/analyze/local") +async def analyze_local_repo( + file: UploadFile = File(...), + language: LanguageType = LanguageType.AUTO, + include_visualizations: bool = False, + max_depth: int = Query(default=3, ge=1, le=10) +): + """Analyze a local codebase (uploaded as a zip file)""" + # Create temporary directory + with tempfile.TemporaryDirectory() as temp_dir: + try: + # Check file size + file_size = 0 + chunk_size = 1024 * 1024 # 1MB + zip_path = os.path.join(temp_dir, "repo.zip") + + with open(zip_path, "wb") as f: + while chunk := await file.read(chunk_size): + file_size += len(chunk) + if file_size > MAX_REPO_SIZE: + raise HTTPException( + status_code=413, + detail=f"File too large. 
Maximum size is {MAX_REPO_SIZE / (1024 * 1024)}MB" + ) + f.write(chunk) + + # Extract zip file + extract_dir = os.path.join(temp_dir, "extracted") + os.makedirs(extract_dir, exist_ok=True) + + with zipfile.ZipFile(zip_path, "r") as zip_ref: + zip_ref.extractall(extract_dir) + + # Analyze codebase + analysis_result = analyze_codebase(extract_dir, language, max_depth) + + # Remove large data if not requested + if not include_visualizations: + if "dependency_graph" in analysis_result: + del analysis_result["dependency_graph"] + if "call_graph" in analysis_result: + del analysis_result["call_graph"] + + return analysis_result + + except zipfile.BadZipFile: + raise HTTPException(status_code=400, detail="Invalid zip file") + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error analyzing codebase: {str(e)}") + +@app.get("/visualize/{repo_url:path}/{visualization_type}") +async def visualize_repo( + repo_url: str, + visualization_type: VisualizationType, + language: LanguageType = LanguageType.AUTO, + format: str = Query(default="json", regex="^(json|html|svg)$") +): + """Generate a visualization of the codebase""" + # Check cache + cache_key = f"{repo_url}:{language}:3" # Use default depth + + if cache_key not in analysis_cache: + # Analyze repository first + await analyze_repo(repo_url, language, True, 3) + + analysis_result = analysis_cache[cache_key] + + if visualization_type == VisualizationType.DEPENDENCY_GRAPH: + graph_data = analysis_result.get("dependency_graph", {}) + elif visualization_type == VisualizationType.CALL_GRAPH: + graph_data = analysis_result.get("call_graph", {}) + else: + # For other visualization types, we need to generate them + raise HTTPException(status_code=501, detail=f"Visualization type {visualization_type} not implemented yet") + + if format == "json": + return graph_data + else: + # For other formats, we would generate the appropriate visualization + raise HTTPException(status_code=501, detail=f"Format {format} not implemented yet") + +@app.get("/health") +async def health_check(): + """Health check endpoint""" + return {"status": "healthy", "version": "1.0.0"} + +if __name__ == "__main__": + uvicorn.run("api:app", host=HOST, port=PORT, reload=True) + diff --git a/codegen-examples/examples/codebase_analysis_api/client.py b/codegen-examples/examples/codebase_analysis_api/client.py new file mode 100644 index 000000000..cb2c5fc35 --- /dev/null +++ b/codegen-examples/examples/codebase_analysis_api/client.py @@ -0,0 +1,384 @@ +#!/usr/bin/env python3 +""" +Codebase Analysis API Client + +This script demonstrates how to use the Codebase Analysis API to analyze a repository +and display the results in a user-friendly format. 
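+
+Example usage:
+    python client.py analyze github.com/username/repo --output results.json
+    python client.py visualize github.com/username/repo dependency_graph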
+""" + +import argparse +import json +import requests +import sys +from typing import Dict, Any, Optional +from enum import Enum +from rich.console import Console +from rich.tree import Tree +from rich.table import Table +from rich.panel import Panel +from rich.markdown import Markdown +from rich import print as rprint + +# Configuration +API_URL = "http://localhost:8000" + +class VisualizationType(str, Enum): + CALL_GRAPH = "call_graph" + DEPENDENCY_GRAPH = "dependency_graph" + SYMBOL_TREE = "symbol_tree" + MODULE_DEPENDENCY = "module_dependency" + INHERITANCE_HIERARCHY = "inheritance_hierarchy" + +class LanguageType(str, Enum): + PYTHON = "python" + TYPESCRIPT = "typescript" + AUTO = "auto" + +console = Console() + +def analyze_repo(repo_url: str, language: LanguageType = LanguageType.AUTO) -> Dict[str, Any]: + """Analyze a repository using the API""" + url = f"{API_URL}/analyze/{repo_url}" + params = {"language": language} + + with console.status(f"Analyzing repository {repo_url}..."): + response = requests.get(url, params=params) + + if response.status_code != 200: + console.print(f"[bold red]Error:[/bold red] {response.text}") + sys.exit(1) + + return response.json() + +def visualize_repo(repo_url: str, viz_type: VisualizationType, target: Optional[str] = None, language: LanguageType = LanguageType.AUTO) -> Dict[str, Any]: + """Generate visualization for a repository""" + url = f"{API_URL}/visualize/{repo_url}/{viz_type}" + params = {"language": language} + if target: + params["target"] = target + + with console.status(f"Generating {viz_type} visualization..."): + response = requests.get(url, params=params) + + if response.status_code != 200: + console.print(f"[bold red]Error:[/bold red] {response.text}") + sys.exit(1) + + return response.json() + +def display_overall_statistics(stats: Dict[str, Any]) -> None: + """Display overall statistics in a rich table""" + console.print("\n[bold cyan]Overall Statistics[/bold cyan]") + + table = Table(show_header=False) + table.add_column("Metric", style="green") + table.add_column("Value", style="yellow") + + table.add_row("Total Files", str(stats["total_files"])) + + # Display files by language + lang_breakdown = [] + for lang, count in stats["files_by_language"].items(): + percentage = (count / stats["total_files"]) * 100 + lang_breakdown.append(f"{lang.capitalize()}: {count} ({percentage:.1f}%)") + + table.add_row("Files by Language", "\n".join(lang_breakdown)) + table.add_row("Total Lines of Code", f"{stats['total_lines_of_code']:,}") + table.add_row("Total Classes", str(stats["total_classes"])) + table.add_row("Total Functions", str(stats["total_functions"])) + table.add_row("Total Symbols", str(stats["total_symbols"])) + table.add_row("Average Cyclomatic Complexity", str(stats["average_cyclomatic_complexity"])) + + console.print(table) + +def display_important_files(files: list) -> None: + """Display important files in a rich table""" + console.print("\n[bold cyan]Important Files and Entry Points[/bold cyan]") + + table = Table() + table.add_column("Type", style="green") + table.add_column("Name", style="yellow") + table.add_column("Filepath", style="blue") + table.add_column("Details", style="magenta") + + for file in files: + file_type = file["type"] + name = file["name"] + filepath = file["filepath"] + + if file_type == "function": + details = f"Complexity: {file.get('complexity', 'N/A')}" + elif file_type == "class": + details = f"Methods: {file.get('methods', 'N/A')}" + else: + details = f"Symbols: {file.get('symbols', 'N/A')}" + + 
table.add_row(file_type.capitalize(), name, filepath, details) + + console.print(table) + +def display_project_structure(structure: Dict[str, Any]) -> None: + """Display project structure as a tree""" + console.print("\n[bold cyan]Project Structure[/bold cyan]") + + def add_to_tree(node: Dict[str, Any], tree: Tree) -> None: + if node["type"] == "directory": + for child in node.get("children", []): + if child["type"] == "directory": + branch = tree.add(f"[bold blue]{child['name']}[/bold blue]") + add_to_tree(child, branch) + else: + # File node + file_details = f"[green]{child['name']}[/green]" + if "language" in child: + file_details += f" ([yellow]{child['language']}[/yellow])" + if "classes" in child and "functions" in child: + file_details += f" - {child['classes']} classes, {child['functions']} functions" + if "lines" in child: + file_details += f", {child['lines']} lines" + + file_branch = tree.add(file_details) + + # Add class and function details if available + if "details" in child: + for detail in child["details"]: + if detail["type"] == "class": + class_details = f"[bold magenta]class {detail['name']}[/bold magenta]" + class_details += f" - {detail.get('methods', 0)} methods" + class_branch = file_branch.add(class_details) + + # Add method details + if "methods_details" in detail: + for method in detail["methods_details"]: + method_details = f"[cyan]method {method['name']}[/cyan]" + method_details += f" (line {method['line']}, {method['parameters']} params)" + class_branch.add(method_details) + + elif detail["type"] == "function": + func_details = f"[cyan]function {detail['name']}[/cyan]" + func_details += f" (line {detail['line']}, {detail['parameters']} params)" + file_branch.add(func_details) + + tree = Tree(f"[bold red]{structure['name']}[/bold red]") + add_to_tree(structure, tree) + console.print(tree) + +def display_code_quality_issues(issues: Dict[str, Any]) -> None: + """Display code quality issues""" + console.print("\n[bold cyan]Code Quality Issues[/bold cyan]") + + # Unused imports + unused_imports = issues["unused_imports"] + console.print(f"\n[bold yellow]Unused Imports:[/bold yellow] {unused_imports['count']} items") + + if unused_imports["count"] > 0: + table = Table(show_header=True) + table.add_column("Filepath", style="blue") + table.add_column("Import", style="green") + table.add_column("Line", style="yellow") + + for item in unused_imports["items"][:10]: # Show first 10 + table.add_row(item["filepath"], item["import"], str(item["line"])) + + console.print(table) + if len(unused_imports["items"]) > 10: + console.print(f"[italic]...and {len(unused_imports['items']) - 10} more[/italic]") + + # Unused functions + unused_functions = issues["unused_functions"] + console.print(f"\n[bold yellow]Unused Functions:[/bold yellow] {unused_functions['count']} items") + + if unused_functions["count"] > 0: + table = Table(show_header=True) + table.add_column("Filepath", style="blue") + table.add_column("Function", style="green") + table.add_column("Line", style="yellow") + + for item in unused_functions["items"][:10]: # Show first 10 + table.add_row(item["filepath"], item["name"], str(item["line"])) + + console.print(table) + if len(unused_functions["items"]) > 10: + console.print(f"[italic]...and {len(unused_functions['items']) - 10} more[/italic]") + + # Unused classes + unused_classes = issues["unused_classes"] + console.print(f"\n[bold yellow]Unused Classes:[/bold yellow] {unused_classes['count']} items") + + if unused_classes["count"] > 0: + table = 
Table(show_header=True) + table.add_column("Filepath", style="blue") + table.add_column("Class", style="green") + table.add_column("Line", style="yellow") + + for item in unused_classes["items"][:10]: # Show first 10 + table.add_row(item["filepath"], item["name"], str(item["line"])) + + console.print(table) + if len(unused_classes["items"]) > 10: + console.print(f"[italic]...and {len(unused_classes['items']) - 10} more[/italic]") + + # High complexity functions + complex_functions = issues["high_complexity_functions"] + console.print(f"\n[bold yellow]High Complexity Functions:[/bold yellow] {complex_functions['count']} items") + + if complex_functions["count"] > 0: + table = Table(show_header=True) + table.add_column("Filepath", style="blue") + table.add_column("Function", style="green") + table.add_column("Complexity", style="red") + table.add_column("Rank", style="yellow") + table.add_column("Line", style="magenta") + + for item in complex_functions["items"][:10]: # Show first 10 + table.add_row( + item["filepath"], + item["name"], + str(item["complexity"]), + item["rank"], + str(item["line"]) + ) + + console.print(table) + if len(complex_functions["items"]) > 10: + console.print(f"[italic]...and {len(complex_functions['items']) - 10} more[/italic]") + +def display_visualization_options(options: list) -> None: + """Display available visualization options""" + console.print("\n[bold cyan]Available Visualizations[/bold cyan]") + + for option in options: + console.print(f"- [green]{option}[/green]") + + console.print("\nTo generate a visualization, use:") + console.print("[yellow]python client.py visualize REPO_URL VISUALIZATION_TYPE [--target TARGET] [--language LANGUAGE][/yellow]") + +def display_visualization(viz_data: Dict[str, Any], viz_type: VisualizationType) -> None: + """Display visualization data""" + console.print(f"\n[bold cyan]{viz_type.value.replace('_', ' ').title()} Visualization[/bold cyan]") + + if "error" in viz_data: + console.print(f"[bold red]Error:[/bold red] {viz_data['error']}") + return + + if viz_type in [VisualizationType.CALL_GRAPH, VisualizationType.DEPENDENCY_GRAPH]: + # Display graph data + console.print(f"\nNodes: {len(viz_data['nodes'])}") + console.print(f"Edges: {len(viz_data['edges'])}") + + # Display sample of nodes + console.print("\n[bold yellow]Sample Nodes:[/bold yellow]") + table = Table(show_header=True) + table.add_column("ID", style="green") + table.add_column("Type", style="blue") + + for node in viz_data["nodes"][:10]: # Show first 10 + table.add_row(node["id"], node.get("type", "unknown")) + + console.print(table) + if len(viz_data["nodes"]) > 10: + console.print(f"[italic]...and {len(viz_data['nodes']) - 10} more nodes[/italic]") + + # Display sample of edges + console.print("\n[bold yellow]Sample Edges:[/bold yellow]") + table = Table(show_header=True) + table.add_column("Source", style="green") + table.add_column("Target", style="blue") + + for edge in viz_data["edges"][:10]: # Show first 10 + table.add_row(edge["source"], edge["target"]) + + console.print(table) + if len(viz_data["edges"]) > 10: + console.print(f"[italic]...and {len(viz_data['edges']) - 10} more edges[/italic]") + + elif viz_type in [VisualizationType.SYMBOL_TREE, VisualizationType.INHERITANCE_HIERARCHY]: + # Display tree data + def add_to_tree(node: Dict[str, Any], tree: Tree) -> None: + if "children" in node: + for child in node["children"]: + child_name = child["name"] + child_type = child.get("type", "unknown") + + if child_type == "class": + branch = tree.add(f"[bold 
magenta]{child_name}[/bold magenta]") + if "methods" in child: + branch.add(f"[cyan]Methods: {child['methods']}[/cyan]") + if "filepath" in child: + branch.add(f"[blue]Path: {child['filepath']}[/blue]") + elif child_type == "function" or child_type == "method": + branch = tree.add(f"[bold green]{child_name}[/bold green]") + if "parameters" in child: + branch.add(f"[cyan]Parameters: {child['parameters']}[/cyan]") + elif child_type == "category": + branch = tree.add(f"[bold yellow]{child_name}[/bold yellow]") + else: + branch = tree.add(f"[bold]{child_name}[/bold]") + + add_to_tree(child, branch) + + tree = Tree(f"[bold red]{viz_data['name']}[/bold red]") + add_to_tree(viz_data, tree) + console.print(tree) + +def main(): + parser = argparse.ArgumentParser(description="Codebase Analysis API Client") + subparsers = parser.add_subparsers(dest="command", help="Command to run") + + # Analyze command + analyze_parser = subparsers.add_parser("analyze", help="Analyze a repository") + analyze_parser.add_argument("repo_url", help="Repository URL (e.g., github.com/username/repo)") + analyze_parser.add_argument("--language", choices=[e.value for e in LanguageType], default="auto", help="Programming language") + analyze_parser.add_argument("--output", help="Output file for JSON results") + + # Visualize command + visualize_parser = subparsers.add_parser("visualize", help="Generate visualization for a repository") + visualize_parser.add_argument("repo_url", help="Repository URL (e.g., github.com/username/repo)") + visualize_parser.add_argument("viz_type", choices=[e.value for e in VisualizationType], help="Visualization type") + visualize_parser.add_argument("--target", help="Target symbol for visualization (e.g., function name or Class.method)") + visualize_parser.add_argument("--language", choices=[e.value for e in LanguageType], default="auto", help="Programming language") + visualize_parser.add_argument("--output", help="Output file for JSON results") + + args = parser.parse_args() + + if not args.command: + parser.print_help() + sys.exit(1) + + if args.command == "analyze": + # Analyze repository + results = analyze_repo(args.repo_url, args.language) + + # Save results to file if requested + if args.output: + with open(args.output, "w") as f: + json.dump(results, f, indent=2) + console.print(f"[green]Results saved to {args.output}[/green]") + + # Display results + console.print(Panel(f"[bold]Analysis Results for {args.repo_url}[/bold]", style="cyan")) + console.print(f"Analysis completed in [bold green]{results['analysis_time']:.2f}[/bold green] seconds") + + display_overall_statistics(results["overall_statistics"]) + display_important_files(results["important_files"]) + display_project_structure(results["project_structure"]) + display_code_quality_issues(results["code_quality_issues"]) + display_visualization_options(results["visualization_options"]) + + elif args.command == "visualize": + # Generate visualization + viz_data = visualize_repo(args.repo_url, args.viz_type, args.target, args.language) + + # Save results to file if requested + if args.output: + with open(args.output, "w") as f: + json.dump(viz_data, f, indent=2) + console.print(f"[green]Visualization data saved to {args.output}[/green]") + + # Display visualization + console.print(Panel(f"[bold]Visualization for {args.repo_url}[/bold]", style="cyan")) + display_visualization(viz_data, args.viz_type) + +if __name__ == "__main__": + main() + diff --git a/codegen-examples/examples/codebase_analysis_api/pyproject.toml 
b/codegen-examples/examples/codebase_analysis_api/pyproject.toml
new file mode 100644
index 000000000..e08a7b2f1
--- /dev/null
+++ b/codegen-examples/examples/codebase_analysis_api/pyproject.toml
@@ -0,0 +1,23 @@
+[tool.poetry]
+name = "codebase-analysis-api"
+version = "0.1.0"
+description = "A comprehensive API for analyzing codebases using Codegen SDK"
+authors = ["Codegen "]
+readme = "README.md"
+
+[tool.poetry.dependencies]
+python = "^3.9"
+fastapi = "^0.104.0"
+uvicorn = "^0.23.2"
+python-multipart = "^0.0.6"
+requests = "^2.28.2"
+networkx = "^3.1"
+plotly = "^5.18.0"
+rich = "^13.3.5"
+codegen = "^0.1.0"
+pydantic = "^2.0.0"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"

diff --git a/codegen-examples/examples/codebase_analysis_api/requirements.txt b/codegen-examples/examples/codebase_analysis_api/requirements.txt
new file mode 100644
index 000000000..d236fc3ac
--- /dev/null
+++ b/codegen-examples/examples/codebase_analysis_api/requirements.txt
@@ -0,0 +1,8 @@
+fastapi>=0.104.0
+uvicorn>=0.23.2
+networkx>=3.1
+plotly>=5.18.0
+python-multipart>=0.0.6
+requests>=2.28.2
+rich>=13.3.5
+codegen>=0.1.0