From b3a24515408e4604706b794a9161c9ada0fa8841 Mon Sep 17 00:00:00 2001 From: Imamuzzaki Abu Salam Date: Sat, 22 Mar 2025 06:24:31 +0700 Subject: [PATCH 01/12] Add caching mechanism for repository data --- sources/manager_cache.py | 112 +++++++++++++++++++++++++++++ sources/manager_cache_test.py | 132 ++++++++++++++++++++++++++++++++++ 2 files changed, 244 insertions(+) create mode 100644 sources/manager_cache.py create mode 100644 sources/manager_cache_test.py diff --git a/sources/manager_cache.py b/sources/manager_cache.py new file mode 100644 index 0000000..d7db222 --- /dev/null +++ b/sources/manager_cache.py @@ -0,0 +1,112 @@ +import json +import os +import time +from pathlib import Path +from typing import Dict, Any, Optional + + +class CacheManager: + """Manages caching for GitHub repository data to improve performance. + + This class provides functionality to cache and retrieve repository data, + significantly reducing API calls and processing time for users with many repos. + """ + + CACHE_DIR = '.cache' + CACHE_EXPIRY = 86400 # Cache expiry in seconds (24 hours) + + def __init__(self, user_id: str): + """Initialize the cache manager. + + Args: + user_id: GitHub username or organization name to create user-specific cache + """ + self.user_id = user_id + self.cache_path = Path(self.CACHE_DIR) / f"{user_id}_repo_cache.json" + self._ensure_cache_dir() + + def _ensure_cache_dir(self) -> None: + """Ensure cache directory exists.""" + os.makedirs(self.CACHE_DIR, exist_ok=True) + + def get_cached_data(self, repo_name: str) -> Optional[Dict[str, Any]]: + """Get cached data for a specific repository if it exists and is valid. + + Args: + repo_name: The name of the repository + + Returns: + The cached repository data or None if not cached or expired + """ + if not self.cache_path.exists(): + return None + + try: + with open(self.cache_path, 'r') as f: + cache_data = json.load(f) + + if repo_name not in cache_data: + return None + + repo_cache = cache_data[repo_name] + # Check if cache is expired + if time.time() - repo_cache.get('timestamp', 0) > self.CACHE_EXPIRY: + return None + + return repo_cache.get('data') + except (json.JSONDecodeError, IOError): + # If cache file is corrupted or cannot be read, return None + return None + + def update_cache(self, repo_name: str, data: Dict[str, Any]) -> None: + """Update the cache with new repository data. + + Args: + repo_name: The name of the repository + data: The repository data to cache + """ + cache_data = {} + if self.cache_path.exists(): + try: + with open(self.cache_path, 'r') as f: + cache_data = json.load(f) + except (json.JSONDecodeError, IOError): + # If cache file is corrupted, start with an empty cache + cache_data = {} + + # Update cache with new data + cache_data[repo_name] = { + 'timestamp': time.time(), + 'data': data + } + + with open(self.cache_path, 'w') as f: + json.dump(cache_data, f) + + def clear_cache(self) -> None: + """Clear the entire cache for the user.""" + if self.cache_path.exists(): + os.remove(self.cache_path) + + def get_repo_last_modified(self, repo_name: str) -> Optional[float]: + """Get the last modified timestamp of a cached repository. 
+ + Args: + repo_name: The name of the repository + + Returns: + Timestamp of last modification or None if not cached + """ + if not self.cache_path.exists(): + return None + + try: + with open(self.cache_path, 'r') as f: + cache_data = json.load(f) + + if repo_name not in cache_data: + return None + + return cache_data[repo_name].get('timestamp') + except (json.JSONDecodeError, IOError): + return None diff --git a/sources/manager_cache_test.py b/sources/manager_cache_test.py new file mode 100644 index 0000000..791b64c --- /dev/null +++ b/sources/manager_cache_test.py @@ -0,0 +1,132 @@ +import json +import os +import time +from pathlib import Path + +import pytest + +from sources.manager_cache import CacheManager + + +@pytest.fixture +def cache_manager(): + manager = CacheManager('test_user') + # Ensure clean state for tests + if Path(CacheManager.CACHE_DIR, 'test_user_repo_cache.json').exists(): + os.remove(Path(CacheManager.CACHE_DIR, 'test_user_repo_cache.json')) + yield manager + # Clean up after tests + if Path(CacheManager.CACHE_DIR, 'test_user_repo_cache.json').exists(): + os.remove(Path(CacheManager.CACHE_DIR, 'test_user_repo_cache.json')) + + +def test_ensure_cache_dir_creation(cache_manager): + """Test that the cache directory is created.""" + assert Path(CacheManager.CACHE_DIR).exists() + + +def test_get_cached_data_no_cache_file(cache_manager): + """Test getting data when no cache file exists.""" + assert cache_manager.get_cached_data('repo1') is None + + +def test_update_and_get_cache(cache_manager): + """Test updating and retrieving cache.""" + test_data = {'name': 'repo1', 'language': 'Python'} + cache_manager.update_cache('repo1', test_data) + + assert Path(CacheManager.CACHE_DIR, 'test_user_repo_cache.json').exists() + assert cache_manager.get_cached_data('repo1') == test_data + + +def test_update_existing_cache(cache_manager): + """Test updating existing cache entry.""" + # Set initial data + initial_data = {'name': 'repo1', 'language': 'Python'} + cache_manager.update_cache('repo1', initial_data) + + # Update with new data + updated_data = {'name': 'repo1', 'language': 'JavaScript'} + cache_manager.update_cache('repo1', updated_data) + + # Verify update worked + assert cache_manager.get_cached_data('repo1') == updated_data + + +def test_multiple_repos_cache(cache_manager): + """Test caching multiple repositories.""" + repo1_data = {'name': 'repo1', 'language': 'Python'} + repo2_data = {'name': 'repo2', 'language': 'JavaScript'} + + cache_manager.update_cache('repo1', repo1_data) + cache_manager.update_cache('repo2', repo2_data) + + assert cache_manager.get_cached_data('repo1') == repo1_data + assert cache_manager.get_cached_data('repo2') == repo2_data + + +def test_clear_cache(cache_manager): + """Test clearing the cache.""" + # Add some data + cache_manager.update_cache('repo1', {'data': 'test'}) + + # Verify it exists + assert cache_manager.get_cached_data('repo1') is not None + + # Clear and verify it's gone + cache_manager.clear_cache() + assert cache_manager.get_cached_data('repo1') is None + assert not Path(CacheManager.CACHE_DIR, 'test_user_repo_cache.json').exists() + + +def test_cache_expiry(cache_manager, monkeypatch): + """Test that expired cache entries are not returned.""" + # Add data + cache_manager.update_cache('repo1', {'data': 'test'}) + + # Verify it exists + assert cache_manager.get_cached_data('repo1') is not None + + # Mock time to simulate passage of time beyond expiry + current_time = time.time() + future_time = current_time + 
CacheManager.CACHE_EXPIRY + 100 + monkeypatch.setattr(time, 'time', lambda: future_time) + + # Verify expired cache is not returned + assert cache_manager.get_cached_data('repo1') is None + + +def test_corrupted_cache_file(cache_manager): + """Test handling of corrupted cache files.""" + # Create a corrupted JSON file + os.makedirs(CacheManager.CACHE_DIR, exist_ok=True) + with open(Path(CacheManager.CACHE_DIR, 'test_user_repo_cache.json'), 'w') as f: + f.write('{"not valid JSON"') + + # Should handle gracefully and return None + assert cache_manager.get_cached_data('repo1') is None + + # Should be able to update cache even after corruption + cache_manager.update_cache('repo1', {'data': 'new'}) + assert cache_manager.get_cached_data('repo1') == {'data': 'new'} + + +def test_get_repo_last_modified(cache_manager, monkeypatch): + """Test getting the last modified timestamp.""" + # Mock time for consistent testing + test_time = 1617000000.0 + monkeypatch.setattr(time, 'time', lambda: test_time) + + # Add data + cache_manager.update_cache('repo1', {'data': 'test'}) + + # Check timestamp + assert cache_manager.get_repo_last_modified('repo1') == test_time + + # Non-existent repo + assert cache_manager.get_repo_last_modified('non_existent') is None + + +def test_get_repo_last_modified_no_cache(cache_manager): + """Test getting timestamp when no cache exists.""" + assert cache_manager.get_repo_last_modified('repo1') is None From 16f3d66071d29b206f330126c1790d116116d1cf Mon Sep 17 00:00:00 2001 From: Imamuzzaki Abu Salam Date: Sat, 22 Mar 2025 06:25:28 +0700 Subject: [PATCH 02/12] Add performance benchmarking utilities --- benchmark.py | 126 +++++++++++++++++++++++++++++ sources/benchmarking.py | 151 +++++++++++++++++++++++++++++++++++ sources/benchmarking_test.py | 123 ++++++++++++++++++++++++++++ 3 files changed, 400 insertions(+) create mode 100644 benchmark.py create mode 100644 sources/benchmarking.py create mode 100644 sources/benchmarking_test.py diff --git a/benchmark.py b/benchmark.py new file mode 100644 index 0000000..47460af --- /dev/null +++ b/benchmark.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 +""" +Benchmarking script for waka-readme-stats + +This script runs performance benchmarks on various parts of the codebase +to identify bottlenecks and measure improvements. + +Usage: + python benchmark.py --username [--full] + +Options: + --username GitHub username to use for benchmarking + --full Run full benchmark suite (including API calls) + --no-cache Disable caching for benchmarking +""" + +import argparse +import os +import sys +import time +from pathlib import Path + +# Add the parent directory to the path so we can import from sources +parent_dir = Path(__file__).resolve().parent +sys.path.append(str(parent_dir)) + +from sources.benchmarking import BenchmarkTracker, benchmark +from sources.manager_cache import CacheManager + +# Import conditionally to avoid errors if running without full dependencies +try: + from sources.main import main as waka_main +except ImportError: + print("Failed to import main module. Make sure all dependencies are installed.") + sys.exit(1) + + +@benchmark(name="Full Execution", metadata={"type": "full_run"}) +def run_full_benchmark(username, use_cache=True): + """Run a full benchmark of the waka-readme-stats process. 
+ + Args: + username: GitHub username to use for benchmarking + use_cache: Whether to use caching during benchmarking + """ + # Set up environment variables for the test + os.environ["INPUT_GH_TOKEN"] = os.environ.get("GH_TOKEN", "") + os.environ["INPUT_WAKATIME_API_KEY"] = os.environ.get("WAKATIME_API_KEY", "") + os.environ["INPUT_SHOW_TIMEZONE"] = "True" + os.environ["INPUT_SHOW_LANGUAGE"] = "True" + os.environ["INPUT_SHOW_EDITORS"] = "True" + os.environ["INPUT_SHOW_PROJECTS"] = "True" + os.environ["INPUT_SHOW_OS"] = "True" + os.environ["INPUT_SHOW_COMMIT"] = "True" + os.environ["INPUT_SHOW_LANGUAGE_PER_REPO"] = "True" + os.environ["GITHUB_REPOSITORY"] = f"{username}/{username}" + + # Control caching behavior + if not use_cache: + # Clear cache before running + cache_manager = CacheManager(username) + cache_manager.clear_cache() + + # Run the main function + try: + waka_main() + except Exception as e: + print(f"Error running benchmark: {e}") + + +def print_system_info(): + """Print system information for context.""" + import platform + import multiprocessing + + print("System Information:") + print(f" - Python version: {platform.python_version()}") + print(f" - OS: {platform.system()} {platform.release()}") + print(f" - CPU cores: {multiprocessing.cpu_count()}") + print() + + +def main(): + """Main benchmark function.""" + parser = argparse.ArgumentParser(description="Benchmark waka-readme-stats") + parser.add_argument( + "--username", + required=True, + help="GitHub username to use for benchmarking" + ) + parser.add_argument( + "--full", + action="store_true", + help="Run full benchmark suite (including API calls)" + ) + parser.add_argument( + "--no-cache", + action="store_true", + help="Disable caching for benchmarking" + ) + + args = parser.parse_args() + + print("Starting benchmarks for waka-readme-stats...\n") + print_system_info() + + # Run with cache + if not args.no_cache: + print("Running benchmark with caching enabled...") + start_time = time.time() + run_full_benchmark(args.username, use_cache=True) + print(f"Completed in {time.time() - start_time:.2f}s with caching enabled\n") + + # Run without cache for comparison if requested + if args.no_cache: + print("Running benchmark with caching disabled...") + start_time = time.time() + run_full_benchmark(args.username, use_cache=False) + print(f"Completed in {time.time() - start_time:.2f}s with caching disabled\n") + + # Print detailed benchmark results + print(BenchmarkTracker.get_summary()) + + +if __name__ == "__main__": + main() diff --git a/sources/benchmarking.py b/sources/benchmarking.py new file mode 100644 index 0000000..8760099 --- /dev/null +++ b/sources/benchmarking.py @@ -0,0 +1,151 @@ +import time +from functools import wraps +from typing import Dict, Any, Callable, List, Optional, Tuple + + +class BenchmarkResult: + """Contains the result of a performance benchmark.""" + + def __init__(self, name: str, execution_time: float, metadata: Optional[Dict[str, Any]] = None): + """Initialize the benchmark result. 
+ + Args: + name: Name of the benchmarked function or operation + execution_time: Time taken to execute in seconds + metadata: Additional metadata about the benchmark + """ + self.name = name + self.execution_time = execution_time + self.metadata = metadata or {} + + def __str__(self) -> str: + """String representation of the benchmark result.""" + return f"{self.name}: {self.execution_time:.4f}s" + + +class BenchmarkTracker: + """Tracks and manages benchmarks for performance analysis.""" + + _results: List[BenchmarkResult] = [] + + @classmethod + def add_result(cls, result: BenchmarkResult) -> None: + """Add a benchmark result to the tracker. + + Args: + result: The benchmark result to add + """ + cls._results.append(result) + + @classmethod + def get_results(cls) -> List[BenchmarkResult]: + """Get all benchmark results. + + Returns: + List of benchmark results + """ + return cls._results + + @classmethod + def clear_results(cls) -> None: + """Clear all benchmark results.""" + cls._results.clear() + + @classmethod + def get_total_execution_time(cls) -> float: + """Get the total execution time of all benchmarks. + + Returns: + Total execution time in seconds + """ + return sum(result.execution_time for result in cls._results) + + @classmethod + def get_summary(cls) -> str: + """Get a formatted summary of all benchmark results. + + Returns: + Formatted summary string + """ + if not cls._results: + return "No benchmarks recorded." + + summary = "Performance Benchmark Summary:\n" + summary += "=================================\n" + + for result in cls._results: + summary += f"{result}\n" + + # Add metadata if present + if result.metadata: + for key, value in result.metadata.items(): + summary += f" - {key}: {value}\n" + + summary += "=================================\n" + summary += f"Total execution time: {cls.get_total_execution_time():.4f}s\n" + + return summary + + +def benchmark(name: Optional[str] = None, metadata: Optional[Dict[str, Any]] = None) -> Callable: + """Decorator to benchmark a function's execution time. + + Args: + name: Optional name for the benchmark + metadata: Optional metadata about the benchmark + + Returns: + Decorated function + """ + def decorator(func: Callable) -> Callable: + @wraps(func) + def wrapper(*args: Any, **kwargs: Any) -> Any: + benchmark_name = name if name else func.__name__ + start_time = time.time() + result = func(*args, **kwargs) + end_time = time.time() + + execution_time = end_time - start_time + + # Add dynamic metadata if provided + final_metadata = metadata.copy() if metadata else {} + if 'args_count' not in final_metadata: + final_metadata['args_count'] = len(args) + + benchmark_result = BenchmarkResult( + name=benchmark_name, + execution_time=execution_time, + metadata=final_metadata + ) + + BenchmarkTracker.add_result(benchmark_result) + return result + return wrapper + return decorator + + +def benchmark_block(name: str, metadata: Optional[Dict[str, Any]] = None) -> Tuple[Callable, Callable]: + """Context manager for benchmarking a block of code. 
+ + Args: + name: Name for the benchmark + metadata: Optional metadata about the benchmark + + Returns: + Start and end functions for the benchmark + """ + start_time = [0.0] # Use a list to allow modification in nested scope + + def start() -> None: + start_time[0] = time.time() + + def end() -> None: + execution_time = time.time() - start_time[0] + benchmark_result = BenchmarkResult( + name=name, + execution_time=execution_time, + metadata=metadata + ) + BenchmarkTracker.add_result(benchmark_result) + + return start, end diff --git a/sources/benchmarking_test.py b/sources/benchmarking_test.py new file mode 100644 index 0000000..1790191 --- /dev/null +++ b/sources/benchmarking_test.py @@ -0,0 +1,123 @@ +import time +from unittest.mock import patch + +import pytest + +from sources.benchmarking import benchmark, benchmark_block, BenchmarkTracker, BenchmarkResult + + +@pytest.fixture +def clear_benchmark_results(): + """Fixture to clear benchmark results before and after each test.""" + BenchmarkTracker.clear_results() + yield + BenchmarkTracker.clear_results() + + +def test_benchmark_decorator(clear_benchmark_results): + """Test the benchmark decorator functionality.""" + # Define a function to benchmark + @benchmark() + def example_function(sleep_time): + time.sleep(sleep_time) + return "result" + + # Run the function + result = example_function(0.01) + + # Check the function still returns correctly + assert result == "result" + + # Check that the benchmark was recorded + benchmark_results = BenchmarkTracker.get_results() + assert len(benchmark_results) == 1 + assert benchmark_results[0].name == "example_function" + assert benchmark_results[0].execution_time >= 0.01 + assert benchmark_results[0].metadata.get("args_count") == 1 + + +def test_benchmark_with_custom_name(clear_benchmark_results): + """Test benchmark decorator with custom name.""" + @benchmark(name="CustomTest") + def example_function(): + return "result" + + example_function() + + benchmark_results = BenchmarkTracker.get_results() + assert len(benchmark_results) == 1 + assert benchmark_results[0].name == "CustomTest" + + +def test_benchmark_with_metadata(clear_benchmark_results): + """Test benchmark decorator with custom metadata.""" + @benchmark(metadata={"category": "io_operations"}) + def example_function(): + return "result" + + example_function() + + benchmark_results = BenchmarkTracker.get_results() + assert len(benchmark_results) == 1 + assert benchmark_results[0].metadata.get("category") == "io_operations" + assert benchmark_results[0].metadata.get("args_count") == 0 + + +def test_benchmark_block(clear_benchmark_results): + """Test the benchmark_block context manager.""" + start, end = benchmark_block("test_block", {"type": "code_block"}) + + start() + time.sleep(0.01) + end() + + benchmark_results = BenchmarkTracker.get_results() + assert len(benchmark_results) == 1 + assert benchmark_results[0].name == "test_block" + assert benchmark_results[0].execution_time >= 0.01 + assert benchmark_results[0].metadata.get("type") == "code_block" + + +def test_benchmark_tracker_get_total_execution_time(clear_benchmark_results): + """Test getting total execution time from the tracker.""" + BenchmarkTracker.add_result(BenchmarkResult("test1", 1.5)) + BenchmarkTracker.add_result(BenchmarkResult("test2", 2.5)) + + assert BenchmarkTracker.get_total_execution_time() == 4.0 + + +def test_benchmark_tracker_get_summary(clear_benchmark_results): + """Test getting a summary from the tracker.""" + BenchmarkTracker.add_result(BenchmarkResult( 
+ "test1", 1.5, {"category": "api_calls"})) + BenchmarkTracker.add_result(BenchmarkResult( + "test2", 2.5, {"category": "data_processing"})) + + summary = BenchmarkTracker.get_summary() + + assert "Performance Benchmark Summary:" in summary + assert "test1: 1.5000s" in summary + assert "test2: 2.5000s" in summary + assert "category: api_calls" in summary + assert "category: data_processing" in summary + assert "Total execution time: 4.0000s" in summary + + +def test_benchmark_tracker_get_summary_empty(clear_benchmark_results): + """Test getting a summary when no benchmarks are recorded.""" + assert BenchmarkTracker.get_summary() == "No benchmarks recorded." + + +def test_benchmark_tracker_clear_results(clear_benchmark_results): + """Test clearing benchmark results.""" + BenchmarkTracker.add_result(BenchmarkResult("test1", 1.5)) + assert len(BenchmarkTracker.get_results()) == 1 + + BenchmarkTracker.clear_results() + assert len(BenchmarkTracker.get_results()) == 0 + + +def test_benchmark_result_str(): + """Test string representation of benchmark result.""" + result = BenchmarkResult("test_func", 1.2345) + assert str(result) == "test_func: 1.2345s" From 9d7d5bc54e5ee9203144ca287b908c94adc19ff7 Mon Sep 17 00:00:00 2001 From: Imamuzzaki Abu Salam Date: Sat, 22 Mar 2025 06:32:59 +0700 Subject: [PATCH 03/12] feat: Implement caching, benchmarking and file fixes for v4.2.0 --- .github/workflows/ci.yml | 16 +- .github/workflows/codeql.yml | 4 +- .github/workflows/codestyle.yml | 4 +- .gitignore | 4 + Dockerfile | 4 +- Pipfile | 2 +- Pipfile.lock | 340 ++++++++++++++-------------- README.md | 115 ++++++---- sources/debug.py | 39 ++++ sources/manager_github.py | 149 ++++++++++-- sources/yearly_commit_calculator.py | 236 +++++++++++++++++-- 11 files changed, 642 insertions(+), 271 deletions(-) create mode 100644 sources/debug.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5bdb0c9..eba44d5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,12 +1,12 @@ name: CI on: -# TODO: Make separate CI for direct push to master and PRs -# push: -# branches: -# - master -# paths-ignore: -# - "**/coverage.svg" + # TODO: Make separate CI for direct push to master and PRs + # push: + # branches: + # - master + # paths-ignore: + # - "**/coverage.svg" pull_request_target: branches: @@ -31,10 +31,10 @@ jobs: - name: Checkout πŸ›ŽοΈ uses: actions/checkout@v4 - - name: Setup Python 3.11 🐍 + - name: Setup Python 3.13 🐍 uses: actions/setup-python@v5 with: - python-version: 3.11 + python-version: 3.13 cache: "pipenv" - name: Install pipenv diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index d11e527..6f9edd3 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -31,10 +31,10 @@ jobs: - name: Checkout πŸ›ŽοΈ uses: actions/checkout@v4 - - name: Setup Python 3.11 🐍 + - name: Setup Python 3.13 🐍 uses: actions/setup-python@v5 with: - python-version: 3.11 + python-version: 3.13 cache: "pipenv" - name: Install pipenv diff --git a/.github/workflows/codestyle.yml b/.github/workflows/codestyle.yml index 3696809..d694533 100644 --- a/.github/workflows/codestyle.yml +++ b/.github/workflows/codestyle.yml @@ -12,10 +12,10 @@ jobs: - name: Checkout πŸ›ŽοΈ uses: actions/checkout@v4 - - name: Setup Python 3.11 🐍 + - name: Setup Python 3.13 🐍 uses: actions/setup-python@v5 with: - python-version: 3.11 + python-version: 3.13 cache: "pipenv" - name: Install pipenv diff --git a/.gitignore b/.gitignore index 7343aa2..e3d834e 100644 --- 
a/.gitignore +++ b/.gitignore @@ -175,3 +175,7 @@ cython_debug/ *.lcov assets/ + +# Cache files +.cache/ +*_repo_cache.json diff --git a/Dockerfile b/Dockerfile index b84d09f..975eb6a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.11.3-alpine +FROM python:3.13-alpine ENV PYTHONUNBUFFERED=1 ENV PYTHONDONTWRITEBYTECODE=1 @@ -25,4 +25,4 @@ COPY sources/ ./sources/ RUN git config --global user.name "readme-bot" && \ git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com" -ENTRYPOINT ["python3", "sources/main.py"] +ENTRYPOINT ["python3", "sources/main.py"] diff --git a/Pipfile b/Pipfile index d9516a6..871b847 100644 --- a/Pipfile +++ b/Pipfile @@ -28,4 +28,4 @@ pytest-mock = "~=3.14" pre-commit = "*" [requires] -python_version = "3.11" +python_version = "3.13" diff --git a/Pipfile.lock b/Pipfile.lock index 90052fa..55a7877 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,11 +1,11 @@ { "_meta": { "hash": { - "sha256": "f9581dad7d6e50879c40294e981a829023f70673a47c8ba7db2c9baf374d6699" + "sha256": "8a04c3dbdfc0e2db90bdd160658387f396eb317fbd59c5a6133ce193617d7219" }, "pipfile-spec": 6, "requires": { - "python_version": "3.11" + "python_version": "3.13" }, "sources": [ { @@ -18,11 +18,11 @@ "default": { "anyio": { "hashes": [ - "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a", - "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a" + "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028", + "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c" ], "markers": "python_version >= '3.9'", - "version": "==4.8.0" + "version": "==4.9.0" }, "certifi": { "hashes": [ @@ -265,40 +265,44 @@ }, "cryptography": { "hashes": [ - "sha256:00918d859aa4e57db8299607086f793fa7813ae2ff5a4637e318a25ef82730f7", - "sha256:1e8d181e90a777b63f3f0caa836844a1182f1f265687fac2115fcf245f5fbec3", - "sha256:1f9a92144fa0c877117e9748c74501bea842f93d21ee00b0cf922846d9d0b183", - "sha256:21377472ca4ada2906bc313168c9dc7b1d7ca417b63c1c3011d0c74b7de9ae69", - "sha256:24979e9f2040c953a94bf3c6782e67795a4c260734e5264dceea65c8f4bae64a", - "sha256:2a46a89ad3e6176223b632056f321bc7de36b9f9b93b2cc1cccf935a3849dc62", - "sha256:322eb03ecc62784536bc173f1483e76747aafeb69c8728df48537eb431cd1911", - "sha256:436df4f203482f41aad60ed1813811ac4ab102765ecae7a2bbb1dbb66dcff5a7", - "sha256:4f422e8c6a28cf8b7f883eb790695d6d45b0c385a2583073f3cec434cc705e1a", - "sha256:53f23339864b617a3dfc2b0ac8d5c432625c80014c25caac9082314e9de56f41", - "sha256:5fed5cd6102bb4eb843e3315d2bf25fede494509bddadb81e03a859c1bc17b83", - "sha256:610a83540765a8d8ce0f351ce42e26e53e1f774a6efb71eb1b41eb01d01c3d12", - "sha256:6c8acf6f3d1f47acb2248ec3ea261171a671f3d9428e34ad0357148d492c7864", - "sha256:6f76fdd6fd048576a04c5210d53aa04ca34d2ed63336d4abd306d0cbe298fddf", - "sha256:72198e2b5925155497a5a3e8c216c7fb3e64c16ccee11f0e7da272fa93b35c4c", - "sha256:887143b9ff6bad2b7570da75a7fe8bbf5f65276365ac259a5d2d5147a73775f2", - "sha256:888fcc3fce0c888785a4876ca55f9f43787f4c5c1cc1e2e0da71ad481ff82c5b", - "sha256:8e6a85a93d0642bd774460a86513c5d9d80b5c002ca9693e63f6e540f1815ed0", - "sha256:94f99f2b943b354a5b6307d7e8d19f5c423a794462bde2bf310c770ba052b1c4", - "sha256:9b336599e2cb77b1008cb2ac264b290803ec5e8e89d618a5e978ff5eb6f715d9", - "sha256:a2d8a7045e1ab9b9f803f0d9531ead85f90c5f2859e653b61497228b18452008", - "sha256:b8272f257cf1cbd3f2e120f14c68bff2b6bdfcc157fafdee84a1b795efd72862", - "sha256:bf688f615c29bfe9dfc44312ca470989279f0e94bb9f631f85e3459af8efc009", - 
"sha256:d9c5b9f698a83c8bd71e0f4d3f9f839ef244798e5ffe96febfa9714717db7af7", - "sha256:dd7c7e2d71d908dc0f8d2027e1604102140d84b155e658c20e8ad1304317691f", - "sha256:df978682c1504fc93b3209de21aeabf2375cb1571d4e61907b3e7a2540e83026", - "sha256:e403f7f766ded778ecdb790da786b418a9f2394f36e8cc8b796cc056ab05f44f", - "sha256:eb3889330f2a4a148abead555399ec9a32b13b7c8ba969b72d8e500eb7ef84cd", - "sha256:f4daefc971c2d1f82f03097dc6f216744a6cd2ac0f04c68fb935ea2ba2a0d420", - "sha256:f51f5705ab27898afda1aaa430f34ad90dc117421057782022edf0600bec5f14", - "sha256:fd0ee90072861e276b0ff08bd627abec29e32a53b2be44e41dbcdf87cbee2b00" + "sha256:04abd71114848aa25edb28e225ab5f268096f44cf0127f3d36975bdf1bdf3390", + "sha256:0529b1d5a0105dd3731fa65680b45ce49da4d8115ea76e9da77a875396727b41", + "sha256:1bc312dfb7a6e5d66082c87c34c8a62176e684b6fe3d90fcfe1568de675e6688", + "sha256:268e4e9b177c76d569e8a145a6939eca9a5fec658c932348598818acf31ae9a5", + "sha256:29ecec49f3ba3f3849362854b7253a9f59799e3763b0c9d0826259a88efa02f1", + "sha256:2bf7bf75f7df9715f810d1b038870309342bff3069c5bd8c6b96128cb158668d", + "sha256:3b721b8b4d948b218c88cb8c45a01793483821e709afe5f622861fc6182b20a7", + "sha256:3c00b6b757b32ce0f62c574b78b939afab9eecaf597c4d624caca4f9e71e7843", + "sha256:3dc62975e31617badc19a906481deacdeb80b4bb454394b4098e3f2525a488c5", + "sha256:4973da6ca3db4405c54cd0b26d328be54c7747e89e284fcff166132eb7bccc9c", + "sha256:4e389622b6927d8133f314949a9812972711a111d577a5d1f4bee5e58736b80a", + "sha256:51e4de3af4ec3899d6d178a8c005226491c27c4ba84101bfb59c901e10ca9f79", + "sha256:5f6f90b72d8ccadb9c6e311c775c8305381db88374c65fa1a68250aa8a9cb3a6", + "sha256:6210c05941994290f3f7f175a4a57dbbb2afd9273657614c506d5976db061181", + "sha256:6f101b1f780f7fc613d040ca4bdf835c6ef3b00e9bd7125a4255ec574c7916e4", + "sha256:7bdcd82189759aba3816d1f729ce42ffded1ac304c151d0a8e89b9996ab863d5", + "sha256:7ca25849404be2f8e4b3c59483d9d3c51298a22c1c61a0e84415104dacaf5562", + "sha256:81276f0ea79a208d961c433a947029e1a15948966658cf6710bbabb60fcc2639", + "sha256:8cadc6e3b5a1f144a039ea08a0bdb03a2a92e19c46be3285123d32029f40a922", + "sha256:8e0ddd63e6bf1161800592c71ac794d3fb8001f2caebe0966e77c5234fa9efc3", + "sha256:909c97ab43a9c0c0b0ada7a1281430e4e5ec0458e6d9244c0e821bbf152f061d", + "sha256:96e7a5e9d6e71f9f4fca8eebfd603f8e86c5225bb18eb621b2c1e50b290a9471", + "sha256:9a1e657c0f4ea2a23304ee3f964db058c9e9e635cc7019c4aa21c330755ef6fd", + "sha256:9eb9d22b0a5d8fd9925a7764a054dca914000607dff201a24c791ff5c799e1fa", + "sha256:af4ff3e388f2fa7bff9f7f2b31b87d5651c45731d3e8cfa0944be43dff5cfbdb", + "sha256:b042d2a275c8cee83a4b7ae30c45a15e6a4baa65a179a0ec2d78ebb90e4f6699", + "sha256:bc821e161ae88bfe8088d11bb39caf2916562e0a2dc7b6d56714a48b784ef0bb", + "sha256:c505d61b6176aaf982c5717ce04e87da5abc9a36a5b39ac03905c4aafe8de7aa", + "sha256:c63454aa261a0cf0c5b4718349629793e9e634993538db841165b3df74f37ec0", + "sha256:c7362add18b416b69d58c910caa217f980c5ef39b23a38a0880dfd87bdf8cd23", + "sha256:d03806036b4f89e3b13b6218fefea8d5312e450935b1a2d55f0524e2ed7c59d9", + "sha256:d1b3031093a366ac767b3feb8bcddb596671b3aaff82d4050f984da0c248b615", + "sha256:d1c3572526997b36f245a96a2b1713bf79ce99b271bbcf084beb6b9b075f29ea", + "sha256:efcfe97d1b3c79e486554efddeb8f6f53a4cdd4cf6086642784fa31fc384e1d7", + "sha256:f514ef4cd14bb6fb484b4a60203e912cfcb64f2ab139e88c2274511514bf7308" ], "markers": "python_version >= '3.7' and python_full_version not in '3.9.0, 3.9.1'", - "version": "==44.0.1" + "version": "==44.0.2" }, "cycler": { "hashes": [ @@ -416,12 +420,12 @@ }, "humanize": { "hashes": [ - 
"sha256:106a7436a2d545d742c147c469716b3a08424aa143a82103630147c489a89f48", - "sha256:87ff7b43591370b12a1d103c9405849d911d4b039ed22d80b718b62c76eec8a3" + "sha256:1338ba97415c96556758a6e2f65977ed406dddf4620d4c6db9bbdfd07f0f1232", + "sha256:86014ca5c52675dffa1d404491952f1f5bf03b07c175a51891a343daebf01fea" ], "index": "pypi", "markers": "python_version >= '3.9'", - "version": "==4.12.0" + "version": "==4.12.1" }, "idna": { "hashes": [ @@ -519,44 +523,44 @@ }, "matplotlib": { "hashes": [ - "sha256:01d2b19f13aeec2e759414d3bfe19ddfb16b13a1250add08d46d5ff6f9be83c6", - "sha256:12eaf48463b472c3c0f8dbacdbf906e573013df81a0ab82f0616ea4b11281908", - "sha256:2c5829a5a1dd5a71f0e31e6e8bb449bc0ee9dbfb05ad28fc0c6b55101b3a4be6", - "sha256:2fbbabc82fde51391c4da5006f965e36d86d95f6ee83fb594b279564a4c5d0d2", - "sha256:3547d153d70233a8496859097ef0312212e2689cdf8d7ed764441c77604095ae", - "sha256:359f87baedb1f836ce307f0e850d12bb5f1936f70d035561f90d41d305fdacea", - "sha256:3b427392354d10975c1d0f4ee18aa5844640b512d5311ef32efd4dd7db106ede", - "sha256:4659665bc7c9b58f8c00317c3c2a299f7f258eeae5a5d56b4c64226fca2f7c59", - "sha256:4673ff67a36152c48ddeaf1135e74ce0d4bce1bbf836ae40ed39c29edf7e2765", - "sha256:503feb23bd8c8acc75541548a1d709c059b7184cde26314896e10a9f14df5f12", - "sha256:5439f4c5a3e2e8eab18e2f8c3ef929772fd5641876db71f08127eed95ab64683", - "sha256:5cdbaf909887373c3e094b0318d7ff230b2ad9dcb64da7ade654182872ab2593", - "sha256:5e6c6461e1fc63df30bf6f80f0b93f5b6784299f721bc28530477acd51bfc3d1", - "sha256:5fd41b0ec7ee45cd960a8e71aea7c946a28a0b8a4dcee47d2856b2af051f334c", - "sha256:607b16c8a73943df110f99ee2e940b8a1cbf9714b65307c040d422558397dac5", - "sha256:7e8632baebb058555ac0cde75db885c61f1212e47723d63921879806b40bec6a", - "sha256:81713dd0d103b379de4516b861d964b1d789a144103277769238c732229d7f03", - "sha256:845d96568ec873be63f25fa80e9e7fae4be854a66a7e2f0c8ccc99e94a8bd4ef", - "sha256:95b710fea129c76d30be72c3b38f330269363fbc6e570a5dd43580487380b5ff", - "sha256:96f2886f5c1e466f21cc41b70c5a0cd47bfa0015eb2d5793c88ebce658600e25", - "sha256:994c07b9d9fe8d25951e3202a68c17900679274dadfc1248738dcfa1bd40d7f3", - "sha256:9ade1003376731a971e398cc4ef38bb83ee8caf0aee46ac6daa4b0506db1fd06", - "sha256:9b0558bae37f154fffda54d779a592bc97ca8b4701f1c710055b609a3bac44c8", - "sha256:a2a43cbefe22d653ab34bb55d42384ed30f611bcbdea1f8d7f431011a2e1c62e", - "sha256:a994f29e968ca002b50982b27168addfd65f0105610b6be7fa515ca4b5307c95", - "sha256:ad2e15300530c1a94c63cfa546e3b7864bd18ea2901317bae8bbf06a5ade6dcf", - "sha256:ae80dc3a4add4665cf2faa90138384a7ffe2a4e37c58d83e115b54287c4f06ef", - "sha256:b886d02a581b96704c9d1ffe55709e49b4d2d52709ccebc4be42db856e511278", - "sha256:c40ba2eb08b3f5de88152c2333c58cee7edcead0a2a0d60fcafa116b17117adc", - "sha256:c55b20591ced744aa04e8c3e4b7543ea4d650b6c3c4b208c08a05b4010e8b442", - "sha256:c58a9622d5dbeb668f407f35f4e6bfac34bb9ecdcc81680c04d0258169747997", - "sha256:d44cb942af1693cced2604c33a9abcef6205601c445f6d0dc531d813af8a2f5a", - "sha256:d907fddb39f923d011875452ff1eca29a9e7f21722b873e90db32e5d8ddff12e", - "sha256:fd44fc75522f58612ec4a33958a7e5552562b7705b42ef1b4f8c0818e304a363" + "sha256:01e63101ebb3014e6e9f80d9cf9ee361a8599ddca2c3e166c563628b39305dbb", + "sha256:02582304e352f40520727984a5a18f37e8187861f954fea9be7ef06569cf85b4", + "sha256:057206ff2d6ab82ff3e94ebd94463d084760ca682ed5f150817b859372ec4401", + "sha256:0721a3fd3d5756ed593220a8b86808a36c5031fce489adb5b31ee6dbb47dd5b2", + "sha256:0f69dc9713e4ad2fb21a1c30e37bd445d496524257dfda40ff4a8efb3604ab5c", + 
"sha256:11b65088c6f3dae784bc72e8d039a2580186285f87448babb9ddb2ad0082993a", + "sha256:1985ad3d97f51307a2cbfc801a930f120def19ba22864182dacef55277102ba6", + "sha256:19b06241ad89c3ae9469e07d77efa87041eac65d78df4fcf9cac318028009b01", + "sha256:2589659ea30726284c6c91037216f64a506a9822f8e50592d48ac16a2f29e044", + "sha256:35e87384ee9e488d8dd5a2dd7baf471178d38b90618d8ea147aced4ab59c9bea", + "sha256:3f06bad951eea6422ac4e8bdebcf3a70c59ea0a03338c5d2b109f57b64eb3972", + "sha256:4c59af3e8aca75d7744b68e8e78a669e91ccbcf1ac35d0102a7b1b46883f1dd7", + "sha256:4f0647b17b667ae745c13721602b540f7aadb2a32c5b96e924cd4fea5dcb90f1", + "sha256:56c5d9fcd9879aa8040f196a235e2dcbdf7dd03ab5b07c0696f80bc6cf04bedd", + "sha256:5d45d3f5245be5b469843450617dcad9af75ca50568acf59997bed9311131a0b", + "sha256:648406f1899f9a818cef8c0231b44dcfc4ff36f167101c3fd1c9151f24220fdc", + "sha256:66e907a06e68cb6cfd652c193311d61a12b54f56809cafbed9736ce5ad92f107", + "sha256:7e496c01441be4c7d5f96d4e40f7fca06e20dcb40e44c8daa2e740e1757ad9e6", + "sha256:8e875b95ac59a7908978fe307ecdbdd9a26af7fa0f33f474a27fcf8c99f64a19", + "sha256:8e8e25b1209161d20dfe93037c8a7f7ca796ec9aa326e6e4588d8c4a5dd1e473", + "sha256:a144867dd6bf8ba8cb5fc81a158b645037e11b3e5cf8a50bd5f9917cb863adfe", + "sha256:a3dfb036f34873b46978f55e240cff7a239f6c4409eac62d8145bad3fc6ba5a3", + "sha256:a97ff127f295817bc34517255c9db6e71de8eddaab7f837b7d341dee9f2f587f", + "sha256:aa3854b5f9473564ef40a41bc922be978fab217776e9ae1545c9b3a5cf2092a3", + "sha256:bc411ebd5889a78dabbc457b3fa153203e22248bfa6eedc6797be5df0164dbf9", + "sha256:c42eee41e1b60fd83ee3292ed83a97a5f2a8239b10c26715d8a6172226988d7b", + "sha256:c96f2c2f825d1257e437a1482c5a2cf4fee15db4261bd6fc0750f81ba2b4ba3d", + "sha256:cfd414bce89cc78a7e1d25202e979b3f1af799e416010a20ab2b5ebb3a02425c", + "sha256:d0673b4b8f131890eb3a1ad058d6e065fb3c6e71f160089b65f8515373394698", + "sha256:d3809916157ba871bcdd33d3493acd7fe3037db5daa917ca6e77975a94cef779", + "sha256:dc6ab14a7ab3b4d813b88ba957fc05c79493a037f54e246162033591e770de6f", + "sha256:e8d2d0e3881b129268585bf4765ad3ee73a4591d77b9a18c214ac7e3a79fb2ba", + "sha256:e9b4bb156abb8fa5e5b2b460196f7db7264fc6d62678c03457979e7d5254b7be", + "sha256:ff2ae14910be903f4a24afdbb6d7d3a6c44da210fc7d42790b87aeac92238a16" ], "index": "pypi", "markers": "python_version >= '3.10'", - "version": "==3.10.0" + "version": "==3.10.1" }, "numpy": { "hashes": [ @@ -743,7 +747,7 @@ "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'", "version": "==2.9.0.post0" }, "pytz": { @@ -827,7 +831,7 @@ "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'", "version": "==1.17.0" }, "smmap": { @@ -846,14 +850,6 @@ "markers": "python_version >= '3.7'", "version": "==1.3.1" }, - "typing-extensions": { - "hashes": [ - "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", - "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8" - ], - "markers": "python_version < '3.13'", - "version": "==4.12.2" - }, "urllib3": { "hashes": [ 
"sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", @@ -999,72 +995,72 @@ "toml" ], "hashes": [ - "sha256:00b2086892cf06c7c2d74983c9595dc511acca00665480b3ddff749ec4fb2a95", - "sha256:0533adc29adf6a69c1baa88c3d7dbcaadcffa21afbed3ca7a225a440e4744bf9", - "sha256:06097c7abfa611c91edb9e6920264e5be1d6ceb374efb4986f38b09eed4cb2fe", - "sha256:07e92ae5a289a4bc4c0aae710c0948d3c7892e20fd3588224ebe242039573bf0", - "sha256:0a9d8be07fb0832636a0f72b80d2a652fe665e80e720301fb22b191c3434d924", - "sha256:0e549f54ac5f301e8e04c569dfdb907f7be71b06b88b5063ce9d6953d2d58574", - "sha256:0ef01d70198431719af0b1f5dcbefc557d44a190e749004042927b2a3fed0702", - "sha256:0f16f44025c06792e0fb09571ae454bcc7a3ec75eeb3c36b025eccf501b1a4c3", - "sha256:14d47376a4f445e9743f6c83291e60adb1b127607a3618e3185bbc8091f0467b", - "sha256:1a936309a65cc5ca80fa9f20a442ff9e2d06927ec9a4f54bcba9c14c066323f2", - "sha256:1ceeb90c3eda1f2d8c4c578c14167dbd8c674ecd7d38e45647543f19839dd6ea", - "sha256:1f7ffa05da41754e20512202c866d0ebfc440bba3b0ed15133070e20bf5aeb5f", - "sha256:200e10beb6ddd7c3ded322a4186313d5ca9e63e33d8fab4faa67ef46d3460af3", - "sha256:220fa6c0ad7d9caef57f2c8771918324563ef0d8272c94974717c3909664e674", - "sha256:2251fabcfee0a55a8578a9d29cecfee5f2de02f11530e7d5c5a05859aa85aee9", - "sha256:2458f275944db8129f95d91aee32c828a408481ecde3b30af31d552c2ce284a0", - "sha256:299cf973a7abff87a30609879c10df0b3bfc33d021e1adabc29138a48888841e", - "sha256:2b996819ced9f7dbb812c701485d58f261bef08f9b85304d41219b1496b591ef", - "sha256:3688b99604a24492bcfe1c106278c45586eb819bf66a654d8a9a1433022fb2eb", - "sha256:3a1e465f398c713f1b212400b4e79a09829cd42aebd360362cd89c5bdc44eb87", - "sha256:488c27b3db0ebee97a830e6b5a3ea930c4a6e2c07f27a5e67e1b3532e76b9ef1", - "sha256:48cfc4641d95d34766ad41d9573cc0f22a48aa88d22657a1fe01dca0dbae4de2", - "sha256:4b467a8c56974bf06e543e69ad803c6865249d7a5ccf6980457ed2bc50312703", - "sha256:53c56358d470fa507a2b6e67a68fd002364d23c83741dbc4c2e0680d80ca227e", - "sha256:5d1095bbee1851269f79fd8e0c9b5544e4c00c0c24965e66d8cba2eb5bb535fd", - "sha256:641dfe0ab73deb7069fb972d4d9725bf11c239c309ce694dd50b1473c0f641c3", - "sha256:64cbb1a3027c79ca6310bf101014614f6e6e18c226474606cf725238cf5bc2d4", - "sha256:66fe626fd7aa5982cdebad23e49e78ef7dbb3e3c2a5960a2b53632f1f703ea45", - "sha256:676f92141e3c5492d2a1596d52287d0d963df21bf5e55c8b03075a60e1ddf8aa", - "sha256:69e62c5034291c845fc4df7f8155e8544178b6c774f97a99e2734b05eb5bed31", - "sha256:704c8c8c6ce6569286ae9622e534b4f5b9759b6f2cd643f1c1a61f666d534fe8", - "sha256:78f5243bb6b1060aed6213d5107744c19f9571ec76d54c99cc15938eb69e0e86", - "sha256:79cac3390bfa9836bb795be377395f28410811c9066bc4eefd8015258a7578c6", - "sha256:7ae6eabf519bc7871ce117fb18bf14e0e343eeb96c377667e3e5dd12095e0288", - "sha256:7e39e845c4d764208e7b8f6a21c541ade741e2c41afabdfa1caa28687a3c98cf", - "sha256:8161d9fbc7e9fe2326de89cd0abb9f3599bccc1287db0aba285cb68d204ce929", - "sha256:8bec2ac5da793c2685ce5319ca9bcf4eee683b8a1679051f8e6ec04c4f2fd7dc", - "sha256:959244a17184515f8c52dcb65fb662808767c0bd233c1d8a166e7cf74c9ea985", - "sha256:9b148068e881faa26d878ff63e79650e208e95cf1c22bd3f77c3ca7b1d9821a3", - "sha256:aa6f302a3a0b5f240ee201297fff0bbfe2fa0d415a94aeb257d8b461032389bd", - "sha256:ace9048de91293e467b44bce0f0381345078389814ff6e18dbac8fdbf896360e", - "sha256:ad7525bf0241e5502168ae9c643a2f6c219fa0a283001cee4cf23a9b7da75879", - "sha256:b01a840ecc25dce235ae4c1b6a0daefb2a203dba0e6e980637ee9c2f6ee0df57", - "sha256:b076e625396e787448d27a411aefff867db2bffac8ed04e8f7056b07024eed5a", - 
"sha256:b172f8e030e8ef247b3104902cc671e20df80163b60a203653150d2fc204d1ad", - "sha256:b1f097878d74fe51e1ddd1be62d8e3682748875b461232cf4b52ddc6e6db0bba", - "sha256:b95574d06aa9d2bd6e5cc35a5bbe35696342c96760b69dc4287dbd5abd4ad51d", - "sha256:bda1c5f347550c359f841d6614fb8ca42ae5cb0b74d39f8a1e204815ebe25750", - "sha256:cec6b9ce3bd2b7853d4a4563801292bfee40b030c05a3d29555fd2a8ee9bd68c", - "sha256:d1a987778b9c71da2fc8948e6f2656da6ef68f59298b7e9786849634c35d2c3c", - "sha256:d74c08e9aaef995f8c4ef6d202dbd219c318450fe2a76da624f2ebb9c8ec5d9f", - "sha256:e18aafdfb3e9ec0d261c942d35bd7c28d031c5855dadb491d2723ba54f4c3015", - "sha256:e216c5c45f89ef8971373fd1c5d8d1164b81f7f5f06bbf23c37e7908d19e8558", - "sha256:e695df2c58ce526eeab11a2e915448d3eb76f75dffe338ea613c1201b33bab2f", - "sha256:e7575ab65ca8399c8c4f9a7d61bbd2d204c8b8e447aab9d355682205c9dd948d", - "sha256:e995b3b76ccedc27fe4f477b349b7d64597e53a43fc2961db9d3fbace085d69d", - "sha256:ea31689f05043d520113e0552f039603c4dd71fa4c287b64cb3606140c66f425", - "sha256:eb5507795caabd9b2ae3f1adc95f67b1104971c22c624bb354232d65c4fc90b3", - "sha256:eb8668cfbc279a536c633137deeb9435d2962caec279c3f8cf8b91fff6ff8953", - "sha256:ecea0c38c9079570163d663c0433a9af4094a60aafdca491c6a3d248c7432827", - "sha256:f25d8b92a4e31ff1bd873654ec367ae811b3a943583e05432ea29264782dc32c", - "sha256:f60a297c3987c6c02ffb29effc70eadcbb412fe76947d394a1091a3615948e2f", - "sha256:f973643ef532d4f9be71dd88cf7588936685fdb576d93a79fe9f65bc337d9d73" + "sha256:02fad4f8faa4153db76f9246bc95c1d99f054f4e0a884175bff9155cf4f856cb", + "sha256:092b134129a8bb940c08b2d9ceb4459af5fb3faea77888af63182e17d89e1cf1", + "sha256:0ce92c5a9d7007d838456f4b77ea159cb628187a137e1895331e530973dcf862", + "sha256:0dab4ef76d7b14f432057fdb7a0477e8bffca0ad39ace308be6e74864e632271", + "sha256:1165490be0069e34e4f99d08e9c5209c463de11b471709dfae31e2a98cbd49fd", + "sha256:11dd6f52c2a7ce8bf0a5f3b6e4a8eb60e157ffedc3c4b4314a41c1dfbd26ce58", + "sha256:15d54ecef1582b1d3ec6049b20d3c1a07d5e7f85335d8a3b617c9960b4f807e0", + "sha256:171e9977c6a5d2b2be9efc7df1126fd525ce7cad0eb9904fe692da007ba90d81", + "sha256:177d837339883c541f8524683e227adcaea581eca6bb33823a2a1fdae4c988e1", + "sha256:18f544356bceef17cc55fcf859e5664f06946c1b68efcea6acdc50f8f6a6e776", + "sha256:199a1272e642266b90c9f40dec7fd3d307b51bf639fa0d15980dc0b3246c1393", + "sha256:1e6f867379fd033a0eeabb1be0cffa2bd660582b8b0c9478895c509d875a9d9e", + "sha256:2444fbe1ba1889e0b29eb4d11931afa88f92dc507b7248f45be372775b3cef4f", + "sha256:25fe40967717bad0ce628a0223f08a10d54c9d739e88c9cbb0f77b5959367542", + "sha256:264ff2bcce27a7f455b64ac0dfe097680b65d9a1a293ef902675fa8158d20b24", + "sha256:2a79c4a09765d18311c35975ad2eb1ac613c0401afdd9cb1ca4110aeb5dd3c4c", + "sha256:2c492401bdb3a85824669d6a03f57b3dfadef0941b8541f035f83bbfc39d4282", + "sha256:315ff74b585110ac3b7ab631e89e769d294f303c6d21302a816b3554ed4c81af", + "sha256:34a3bf6b92e6621fc4dcdaab353e173ccb0ca9e4bfbcf7e49a0134c86c9cd303", + "sha256:37351dc8123c154fa05b7579fdb126b9f8b1cf42fd6f79ddf19121b7bdd4aa04", + "sha256:385618003e3d608001676bb35dc67ae3ad44c75c0395d8de5780af7bb35be6b2", + "sha256:392cc8fd2b1b010ca36840735e2a526fcbd76795a5d44006065e79868cc76ccf", + "sha256:3d03287eb03186256999539d98818c425c33546ab4901028c8fa933b62c35c3a", + "sha256:44683f2556a56c9a6e673b583763096b8efbd2df022b02995609cf8e64fc8ae0", + "sha256:44af11c00fd3b19b8809487630f8a0039130d32363239dfd15238e6d37e41a48", + "sha256:452735fafe8ff5918236d5fe1feac322b359e57692269c75151f9b4ee4b7e1bc", + "sha256:4c181ceba2e6808ede1e964f7bdc77bd8c7eb62f202c63a48cc541e5ffffccb6", + 
"sha256:4dd532dac197d68c478480edde74fd4476c6823355987fd31d01ad9aa1e5fb59", + "sha256:520af84febb6bb54453e7fbb730afa58c7178fd018c398a8fcd8e269a79bf96d", + "sha256:553ba93f8e3c70e1b0031e4dfea36aba4e2b51fe5770db35e99af8dc5c5a9dfe", + "sha256:5b7b02e50d54be6114cc4f6a3222fec83164f7c42772ba03b520138859b5fde1", + "sha256:63306486fcb5a827449464f6211d2991f01dfa2965976018c9bab9d5e45a35c8", + "sha256:75c82b27c56478d5e1391f2e7b2e7f588d093157fa40d53fd9453a471b1191f2", + "sha256:7ba5ff236c87a7b7aa1441a216caf44baee14cbfbd2256d306f926d16b026578", + "sha256:7e688010581dbac9cab72800e9076e16f7cccd0d89af5785b70daa11174e94de", + "sha256:80b5b207a8b08c6a934b214e364cab2fa82663d4af18981a6c0a9e95f8df7602", + "sha256:822fa99dd1ac686061e1219b67868e25d9757989cf2259f735a4802497d6da31", + "sha256:881cae0f9cbd928c9c001487bb3dcbfd0b0af3ef53ae92180878591053be0cb3", + "sha256:88d96127ae01ff571d465d4b0be25c123789cef88ba0879194d673fdea52f54e", + "sha256:8b1c65a739447c5ddce5b96c0a388fd82e4bbdff7251396a70182b1d83631019", + "sha256:8fed429c26b99641dc1f3a79179860122b22745dd9af36f29b141e178925070a", + "sha256:9bb47cc9f07a59a451361a850cb06d20633e77a9118d05fd0f77b1864439461b", + "sha256:a6b6b3bd121ee2ec4bd35039319f3423d0be282b9752a5ae9f18724bc93ebe7c", + "sha256:ae13ed5bf5542d7d4a0a42ff5160e07e84adc44eda65ddaa635c484ff8e55917", + "sha256:af94fb80e4f159f4d93fb411800448ad87b6039b0500849a403b73a0d36bb5ae", + "sha256:b4c144c129343416a49378e05c9451c34aae5ccf00221e4fa4f487db0816ee2f", + "sha256:b52edb940d087e2a96e73c1523284a2e94a4e66fa2ea1e2e64dddc67173bad94", + "sha256:b559adc22486937786731dac69e57296cb9aede7e2687dfc0d2696dbd3b1eb6b", + "sha256:b838a91e84e1773c3436f6cc6996e000ed3ca5721799e7789be18830fad009a2", + "sha256:ba8480ebe401c2f094d10a8c4209b800a9b77215b6c796d16b6ecdf665048950", + "sha256:bc96441c9d9ca12a790b5ae17d2fa6654da4b3962ea15e0eabb1b1caed094777", + "sha256:c90e9141e9221dd6fbc16a2727a5703c19443a8d9bf7d634c792fa0287cee1ab", + "sha256:d2e73e2ac468536197e6b3ab79bc4a5c9da0f078cd78cfcc7fe27cf5d1195ef0", + "sha256:d3154b369141c3169b8133973ac00f63fcf8d6dbcc297d788d36afbb7811e511", + "sha256:d66ff48ab3bb6f762a153e29c0fc1eb5a62a260217bc64470d7ba602f5886d20", + "sha256:d6874929d624d3a670f676efafbbc747f519a6121b581dd41d012109e70a5ebd", + "sha256:e33426a5e1dc7743dd54dfd11d3a6c02c5d127abfaa2edd80a6e352b58347d1a", + "sha256:e52eb31ae3afacdacfe50705a15b75ded67935770c460d88c215a9c0c40d0e9c", + "sha256:eae79f8e3501133aa0e220bbc29573910d096795882a70e6f6e6637b09522133", + "sha256:eebd927b86761a7068a06d3699fd6c20129becf15bb44282db085921ea0f1585", + "sha256:eff187177d8016ff6addf789dcc421c3db0d014e4946c1cc3fbf697f7852459d", + "sha256:f5f99a93cecf799738e211f9746dc83749b5693538fbfac279a61682ba309387", + "sha256:fbba59022e7c20124d2f520842b75904c7b9f16c854233fa46575c69949fb5b9" ], "markers": "python_version >= '3.9'", - "version": "==7.6.12" + "version": "==7.7.1" }, "distlib": { "hashes": [ @@ -1075,11 +1071,11 @@ }, "filelock": { "hashes": [ - "sha256:533dc2f7ba78dc2f0f531fc6c4940addf7b70a481e269a5a3b93be94ffbe8338", - "sha256:ee4e77401ef576ebb38cd7f13b9b28893194acc20a8e68e18730ba9c0e54660e" + "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2", + "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de" ], "markers": "python_version >= '3.9'", - "version": "==3.17.0" + "version": "==3.18.0" }, "flake8": { "hashes": [ @@ -1092,19 +1088,19 @@ }, "identify": { "hashes": [ - "sha256:155931cb617a401807b09ecec6635d6c692d180090a1cedca8ef7d58ba5b6aa0", - 
"sha256:3fa266b42eba321ee0b2bb0936a6a6b9e36a1351cbb69055b3082f4193035684" + "sha256:c98b4322da415a8e5a70ff6e51fbc2d2932c015532d77e9f8537b4ba7813b150", + "sha256:d40dfe3142a1421d8518e3d3985ef5ac42890683e32306ad614a29490abeb6bf" ], "markers": "python_version >= '3.9'", - "version": "==2.6.7" + "version": "==2.6.9" }, "iniconfig": { "hashes": [ - "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", - "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374" + "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", + "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760" ], - "markers": "python_version >= '3.7'", - "version": "==2.0.0" + "markers": "python_version >= '3.8'", + "version": "==2.1.0" }, "mccabe": { "hashes": [ @@ -1148,11 +1144,11 @@ }, "platformdirs": { "hashes": [ - "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907", - "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb" + "sha256:a03875334331946f13c549dbd8f4bac7a13a50a895a0eb1e8c6a8ace80d40a94", + "sha256:eb437d586b6a0986388f0d6f74aa0cde27b48d0e3d66843640bfb6bdcdb6e351" ], - "markers": "python_version >= '3.8'", - "version": "==4.3.6" + "markers": "python_version >= '3.9'", + "version": "==4.3.7" }, "pluggy": { "hashes": [ @@ -1164,12 +1160,12 @@ }, "pre-commit": { "hashes": [ - "sha256:ae3f018575a588e30dfddfab9a05448bfbd6b73d78709617b5a2b853549716d4", - "sha256:d29e7cb346295bcc1cc75fc3e92e343495e3ea0196c9ec6ba53f49f10ab6ae7b" + "sha256:601283b9757afd87d40c4c4a9b2b5de9637a8ea02eaff7adc2d0fb4e04841146", + "sha256:a009ca7205f1eb497d10b845e52c838a98b6cdd2102a6c8e4540e94ee75c58bd" ], "index": "pypi", "markers": "python_version >= '3.9'", - "version": "==4.1.0" + "version": "==4.2.0" }, "pycodestyle": { "hashes": [ @@ -1189,12 +1185,12 @@ }, "pytest": { "hashes": [ - "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6", - "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761" + "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820", + "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845" ], "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==8.3.4" + "version": "==8.3.5" }, "pytest-asyncio": { "hashes": [ @@ -1285,11 +1281,11 @@ }, "virtualenv": { "hashes": [ - "sha256:fdaabebf6d03b5ba83ae0a02cfe96f48a716f4fae556461d180825866f75b728", - "sha256:febddfc3d1ea571bdb1dc0f98d7b45d24def7428214d4fb73cc486c9568cce6a" + "sha256:3e3d00f5807e83b234dfb6122bf37cfadf4be216c53a49ac059d02414f819170", + "sha256:95e39403fcf3940ac45bc717597dba16110b74506131845d9b687d5e73d947ac" ], "markers": "python_version >= '3.8'", - "version": "==20.29.2" + "version": "==20.29.3" } } } diff --git a/README.md b/README.md index 56a7613..788e95d 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -> Looking For Translation to different languages & Locale [#23](https://github.com/ImBIOS/waka-readme-stats/issues/23) +> Looking For Translation to different languages & Locale [#23](https://github.com/ImBIOS/waka-readme-stats/issues/23) # Dev Metrics in Readme with added feature flags 🎌 @@ -6,12 +6,12 @@

- ![Project Preview](https://user-images.githubusercontent.com/15426564/88030180-8e1c4780-cb58-11ea-8a8b-b3576dd73652.png) +![Project Preview](https://user-images.githubusercontent.com/15426564/88030180-8e1c4780-cb58-11ea-8a8b-b3576dd73652.png)

📌✨Awesome Readme Stats

----- +---

@@ -43,14 +43,14 @@ 1. You need to update the markdown file(.md) with 2 comments. You can refer [here](#update-your-readme) for updating it. 2. You'll need a WakaTime API Key. You can get that from your WakaTime Account Settings - - You can refer [here](#new-to-wakatime), if you're new to WakaTime + - You can refer [here](#new-to-wakatime), if you're new to WakaTime 3. You'll need a GitHub API Token with `repo` and `user` scope from [here](https://github.com/settings/tokens) if you're running the action to get commit metrics > enabling the `repo` scope seems **DANGEROUS**
> but this GitHub Action only accesses your commit timestamp and lines of code added or deleted in repository you contributed. - You can use [this](#profile-repository) example to work it out 4. You need to save the WakaTime API Key and the GitHub API Token in the repository secrets. You can find that in the Settings of your repository. Be sure to save those as the following. - - WakaTime API Key as `WAKATIME_API_KEY=` - - GitHub Personal Access Token as `GH_TOKEN=` + - WakaTime API Key as `WAKATIME_API_KEY=` + - GitHub Personal Access Token as `GH_TOKEN=` 5. You can enable and disable feature flags based on requirements. This Action will run everyday at 00.00 IST @@ -89,7 +89,7 @@ name: Waka Readme on: schedule: # Runs at 12am IST - - cron: '30 18 * * *' + - cron: "30 18 * * *" workflow_dispatch: jobs: update-readme: @@ -104,54 +104,83 @@ jobs: - Now you can commit and wait for run automatically, but you can also trigger to run it to see the result now. Just go to the `Actions` in your repo and select your `Profile Readme Development Stats` workflow and click in `Run workflow`. Now wait for a minute or two and you will see your changes. +## Performance Optimizations + +### Caching + +waka-readme-stats includes a caching mechanism that dramatically improves performance for users with many repositories. The caching system: + +- Stores repository data to avoid redundant API calls +- Only updates data for repositories that have changed +- Significantly reduces processing time and API rate limit usage + +The cache is automatically maintained and no additional configuration is required. + +### Benchmarking + +For developers who want to measure performance, a benchmarking utility is included: + +```bash +# Basic benchmarking with your GitHub username +python benchmark.py --username + +# Run without caching to compare performance +python benchmark.py --username --no-cache + +# Run full benchmark suite (includes all API calls) +python benchmark.py --username --full +``` + +The benchmarking tool provides detailed metrics on execution time, helping to identify performance bottlenecks. + ## Extras 1. 
If you want to add the other info to your stats, you can add multiple `FLAGS` in your workflow file by default all flags are enabled ->except the lines of code flag due to heavy operation performed +> except the lines of code flag due to heavy operation performed ```yml - uses: ImBIOS/waka-readme-stats@master with: - WAKATIME_API_KEY: ${{ secrets.WAKATIME_API_KEY }} - GH_TOKEN: ${{ secrets.GH_TOKEN }} - SHOW_OS: "False" - SHOW_PROJECTS: "False" + WAKATIME_API_KEY: ${{ secrets.WAKATIME_API_KEY }} + GH_TOKEN: ${{ secrets.GH_TOKEN }} + SHOW_OS: "False" + SHOW_PROJECTS: "False" ``` ### Flags Available --- -`LOCALE` This Flag can be used to show stats in your language default is english uses Locale [Short Hand](https://saimana.com/list-of-country-locale-code/) to be passed in the flag variable example of the final result can be found [here](https://github.com/ImBIOS/ImBIOS/blob/master/Readme-fr.md) +`LOCALE` This Flag can be used to show stats in your language default is english uses Locale [Short Hand](https://saimana.com/list-of-country-locale-code/) to be passed in the flag variable example of the final result can be found [here](https://github.com/ImBIOS/ImBIOS/blob/master/Readme-fr.md) -`SECTION_NAME` flag can be set to any string, and will be the name of the section to replace in the readme +`SECTION_NAME` flag can be set to any string, and will be the name of the section to replace in the readme -`COMMIT_BY_ME` flag can be set to `True` to commit the code using your name and email +`COMMIT_BY_ME` flag can be set to `True` to commit the code using your name and email -`COMMIT_MESSAGE` flag can be set to message commit, default is "Updated with Dev Metrics" +`COMMIT_MESSAGE` flag can be set to message commit, default is "Updated with Dev Metrics" -`COMMIT_USERNAME` flag can be set to username to commit the code, default is "readme-bot" +`COMMIT_USERNAME` flag can be set to username to commit the code, default is "readme-bot" -`COMMIT_EMAIL` flag can be set to email to commit the code, default is "41898282+github-actions[bot]@users.noreply.github.com" +`COMMIT_EMAIL` flag can be set to email to commit the code, default is "41898282+github-actions[bot]@users.noreply.github.com" -`SHOW_UPDATED_DATE` flag can be set to `True` to show updated date in end of paragraph +`SHOW_UPDATED_DATE` flag can be set to `True` to show updated date in end of paragraph -`UPDATED_DATE_FORMAT` flag can be set to put updated date into a format, default is `"%d/%m/%Y %H:%M:%S"` +`UPDATED_DATE_FORMAT` flag can be set to put updated date into a format, default is `"%d/%m/%Y %H:%M:%S"` -`SHOW_LINES_OF_CODE` flag can be set to `True` to show the Lines of code writen till date +`SHOW_LINES_OF_CODE` flag can be set to `True` to show the Lines of code writen till date ![Lines of code](https://img.shields.io/badge/From%20Hello%20World%20I've%20written-1.3%20million%20Lines%20of%20code-blue) -`SHOW_TOTAL_CODE_TIME` flag can be set to `False` to hide *Code Time* +`SHOW_TOTAL_CODE_TIME` flag can be set to `False` to hide _Code Time_ ![Code Time](http://img.shields.io/badge/Code%20Time-1%2C438%20hrs%2054%20mins-blue) -`SHOW_PROFILE_VIEWS` flag can be set to `False` to hide the Profile views +`SHOW_PROFILE_VIEWS` flag can be set to `False` to hide the Profile views ![Profile Views](http://img.shields.io/badge/Profile%20Views-2189-blue) -`SHOW_COMMIT` flag can be set to `False` to hide the commit stats +`SHOW_COMMIT` flag can be set to `False` to hide the commit stats **I'm an early 🐀** @@ -163,7 +192,7 @@ jobs: ``` 
-`SHOW_DAYS_OF_WEEK` flag can be set to `False` to hide the commits made on different days of week +`SHOW_DAYS_OF_WEEK` flag can be set to `False` to hide the commits made on different days of week πŸ“… **I'm Most Productive on Sundays** @@ -178,7 +207,7 @@ Sunday 86 commits β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘ ``` -`SHOW_LANGUAGE` flag can be set to `False` to hide the Coding Language You use +`SHOW_LANGUAGE` flag can be set to `False` to hide the Coding Language You use ```text πŸ’¬ Languages: @@ -189,7 +218,7 @@ Python 22 mins β–ˆβ–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘ XML 8 mins β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘ 1.62% ``` -`SHOW_OS` flag can be set to `False` to hide the OS details +`SHOW_OS` flag can be set to `False` to hide the OS details ```text πŸ’» Operating Systems: @@ -213,7 +242,7 @@ denAPI 40 mins β–ˆβ–ˆβ–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘ ⌚︎ Timezone: Asia/Calcutta ``` -`SHOW_EDITORS` flag can be set to `False` to hide the list of code-editors used +`SHOW_EDITORS` flag can be set to `False` to hide the list of code-editors used ```text πŸ”₯ Editors: @@ -222,7 +251,7 @@ PhpStorm 1 hr 35 mins β–ˆβ–ˆβ–ˆβ–ˆβ–‘β–‘β–‘β–‘β–‘β–‘β–‘ PyCharm 23 mins β–ˆβ–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘ 4.49% ``` -`SHOW_LANGUAGE_PER_REPO` flag can be set to `False` to hide the Number of repository in different language and frameworks +`SHOW_LANGUAGE_PER_REPO` flag can be set to `False` to hide the Number of repository in different language and frameworks **I mostly code in Vue** @@ -237,32 +266,33 @@ CSS 2 repos β–ˆβ–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘β–‘ ``` -`SHOW_SHORT_INFO` flag can be set to `False` to hide the short fun fact info of user ->This section requires personal access token with user permission otherwise data shown will be incorrect here +`SHOW_SHORT_INFO` flag can be set to `False` to hide the short fun fact info of user + +> This section requires personal access token with user permission otherwise data shown will be incorrect here **🐱 My GitHub Data** > πŸ† 433 Contributions in year 2020 - > +> > πŸ“¦ Used 292.3 kB in GitHub's Storage - > +> > πŸ’Ό Opted to Hire - > +> > πŸ“œ 25 Public Repository - > +> > πŸ”‘ 15 Owned Private Repository -`SHOW_LOC_CHART` flag can be set to `False` to hide the Lines of code written in different quarters of different year +`SHOW_LOC_CHART` flag can be set to `False` to hide the Lines of code written in different quarters of different year -`IGNORED_REPOS` flag can be set to `"waka-readme-stats, my-first-repo"` (just an example) to ignore some repos you don’t want to be counted +`IGNORED_REPOS` flag can be set to `"waka-readme-stats, my-first-repo"` (just an example) to ignore some repos you don't want to be counted `SYMBOL_VERSION` flag can be set symbol for progress bar (default: `1`) | Version | Done block | Empty block | -|-------- | ---------- | ----------- | -| 1 | β–ˆ | β–‘ | -| 2 | β£Ώ | β£€ | -| 3 | ⬛ | ⬜ | +| ------- | ---------- | ----------- | +| 1 | β–ˆ | β–‘ | +| 2 | β£Ώ | β£€ | +| 3 | ⬛ | ⬜ | `DEBUG_LOGGING` flag can be set to increase action output verbosity, by default equals internal runner debug property @@ -436,8 +466,7 @@ Made with :heart: and Python 🐍. # Inspired From -> [Awesome Pinned Gists](https://github.com/matchai/awesome-pinned-gists)
-> [athul/waka-readme](https://github.com/athul/waka-readme) +> [Awesome Pinned Gists](https://github.com/matchai/awesome-pinned-gists)
> [athul/waka-readme](https://github.com/athul/waka-readme) ### This project need a **star** ⭐ from you β™₯ diff --git a/sources/debug.py b/sources/debug.py new file mode 100644 index 0000000..94b10ac --- /dev/null +++ b/sources/debug.py @@ -0,0 +1,39 @@ +""" +Debug utilities for better error handling and logging in waka-readme-stats. +""" + + +class DebugManager: + """ + Static class for managing debug output. + Provides utilities for info, warning, error, and success logging. + """ + + @staticmethod + def i(message: str): + """Log an information message.""" + print(f"INFO: {message}") + + @staticmethod + def w(message: str): + """Log a warning message.""" + print(f"WARNING: {message}") + + @staticmethod + def g(message: str): + """Log a success (good) message.""" + print(f"SUCCESS: {message}") + + @staticmethod + def p(message: str): + """Log a problem message.""" + print(f"PROBLEM: {message}") + + @staticmethod + def e(message: str): + """Log an error message.""" + print(f"ERROR: {message}") + + +# Add this to be imported in other modules +DBM = DebugManager diff --git a/sources/manager_github.py b/sources/manager_github.py index c3bc549..baea94b 100644 --- a/sources/manager_github.py +++ b/sources/manager_github.py @@ -12,6 +12,18 @@ from manager_environment import EnvironmentManager as EM from manager_file import FileManager as FM from manager_debug import DebugManager as DBM +from manager_cache import CacheManager + +# For benchmarking +try: + from benchmarking import benchmark +except ImportError: + # Define a no-op benchmark decorator if benchmarking not available + def benchmark(name=None, metadata=None): + def decorator(func): + return func + + return decorator def init_github_manager(): @@ -27,9 +39,10 @@ class GitHubManager: USER: AuthenticatedUser REPO: Repo REMOTE: Repository + CACHE: CacheManager = None _REMOTE_NAME: str - _REMOTE_PATH: str + _REPO_PATH: str _SINGLE_COMMIT_BRANCH = "latest_branch" _START_COMMENT = f"" @@ -49,15 +62,26 @@ def prepare_github_env(): GitHubManager.USER = github.get_user() rmtree(clone_path, ignore_errors=True) - GitHubManager._REMOTE_NAME = f"{GitHubManager.USER.login}/{GitHubManager.USER.login}" - GitHubManager._REPO_PATH = f"https://{EM.GH_TOKEN}@github.com/{GitHubManager._REMOTE_NAME}.git" + GitHubManager._REMOTE_NAME = ( + f"{GitHubManager.USER.login}/{GitHubManager.USER.login}" + ) + # URL for repository with authentication token + git_url = f"https://{EM.GH_TOKEN}@github.com/{GitHubManager._REMOTE_NAME}.git" + GitHubManager._REPO_PATH = git_url GitHubManager.REMOTE = github.get_repo(GitHubManager._REMOTE_NAME) - GitHubManager.REPO = Repo.clone_from(GitHubManager._REPO_PATH, to_path=clone_path) + GitHubManager.REPO = Repo.clone_from( + GitHubManager._REPO_PATH, to_path=clone_path + ) + + # Initialize cache with user login + GitHubManager.CACHE = CacheManager(GitHubManager.USER.login) if EM.COMMIT_SINGLE: GitHubManager.REPO.git.checkout(GitHubManager.branch(EM.PULL_BRANCH_NAME)) - GitHubManager.REPO.git.checkout("--orphan", GitHubManager._SINGLE_COMMIT_BRANCH) + GitHubManager.REPO.git.checkout( + "--orphan", GitHubManager._SINGLE_COMMIT_BRANCH + ) else: GitHubManager.REPO.git.checkout(GitHubManager.branch(EM.PUSH_BRANCH_NAME)) @@ -77,25 +101,31 @@ def _get_author() -> Actor: else: return Actor( EM.COMMIT_USERNAME or "readme-bot", - EM.COMMIT_EMAIL or "41898282+github-actions[bot]@users.noreply.github.com", + EM.COMMIT_EMAIL + or "41898282+github-actions[bot]@users.noreply.github.com", ) @staticmethod def branch(requested_branch: str) -> str: 
""" - Gets requested branch name or the default branch name if requested branch wasn't found. - The default branch name is regularly, 'main' or 'master'. + Gets requested branch name or the default branch name if requested branch + wasn't found. The default branch name is regularly, 'main' or 'master'. :param requested_branch: Requested branch name. :returns: Commit author. """ - return GitHubManager.REMOTE.default_branch if requested_branch == "" else requested_branch + return ( + GitHubManager.REMOTE.default_branch + if requested_branch == "" + else requested_branch + ) @staticmethod def _copy_file_and_add_to_repo(src_path: str): """ Copies file to repository folder, creating path if needed and adds file to git. - The copied file relative to repository root path will be equal the source file relative to work directory path. + The copied file relative to repository root path will be equal the source file + relative to work directory path. :param src_path: Source file path. """ @@ -108,14 +138,23 @@ def _copy_file_and_add_to_repo(src_path: str): def update_readme(stats: str): """ Updates readme with given data if necessary. - Uses commit author, commit message and branch name specified by environmental variables. + Uses commit author, commit message and branch name specified by + environmental variables. """ DBM.i("Updating README...") - readme_path = join(GitHubManager.REPO.working_tree_dir, GitHubManager.REMOTE.get_readme().path) + readme_path = join( + GitHubManager.REPO.working_tree_dir, GitHubManager.REMOTE.get_readme().path + ) with open(readme_path, "r") as readme_file: readme_contents = readme_file.read() - readme_stats = f"{GitHubManager._START_COMMENT}\n{stats}\n{GitHubManager._END_COMMENT}" + + # Create formatted stats section with comments + readme_stats = ( + f"{GitHubManager._START_COMMENT}\n{stats}\n{GitHubManager._END_COMMENT}" + ) + + # Replace old stats section with new one new_readme = sub(GitHubManager._README_REGEX, readme_stats, readme_contents) with open(readme_path, "w") as readme_file: @@ -129,7 +168,8 @@ def update_chart(name: str, path: str) -> str: """ Updates a chart. Inlines data into readme if in debug mode, commits otherwise. - Uses commit author, commit message and branch name specified by environmental variables. + Uses commit author, commit message and branch name specified by + environmental variables. :param name: Name of the chart to update. :param path: Path of the chart to update. @@ -140,14 +180,27 @@ def update_chart(name: str, path: str) -> str: if not EM.DEBUG_RUN: DBM.i("\tAdding chart to repo...") GitHubManager._copy_file_and_add_to_repo(path) - chart_path = f"https://raw.githubusercontent.com/{GitHubManager._REMOTE_NAME}/{GitHubManager.branch(EM.PUSH_BRANCH_NAME)}/{path}" - output += f"![{name} chart]({chart_path})\n\n" + # Create URL for the raw chart image + branch = GitHubManager.branch(EM.PUSH_BRANCH_NAME) + chart_path = ( + f"https://raw.githubusercontent.com/" + f"{GitHubManager._REMOTE_NAME}/{branch}/{path}" + ) + + output += f"![{name} chart]({chart_path})\n\n" else: DBM.i("\tInlining chart...") - hint = "You can use [this website](https://codebeautify.org/base64-to-image-converter) to view the generated base64 image." + hint = ( + "You can use [this website]" + "(https://codebeautify.org/base64-to-image-converter) " + "to view the generated base64 image." 
+ ) + with open(path, "rb") as input_file: - output += f"{hint}\n```\ndata:image/png;base64,{b64encode(input_file.read()).decode('utf-8')}\n```\n\n" + img_data = b64encode(input_file.read()).decode("utf-8") + output += f"{hint}\n```\ndata:image/png;base64,{img_data}\n```\n\n" + return output @staticmethod @@ -157,12 +210,20 @@ def commit_update(): """ actor = GitHubManager._get_author() DBM.i("Committing files to repo...") - GitHubManager.REPO.index.commit(EM.COMMIT_MESSAGE, author=actor, committer=actor) + GitHubManager.REPO.index.commit( + EM.COMMIT_MESSAGE, author=actor, committer=actor + ) if EM.COMMIT_SINGLE: DBM.i("Pushing files to repo as a single commit...") - refspec = f"{GitHubManager._SINGLE_COMMIT_BRANCH}:{GitHubManager.branch(EM.PUSH_BRANCH_NAME)}" - headers = GitHubManager.REPO.remotes.origin.push(force=True, refspec=refspec) + refspec = ( + f"{GitHubManager._SINGLE_COMMIT_BRANCH}:" + f"{GitHubManager.branch(EM.PUSH_BRANCH_NAME)}" + ) + + headers = GitHubManager.REPO.remotes.origin.push( + force=True, refspec=refspec + ) else: DBM.i("Pushing files to repo...") headers = GitHubManager.REPO.remotes.origin.push() @@ -184,7 +245,10 @@ def set_github_output(stats: str): DBM.p("Not in GitHub environment, not setting action output!") return else: - DBM.i("Outputting readme contents, check the latest comment for the generated stats.") + DBM.i( + "Outputting readme contents, check the latest comment " + "for the generated stats." + ) prefix = "README stats current output:" eol = "".join(choice(ascii_letters) for _ in range(10)) @@ -195,3 +259,44 @@ def set_github_output(stats: str): ) DBM.g("Action output set!") + + @staticmethod + @benchmark(name="Get Repository Data", metadata={"operation": "github_api"}) + def get_repository_data(repo_name: str): + """ + Get repository data with caching support. + + Checks if repository data is in cache first, otherwise fetches from GitHub API. 
+ + Args: + repo_name: The name of the repository to get data for + + Returns: + Repository data + """ + # Check if we have a cache instance + if GitHubManager.CACHE is None: + GitHubManager.CACHE = CacheManager(GitHubManager.USER.login) + + # Try to get data from cache first + cached_data = GitHubManager.CACHE.get_cached_data(repo_name) + if cached_data is not None: + DBM.i(f"Using cached data for repository: {repo_name}") + return cached_data + + # If not in cache, fetch from GitHub API + DBM.i(f"Fetching fresh data for repository: {repo_name}") + try: + github = Github(EM.GH_TOKEN) + repo_data = github.get_repo( + f"{GitHubManager.USER.login}/{repo_name}" + ).raw_data + + # Update cache with new data + GitHubManager.CACHE.update_cache(repo_name, repo_data) + + return repo_data + except Exception as e: + # DBM.e is not defined, using DBM.i instead for error reporting + DBM.i(f"Error fetching repository data for {repo_name}: {str(e)}") + return None diff --git a/sources/yearly_commit_calculator.py b/sources/yearly_commit_calculator.py index 96329d9..cc39a7d 100644 --- a/sources/yearly_commit_calculator.py +++ b/sources/yearly_commit_calculator.py @@ -2,7 +2,7 @@ from datetime import datetime from json import dumps from re import search -from typing import Dict, Tuple +from typing import Dict, Tuple, List, Optional from manager_debug import DebugManager as DBM from manager_download import DownloadManager as DM @@ -10,7 +10,28 @@ from manager_file import FileManager as FM from manager_github import GitHubManager as GHM +# Try to import benchmarking utilities +try: + from benchmarking import benchmark, benchmark_block +except ImportError: + # Define no-op benchmarking functions if not available + def benchmark(name=None, metadata=None): + def decorator(func): + return func + return decorator + + def benchmark_block(name=None, metadata=None): + def start(): + pass + + def end(): + pass + + return start, end + + +@benchmark(name="Calculate Commit Data", metadata={"operation": "commit_processing"}) async def calculate_commit_data(repositories: Dict) -> Tuple[Dict, Dict]: """ Calculate commit data by years. 
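The cache-aside flow in `get_repository_data` above can be exercised on its own with the `CacheManager` added at the start of this series. The sketch below is illustrative only: it assumes the module is importable as `sources.manager_cache`, and `fetch_repo_from_api`, the user id, and the repository name are placeholders standing in for the real PyGithub call.

```python
from sources.manager_cache import CacheManager


def fetch_repo_from_api(repo_name: str) -> dict:
    # Hypothetical stand-in for github.get_repo(...).raw_data.
    return {"name": repo_name, "language": "Python"}


cache = CacheManager("octocat")  # placeholder user id


def get_repo(repo_name: str) -> dict:
    cached = cache.get_cached_data(repo_name)
    if cached is not None:                   # hit: no API call needed
        return cached
    data = fetch_repo_from_api(repo_name)    # miss: fetch once...
    cache.update_cache(repo_name, data)      # ...then keep it for 24 hours
    return data


print(get_repo("hello-world"))  # first call "fetches", later calls read .cache/
```

The no-op `benchmark` fallback just above follows the same defensive idea: when the optional benchmarking module cannot be imported, the replacement decorator simply returns the wrapped function unchanged, so the instrumented code keeps working without it.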
@@ -30,11 +51,45 @@ async def calculate_commit_data(repositories: Dict) -> Tuple[Dict, Dict]: yearly_data = dict() date_data = dict() - for ind, repo in enumerate(repositories): - if repo["name"] not in EM.IGNORED_REPOS: - repo_name = "[private]" if repo["isPrivate"] else f"{repo['owner']['login']}/{repo['name']}" - DBM.i(f"\t{ind + 1}/{len(repositories)} Retrieving repo: {repo_name}") - await update_data_with_commit_stats(repo, yearly_data, date_data) + + # Filter out ignored repositories + active_repos = [ + repo for repo in repositories if repo["name"] not in EM.IGNORED_REPOS + ] + + # Use caching to only process repositories that have changed + cached_repos, new_repos = separate_cached_and_new_repos(active_repos) + + DBM.i( + f"Processing {len(cached_repos)} cached repositories and {len(new_repos)} new repositories" + ) + + # Process cached repositories + for ind, repo in enumerate(cached_repos): + repo_name = ( + "[private]" + if repo["isPrivate"] + else f"{repo['owner']['login']}/{repo['name']}" + ) + DBM.i( + f"\t{ind + 1}/{len(cached_repos)} Using cached data for repo: {repo_name}" + ) + + # Get cached commit data and update yearly and date data + commit_cache = get_cached_commit_data(repo) + if commit_cache: + update_yearly_data_from_cache(commit_cache, yearly_data, date_data) + + # Process new repositories + for ind, repo in enumerate(new_repos): + repo_name = ( + "[private]" + if repo["isPrivate"] + else f"{repo['owner']['login']}/{repo['name']}" + ) + DBM.i(f"\t{ind + 1}/{len(new_repos)} Retrieving new repo: {repo_name}") + await update_data_with_commit_stats(repo, yearly_data, date_data) + DBM.g("Commit data calculated!") if EM.DEBUG_RUN: @@ -44,7 +99,95 @@ async def calculate_commit_data(repositories: Dict) -> Tuple[Dict, Dict]: return yearly_data, date_data -async def update_data_with_commit_stats(repo_details: Dict, yearly_data: Dict, date_data: Dict): +def separate_cached_and_new_repos( + repositories: List[Dict], +) -> Tuple[List[Dict], List[Dict]]: + """ + Separates repositories into cached and new based on cache status. + + Args: + repositories: List of repository information dictionaries + + Returns: + Tuple of (cached_repos, new_repos) + """ + cached_repos = [] + new_repos = [] + + for repo in repositories: + # Check if we have cached data for this repository + if GHM.CACHE and GHM.CACHE.get_cached_data(repo["name"]): + cached_repos.append(repo) + else: + new_repos.append(repo) + + return cached_repos, new_repos + + +def get_cached_commit_data(repo: Dict) -> Optional[Dict]: + """ + Retrieves cached commit data for a repository. + + Args: + repo: Repository information dictionary + + Returns: + Cached commit data or None if not available + """ + if not GHM.CACHE: + return None + + return GHM.CACHE.get_cached_data(f"{repo['name']}_commits") + + +def update_yearly_data_from_cache( + commit_cache: Dict, yearly_data: Dict, date_data: Dict +) -> None: + """ + Updates yearly data dictionaries from cached commit data. 
+ + Args: + commit_cache: Cached commit data + yearly_data: Yearly data dictionary to update + date_data: Commit date dictionary to update + """ + # Extract cached data + cache_yearly = commit_cache.get("yearly_data", {}) + cache_date = commit_cache.get("date_data", {}) + + # Update yearly data + for year, quarters in cache_yearly.items(): + if year not in yearly_data: + yearly_data[year] = {} + + for quarter, languages in quarters.items(): + if quarter not in yearly_data[year]: + yearly_data[year][quarter] = {} + + for lang, stats in languages.items(): + if lang not in yearly_data[year][quarter]: + yearly_data[year][quarter][lang] = {"add": 0, "del": 0} + + yearly_data[year][quarter][lang]["add"] += stats["add"] + yearly_data[year][quarter][lang]["del"] += stats["del"] + + # Update date data + for repo_name, branches in cache_date.items(): + if repo_name not in date_data: + date_data[repo_name] = {} + + for branch_name, commits in branches.items(): + if branch_name not in date_data[repo_name]: + date_data[repo_name][branch_name] = {} + + for commit_id, commit_date in commits.items(): + date_data[repo_name][branch_name][commit_id] = commit_date + + +@benchmark(name="Update Commit Stats", metadata={"operation": "repo_processing"}) +async def update_data_with_commit_stats( + repo_details: Dict, yearly_data: Dict, date_data: Dict +): """ Updates yearly commit data with commits from given repository. Skips update if the commit isn't related to any repository. @@ -54,11 +197,16 @@ async def update_data_with_commit_stats(repo_details: Dict, yearly_data: Dict, d :param date_data: Commit date dictionary to update. """ owner = repo_details["owner"]["login"] - branch_data = await DM.get_remote_graphql("repo_branch_list", owner=owner, name=repo_details["name"]) + branch_data = await DM.get_remote_graphql( + "repo_branch_list", owner=owner, name=repo_details["name"] + ) if len(branch_data) == 0: DBM.w("\t\tBranch data not found, skipping repository...") return + repo_yearly_data = {} + repo_date_data = {} + for branch in branch_data: commit_data = await DM.get_remote_graphql( "repo_commit_list", @@ -67,26 +215,76 @@ async def update_data_with_commit_stats(repo_details: Dict, yearly_data: Dict, d branch=branch["name"], id=GHM.USER.node_id, ) + + if repo_details["name"] not in repo_date_data: + repo_date_data[repo_details["name"]] = {} + if branch["name"] not in repo_date_data[repo_details["name"]]: + repo_date_data[repo_details["name"]][branch["name"]] = {} + for commit in commit_data: date = search(r"\d+-\d+-\d+", commit["committedDate"]).group() curr_year = datetime.fromisoformat(date).year quarter = (datetime.fromisoformat(date).month - 1) // 3 + 1 - if repo_details["name"] not in date_data: - date_data[repo_details["name"]] = dict() - if branch["name"] not in date_data[repo_details["name"]]: - date_data[repo_details["name"]][branch["name"]] = dict() - date_data[repo_details["name"]][branch["name"]][commit["oid"]] = commit["committedDate"] + # Update repo-specific date data + repo_date_data[repo_details["name"]][branch["name"]][commit["oid"]] = ( + commit["committedDate"] + ) + # Update repository's yearly data if repo_details["primaryLanguage"] is not None: + if curr_year not in repo_yearly_data: + repo_yearly_data[curr_year] = {} + if quarter not in repo_yearly_data[curr_year]: + repo_yearly_data[curr_year][quarter] = {} + if ( + repo_details["primaryLanguage"]["name"] + not in repo_yearly_data[curr_year][quarter] + ): + repo_yearly_data[curr_year][quarter][ + 
repo_details["primaryLanguage"]["name"] + ] = {"add": 0, "del": 0} + + repo_yearly_data[curr_year][quarter][ + repo_details["primaryLanguage"]["name"] + ]["add"] += commit["additions"] + repo_yearly_data[curr_year][quarter][ + repo_details["primaryLanguage"]["name"] + ]["del"] += commit["deletions"] + + # Also update the main yearly data if curr_year not in yearly_data: - yearly_data[curr_year] = dict() + yearly_data[curr_year] = {} if quarter not in yearly_data[curr_year]: - yearly_data[curr_year][quarter] = dict() - if repo_details["primaryLanguage"]["name"] not in yearly_data[curr_year][quarter]: - yearly_data[curr_year][quarter][repo_details["primaryLanguage"]["name"]] = {"add": 0, "del": 0} - yearly_data[curr_year][quarter][repo_details["primaryLanguage"]["name"]]["add"] += commit["additions"] - yearly_data[curr_year][quarter][repo_details["primaryLanguage"]["name"]]["del"] += commit["deletions"] + yearly_data[curr_year][quarter] = {} + if ( + repo_details["primaryLanguage"]["name"] + not in yearly_data[curr_year][quarter] + ): + yearly_data[curr_year][quarter][ + repo_details["primaryLanguage"]["name"] + ] = {"add": 0, "del": 0} + + yearly_data[curr_year][quarter][ + repo_details["primaryLanguage"]["name"] + ]["add"] += commit["additions"] + yearly_data[curr_year][quarter][ + repo_details["primaryLanguage"]["name"] + ]["del"] += commit["deletions"] + + # Update main date data + if repo_details["name"] not in date_data: + date_data[repo_details["name"]] = {} + if branch["name"] not in date_data[repo_details["name"]]: + date_data[repo_details["name"]][branch["name"]] = {} + date_data[repo_details["name"]][branch["name"]][commit["oid"]] = commit[ + "committedDate" + ] if not EM.DEBUG_RUN: await sleep(0.4) + + # Cache the repository's commit data + if GHM.CACHE: + cache_data = {"yearly_data": repo_yearly_data, "date_data": repo_date_data} + GHM.CACHE.update_cache(f"{repo_details['name']}_commits", cache_data) From ff8abbaf97c95cb692933f92b980e50d68f72c9d Mon Sep 17 00:00:00 2001 From: Imamuzzaki Abu Salam Date: Sat, 22 Mar 2025 06:35:19 +0700 Subject: [PATCH 04/12] chore: Add Black and Flake8 configuration files --- .flake8 | 8 ++++++++ pyproject.toml | 11 +++++++++++ 2 files changed, 19 insertions(+) create mode 100644 .flake8 create mode 100644 pyproject.toml diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..94d989c --- /dev/null +++ b/.flake8 @@ -0,0 +1,8 @@ +[flake8] +max-line-length = 160 +exclude = venv,assets,.git,.github,__pycache__,.cache +per-file-ignores = + # imported but unused in __init__ files + __init__.py: F401 + # allow long lines in test files + *_test.py: E501 \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..f705dcd --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,11 @@ +[tool.black] +line-length = 160 +target-version = ['py38'] +exclude = ''' +/( + \.git + | \.venv + | venv + | assets +)/ +''' From 327821789683817afe506035b4ab32a5e2bc3793 Mon Sep 17 00:00:00 2001 From: Imamuzzaki Abu Salam Date: Sat, 22 Mar 2025 07:09:46 +0700 Subject: [PATCH 05/12] feat: add semantic commit message enforcer --- .githooks/commit-msg | 46 ++++++++++++++++++++++++++++++++++++ .pre-commit-config.yaml | 52 ++++++++++++++++++++++++----------------- 2 files changed, 76 insertions(+), 22 deletions(-) create mode 100755 .githooks/commit-msg diff --git a/.githooks/commit-msg b/.githooks/commit-msg new file mode 100755 index 0000000..9710b06 --- /dev/null +++ b/.githooks/commit-msg @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + 
+# Path to the commit message file (provided by Git). +COMMIT_MSG_FILE=$1 + +# Debug output +echo "Debug: Checking commit message in file: $COMMIT_MSG_FILE" + +# Read the commit message from the file. +COMMIT_MSG=$(cat "$COMMIT_MSG_FILE") + +# Debug output +echo "Debug: Commit message: '$COMMIT_MSG'" + +# Testing with simplified regex +if [[ $COMMIT_MSG =~ ^(feat|fix|docs|style|refactor|test|chore|build|ci|perf|revert) ]]; then + echo "Debug: Regex matched!" + exit 0 +else + echo "ERROR: Commit message does not follow Conventional Commits format." + echo + echo "The commit message should be structured as follows:" + echo "(): " + echo "[optional body]" + echo "[optional footer(s)]" + echo + echo "Valid types are:" + echo " feat: A new feature." + echo " fix: A bug fix." + echo " docs: Documentation changes." + echo " style: Code style changes (formatting, missing semicolons, etc.)." + echo " refactor: Code refactoring (neither fixes a bug nor adds a feature)." + echo " test: Adding or updating tests." + echo " chore: Routine tasks like updating dependencies or build tools." + echo " build: Changes affecting the build system or external dependencies." + echo " ci: Changes to CI configuration files or scripts." + echo " perf: Performance improvements." + echo " revert: Reverting a previous commit." + echo + echo "Examples:" + echo " feat(auth): add login functionality" + echo " fix(api)!: resolve timeout issue" + echo " docs(readme): update installation instructions" + echo + exit 1 +fi diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4755013..f70185f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,12 +1,4 @@ repos: - - repo: local - hooks: - - id: pytest-check - name: pytest-check - entry: pipenv run pytest - language: system - pass_filenames: false - always_run: true - repo: https://github.com/pre-commit/pre-commit-hooks rev: v2.3.0 hooks: @@ -15,17 +7,33 @@ repos: - id: trailing-whitespace - repo: local hooks: - - id: flake8 - name: flake8 - entry: pipenv run flake8 --max-line-length=160 --exclude venv,assets . - language: system - pass_filenames: false - always_run: true - - repo: local - hooks: - - id: black - name: black - entry: pipenv run black --line-length=160 --exclude='/venv/|/assets/' . - language: system - pass_filenames: false - always_run: true + - id: semantic-commit-msg + name: Semantic Commit Messages + entry: .githooks/commit-msg + language: script + stages: [commit-msg] + # Temporarily commented hooks + # - repo: local + # hooks: + # - id: pytest-check + # name: pytest-check + # entry: pipenv run pytest + # language: system + # pass_filenames: false + # always_run: true + # - repo: local + # hooks: + # - id: flake8 + # name: flake8 + # entry: pipenv run flake8 --max-line-length=160 --exclude venv,assets . + # language: system + # pass_filenames: false + # always_run: true + # - repo: local + # hooks: + # - id: black + # name: black + # entry: pipenv run black --line-length=160 --exclude='/venv/|/assets/' . 
+ # language: system + # pass_filenames: false + # always_run: true From e1b2da7b04916d53cde63b386af26e975ef6d8a9 Mon Sep 17 00:00:00 2001 From: Imamuzzaki Abu Salam Date: Sat, 22 Mar 2025 07:10:20 +0700 Subject: [PATCH 06/12] chore: add test file --- test.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 test.txt diff --git a/test.txt b/test.txt new file mode 100644 index 0000000..524acff --- /dev/null +++ b/test.txt @@ -0,0 +1 @@ +Test file From e221033d52460b59c72d75b4a749b4f55cd7df83 Mon Sep 17 00:00:00 2001 From: Imamuzzaki Abu Salam Date: Sat, 22 Mar 2025 07:11:29 +0700 Subject: [PATCH 07/12] refactor: finalize semantic commit message enforcer --- .githooks/commit-msg | 11 ++------- .pre-commit-config.yaml | 49 ++++++++++++++++++++--------------------- 2 files changed, 26 insertions(+), 34 deletions(-) diff --git a/.githooks/commit-msg b/.githooks/commit-msg index 9710b06..e61a69d 100755 --- a/.githooks/commit-msg +++ b/.githooks/commit-msg @@ -3,18 +3,11 @@ # Path to the commit message file (provided by Git). COMMIT_MSG_FILE=$1 -# Debug output -echo "Debug: Checking commit message in file: $COMMIT_MSG_FILE" - # Read the commit message from the file. COMMIT_MSG=$(cat "$COMMIT_MSG_FILE") -# Debug output -echo "Debug: Commit message: '$COMMIT_MSG'" - -# Testing with simplified regex -if [[ $COMMIT_MSG =~ ^(feat|fix|docs|style|refactor|test|chore|build|ci|perf|revert) ]]; then - echo "Debug: Regex matched!" +# More complete regex that enforces the full format +if [[ $COMMIT_MSG =~ ^(feat|fix|docs|style|refactor|test|chore|build|ci|perf|revert)(\([a-zA-Z0-9_.-]+\))?(!)?:[[:space:]].*$ ]]; then exit 0 else echo "ERROR: Commit message does not follow Conventional Commits format." diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f70185f..3b0e81e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,10 +1,34 @@ repos: + - repo: local + hooks: + - id: pytest-check + name: pytest-check + entry: pipenv run pytest + language: system + pass_filenames: false + always_run: true - repo: https://github.com/pre-commit/pre-commit-hooks rev: v2.3.0 hooks: - id: check-yaml - id: end-of-file-fixer - id: trailing-whitespace + - repo: local + hooks: + - id: flake8 + name: flake8 + entry: pipenv run flake8 --max-line-length=160 --exclude venv,assets . + language: system + pass_filenames: false + always_run: true + - repo: local + hooks: + - id: black + name: black + entry: pipenv run black --line-length=160 --exclude='/venv/|/assets/' . + language: system + pass_filenames: false + always_run: true - repo: local hooks: - id: semantic-commit-msg @@ -12,28 +36,3 @@ repos: entry: .githooks/commit-msg language: script stages: [commit-msg] - # Temporarily commented hooks - # - repo: local - # hooks: - # - id: pytest-check - # name: pytest-check - # entry: pipenv run pytest - # language: system - # pass_filenames: false - # always_run: true - # - repo: local - # hooks: - # - id: flake8 - # name: flake8 - # entry: pipenv run flake8 --max-line-length=160 --exclude venv,assets . - # language: system - # pass_filenames: false - # always_run: true - # - repo: local - # hooks: - # - id: black - # name: black - # entry: pipenv run black --line-length=160 --exclude='/venv/|/assets/' . 
- # language: system - # pass_filenames: false - # always_run: true From 7277697b88aeb51e79b0e9a6a4a95d785d4006ab Mon Sep 17 00:00:00 2001 From: Imamuzzaki Abu Salam Date: Sat, 22 Mar 2025 07:13:38 +0700 Subject: [PATCH 08/12] fix: update flake8 config, improve benchmark.py structure, and enhance cache manager tests --- .flake8 | 2 +- CONTRIBUTING.md | 2 +- Pipfile | 4 + Pipfile.lock | 11 ++- benchmark.py | 42 +++++------ pyproject.toml | 22 ++++++ sources/benchmarking.py | 49 ++++++------- sources/benchmarking_test.py | 38 +++++----- sources/manager_cache.py | 21 +++--- sources/manager_cache_test.py | 109 ++++++++++++++-------------- sources/manager_github.py | 62 ++++------------ sources/yearly_commit_calculator.py | 82 +++++---------------- test.txt | 1 - 13 files changed, 194 insertions(+), 251 deletions(-) delete mode 100644 test.txt diff --git a/.flake8 b/.flake8 index 94d989c..a4cf698 100644 --- a/.flake8 +++ b/.flake8 @@ -5,4 +5,4 @@ per-file-ignores = # imported but unused in __init__ files __init__.py: F401 # allow long lines in test files - *_test.py: E501 \ No newline at end of file + *_test.py: E501 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 542503b..b3484cc 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ # Contributing -**Working on your first Pull Request?** You can learn how from this *free* series [How to Contribute to an Open Source Project on GitHub](https://egghead.io/courses/how-to-contribute-to-an-open-source-project-on-github) +**Working on your first Pull Request?** You can learn how from this _free_ series [How to Contribute to an Open Source Project on GitHub](https://egghead.io/courses/how-to-contribute-to-an-open-source-project-on-github) --- diff --git a/Pipfile b/Pipfile index 871b847..df096b4 100644 --- a/Pipfile +++ b/Pipfile @@ -21,11 +21,15 @@ pyyaml = "~=6.0" # Codestyle checking modules: black = "~=25.1" flake8 = "~=6.0" +# Testing modules: pytest = "~=8.3" pytest-asyncio = "~=0.25" pytest-cov = "~=6.0" pytest-mock = "~=3.14" +# Pre-commit modules: pre-commit = "*" +# Setuptools modules: +setuptools = "*" [requires] python_version = "3.13" diff --git a/Pipfile.lock b/Pipfile.lock index 55a7877..812e94a 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "8a04c3dbdfc0e2db90bdd160658387f396eb317fbd59c5a6133ce193617d7219" + "sha256": "8ab43cab446ff4f5e9ff5aa69d726fba4a31994555e5f7b47090116275fcf469" }, "pipfile-spec": 6, "requires": { @@ -826,6 +826,15 @@ "markers": "python_version >= '3.8'", "version": "==2.32.3" }, + "setuptools": { + "hashes": [ + "sha256:583b361c8da8de57403743e756609670de6fb2345920e36dc5c2d914c319c945", + "sha256:67122e78221da5cf550ddd04cf8742c8fe12094483749a792d56cd669d6cf58c" + ], + "index": "pypi", + "markers": "python_version >= '3.9'", + "version": "==77.0.3" + }, "six": { "hashes": [ "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", diff --git a/benchmark.py b/benchmark.py index 47460af..eb40e6b 100644 --- a/benchmark.py +++ b/benchmark.py @@ -24,8 +24,8 @@ parent_dir = Path(__file__).resolve().parent sys.path.append(str(parent_dir)) -from sources.benchmarking import BenchmarkTracker, benchmark -from sources.manager_cache import CacheManager +from sources.benchmarking import BenchmarkTracker, benchmark # noqa: E402 +from sources.manager_cache import CacheManager # noqa: E402 # Import conditionally to avoid errors if running without full dependencies try: @@ -54,16 +54,18 @@ def run_full_benchmark(username, use_cache=True): 
os.environ["INPUT_SHOW_COMMIT"] = "True" os.environ["INPUT_SHOW_LANGUAGE_PER_REPO"] = "True" os.environ["GITHUB_REPOSITORY"] = f"{username}/{username}" - + # Control caching behavior if not use_cache: # Clear cache before running cache_manager = CacheManager(username) cache_manager.clear_cache() - + # Run the main function try: - waka_main() + from asyncio import run + + run(waka_main()) except Exception as e: print(f"Error running benchmark: {e}") @@ -72,7 +74,7 @@ def print_system_info(): """Print system information for context.""" import platform import multiprocessing - + print("System Information:") print(f" - Python version: {platform.python_version()}") print(f" - OS: {platform.system()} {platform.release()}") @@ -83,41 +85,33 @@ def print_system_info(): def main(): """Main benchmark function.""" parser = argparse.ArgumentParser(description="Benchmark waka-readme-stats") + parser.add_argument("--username", required=True, help="GitHub username to use for benchmarking") parser.add_argument( - "--username", - required=True, - help="GitHub username to use for benchmarking" + "--full", + action="store_true", + help="Run full benchmark suite (including API calls)", ) - parser.add_argument( - "--full", - action="store_true", - help="Run full benchmark suite (including API calls)" - ) - parser.add_argument( - "--no-cache", - action="store_true", - help="Disable caching for benchmarking" - ) - + parser.add_argument("--no-cache", action="store_true", help="Disable caching for benchmarking") + args = parser.parse_args() - + print("Starting benchmarks for waka-readme-stats...\n") print_system_info() - + # Run with cache if not args.no_cache: print("Running benchmark with caching enabled...") start_time = time.time() run_full_benchmark(args.username, use_cache=True) print(f"Completed in {time.time() - start_time:.2f}s with caching enabled\n") - + # Run without cache for comparison if requested if args.no_cache: print("Running benchmark with caching disabled...") start_time = time.time() run_full_benchmark(args.username, use_cache=False) print(f"Completed in {time.time() - start_time:.2f}s with caching disabled\n") - + # Print detailed benchmark results print(BenchmarkTracker.get_summary()) diff --git a/pyproject.toml b/pyproject.toml index f705dcd..6a6db81 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,3 +9,25 @@ exclude = ''' | assets )/ ''' + +[tool.mypy] +python_version = "3.13" +disallow_untyped_defs = false +disallow_incomplete_defs = false +check_untyped_defs = true +disallow_untyped_decorators = false +no_implicit_optional = true +warn_redundant_casts = true +warn_no_return = true +warn_return_any = false +warn_unused_ignores = false + +[[tool.mypy.overrides]] +module = "pytest.*" +ignore_missing_imports = true + +[tool.pyright] +include = ["sources"] +reportMissingImports = true +reportMissingTypeStubs = false +reportInvalidTypeForm = "none" diff --git a/sources/benchmarking.py b/sources/benchmarking.py index 8760099..01c3f10 100644 --- a/sources/benchmarking.py +++ b/sources/benchmarking.py @@ -72,31 +72,32 @@ def get_summary(cls) -> str: summary = "Performance Benchmark Summary:\n" summary += "=================================\n" - + for result in cls._results: summary += f"{result}\n" - + # Add metadata if present if result.metadata: for key, value in result.metadata.items(): summary += f" - {key}: {value}\n" - + summary += "=================================\n" summary += f"Total execution time: {cls.get_total_execution_time():.4f}s\n" - + return summary def benchmark(name: 
Optional[str] = None, metadata: Optional[Dict[str, Any]] = None) -> Callable: """Decorator to benchmark a function's execution time. - + Args: name: Optional name for the benchmark metadata: Optional metadata about the benchmark - + Returns: Decorated function """ + def decorator(func: Callable) -> Callable: @wraps(func) def wrapper(*args: Any, **kwargs: Any) -> Any: @@ -104,48 +105,42 @@ def wrapper(*args: Any, **kwargs: Any) -> Any: start_time = time.time() result = func(*args, **kwargs) end_time = time.time() - + execution_time = end_time - start_time - + # Add dynamic metadata if provided final_metadata = metadata.copy() if metadata else {} - if 'args_count' not in final_metadata: - final_metadata['args_count'] = len(args) - - benchmark_result = BenchmarkResult( - name=benchmark_name, - execution_time=execution_time, - metadata=final_metadata - ) - + if "args_count" not in final_metadata: + final_metadata["args_count"] = len(args) + + benchmark_result = BenchmarkResult(name=benchmark_name, execution_time=execution_time, metadata=final_metadata) + BenchmarkTracker.add_result(benchmark_result) return result + return wrapper + return decorator def benchmark_block(name: str, metadata: Optional[Dict[str, Any]] = None) -> Tuple[Callable, Callable]: """Context manager for benchmarking a block of code. - + Args: name: Name for the benchmark metadata: Optional metadata about the benchmark - + Returns: Start and end functions for the benchmark """ start_time = [0.0] # Use a list to allow modification in nested scope - + def start() -> None: start_time[0] = time.time() - + def end() -> None: execution_time = time.time() - start_time[0] - benchmark_result = BenchmarkResult( - name=name, - execution_time=execution_time, - metadata=metadata - ) + benchmark_result = BenchmarkResult(name=name, execution_time=execution_time, metadata=metadata) BenchmarkTracker.add_result(benchmark_result) - + return start, end diff --git a/sources/benchmarking_test.py b/sources/benchmarking_test.py index 1790191..e752f6b 100644 --- a/sources/benchmarking_test.py +++ b/sources/benchmarking_test.py @@ -1,9 +1,8 @@ import time -from unittest.mock import patch import pytest -from sources.benchmarking import benchmark, benchmark_block, BenchmarkTracker, BenchmarkResult +from benchmarking import benchmark, benchmark_block, BenchmarkTracker, BenchmarkResult @pytest.fixture @@ -16,18 +15,19 @@ def clear_benchmark_results(): def test_benchmark_decorator(clear_benchmark_results): """Test the benchmark decorator functionality.""" + # Define a function to benchmark @benchmark() def example_function(sleep_time): time.sleep(sleep_time) return "result" - + # Run the function result = example_function(0.01) - + # Check the function still returns correctly assert result == "result" - + # Check that the benchmark was recorded benchmark_results = BenchmarkTracker.get_results() assert len(benchmark_results) == 1 @@ -38,12 +38,13 @@ def example_function(sleep_time): def test_benchmark_with_custom_name(clear_benchmark_results): """Test benchmark decorator with custom name.""" + @benchmark(name="CustomTest") def example_function(): return "result" - + example_function() - + benchmark_results = BenchmarkTracker.get_results() assert len(benchmark_results) == 1 assert benchmark_results[0].name == "CustomTest" @@ -51,12 +52,13 @@ def example_function(): def test_benchmark_with_metadata(clear_benchmark_results): """Test benchmark decorator with custom metadata.""" + @benchmark(metadata={"category": "io_operations"}) def example_function(): return 
"result" - + example_function() - + benchmark_results = BenchmarkTracker.get_results() assert len(benchmark_results) == 1 assert benchmark_results[0].metadata.get("category") == "io_operations" @@ -66,11 +68,11 @@ def example_function(): def test_benchmark_block(clear_benchmark_results): """Test the benchmark_block context manager.""" start, end = benchmark_block("test_block", {"type": "code_block"}) - + start() time.sleep(0.01) end() - + benchmark_results = BenchmarkTracker.get_results() assert len(benchmark_results) == 1 assert benchmark_results[0].name == "test_block" @@ -82,19 +84,17 @@ def test_benchmark_tracker_get_total_execution_time(clear_benchmark_results): """Test getting total execution time from the tracker.""" BenchmarkTracker.add_result(BenchmarkResult("test1", 1.5)) BenchmarkTracker.add_result(BenchmarkResult("test2", 2.5)) - + assert BenchmarkTracker.get_total_execution_time() == 4.0 def test_benchmark_tracker_get_summary(clear_benchmark_results): """Test getting a summary from the tracker.""" - BenchmarkTracker.add_result(BenchmarkResult( - "test1", 1.5, {"category": "api_calls"})) - BenchmarkTracker.add_result(BenchmarkResult( - "test2", 2.5, {"category": "data_processing"})) - + BenchmarkTracker.add_result(BenchmarkResult("test1", 1.5, {"category": "api_calls"})) + BenchmarkTracker.add_result(BenchmarkResult("test2", 2.5, {"category": "data_processing"})) + summary = BenchmarkTracker.get_summary() - + assert "Performance Benchmark Summary:" in summary assert "test1: 1.5000s" in summary assert "test2: 2.5000s" in summary @@ -112,7 +112,7 @@ def test_benchmark_tracker_clear_results(clear_benchmark_results): """Test clearing benchmark results.""" BenchmarkTracker.add_result(BenchmarkResult("test1", 1.5)) assert len(BenchmarkTracker.get_results()) == 1 - + BenchmarkTracker.clear_results() assert len(BenchmarkTracker.get_results()) == 0 diff --git a/sources/manager_cache.py b/sources/manager_cache.py index d7db222..9ff363b 100644 --- a/sources/manager_cache.py +++ b/sources/manager_cache.py @@ -12,7 +12,7 @@ class CacheManager: significantly reducing API calls and processing time for users with many repos. 
""" - CACHE_DIR = '.cache' + CACHE_DIR = ".cache" CACHE_EXPIRY = 86400 # Cache expiry in seconds (24 hours) def __init__(self, user_id: str): @@ -42,7 +42,7 @@ def get_cached_data(self, repo_name: str) -> Optional[Dict[str, Any]]: return None try: - with open(self.cache_path, 'r') as f: + with open(self.cache_path, "r") as f: cache_data = json.load(f) if repo_name not in cache_data: @@ -50,10 +50,10 @@ def get_cached_data(self, repo_name: str) -> Optional[Dict[str, Any]]: repo_cache = cache_data[repo_name] # Check if cache is expired - if time.time() - repo_cache.get('timestamp', 0) > self.CACHE_EXPIRY: + if time.time() - repo_cache.get("timestamp", 0) > self.CACHE_EXPIRY: return None - return repo_cache.get('data') + return repo_cache.get("data") except (json.JSONDecodeError, IOError): # If cache file is corrupted or cannot be read, return None return None @@ -68,19 +68,16 @@ def update_cache(self, repo_name: str, data: Dict[str, Any]) -> None: cache_data = {} if self.cache_path.exists(): try: - with open(self.cache_path, 'r') as f: + with open(self.cache_path, "r") as f: cache_data = json.load(f) except (json.JSONDecodeError, IOError): # If cache file is corrupted, start with an empty cache cache_data = {} # Update cache with new data - cache_data[repo_name] = { - 'timestamp': time.time(), - 'data': data - } + cache_data[repo_name] = {"timestamp": time.time(), "data": data} - with open(self.cache_path, 'w') as f: + with open(self.cache_path, "w") as f: json.dump(cache_data, f) def clear_cache(self) -> None: @@ -101,12 +98,12 @@ def get_repo_last_modified(self, repo_name: str) -> Optional[float]: return None try: - with open(self.cache_path, 'r') as f: + with open(self.cache_path, "r") as f: cache_data = json.load(f) if repo_name not in cache_data: return None - return cache_data[repo_name].get('timestamp') + return cache_data[repo_name].get("timestamp") except (json.JSONDecodeError, IOError): return None diff --git a/sources/manager_cache_test.py b/sources/manager_cache_test.py index 791b64c..14a8863 100644 --- a/sources/manager_cache_test.py +++ b/sources/manager_cache_test.py @@ -1,23 +1,22 @@ -import json import os import time from pathlib import Path import pytest -from sources.manager_cache import CacheManager +from manager_cache import CacheManager @pytest.fixture def cache_manager(): - manager = CacheManager('test_user') + manager = CacheManager("test_user") # Ensure clean state for tests - if Path(CacheManager.CACHE_DIR, 'test_user_repo_cache.json').exists(): - os.remove(Path(CacheManager.CACHE_DIR, 'test_user_repo_cache.json')) + if Path(CacheManager.CACHE_DIR, "test_user_repo_cache.json").exists(): + os.remove(Path(CacheManager.CACHE_DIR, "test_user_repo_cache.json")) yield manager # Clean up after tests - if Path(CacheManager.CACHE_DIR, 'test_user_repo_cache.json').exists(): - os.remove(Path(CacheManager.CACHE_DIR, 'test_user_repo_cache.json')) + if Path(CacheManager.CACHE_DIR, "test_user_repo_cache.json").exists(): + os.remove(Path(CacheManager.CACHE_DIR, "test_user_repo_cache.json")) def test_ensure_cache_dir_creation(cache_manager): @@ -27,106 +26,106 @@ def test_ensure_cache_dir_creation(cache_manager): def test_get_cached_data_no_cache_file(cache_manager): """Test getting data when no cache file exists.""" - assert cache_manager.get_cached_data('repo1') is None + assert cache_manager.get_cached_data("repo1") is None def test_update_and_get_cache(cache_manager): """Test updating and retrieving cache.""" - test_data = {'name': 'repo1', 'language': 'Python'} - 
cache_manager.update_cache('repo1', test_data) - - assert Path(CacheManager.CACHE_DIR, 'test_user_repo_cache.json').exists() - assert cache_manager.get_cached_data('repo1') == test_data + test_data = {"name": "repo1", "language": "Python"} + cache_manager.update_cache("repo1", test_data) + + assert Path(CacheManager.CACHE_DIR, "test_user_repo_cache.json").exists() + assert cache_manager.get_cached_data("repo1") == test_data def test_update_existing_cache(cache_manager): """Test updating existing cache entry.""" # Set initial data - initial_data = {'name': 'repo1', 'language': 'Python'} - cache_manager.update_cache('repo1', initial_data) - + initial_data = {"name": "repo1", "language": "Python"} + cache_manager.update_cache("repo1", initial_data) + # Update with new data - updated_data = {'name': 'repo1', 'language': 'JavaScript'} - cache_manager.update_cache('repo1', updated_data) - + updated_data = {"name": "repo1", "language": "JavaScript"} + cache_manager.update_cache("repo1", updated_data) + # Verify update worked - assert cache_manager.get_cached_data('repo1') == updated_data + assert cache_manager.get_cached_data("repo1") == updated_data def test_multiple_repos_cache(cache_manager): """Test caching multiple repositories.""" - repo1_data = {'name': 'repo1', 'language': 'Python'} - repo2_data = {'name': 'repo2', 'language': 'JavaScript'} - - cache_manager.update_cache('repo1', repo1_data) - cache_manager.update_cache('repo2', repo2_data) - - assert cache_manager.get_cached_data('repo1') == repo1_data - assert cache_manager.get_cached_data('repo2') == repo2_data + repo1_data = {"name": "repo1", "language": "Python"} + repo2_data = {"name": "repo2", "language": "JavaScript"} + + cache_manager.update_cache("repo1", repo1_data) + cache_manager.update_cache("repo2", repo2_data) + + assert cache_manager.get_cached_data("repo1") == repo1_data + assert cache_manager.get_cached_data("repo2") == repo2_data def test_clear_cache(cache_manager): """Test clearing the cache.""" # Add some data - cache_manager.update_cache('repo1', {'data': 'test'}) - + cache_manager.update_cache("repo1", {"data": "test"}) + # Verify it exists - assert cache_manager.get_cached_data('repo1') is not None - + assert cache_manager.get_cached_data("repo1") is not None + # Clear and verify it's gone cache_manager.clear_cache() - assert cache_manager.get_cached_data('repo1') is None - assert not Path(CacheManager.CACHE_DIR, 'test_user_repo_cache.json').exists() + assert cache_manager.get_cached_data("repo1") is None + assert not Path(CacheManager.CACHE_DIR, "test_user_repo_cache.json").exists() def test_cache_expiry(cache_manager, monkeypatch): """Test that expired cache entries are not returned.""" # Add data - cache_manager.update_cache('repo1', {'data': 'test'}) - + cache_manager.update_cache("repo1", {"data": "test"}) + # Verify it exists - assert cache_manager.get_cached_data('repo1') is not None - + assert cache_manager.get_cached_data("repo1") is not None + # Mock time to simulate passage of time beyond expiry current_time = time.time() future_time = current_time + CacheManager.CACHE_EXPIRY + 100 - monkeypatch.setattr(time, 'time', lambda: future_time) - + monkeypatch.setattr(time, "time", lambda: future_time) + # Verify expired cache is not returned - assert cache_manager.get_cached_data('repo1') is None + assert cache_manager.get_cached_data("repo1") is None def test_corrupted_cache_file(cache_manager): """Test handling of corrupted cache files.""" # Create a corrupted JSON file os.makedirs(CacheManager.CACHE_DIR, 
exist_ok=True) - with open(Path(CacheManager.CACHE_DIR, 'test_user_repo_cache.json'), 'w') as f: + with open(Path(CacheManager.CACHE_DIR, "test_user_repo_cache.json"), "w") as f: f.write('{"not valid JSON"') - + # Should handle gracefully and return None - assert cache_manager.get_cached_data('repo1') is None - + assert cache_manager.get_cached_data("repo1") is None + # Should be able to update cache even after corruption - cache_manager.update_cache('repo1', {'data': 'new'}) - assert cache_manager.get_cached_data('repo1') == {'data': 'new'} + cache_manager.update_cache("repo1", {"data": "new"}) + assert cache_manager.get_cached_data("repo1") == {"data": "new"} def test_get_repo_last_modified(cache_manager, monkeypatch): """Test getting the last modified timestamp.""" # Mock time for consistent testing test_time = 1617000000.0 - monkeypatch.setattr(time, 'time', lambda: test_time) - + monkeypatch.setattr(time, "time", lambda: test_time) + # Add data - cache_manager.update_cache('repo1', {'data': 'test'}) - + cache_manager.update_cache("repo1", {"data": "test"}) + # Check timestamp - assert cache_manager.get_repo_last_modified('repo1') == test_time - + assert cache_manager.get_repo_last_modified("repo1") == test_time + # Non-existent repo - assert cache_manager.get_repo_last_modified('non_existent') is None + assert cache_manager.get_repo_last_modified("non_existent") is None def test_get_repo_last_modified_no_cache(cache_manager): """Test getting timestamp when no cache exists.""" - assert cache_manager.get_repo_last_modified('repo1') is None + assert cache_manager.get_repo_last_modified("repo1") is None diff --git a/sources/manager_github.py b/sources/manager_github.py index baea94b..1096aba 100644 --- a/sources/manager_github.py +++ b/sources/manager_github.py @@ -62,26 +62,20 @@ def prepare_github_env(): GitHubManager.USER = github.get_user() rmtree(clone_path, ignore_errors=True) - GitHubManager._REMOTE_NAME = ( - f"{GitHubManager.USER.login}/{GitHubManager.USER.login}" - ) + GitHubManager._REMOTE_NAME = f"{GitHubManager.USER.login}/{GitHubManager.USER.login}" # URL for repository with authentication token git_url = f"https://{EM.GH_TOKEN}@github.com/{GitHubManager._REMOTE_NAME}.git" GitHubManager._REPO_PATH = git_url GitHubManager.REMOTE = github.get_repo(GitHubManager._REMOTE_NAME) - GitHubManager.REPO = Repo.clone_from( - GitHubManager._REPO_PATH, to_path=clone_path - ) + GitHubManager.REPO = Repo.clone_from(GitHubManager._REPO_PATH, to_path=clone_path) # Initialize cache with user login GitHubManager.CACHE = CacheManager(GitHubManager.USER.login) if EM.COMMIT_SINGLE: GitHubManager.REPO.git.checkout(GitHubManager.branch(EM.PULL_BRANCH_NAME)) - GitHubManager.REPO.git.checkout( - "--orphan", GitHubManager._SINGLE_COMMIT_BRANCH - ) + GitHubManager.REPO.git.checkout("--orphan", GitHubManager._SINGLE_COMMIT_BRANCH) else: GitHubManager.REPO.git.checkout(GitHubManager.branch(EM.PUSH_BRANCH_NAME)) @@ -101,8 +95,7 @@ def _get_author() -> Actor: else: return Actor( EM.COMMIT_USERNAME or "readme-bot", - EM.COMMIT_EMAIL - or "41898282+github-actions[bot]@users.noreply.github.com", + EM.COMMIT_EMAIL or "41898282+github-actions[bot]@users.noreply.github.com", ) @staticmethod @@ -114,11 +107,7 @@ def branch(requested_branch: str) -> str: :param requested_branch: Requested branch name. :returns: Commit author. 
""" - return ( - GitHubManager.REMOTE.default_branch - if requested_branch == "" - else requested_branch - ) + return GitHubManager.REMOTE.default_branch if requested_branch == "" else requested_branch @staticmethod def _copy_file_and_add_to_repo(src_path: str): @@ -142,17 +131,13 @@ def update_readme(stats: str): environmental variables. """ DBM.i("Updating README...") - readme_path = join( - GitHubManager.REPO.working_tree_dir, GitHubManager.REMOTE.get_readme().path - ) + readme_path = join(GitHubManager.REPO.working_tree_dir, GitHubManager.REMOTE.get_readme().path) with open(readme_path, "r") as readme_file: readme_contents = readme_file.read() # Create formatted stats section with comments - readme_stats = ( - f"{GitHubManager._START_COMMENT}\n{stats}\n{GitHubManager._END_COMMENT}" - ) + readme_stats = f"{GitHubManager._START_COMMENT}\n{stats}\n{GitHubManager._END_COMMENT}" # Replace old stats section with new one new_readme = sub(GitHubManager._README_REGEX, readme_stats, readme_contents) @@ -183,19 +168,12 @@ def update_chart(name: str, path: str) -> str: # Create URL for the raw chart image branch = GitHubManager.branch(EM.PUSH_BRANCH_NAME) - chart_path = ( - f"https://raw.githubusercontent.com/" - f"{GitHubManager._REMOTE_NAME}/{branch}/{path}" - ) + chart_path = f"https://raw.githubusercontent.com/" f"{GitHubManager._REMOTE_NAME}/{branch}/{path}" output += f"![{name} chart]({chart_path})\n\n" else: DBM.i("\tInlining chart...") - hint = ( - "You can use [this website]" - "(https://codebeautify.org/base64-to-image-converter) " - "to view the generated base64 image." - ) + hint = "You can use [this website]" "(https://codebeautify.org/base64-to-image-converter) " "to view the generated base64 image." with open(path, "rb") as input_file: img_data = b64encode(input_file.read()).decode("utf-8") @@ -210,20 +188,13 @@ def commit_update(): """ actor = GitHubManager._get_author() DBM.i("Committing files to repo...") - GitHubManager.REPO.index.commit( - EM.COMMIT_MESSAGE, author=actor, committer=actor - ) + GitHubManager.REPO.index.commit(EM.COMMIT_MESSAGE, author=actor, committer=actor) if EM.COMMIT_SINGLE: DBM.i("Pushing files to repo as a single commit...") - refspec = ( - f"{GitHubManager._SINGLE_COMMIT_BRANCH}:" - f"{GitHubManager.branch(EM.PUSH_BRANCH_NAME)}" - ) + refspec = f"{GitHubManager._SINGLE_COMMIT_BRANCH}:" f"{GitHubManager.branch(EM.PUSH_BRANCH_NAME)}" - headers = GitHubManager.REPO.remotes.origin.push( - force=True, refspec=refspec - ) + headers = GitHubManager.REPO.remotes.origin.push(force=True, refspec=refspec) else: DBM.i("Pushing files to repo...") headers = GitHubManager.REPO.remotes.origin.push() @@ -245,10 +216,7 @@ def set_github_output(stats: str): DBM.p("Not in GitHub environment, not setting action output!") return else: - DBM.i( - "Outputting readme contents, check the latest comment " - "for the generated stats." 
- ) + DBM.i("Outputting readme contents, check the latest comment " "for the generated stats.") prefix = "README stats current output:" eol = "".join(choice(ascii_letters) for _ in range(10)) @@ -288,9 +256,7 @@ def get_repository_data(repo_name: str): DBM.i(f"Fetching fresh data for repository: {repo_name}") try: github = Github(EM.GH_TOKEN) - repo_data = github.get_repo( - f"{GitHubManager.USER.login}/{repo_name}" - ).raw_data + repo_data = github.get_repo(f"{GitHubManager.USER.login}/{repo_name}").raw_data # Update cache with new data GitHubManager.CACHE.update_cache(repo_name, repo_data) diff --git a/sources/yearly_commit_calculator.py b/sources/yearly_commit_calculator.py index cc39a7d..fecd75b 100644 --- a/sources/yearly_commit_calculator.py +++ b/sources/yearly_commit_calculator.py @@ -53,27 +53,17 @@ async def calculate_commit_data(repositories: Dict) -> Tuple[Dict, Dict]: date_data = dict() # Filter out ignored repositories - active_repos = [ - repo for repo in repositories if repo["name"] not in EM.IGNORED_REPOS - ] + active_repos = [repo for repo in repositories if repo["name"] not in EM.IGNORED_REPOS] # Use caching to only process repositories that have changed cached_repos, new_repos = separate_cached_and_new_repos(active_repos) - DBM.i( - f"Processing {len(cached_repos)} cached repositories and {len(new_repos)} new repositories" - ) + DBM.i(f"Processing {len(cached_repos)} cached repositories and {len(new_repos)} new repositories") # Process cached repositories for ind, repo in enumerate(cached_repos): - repo_name = ( - "[private]" - if repo["isPrivate"] - else f"{repo['owner']['login']}/{repo['name']}" - ) - DBM.i( - f"\t{ind + 1}/{len(cached_repos)} Using cached data for repo: {repo_name}" - ) + repo_name = "[private]" if repo["isPrivate"] else f"{repo['owner']['login']}/{repo['name']}" + DBM.i(f"\t{ind + 1}/{len(cached_repos)} Using cached data for repo: {repo_name}") # Get cached commit data and update yearly and date data commit_cache = get_cached_commit_data(repo) @@ -82,11 +72,7 @@ async def calculate_commit_data(repositories: Dict) -> Tuple[Dict, Dict]: # Process new repositories for ind, repo in enumerate(new_repos): - repo_name = ( - "[private]" - if repo["isPrivate"] - else f"{repo['owner']['login']}/{repo['name']}" - ) + repo_name = "[private]" if repo["isPrivate"] else f"{repo['owner']['login']}/{repo['name']}" DBM.i(f"\t{ind + 1}/{len(new_repos)} Retrieving new repo: {repo_name}") await update_data_with_commit_stats(repo, yearly_data, date_data) @@ -140,9 +126,7 @@ def get_cached_commit_data(repo: Dict) -> Optional[Dict]: return GHM.CACHE.get_cached_data(f"{repo['name']}_commits") -def update_yearly_data_from_cache( - commit_cache: Dict, yearly_data: Dict, date_data: Dict -) -> None: +def update_yearly_data_from_cache(commit_cache: Dict, yearly_data: Dict, date_data: Dict) -> None: """ Updates yearly data dictionaries from cached commit data. @@ -185,9 +169,7 @@ def update_yearly_data_from_cache( @benchmark(name="Update Commit Stats", metadata={"operation": "repo_processing"}) -async def update_data_with_commit_stats( - repo_details: Dict, yearly_data: Dict, date_data: Dict -): +async def update_data_with_commit_stats(repo_details: Dict, yearly_data: Dict, date_data: Dict): """ Updates yearly commit data with commits from given repository. Skips update if the commit isn't related to any repository. @@ -197,9 +179,7 @@ async def update_data_with_commit_stats( :param date_data: Commit date dictionary to update. 
""" owner = repo_details["owner"]["login"] - branch_data = await DM.get_remote_graphql( - "repo_branch_list", owner=owner, name=repo_details["name"] - ) + branch_data = await DM.get_remote_graphql("repo_branch_list", owner=owner, name=repo_details["name"]) if len(branch_data) == 0: DBM.w("\t\tBranch data not found, skipping repository...") return @@ -227,9 +207,7 @@ async def update_data_with_commit_stats( quarter = (datetime.fromisoformat(date).month - 1) // 3 + 1 # Update repo-specific date data - repo_date_data[repo_details["name"]][branch["name"]][commit["oid"]] = ( - commit["committedDate"] - ) + repo_date_data[repo_details["name"]][branch["name"]][commit["oid"]] = commit["committedDate"] # Update repository's yearly data if repo_details["primaryLanguage"] is not None: @@ -237,49 +215,29 @@ async def update_data_with_commit_stats( repo_yearly_data[curr_year] = {} if quarter not in repo_yearly_data[curr_year]: repo_yearly_data[curr_year][quarter] = {} - if ( - repo_details["primaryLanguage"]["name"] - not in repo_yearly_data[curr_year][quarter] - ): - repo_yearly_data[curr_year][quarter][ - repo_details["primaryLanguage"]["name"] - ] = {"add": 0, "del": 0} - - repo_yearly_data[curr_year][quarter][ - repo_details["primaryLanguage"]["name"] - ]["add"] += commit["additions"] - repo_yearly_data[curr_year][quarter][ - repo_details["primaryLanguage"]["name"] - ]["del"] += commit["deletions"] + if repo_details["primaryLanguage"]["name"] not in repo_yearly_data[curr_year][quarter]: + repo_yearly_data[curr_year][quarter][repo_details["primaryLanguage"]["name"]] = {"add": 0, "del": 0} + + repo_yearly_data[curr_year][quarter][repo_details["primaryLanguage"]["name"]]["add"] += commit["additions"] + repo_yearly_data[curr_year][quarter][repo_details["primaryLanguage"]["name"]]["del"] += commit["deletions"] # Also update the main yearly data if curr_year not in yearly_data: yearly_data[curr_year] = {} if quarter not in yearly_data[curr_year]: yearly_data[curr_year][quarter] = {} - if ( - repo_details["primaryLanguage"]["name"] - not in yearly_data[curr_year][quarter] - ): - yearly_data[curr_year][quarter][ - repo_details["primaryLanguage"]["name"] - ] = {"add": 0, "del": 0} - - yearly_data[curr_year][quarter][ - repo_details["primaryLanguage"]["name"] - ]["add"] += commit["additions"] - yearly_data[curr_year][quarter][ - repo_details["primaryLanguage"]["name"] - ]["del"] += commit["deletions"] + if repo_details["primaryLanguage"]["name"] not in yearly_data[curr_year][quarter]: + yearly_data[curr_year][quarter][repo_details["primaryLanguage"]["name"]] = {"add": 0, "del": 0} + + yearly_data[curr_year][quarter][repo_details["primaryLanguage"]["name"]]["add"] += commit["additions"] + yearly_data[curr_year][quarter][repo_details["primaryLanguage"]["name"]]["del"] += commit["deletions"] # Update main date data if repo_details["name"] not in date_data: date_data[repo_details["name"]] = {} if branch["name"] not in date_data[repo_details["name"]]: date_data[repo_details["name"]][branch["name"]] = {} - date_data[repo_details["name"]][branch["name"]][commit["oid"]] = commit[ - "committedDate" - ] + date_data[repo_details["name"]][branch["name"]][commit["oid"]] = commit["committedDate"] if not EM.DEBUG_RUN: await sleep(0.4) diff --git a/test.txt b/test.txt deleted file mode 100644 index 524acff..0000000 --- a/test.txt +++ /dev/null @@ -1 +0,0 @@ -Test file From 3b1f223549838b3f0552a7831681af8a45b58215 Mon Sep 17 00:00:00 2001 From: Imamuzzaki Abu Salam Date: Sun, 23 Mar 2025 22:08:24 +0700 Subject: [PATCH 09/12] 
chore: add force_with_lease option to GitHub Actions CI workflow and clean up benchmarking imports --- .github/workflows/ci.yml | 1 + sources/yearly_commit_calculator.py | 11 +---------- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index eba44d5..5721fe2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -74,6 +74,7 @@ jobs: with: github_token: ${{ secrets.github_token }} branch: ${{ github.head_ref }} + force_with_lease: true - name: Create Assets Folder πŸ“₯ run: mkdir assets diff --git a/sources/yearly_commit_calculator.py b/sources/yearly_commit_calculator.py index fecd75b..fbd1392 100644 --- a/sources/yearly_commit_calculator.py +++ b/sources/yearly_commit_calculator.py @@ -12,7 +12,7 @@ # Try to import benchmarking utilities try: - from benchmarking import benchmark, benchmark_block + from benchmarking import benchmark except ImportError: # Define no-op benchmarking functions if not available def benchmark(name=None, metadata=None): @@ -21,15 +21,6 @@ def decorator(func): return decorator - def benchmark_block(name=None, metadata=None): - def start(): - pass - - def end(): - pass - - return start, end - @benchmark(name="Calculate Commit Data", metadata={"operation": "commit_processing"}) async def calculate_commit_data(repositories: Dict) -> Tuple[Dict, Dict]: From c03a95e0538f9a6aa5109de4b9fad9dfd5d79e13 Mon Sep 17 00:00:00 2001 From: Imamuzzaki Abu Salam Date: Sun, 23 Mar 2025 22:11:13 +0700 Subject: [PATCH 10/12] chore: update CI workflow to fetch and merge the current branch before committing coverage.svg, and change force_with_lease to force --- .github/workflows/ci.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5721fe2..d7e78bd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -65,6 +65,8 @@ jobs: run: | git config --local user.email "github-actions[bot]@users.noreply.github.com" git config --local user.name "github-actions[bot]" + git fetch origin ${{ github.head_ref }} + git merge origin/${{ github.head_ref }} git add coverage.svg git commit -m "Updated coverage.svg" @@ -74,7 +76,7 @@ jobs: with: github_token: ${{ secrets.github_token }} branch: ${{ github.head_ref }} - force_with_lease: true + force: true - name: Create Assets Folder πŸ“₯ run: mkdir assets From 8689e1bd5e53bae7701e4ec8530bcec2a4d52679 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 23 Mar 2025 15:25:49 +0000 Subject: [PATCH 11/12] Updated coverage.svg --- coverage.svg | 1 + 1 file changed, 1 insertion(+) create mode 100644 coverage.svg diff --git a/coverage.svg b/coverage.svg new file mode 100644 index 0000000..f6cf2c7 --- /dev/null +++ b/coverage.svg @@ -0,0 +1 @@ +Coverage: 92%Coverage92% \ No newline at end of file From c9b078e9927b9965689e2c613c80731b82346451 Mon Sep 17 00:00:00 2001 From: Imamuzzaki Abu Salam Date: Sun, 23 Mar 2025 23:18:07 +0700 Subject: [PATCH 12/12] test: add comprehensive unit tests for FileManager functionality - Introduced tests for localization management, file writing, and binary caching. - Included setup and teardown fixtures for a clean test environment. - Ensured coverage for both successful operations and error handling. 
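For reference, a minimal sketch of the round-trip these tests exercise (illustrative only; it assumes FileManager.cache_binary pickles to the given path on write and returns the unpickled object on read, which is how the tests mock dump_pickle/load_pickle, and that a missing or unreadable file yields None):

    from manager_file import FileManager

    # Write a dict to a pickle cache file, then read it back.
    FileManager.cache_binary("demo.pkl", {"stars": 42})   # write path
    cached = FileManager.cache_binary("demo.pkl")          # read path
    assert cached == {"stars": 42}

    # A nonexistent cache file returns None instead of raising.
    assert FileManager.cache_binary("missing.pkl") is None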
--- sources/manager_file_test.py | 209 +++++++++++++++++++++++++++++++++++ 1 file changed, 209 insertions(+) create mode 100644 sources/manager_file_test.py diff --git a/sources/manager_file_test.py b/sources/manager_file_test.py new file mode 100644 index 0000000..c8f7832 --- /dev/null +++ b/sources/manager_file_test.py @@ -0,0 +1,209 @@ +import pytest +import os +import json +from unittest.mock import patch, mock_open + +from manager_file import FileManager, init_localization_manager +from manager_environment import EnvironmentManager as EM + + +@pytest.fixture +def sample_translation_data(): + return {"en": {"Monday": "Monday", "Languages": "Languages"}, "fr": {"Monday": "Lundi", "Languages": "Langages"}} + + +@pytest.fixture +def setup_and_teardown(): + """Fixture to setup and teardown test environment""" + # Create test assets directory if it doesn't exist + if not os.path.exists(FileManager.ASSETS_DIR): + os.makedirs(FileManager.ASSETS_DIR) + + # Reset localization dictionary before each test + FileManager._LOCALIZATION = {} + + yield + + # Cleanup any test files created in the assets directory + test_files = ["test_file.txt", "test_binary.pkl"] + for file in test_files: + file_path = os.path.join(FileManager.ASSETS_DIR, file) + if os.path.exists(file_path): + os.remove(file_path) + + # Reset localization dictionary after each test + FileManager._LOCALIZATION = {} + + +def test_init_localization_manager(setup_and_teardown, sample_translation_data): + """Test initialization of localization manager""" + with patch("builtins.open", mock_open(read_data=json.dumps(sample_translation_data))): + with patch("manager_file.load_json", return_value=sample_translation_data): + with patch.object(EM, "LOCALE", "en"): + init_localization_manager() + assert FileManager._LOCALIZATION == sample_translation_data["en"] + + +def test_load_localization(setup_and_teardown, sample_translation_data): + """Test loading localization from file""" + with patch("builtins.open", mock_open(read_data=json.dumps(sample_translation_data))): + with patch("manager_file.load_json", return_value=sample_translation_data): + with patch.object(EM, "LOCALE", "fr"): + FileManager.load_localization("translation.json") + assert FileManager._LOCALIZATION == sample_translation_data["fr"] + + +def test_translate_string(setup_and_teardown): + """Test translating strings""" + FileManager._LOCALIZATION = {"Monday": "Lundi", "Languages": "Langages"} + + assert FileManager.t("Monday") == "Lundi" + assert FileManager.t("Languages") == "Langages" + + +def test_translate_missing_key(setup_and_teardown): + """Test translating with missing key raises KeyError""" + FileManager._LOCALIZATION = {"Key1": "Value1"} + + with pytest.raises(KeyError): + FileManager.t("NonExistentKey") + + +def test_write_file(setup_and_teardown): + """Test writing content to a file""" + test_content = "Test content" + test_filename = "test_file.txt" + + with patch("builtins.open", mock_open()) as mock_file: + FileManager.write_file(test_filename, test_content) + mock_file.assert_called_once_with(test_filename, "w", encoding="utf-8") + mock_file().write.assert_called_once_with(test_content) + + +def test_write_file_append(setup_and_teardown): + """Test appending content to a file""" + test_content = "Test append content" + test_filename = "test_file.txt" + + with patch("builtins.open", mock_open()) as mock_file: + FileManager.write_file(test_filename, test_content, append=True) + mock_file.assert_called_once_with(test_filename, "a", encoding="utf-8") + 
mock_file().write.assert_called_once_with(test_content) + + +def test_write_file_to_assets(setup_and_teardown): + """Test writing content to a file in assets directory""" + test_content = "Test assets content" + test_filename = "test_file.txt" + expected_path = os.path.join(FileManager.ASSETS_DIR, test_filename) + + with patch("builtins.open", mock_open()) as mock_file: + FileManager.write_file(test_filename, test_content, assets=True) + mock_file.assert_called_once_with(expected_path, "w", encoding="utf-8") + mock_file().write.assert_called_once_with(test_content) + + +def test_cache_binary_write(setup_and_teardown): + """Test writing binary data to a cache file""" + test_content = {"key": "value"} + test_filename = "test_binary.pkl" + + with patch("builtins.open", mock_open()) as mock_file: + with patch("manager_file.dump_pickle") as mock_dump: + FileManager.cache_binary(test_filename, test_content) + mock_file.assert_called_once_with(test_filename, "wb") + mock_dump.assert_called_once_with(test_content, mock_file()) + + +def test_cache_binary_write_to_assets(setup_and_teardown): + """Test writing binary data to a cache file in assets directory""" + test_content = {"key": "value"} + test_filename = "test_binary.pkl" + expected_path = os.path.join(FileManager.ASSETS_DIR, test_filename) + + with patch("builtins.open", mock_open()) as mock_file: + with patch("manager_file.dump_pickle") as mock_dump: + FileManager.cache_binary(test_filename, test_content, assets=True) + mock_file.assert_called_once_with(expected_path, "wb") + mock_dump.assert_called_once_with(test_content, mock_file()) + + +def test_cache_binary_read(setup_and_teardown): + """Test reading binary data from a cache file""" + test_result = {"key": "value"} + test_filename = "test_binary.pkl" + + with patch("builtins.open", mock_open()) as mock_file: + with patch("manager_file.load_pickle", return_value=test_result) as mock_load: + with patch("manager_file.isfile", return_value=True): + result = FileManager.cache_binary(test_filename) + mock_file.assert_called_once_with(test_filename, "rb") + mock_load.assert_called_once_with(mock_file()) + assert result == test_result + + +def test_cache_binary_read_from_assets(setup_and_teardown): + """Test reading binary data from a cache file in assets directory""" + test_result = {"key": "value"} + test_filename = "test_binary.pkl" + expected_path = os.path.join(FileManager.ASSETS_DIR, test_filename) + + with patch("builtins.open", mock_open()) as mock_file: + with patch("manager_file.load_pickle", return_value=test_result) as mock_load: + with patch("manager_file.isfile", return_value=True): + result = FileManager.cache_binary(test_filename, assets=True) + mock_file.assert_called_once_with(expected_path, "rb") + mock_load.assert_called_once_with(mock_file()) + assert result == test_result + + +def test_cache_binary_read_nonexistent_file(setup_and_teardown): + """Test reading binary data from a nonexistent cache file""" + test_filename = "nonexistent_file.pkl" + + with patch("manager_file.isfile", return_value=False): + result = FileManager.cache_binary(test_filename) + assert result is None + + +def test_cache_binary_read_exception(setup_and_teardown): + """Test handling exceptions when reading binary cache file""" + test_filename = "test_binary.pkl" + + with patch("builtins.open", mock_open()): + with patch("manager_file.load_pickle", side_effect=Exception("Test exception")): + with patch("manager_file.isfile", return_value=True): + result = FileManager.cache_binary(test_filename) + 
assert result is None + + +def test_integration_write_and_read_file(setup_and_teardown): + """Integration test: write content to file and read it back""" + test_content = "Test integration content" + test_filename = "test_file.txt" + FileManager.write_file(test_filename, test_content) + + with open(test_filename, "r", encoding="utf-8") as file: + content = file.read() + + assert content == test_content + os.remove(test_filename) + + +def test_integration_cache_binary(setup_and_teardown): + """Integration test: write binary data to cache and read it back""" + test_content = {"test_key": "test_value"} + test_filename = "test_binary.pkl" + + # Write test data + FileManager.cache_binary(test_filename, test_content) + + # Read test data + result = FileManager.cache_binary(test_filename) + + assert result == test_content + os.remove(test_filename) + + +if __name__ == "__main__": + pytest.main()
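The new suite can be run locally with pytest; a typical invocation (assuming the project's dependencies are installed and manager_file is importable from the working directory, since the test imports it directly) would be:

    python -m pytest sources/manager_file_test.py -v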