diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 2fd8eb0..2d7f415 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -37,8 +37,8 @@ jobs: - clang - gcc preset: - - debug-strict - - release-strict + - debug-python-strict + - release-python-strict steps: - name: Checkout @@ -118,17 +118,34 @@ jobs: echo "CPPFLAGS=-I$GCC_HOME/include" >>$GITHUB_ENV echo "LDFLAGS=-L$GCC_HOME/lib/gcc/${{ env.GCC_VERSION }}" >>$GITHUB_ENV - - name: Configure with preset + - name: Install Python dependencies run: | - cmake --preset ${{ matrix.preset }} -DENABLE_CLANG_TIDY=${{ matrix.compiler == 'clang' }} + pip3 install -U pytest - - name: Build with preset + - name: Configure run: | - cmake --build --preset ${{ matrix.preset }} --parallel + cmake \ + --preset ${{ matrix.preset }} \ + -DENABLE_CLANG_TIDY=${{ matrix.compiler == 'clang' }} - - name: Test with preset + - name: Build run: | - ctest --preset ${{ matrix.preset }} --parallel + cmake \ + --build \ + --preset ${{ matrix.preset }} \ + --parallel + + - name: Run C++ tests + run: | + ctest \ + --preset ${{ matrix.preset }} \ + --parallel + + - name: Run Python tests + env: + PYTHONPATH: 'python/src:build/${{ matrix.preset }}/binding' + run: | + pytest python/tests -v - name: Upload build logs if: failure() diff --git a/.gitignore b/.gitignore index 13d0eeb..e126d5e 100644 --- a/.gitignore +++ b/.gitignore @@ -14,6 +14,14 @@ out/ *.dylib *.dll +# Python +__pycache__/ +*.pyc +*.pyo +*.pyd +*.pyw +*.pyz + # Debug and temporary files *.log *.tmp diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0808d76..0c76fc3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -17,11 +17,27 @@ repos: # CMake formatting with gersemi - repo: https://github.com/BlankSpruce/gersemi - rev: 0.21.0 + rev: 0.22.1 hooks: - id: gersemi files: (\.cmake|CMakeLists\.txt)$ + # Python formatting with black + - repo: 
https://github.com/psf/black + rev: 25.1.0 + hooks: + - id: black + files: \.py$ + args: ['-S'] + + # Python import sorting with isort + - repo: https://github.com/pycqa/isort + rev: 6.0.1 + hooks: + - id: isort + files: \.py$ + args: ['--profile', 'black', '--filter-files'] + # Markdown linting and formatting - repo: https://github.com/DavidAnson/markdownlint-cli2 rev: v0.18.1 diff --git a/.vscode/settings.json b/.vscode/settings.json index 82fdb7c..1584050 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -18,16 +18,23 @@ "cmake.automaticReconfigure": true, "cmake.configureOnEdit": true, "cmake.configureOnOpen": true, + "cursorpyright.analysis.extraPaths": [ + "python/src", + "build/debug-python/binding", + "build/release-python/binding", + "build/debug-python-no-tests/binding", + "build/release-python-no-tests/binding", + "build/debug-python-strict/binding", + "build/release-python-strict/binding" + ], "editor.codeActionsOnSave": { "source.fixAll.clangd": "explicit", "source.organizeImports": "explicit" }, - "editor.detectIndentation": false, "editor.semanticTokenColorCustomizations": { "enabled": true }, "editor.suggest.insertMode": "replace", - "editor.tabSize": 2, "files.associations": { "*.cpp": "cpp", "*.h": "cpp", @@ -36,6 +43,10 @@ "files.insertFinalNewline": true, "files.trimFinalNewlines": true, "git.ignoreLimitWarning": true, + "isort.args": [ + "--profile", + "black" + ], "markdownlint.config": { "extends": ".markdownlint.yaml" } diff --git a/CMakeLists.txt b/CMakeLists.txt index e0b65a2..bae5fc0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -22,6 +22,7 @@ list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake) # Options option(BUILD_TESTS "Build tests" ${IS_MAIN_PROJECT}) option(BUILD_EXAMPLES "Build examples" ${IS_MAIN_PROJECT}) +option(BUILD_PYTHON_BINDINGS "Build Python bindings" OFF) option(ENABLE_WARNINGS "Enable compiler warnings" ON) # macOS-specific fixes for Mach-O linker errors @@ -117,6 +118,11 @@ 
if(BUILD_EXAMPLES) add_subdirectory(examples) endif() +# Build Python bindings if enabled +if(BUILD_PYTHON_BINDINGS) + add_subdirectory(binding) +endif() + # Installation include(GNUInstallDirs) include(CMakePackageConfigHelpers) diff --git a/CMakePresets.json b/CMakePresets.json index 5d99003..c18d0b7 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -57,7 +57,7 @@ { "name": "debug-no-tests", "displayName": "Debug Configuration (No Tests & Examples)", - "description": "Debug build without tests and examples for faster configuration", + "description": "Debug build without tests and examples", "inherits": "debug", "cacheVariables": { "BUILD_TESTS": "OFF", @@ -67,7 +67,7 @@ { "name": "release-no-tests", "displayName": "Release Configuration (No Tests & Examples)", - "description": "Release build without tests and examples for faster configuration", + "description": "Release build without tests and examples", "inherits": "release", "cacheVariables": { "BUILD_TESTS": "OFF", @@ -76,7 +76,7 @@ }, { "name": "debug-strict", - "displayName": "Debug Configuration (Static Analysis + Strict Warnings)", + "displayName": "Debug Configuration (Static Analysis & Strict Warnings)", "description": "Debug build with static analysis and warnings treated as errors", "inherits": "debug", "cacheVariables": { @@ -87,7 +87,7 @@ }, { "name": "release-strict", - "displayName": "Release Configuration (Static Analysis + Strict Warnings)", + "displayName": "Release Configuration (Static Analysis & Strict Warnings)", "description": "Release build with static analysis and warnings treated as errors", "inherits": "release", "cacheVariables": { @@ -95,6 +95,60 @@ "ENABLE_CPPCHECK": "ON", "WARNINGS_AS_ERRORS": "ON" } + }, + { + "name": "debug-python", + "displayName": "Debug Configuration (Python Bindings)", + "description": "Debug build with Python bindings enabled", + "inherits": "debug", + "cacheVariables": { + "BUILD_PYTHON_BINDINGS": "ON" + } + }, + { + "name": "release-python", + 
"displayName": "Release Configuration (Python Bindings)", + "description": "Release build with Python bindings enabled", + "inherits": "release", + "cacheVariables": { + "BUILD_PYTHON_BINDINGS": "ON" + } + }, + { + "name": "debug-python-no-tests", + "displayName": "Debug Configuration (Python Bindings, No Tests & Examples)", + "description": "Debug build with Python bindings enabled, without tests and examples", + "inherits": "debug-no-tests", + "cacheVariables": { + "BUILD_PYTHON_BINDINGS": "ON" + } + }, + { + "name": "release-python-no-tests", + "displayName": "Release Configuration (Python Bindings, No Tests & Examples)", + "description": "Release build with Python bindings enabled, without tests and examples", + "inherits": "release-no-tests", + "cacheVariables": { + "BUILD_PYTHON_BINDINGS": "ON" + } + }, + { + "name": "debug-python-strict", + "displayName": "Debug Configuration (Python Bindings, Static Analysis & Strict Warnings)", + "description": "Debug build with Python bindings enabled, static analysis and warnings treated as errors", + "inherits": "debug-strict", + "cacheVariables": { + "BUILD_PYTHON_BINDINGS": "ON" + } + }, + { + "name": "release-python-strict", + "displayName": "Release Configuration (Python Bindings, Static Analysis & Strict Warnings)", + "description": "Release build with Python bindings enabled, static analysis and warnings treated as errors", + "inherits": "release-strict", + "cacheVariables": { + "BUILD_PYTHON_BINDINGS": "ON" + } } ], "buildPresets": [ @@ -128,17 +182,59 @@ }, { "name": "debug-strict", - "displayName": "Debug Build (Strict)", - "description": "Build debug configuration with strict warnings", + "displayName": "Debug Build (Static Analysis & Strict Warnings)", + "description": "Build debug configuration with static analysis and warnings treated as errors", "configurePreset": "debug-strict", "jobs": 0 }, { "name": "release-strict", - "displayName": "Release Build (Strict)", - "description": "Build release 
configuration with strict warnings", + "displayName": "Release Build (Static Analysis & Strict Warnings)", + "description": "Build release configuration with static analysis and warnings treated as errors", "configurePreset": "release-strict", "jobs": 0 + }, + { + "name": "debug-python", + "displayName": "Debug Build (Python Bindings)", + "description": "Build debug configuration with Python bindings", + "configurePreset": "debug-python", + "jobs": 0 + }, + { + "name": "release-python", + "displayName": "Release Build (Python Bindings)", + "description": "Build release configuration with Python bindings", + "configurePreset": "release-python", + "jobs": 0 + }, + { + "name": "debug-python-no-tests", + "displayName": "Debug Build (Python Bindings, No Tests & Examples)", + "description": "Build debug configuration with Python bindings, without tests and examples", + "configurePreset": "debug-python-no-tests", + "jobs": 0 + }, + { + "name": "release-python-no-tests", + "displayName": "Release Build (Python Bindings, No Tests & Examples)", + "description": "Build release configuration with Python bindings, without tests and examples", + "configurePreset": "release-python-no-tests", + "jobs": 0 + }, + { + "name": "debug-python-strict", + "displayName": "Debug Build (Python Bindings, Static Analysis & Strict Warnings)", + "description": "Build debug configuration with Python bindings, static analysis and warnings treated as errors", + "configurePreset": "debug-python-strict", + "jobs": 0 + }, + { + "name": "release-python-strict", + "displayName": "Release Build (Python Bindings, Static Analysis & Strict Warnings)", + "description": "Build release configuration with Python bindings, static analysis and warnings treated as errors", + "configurePreset": "release-python-strict", + "jobs": 0 } ], "testPresets": [ @@ -151,7 +247,7 @@ "outputOnFailure": true }, "execution": { - "noTestsAction": "error", + "noTestsAction": "ignore", "stopOnFailure": false } }, @@ -164,14 +260,14 
@@ "outputOnFailure": true }, "execution": { - "noTestsAction": "error", + "noTestsAction": "ignore", "stopOnFailure": false } }, { "name": "debug-strict", - "displayName": "Debug Tests (Strict)", - "description": "Run tests in debug configuration with strict warnings", + "displayName": "Debug Tests (Static Analysis & Strict Warnings)", + "description": "Run tests in debug configuration with static analysis and warnings treated as errors", "configurePreset": "debug-strict", "output": { "outputOnFailure": true @@ -183,8 +279,8 @@ }, { "name": "release-strict", - "displayName": "Release Tests (Strict)", - "description": "Run tests in release configuration with strict warnings", + "displayName": "Release Tests (Static Analysis & Strict Warnings)", + "description": "Run tests in release configuration with static analysis and warnings treated as errors", "configurePreset": "release-strict", "output": { "outputOnFailure": true @@ -193,6 +289,58 @@ "noTestsAction": "error", "stopOnFailure": false } + }, + { + "name": "debug-python", + "displayName": "Debug Tests (Python Bindings)", + "description": "Run tests in debug configuration with Python bindings", + "configurePreset": "debug-python", + "output": { + "outputOnFailure": true + }, + "execution": { + "noTestsAction": "ignore", + "stopOnFailure": false + } + }, + { + "name": "release-python", + "displayName": "Release Tests (Python Bindings)", + "description": "Run tests in release configuration with Python bindings", + "configurePreset": "release-python", + "output": { + "outputOnFailure": true + }, + "execution": { + "noTestsAction": "ignore", + "stopOnFailure": false + } + }, + { + "name": "debug-python-strict", + "displayName": "Debug Tests (Python Bindings, Static Analysis & Strict Warnings)", + "description": "Run tests in debug configuration with Python bindings, static analysis and warnings treated as errors", + "configurePreset": "debug-python-strict", + "output": { + "outputOnFailure": true + }, + "execution": 
{ + "noTestsAction": "error", + "stopOnFailure": false + } + }, + { + "name": "release-python-strict", + "displayName": "Release Tests (Python Bindings, Static Analysis & Strict Warnings)", + "description": "Run tests in release configuration with Python bindings, static analysis and warnings treated as errors", + "configurePreset": "release-python-strict", + "output": { + "outputOnFailure": true + }, + "execution": { + "noTestsAction": "error", + "stopOnFailure": false + } } ], "packagePresets": [ @@ -207,13 +355,49 @@ "displayName": "Release Package", "description": "Create package from release build", "configurePreset": "release" + }, + { + "name": "debug-strict", + "displayName": "Debug Package (Static Analysis & Strict Warnings)", + "description": "Create package from debug build with static analysis and warnings treated as errors", + "configurePreset": "debug-strict" + }, + { + "name": "release-strict", + "displayName": "Release Package (Static Analysis & Strict Warnings)", + "description": "Create package from release build with static analysis and warnings treated as errors", + "configurePreset": "release-strict" + }, + { + "name": "debug-python", + "displayName": "Debug Package (Python Bindings)", + "description": "Create package from debug build with Python bindings", + "configurePreset": "debug-python" + }, + { + "name": "release-python", + "displayName": "Release Package (Python Bindings)", + "description": "Create package from release build with Python bindings", + "configurePreset": "release-python" + }, + { + "name": "debug-python-strict", + "displayName": "Debug Package (Python Bindings, Static Analysis & Strict Warnings)", + "description": "Create package from debug build with Python bindings, static analysis and warnings treated as errors", + "configurePreset": "debug-python-strict" + }, + { + "name": "release-python-strict", + "displayName": "Release Package (Python Bindings, Static Analysis & Strict Warnings)", + "description": "Create package 
from release build with Python bindings, static analysis and warnings treated as errors", + "configurePreset": "release-python-strict" } ], "workflowPresets": [ { "name": "debug-workflow", "displayName": "Debug Workflow", - "description": "Complete debug workflow: configure, build, test", + "description": "Complete debug workflow", "steps": [ { "type": "configure", @@ -232,7 +416,7 @@ { "name": "release-workflow", "displayName": "Release Workflow", - "description": "Complete release workflow: configure, build, test", + "description": "Complete release workflow", "steps": [ { "type": "configure", @@ -247,6 +431,120 @@ "name": "release" } ] + }, + { + "name": "debug-strict-workflow", + "displayName": "Debug Workflow (Static Analysis & Strict Warnings)", + "description": "Complete debug workflow with static analysis and warnings treated as errors: configure, build, test", + "steps": [ + { + "type": "configure", + "name": "debug-strict" + }, + { + "type": "build", + "name": "debug-strict" + }, + { + "type": "test", + "name": "debug-strict" + } + ] + }, + { + "name": "release-strict-workflow", + "displayName": "Release Workflow (Static Analysis & Strict Warnings)", + "description": "Complete release workflow with static analysis and warnings treated as errors: configure, build, test", + "steps": [ + { + "type": "configure", + "name": "release-strict" + }, + { + "type": "build", + "name": "release-strict" + }, + { + "type": "test", + "name": "release-strict" + } + ] + }, + { + "name": "debug-python-workflow", + "displayName": "Debug Workflow (Python Bindings)", + "description": "Complete debug workflow with Python bindings", + "steps": [ + { + "type": "configure", + "name": "debug-python" + }, + { + "type": "build", + "name": "debug-python" + }, + { + "type": "test", + "name": "debug-python" + } + ] + }, + { + "name": "release-python-workflow", + "displayName": "Release Workflow (Python Bindings)", + "description": "Complete release workflow with Python bindings", + 
"steps": [ + { + "type": "configure", + "name": "release-python" + }, + { + "type": "build", + "name": "release-python" + }, + { + "type": "test", + "name": "release-python" + } + ] + }, + { + "name": "debug-python-strict-workflow", + "displayName": "Debug Workflow (Python Bindings, Static Analysis & Strict Warnings)", + "description": "Complete debug workflow with Python bindings, static analysis and warnings treated as errors", + "steps": [ + { + "type": "configure", + "name": "debug-python-strict" + }, + { + "type": "build", + "name": "debug-python-strict" + }, + { + "type": "test", + "name": "debug-python-strict" + } + ] + }, + { + "name": "release-python-strict-workflow", + "displayName": "Release Workflow (Python Bindings, Static Analysis & Strict Warnings)", + "description": "Complete release workflow with Python bindings, static analysis and warnings treated as errors", + "steps": [ + { + "type": "configure", + "name": "release-python-strict" + }, + { + "type": "build", + "name": "release-python-strict" + }, + { + "type": "test", + "name": "release-python-strict" + } + ] } ] } diff --git a/README.md b/README.md index a114275..b98bf86 100644 --- a/README.md +++ b/README.md @@ -30,6 +30,7 @@ This project serves as both a learning resource and a reference implementation f - **Type Safety**: Concept-based constraints preventing common programming errors - **Performance Tools**: Built-in timing utilities and benchmark framework - **Error Handling**: Multiple error handling strategies (exceptions, `Result` type, `std::expected`) +- **Python Bindings**: Complete pybind11 integration with modern Python 3.13 features ### Code Quality & Development @@ -53,6 +54,9 @@ This project serves as both a learning resource and a reference implementation f - [Build the project](#build-the-project) - [Run individual examples](#run-individual-examples) - [Run tests](#run-tests) + - [Build Python bindings](#build-python-bindings) + - [Run Python examples](#run-python-examples) + - 
[Run Python tests](#run-python-tests) - [CMake Presets](#cmake-presets) - [Configure Presets](#configure-presets) - [Build \& Test Presets](#build--test-presets) @@ -92,6 +96,7 @@ This project demonstrates practical applications of: - **C++23 compatible compiler** ([Clang] 20+ / [GCC] 14+) - **[CMake] 3.23+** - **[Ninja] build system** (required for CMake - faster builds than Make) +- **Python 3.13+** (optional, for Python bindings) It's recommended to use a development container for the best development experience. See [`.devcontainer/README.md`](.devcontainer/README.md) for more details. @@ -134,20 +139,47 @@ cmake --build --preset release ctest --preset release ``` +#### Build Python bindings + +```bash +cmake --preset release-python +cmake --build --preset release-python +``` + +#### Run Python examples + +```bash +python3 python/examples/basic_usage.py +python3 python/examples/advanced_usage.py +``` + +#### Run Python tests + +```bash +# pip3 install pytest +pytest python/tests -v +``` + ### CMake Presets This project uses CMake presets for streamlined build configuration. 
#### Configure Presets -| Preset | Description | -| ------------------ | ----------------------------------------------------------------- | -| `debug` | Debug build with symbols and no optimization | -| `release` | Release build with full optimization | -| `debug-no-tests` | Debug build without tests and examples (faster config) | -| `release-no-tests` | Release build without tests and examples (faster config) | -| `debug-strict` | Debug build with static analysis and warnings treated as errors | -| `release-strict` | Release build with static analysis and warnings treated as errors | +| Preset | Description | +| ------------------------- | ---------------------------------------------------------------------------------- | +| `debug` | Debug build with symbols and no optimization | +| `release` | Release build with full optimization | +| `debug-no-tests` | Debug build without tests and examples (faster config) | +| `release-no-tests` | Release build without tests and examples (faster config) | +| `debug-strict` | Debug build with static analysis and warnings treated as errors | +| `release-strict` | Release build with static analysis and warnings treated as errors | +| `debug-python` | Debug build with Python bindings | +| `release-python` | Release build with Python bindings | +| `debug-python-no-tests` | Debug build with Python bindings, without tests and examples | +| `release-python-no-tests` | Release build with Python bindings, without tests and examples | +| `debug-python-strict` | Debug build with Python bindings, static analysis and warnings treated as errors | +| `release-python-strict` | Release build with Python bindings, static analysis and warnings treated as errors | #### Build & Test Presets @@ -155,10 +187,18 @@ Each configure preset has corresponding build and test presets with the same nam #### Workflow Presets -| Preset | Description | -| ------------------ | --------------------------------------------------- | -| `debug-workflow` | Complete 
debug workflow: configure + build + test | -| `release-workflow` | Complete release workflow: configure + build + test | +Each workflow preset is a combination of the configure, build, and test presets. + +| Preset | Description | +| -------------------------------- | ---------------------------------------------------- | +| `debug-workflow` | Complete workflow for `debug` preset | +| `release-workflow` | Complete workflow for `release` preset | +| `debug-strict-workflow` | Complete workflow for `debug-strict` preset | +| `release-strict-workflow` | Complete workflow for `release-strict` preset | +| `debug-python-workflow` | Complete workflow for `debug-python` preset | +| `release-python-workflow` | Complete workflow for `release-python` preset | +| `debug-python-strict-workflow` | Complete workflow for `debug-python-strict` preset | +| `release-python-strict-workflow` | Complete workflow for `release-python-strict` preset | #### Usage Examples @@ -189,7 +229,7 @@ cmake --workflow --preset release-workflow # Complete release cycle ### Build Options -See [`cmake/README.md`](cmake/README.md#options) for available build options. +See [`cmake/README.md`](cmake/README.md#options) for additional build options. ### Pre-commit Setup (Recommended) @@ -223,6 +263,7 @@ pre-commit run --all-files - Checks for added large files - **clang-format**: Formats C++ code according to the project style - **gersemi**: Formats CMake files with consistent indentation +- **black**: Formats Python code with consistent style - **markdownlint-cli2**: Lints Markdown files with consistent formatting The hooks will run automatically on `git commit` and prevent commits with formatting issues. 
@@ -245,6 +286,7 @@ auto main() -> int { Container numbers{42, 17, 89, 3, 56}; std::println("Original: {}", numbers); + // Sort the container in place SortContainer(numbers); std::println("Sorted: {}", numbers); @@ -260,47 +302,61 @@ auto main() -> int { ```text cpp-demo-project/ -├── .github/ # GitHub Actions configuration -│ └── workflows/ # GitHub Actions workflows -├── .vscode/ # VS Code configuration -│ ├── launch.json # VS Code launch configuration -│ ├── settings.json # VS Code settings -│ └── tasks.json # VS Code tasks -├── build/ # Build output (generated by CMake) -│ ├── debug/ # Debug build output -│ ├── release/ # Release build output +├── .github/ # GitHub Actions configuration +│ └── workflows/ # GitHub Actions workflows +├── .vscode/ # VS Code configuration +│ ├── launch.json # VS Code launch configuration +│ ├── settings.json # VS Code settings +│ └── tasks.json # VS Code tasks +├── build/ # Build output (generated by CMake) +│ ├── debug/ # Debug build output +│ ├── release/ # Release build output │ └── [other presets] -├── cmake/ # CMake modules and utilities -│ ├── CompilerWarnings.cmake # Compiler warning configuration -│ ├── Dependencies.cmake # External dependencies configuration -│ ├── ModuleHelpers.cmake # Module helper functions -│ ├── StaticAnalysis.cmake # Static analysis tools -│ ├── config.cmake.in # Package configuration -│ └── README.md # CMake modules documentation -├── include/ # Public header files -│ ├── algorithms/ # STL algorithm wrappers with concepts -│ ├── concepts/ # Custom concepts and type traits -│ ├── containers/ # Modern container wrapper with ranges support -│ ├── exceptions/ # Custom exception hierarchy and Result type -│ ├── memory/ # Resource management and RAII utilities -│ ├── random/ # Type-safe random number generation -│ ├── shapes/ # Polymorphic shapes with factory functions -│ └── timing/ # Performance measurement and benchmarking -├── src/ # Source implementation files -│ ├── CMakeLists.txt # Components 
configuration -│ └── [mirrors include structure] -├── examples/ # Usage examples and demonstrations -├── tests/ # Test suite using Catch2 v3 -├── .clang-format # clang-format configuration (for C++ code formatting) -├── .clang-tidy # clang-tidy configuration (for static analysis) -├── .clangd # clangd configuration (for code completion) -├── .gersemirc # gersemi configuration (for CMake code formatting) -├── .markdownlint.yaml # markdownlint configuration (for Markdown formatting) -├── .pre-commit-config.yaml # pre-commit hooks configuration -├── CMakeLists.txt # Main project configuration -├── CMakePresets.json # CMake presets configuration -├── LICENSE # MIT License -└── README.md # This file +├── cmake/ # CMake modules and utilities +│ ├── CompilerWarnings.cmake # Compiler warning configuration +│ ├── Dependencies.cmake # External dependencies configuration +│ ├── ModuleHelpers.cmake # Module helper functions +│ ├── StaticAnalysis.cmake # Static analysis tools +│ ├── config.cmake.in # Package configuration +│ └── README.md # CMake modules documentation +├── include/ # Public C++ header files +│ ├── algorithms/ # STL algorithm wrappers with concepts +│ ├── concepts/ # Custom concepts and type traits +│ ├── containers/ # Modern container wrapper with ranges support +│ ├── exceptions/ # Custom exception hierarchy and Result type +│ ├── memory/ # Resource management and RAII utilities +│ ├── random/ # Type-safe random number generation +│ ├── shapes/ # Polymorphic shapes with factory functions +│ └── timing/ # Performance measurement and benchmarking +├── src/ # C++ source implementation files +│ ├── CMakeLists.txt # Components configuration +│ └── [module]/ # C++ source implementation for the component +├── examples/ # C++ usage examples and demonstrations +│ └── [module]_example.cpp # C++ usage examples for the component +├── tests/ # C++ test suite using Catch2 v3 +│ └── test_[module].cpp # C++ unit tests for the component +├── binding/ # pybind11 C++ binding 
files +│ ├── CMakeLists.txt # Python bindings configuration +│ ├── cpp_features.cpp # Main pybind11 module +│ └── [module]_binding.cpp # Individual module bindings +├── python/ # Python wrapper modules +│ ├── src/ # Python source implementation files +│ │ └── [module].py # Python source implementation for the component +│ ├── examples/ # Python usage examples and demonstrations +│ │ └── [module]_example.py # Python usage examples for the component +│ └── tests/ # Python test suite using pytest +│ ├── conftest.py # pytest configuration and common fixtures +│ └── test_[module].py # Python unit tests for the component +├── .clang-format # clang-format configuration (for C++ code formatting) +├── .clang-tidy # clang-tidy configuration (for static analysis) +├── .clangd # clangd configuration (for code completion) +├── .gersemirc # gersemi configuration (for CMake code formatting) +├── .markdownlint.yaml # markdownlint configuration (for Markdown formatting) +├── .pre-commit-config.yaml # pre-commit hooks configuration +├── CMakeLists.txt # Main project configuration +├── CMakePresets.json # CMake presets configuration +├── LICENSE # MIT License +└── README.md # This file ``` ## 🔧 Components Overview @@ -320,12 +376,19 @@ cpp-demo-project/ ### Code Style -This project follows the **Google C++ Style Guide** with some modifications: - -- **Automatic formatting**: Uses `.clang-format` for C++ code and `gersemi` for CMake files -- **Static analysis**: Enabled with `.clang-tidy` for code quality checks -- **Modern C++ practices**: Follows Core Guidelines and C++23 best practices -- **Documentation**: Comprehensive Doxygen-style documentation +- **Consistent formatting** + - Uses `clang-format` for C++ code + - Uses `gersemi` for CMake files + - Uses `black` for Python code + - Uses `markdownlint-cli2` for Markdown files +- **Static analysis** + - Uses `clang-tidy` and `cppcheck` for static analysis +- **Modern practices** + - Follows Core Guidelines and modern C++23 best 
practices + - Follows PEP 8 and modern Python 3.13 conventions +- **Comprehensive documentation** + - Doxygen-style documentation for C++ code + - Numpy-style docstrings for Python code ### Pre-commit Configuration @@ -344,18 +407,34 @@ repos: # C++ formatting with clang-format - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v20.1.7 + rev: v20.1.8 hooks: - id: clang-format files: \.(cpp|hpp|h)$ # CMake formatting with gersemi - repo: https://github.com/BlankSpruce/gersemi - rev: 0.19.3 + rev: 0.22.1 hooks: - id: gersemi files: (\.cmake|CMakeLists\.txt)$ + # Python formatting with black + - repo: https://github.com/psf/black + rev: 25.1.0 + hooks: + - id: black + files: \.py$ + args: ['-S'] + + # Python import sorting with isort + - repo: https://github.com/pycqa/isort + rev: 6.0.1 + hooks: + - id: isort + files: \.py$ + args: ['--profile', 'black', '--filter-files'] + # Markdown linting and formatting - repo: https://github.com/DavidAnson/markdownlint-cli2 rev: v0.18.1 diff --git a/binding/.clang-tidy b/binding/.clang-tidy new file mode 100644 index 0000000..91ee0e3 --- /dev/null +++ b/binding/.clang-tidy @@ -0,0 +1,17 @@ +--- +InheritParentConfig: true +Checks: > + bugprone-*, + -bugprone-exception-escape, + cert-*, + clang-analyzer-*, + google-*, + -google-build-using-namespace, + misc-*, + -misc-const-correctness, + -misc-use-internal-linkage, + modernize-*, + performance-*, + readability-*, + -readability-identifier-length, + -readability-magic-numbers diff --git a/binding/CMakeLists.txt b/binding/CMakeLists.txt new file mode 100644 index 0000000..c1778e8 --- /dev/null +++ b/binding/CMakeLists.txt @@ -0,0 +1,51 @@ +# Python bindings + +# Ensure pybind11 is available +if(NOT TARGET pybind11::pybind11) + message( + FATAL_ERROR + "pybind11 not found. Enable BUILD_PYTHON_BINDINGS and ensure pybind11 is available." 
+ ) +endif() + +# Create the main Python module +pybind11_add_module(cpp_features + cpp_features.cpp + algorithms_binding.cpp + containers_binding.cpp + exceptions_binding.cpp + random_binding.cpp + shapes_binding.cpp + timing_binding.cpp +) + +# Link against all the required libraries +target_link_libraries(cpp_features PRIVATE demo::lib) + +# Include directories +target_include_directories( + cpp_features + PRIVATE + ${CMAKE_SOURCE_DIR}/include + ${CMAKE_SOURCE_DIR}/src +) + +# Set properties for the Python module +set_target_properties( + cpp_features + PROPERTIES + CXX_STANDARD + 23 + CXX_STANDARD_REQUIRED + ON + CXX_VISIBILITY_PRESET + hidden + VISIBILITY_INLINES_HIDDEN + ON +) + +# Compile definitions +target_compile_definitions(cpp_features PRIVATE VERSION_INFO="${PROJECT_VERSION}") + +# Install the Python module +install(TARGETS cpp_features COMPONENT python LIBRARY DESTINATION .) diff --git a/binding/algorithms_binding.cpp b/binding/algorithms_binding.cpp new file mode 100644 index 0000000..c7dfa84 --- /dev/null +++ b/binding/algorithms_binding.cpp @@ -0,0 +1,86 @@ +/** + * @file algorithms_binding.cpp + * @brief Python bindings for the algorithms module + */ + +#include +#include +#include +#include +#include + +#include + +#include "algorithms/stl.hpp" +#include "containers/container.hpp" + +namespace py = pybind11; +using namespace cpp_features::algorithms; +using cpp_features::containers::Container; + +namespace { + +template +auto CountIfWrapper(const Range &range, + const std::function)> &predicate) { + return CountIf(range, predicate); +} + +template > +auto TransformToVectorWrapper( + const Range &range, const std::function)> &transform) { + return TransformToVector(range, transform); +} + +template +auto FindMinMaxWrapper(const Range &range) { + return FindMinMax(range); +} + +} // namespace + +void BindAlgorithms(py::module &m) { + // Bind sort functions + m.def("sort", &SortContainer>); + m.def("sort", &SortContainer>); + m.def("sort", 
&SortContainer>); + m.def("sort", &SortContainer>); + m.def("sort", &SortContainer>); + m.def("sort", &SortContainer>); + + // Bind count_if functions + m.def("count_if", &CountIfWrapper>); + m.def("count_if", &CountIfWrapper>); + m.def("count_if", &CountIfWrapper>); + m.def("count_if", &CountIfWrapper>); + m.def("count_if", &CountIfWrapper>); + m.def("count_if", &CountIfWrapper>); + + // Bind transform functions + m.def("transform_to_list", &TransformToVectorWrapper, int>); + m.def("transform_to_list", &TransformToVectorWrapper, double>); + m.def("transform_to_list", &TransformToVectorWrapper, std::string>); + m.def("transform_to_list", &TransformToVectorWrapper, int>); + m.def("transform_to_list", &TransformToVectorWrapper, double>); + m.def("transform_to_list", &TransformToVectorWrapper, std::string>); + m.def("transform_to_list", &TransformToVectorWrapper, int>); + m.def("transform_to_list", &TransformToVectorWrapper, double>); + m.def("transform_to_list", &TransformToVectorWrapper, std::string>); + m.def("transform_to_list", &TransformToVectorWrapper, int>); + m.def("transform_to_list", &TransformToVectorWrapper, double>); + m.def("transform_to_list", &TransformToVectorWrapper, std::string>); + m.def("transform_to_list", &TransformToVectorWrapper, int>); + m.def("transform_to_list", &TransformToVectorWrapper, double>); + m.def("transform_to_list", &TransformToVectorWrapper, std::string>); + m.def("transform_to_list", &TransformToVectorWrapper, int>); + m.def("transform_to_list", &TransformToVectorWrapper, double>); + m.def("transform_to_list", &TransformToVectorWrapper, std::string>); + + // Bind find_min_max functions + m.def("find_min_max", &FindMinMaxWrapper>); + m.def("find_min_max", &FindMinMaxWrapper>); + m.def("find_min_max", &FindMinMaxWrapper>); + m.def("find_min_max", &FindMinMaxWrapper>); + m.def("find_min_max", &FindMinMaxWrapper>); + m.def("find_min_max", &FindMinMaxWrapper>); +} diff --git a/binding/containers_binding.cpp 
b/binding/containers_binding.cpp new file mode 100644 index 0000000..4ef6690 --- /dev/null +++ b/binding/containers_binding.cpp @@ -0,0 +1,134 @@ +/** + * @file containers_binding.cpp + * @brief Python bindings for the containers module + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "concepts/concepts.hpp" +#include "concepts/utility_concepts.hpp" +#include "containers/container.hpp" + +namespace py = pybind11; +using namespace cpp_features::containers; +using cpp_features::concepts::CopyableType; +using cpp_features::concepts::TransformFunction; + +namespace { + +template +auto GetItem(const Container &self, typename Container::size_type index) -> T { + if (auto result = self.At(index); result) { + return result->get(); + } + throw py::index_error("Index out of bounds"); +} + +template +auto GetTransformWrapper(const Container &self, + const std::function &transform) { + return self.template GetTransformedView(transform); +} + +template +auto GetIter(const Container &self) { + return py::make_iterator(self.begin(), self.end()); +} + +template +auto GetRepr(const Container &self, std::string_view type_name) { + return std::format("<{}Container(size={}) at {}>", type_name, self.GetSize(), + static_cast(&self)); +} + +} // namespace + +void BindContainers(py::module &m) { + using IntContainer = Container; + using FloatContainer = Container; + using StringContainer = Container; + + // Bind ContainerError enum + py::enum_(m, "ContainerError") + .value("EMPTY", ContainerError::kEmpty) + .value("OUT_OF_BOUNDS", ContainerError::kOutOfBounds) + .value("INVALID_OPERATION", ContainerError::kInvalidOperation); + + // Bind Container + py::class_(m, "IntContainer") + .def(py::init<>()) + .def(py::init()) + .def(py::init>()) + .def(py::init>()) + .def("add", py::overload_cast(&IntContainer::Add)) + .def("remove", &IntContainer::Remove) + .def("size", &IntContainer::GetSize) + .def("empty", &IntContainer::IsEmpty) + 
.def("at", &GetItem) + .def("view", &IntContainer::GetView) + .def("filter", &IntContainer::GetFilteredView>) + .def("transform", &GetTransformWrapper) + .def("transform", &GetTransformWrapper) + .def("transform", &GetTransformWrapper) + .def("__len__", &IntContainer::GetSize) + .def("__bool__", [](const IntContainer &self) { return !self.IsEmpty(); }) + .def("__getitem__", &GetItem) + .def("__iter__", &GetIter, py::keep_alive<0, 1>()) + .def("__str__", [](const IntContainer &self) { return std::format("{}", self); }) + .def("__repr__", [](const IntContainer &self) { return GetRepr(self, "Int"); }); + + // Bind Container + py::class_(m, "FloatContainer") + .def(py::init<>()) + .def(py::init()) + .def(py::init>()) + .def(py::init>()) + .def("add", py::overload_cast(&FloatContainer::Add)) + .def("remove", &FloatContainer::Remove) + .def("size", &FloatContainer::GetSize) + .def("empty", &FloatContainer::IsEmpty) + .def("at", &GetItem) + .def("view", &FloatContainer::GetView) + .def("filter", &FloatContainer::GetFilteredView>) + .def("transform", &GetTransformWrapper) + .def("transform", &GetTransformWrapper) + .def("transform", &GetTransformWrapper) + .def("__len__", &FloatContainer::GetSize) + .def("__bool__", [](const FloatContainer &self) { return !self.IsEmpty(); }) + .def("__getitem__", &GetItem) + .def("__iter__", &GetIter, py::keep_alive<0, 1>()) + .def("__str__", [](const FloatContainer &self) { return std::format("{}", self); }) + .def("__repr__", [](const FloatContainer &self) { return GetRepr(self, "Float"); }); + + // Bind Container + py::class_(m, "StringContainer") + .def(py::init<>()) + .def(py::init()) + .def(py::init>()) + .def(py::init>()) + .def("add", py::overload_cast(&StringContainer::Add)) + .def("remove", &StringContainer::Remove) + .def("size", &StringContainer::GetSize) + .def("empty", &StringContainer::IsEmpty) + .def("at", &GetItem) + .def("view", &StringContainer::GetView) + .def("filter", &StringContainer::GetFilteredView>) + 
.def("transform", &GetTransformWrapper) + .def("transform", &GetTransformWrapper) + .def("transform", &GetTransformWrapper) + .def("__len__", &StringContainer::GetSize) + .def("__bool__", [](const StringContainer &self) { return !self.IsEmpty(); }) + .def("__getitem__", &GetItem) + .def("__iter__", &GetIter, py::keep_alive<0, 1>()) + .def("__str__", [](const StringContainer &self) { return std::format("{}", self); }) + .def("__repr__", [](const StringContainer &self) { return GetRepr(self, "String"); }); +} diff --git a/binding/cpp_features.cpp b/binding/cpp_features.cpp new file mode 100644 index 0000000..c92f728 --- /dev/null +++ b/binding/cpp_features.cpp @@ -0,0 +1,43 @@ +/** + * @file cpp_features.cpp + * @brief Main pybind11 module + * + * This file contains the main pybind11 module definition that brings together all the individual + * component bindings into a single Python module. + */ + +#include +#include + +namespace py = pybind11; + +// Forward declarations for binding functions +void BindAlgorithms(py::module &m); +void BindContainers(py::module &m); +void BindExceptions(py::module &m); +void BindRandom(py::module &m); +void BindShapes(py::module &m); +void BindTiming(py::module &m); + +PYBIND11_MODULE(cpp_features, m) { + m.doc() = "Python wrappers for the C++ demo project"; + m.attr("__version__") = PYBIND11_STRINGIFY(VERSION_INFO); + + auto algorithms = m.def_submodule("algorithms"); + BindAlgorithms(algorithms); + + auto containers = m.def_submodule("containers"); + BindContainers(containers); + + auto exceptions = m.def_submodule("exceptions"); + BindExceptions(exceptions); + + auto random = m.def_submodule("random"); + BindRandom(random); + + auto shapes = m.def_submodule("shapes"); + BindShapes(shapes); + + auto timing = m.def_submodule("timing"); + BindTiming(timing); +} diff --git a/binding/exceptions_binding.cpp b/binding/exceptions_binding.cpp new file mode 100644 index 0000000..abec2a3 --- /dev/null +++ b/binding/exceptions_binding.cpp @@ 
-0,0 +1,30 @@ +/** + * @file exceptions_binding.cpp + * @brief Python bindings for the exceptions module + */ + +#include + +#include "exceptions/custom_exception.hpp" + +namespace py = pybind11; +using namespace cpp_features::exceptions; + +void BindExceptions(py::module &m) { + // Bind ErrorSeverity enum + py::enum_(m, "ErrorSeverity") + .value("TRACE", ErrorSeverity::kTrace) + .value("DEBUG", ErrorSeverity::kDebug) + .value("INFO", ErrorSeverity::kInfo) + .value("WARNING", ErrorSeverity::kWarning) + .value("ERROR", ErrorSeverity::kError) + .value("FATAL", ErrorSeverity::kFatal); + + m.def("severity_to_string", &SeverityToString); + + // Register exceptions so C++ throws translate to Python exceptions + auto py_base_exception = py::register_exception(m, "BaseException"); + py::register_exception(m, "ValidationException", py_base_exception.ptr()); + py::register_exception(m, "ResourceException", py_base_exception.ptr()); + py::register_exception(m, "CalculationException", py_base_exception.ptr()); +} diff --git a/binding/random_binding.cpp b/binding/random_binding.cpp new file mode 100644 index 0000000..4c72c05 --- /dev/null +++ b/binding/random_binding.cpp @@ -0,0 +1,65 @@ +/** + * @file random_binding.cpp + * @brief Python bindings for the random module + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "containers/container.hpp" +#include "random/random_gen.hpp" + +namespace py = pybind11; +using namespace cpp_features::random; +using cpp_features::containers::Container; + +namespace { + +template +auto SampleFromRangeWrapper(const Range &range, std::size_t count) { + return SampleFromRange(range, count); +} + +} // namespace + +void BindRandom(py::module &m) { + // Bind RandomGenerator class + py::class_(m, "RandomGenerator") + .def(py::init<>()) + .def(py::init()) + .def("generate_int", &RandomGenerator::GenerateInt) + .def("generate_real", &RandomGenerator::GenerateReal) + .def("generate_int_list", 
&RandomGenerator::GenerateIntVector) + .def("generate_real_list", &RandomGenerator::GenerateRealVector) + .def("generate_bool", &RandomGenerator::GenerateBool) + .def("generate_normal", &RandomGenerator::GenerateNormal) + .def("generate_normal_float", &RandomGenerator::GenerateNormal) + .def("seed", &RandomGenerator::Seed) + .def("seed_with_time", &RandomGenerator::SeedWithTime) + .def("__repr__", [](const RandomGenerator &self) { + return std::format("", static_cast(&self)); + }); + + // Bind utility functions + m.def("shuffle_container", &ShuffleContainer>); + m.def("shuffle_container", &ShuffleContainer>); + m.def("shuffle_container", &ShuffleContainer>); + m.def("shuffle_container", &ShuffleContainer>); + m.def("shuffle_container", &ShuffleContainer>); + m.def("shuffle_container", &ShuffleContainer>); + m.def("shuffle_container", &ShuffleContainer); + + m.def("sample_from_range", &SampleFromRangeWrapper>); + m.def("sample_from_range", &SampleFromRangeWrapper>); + m.def("sample_from_range", &SampleFromRangeWrapper>); + m.def("sample_from_range", &SampleFromRangeWrapper>); + m.def("sample_from_range", &SampleFromRangeWrapper>); + m.def("sample_from_range", &SampleFromRangeWrapper>); + m.def("sample_from_range", &SampleFromRangeWrapper); +} diff --git a/binding/shapes_binding.cpp b/binding/shapes_binding.cpp new file mode 100644 index 0000000..b11adc0 --- /dev/null +++ b/binding/shapes_binding.cpp @@ -0,0 +1,83 @@ +/** + * @file shapes_binding.cpp + * @brief Python bindings for the shapes module + */ + +#include +#include +#include + +#include +#include + +#include "shapes/circle.hpp" +#include "shapes/rectangle.hpp" +#include "shapes/shape.hpp" + +namespace py = pybind11; +using namespace cpp_features::shapes; + +void BindShapes(py::module &m) { + // Bind the abstract Shape base class + py::class_>(m, "Shape") + .def("get_area", &Shape::GetArea) + .def("get_perimeter", &Shape::GetPerimeter) + .def("draw", &Shape::Draw) + .def("get_name", [](const Shape &self) { 
return std::string{self.GetName()}; }) + .def("__str__", [](const Shape &self) { return std::string{self.GetName()}; }) + .def("__repr__", [](const Shape &self) { + return std::format("<{} at {}>", self.GetName(), static_cast(&self)); + }); + + // Bind Circle class + py::class_>(m, "Circle") + .def(py::init()) + .def("get_radius", &Circle::GetRadius) + .def("__eq__", [](const Circle &self, const Circle &other) { return self == other; }) + .def("__lt__", [](const Circle &self, const Circle &other) { return (self <=> other) < 0; }) + .def("__le__", [](const Circle &self, const Circle &other) { return (self <=> other) <= 0; }) + .def("__gt__", [](const Circle &self, const Circle &other) { return (self <=> other) > 0; }) + .def("__ge__", [](const Circle &self, const Circle &other) { return (self <=> other) >= 0; }) + .def("__str__", [](const Circle &self) { return std::format("{}", self); }) + .def("__repr__", [](const Circle &self) { + return std::format("", self.GetRadius(), + static_cast(&self)); + }); + + // Bind Rectangle::Dimensions struct + py::class_(m, "RectangleDimensions") + .def(py::init()) + .def_readwrite("width", &Rectangle::Dimensions::width) + .def_readwrite("height", &Rectangle::Dimensions::height) + .def("__str__", + [](const Rectangle::Dimensions &self) { + return std::format("(w = {:.2f}, h = {:.2f})", self.width, self.height); + }) + .def("__repr__", [](const Rectangle::Dimensions &self) { + return std::format("", self.width, + self.height, static_cast(&self)); + }); + + // Bind Rectangle class + py::class_>(m, "Rectangle") + .def(py::init()) + .def(py::init()) + .def(py::init()) + .def("get_width", &Rectangle::GetWidth) + .def("get_height", &Rectangle::GetHeight) + .def("is_square", &Rectangle::IsSquare) + .def("__eq__", [](const Rectangle &self, const Rectangle &other) { return self == other; }) + .def("__lt__", + [](const Rectangle &self, const Rectangle &other) { return (self <=> other) < 0; }) + .def("__le__", + [](const Rectangle &self, 
const Rectangle &other) { return (self <=> other) <= 0; }) + .def("__gt__", + [](const Rectangle &self, const Rectangle &other) { return (self <=> other) > 0; }) + .def("__ge__", + [](const Rectangle &self, const Rectangle &other) { return (self <=> other) >= 0; }) + .def("__str__", [](const Rectangle &r) { return std::format("{}", r); }) + .def("__repr__", [](const Rectangle &r) { + return std::format("", r.GetWidth(), + r.GetHeight(), static_cast(&r)); + }); +} diff --git a/binding/timing_binding.cpp b/binding/timing_binding.cpp new file mode 100644 index 0000000..e2c504d --- /dev/null +++ b/binding/timing_binding.cpp @@ -0,0 +1,52 @@ +/** + * @file timing_binding.cpp + * @brief Python bindings for the timing module + */ + +#include +#include +#include +#include + +#include + +#include "timing/timer.hpp" + +namespace py = pybind11; +using namespace cpp_features::timing; + +namespace { + +template +auto GetElapsedWrapper(const Timer &timer) { + return timer.GetElapsed(); +} + +auto TimeFunctionWrapper(const std::function &func) -> std::int64_t { + return TimeFunction(func); +} + +} // namespace + +void BindTiming(py::module &m) { + // Bind utility functions + m.def("to_human_readable", &ToHumanReadable); + m.def("time_function", &TimeFunctionWrapper); + + // Bind Timer class + py::class_(m, "Timer") + .def(py::init<>()) + .def("start", &Timer::Start) + .def("stop", &Timer::Stop) + .def("reset", &Timer::Reset) + .def("get_elapsed_ns", &GetElapsedWrapper) + .def("get_elapsed_us", &GetElapsedWrapper) + .def("get_elapsed_ms", &GetElapsedWrapper) + .def("get_elapsed_s", &GetElapsedWrapper) + .def("get_elapsed_string", &Timer::GetElapsedString) + .def("__str__", &Timer::GetElapsedString) + .def("__repr__", [](const Timer &self) { + return std::format("", self.GetElapsedString(), + static_cast(&self)); + }); +} diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake index 8ab60bd..5bb809f 100644 --- a/cmake/Dependencies.cmake +++ b/cmake/Dependencies.cmake @@ 
-10,10 +10,12 @@ when packages are not found. Functions provided: setup_catch2_dependency - Find or fetch Catch2 testing framework + setup_pybind11_dependency - Find or fetch pybind11 Python binding library setup_project_dependencies - Setup all project dependencies Dependencies managed: - Catch2 v3.8.1 - Modern C++ testing framework (only if BUILD_TESTS is ON) + - pybind11 - Python binding library (only if BUILD_PYTHON_BINDINGS is ON) Example: include(Dependencies) @@ -101,6 +103,90 @@ function(setup_catch2_dependency) endif() endfunction() +#[=======================================================================[.rst: +setup_pybind11_dependency +-------------------------- + +Find or fetch pybind11 Python binding library. + + setup_pybind11_dependency() + +This function: + - Checks if BUILD_PYTHON_BINDINGS option is enabled + - Attempts to find pybind11 using find_package() + - Falls back to FetchContent if not found + - Sets up pybind11 for Python extension module creation + +Dependencies: + - Git (for fetching from GitHub) + - Python 3.13+ development headers + - pybind11 v3.0.0 from https://github.com/pybind/pybind11.git + +Variables used: + BUILD_PYTHON_BINDINGS - Must be ON to enable pybind11 setup + +Example: + set(BUILD_PYTHON_BINDINGS ON) + setup_pybind11_dependency() +#]=======================================================================] +function(setup_pybind11_dependency) + if(NOT BUILD_PYTHON_BINDINGS) + return() + endif() + + find_package( + Python3 + COMPONENTS + Interpreter + Development + QUIET + ) + + if(NOT Python3_FOUND) + message(WARNING "Python3 not found. 
Python bindings will not be built.") + return() + endif() + + find_package(pybind11 QUIET) + + if(NOT pybind11_FOUND) + message(STATUS "pybind11 not found, fetching from GitHub...") + + FetchContent_Declare( + pybind11 + GIT_REPOSITORY https://github.com/pybind/pybind11.git + GIT_TAG v3.0.0 + GIT_SHALLOW TRUE + ) + + # Temporarily disable static analysis for dependencies + if(DEFINED CMAKE_CXX_CLANG_TIDY) + set(CMAKE_CXX_CLANG_TIDY_BACKUP ${CMAKE_CXX_CLANG_TIDY}) + unset(CMAKE_CXX_CLANG_TIDY) + endif() + if(DEFINED CMAKE_CXX_CPPCHECK) + set(CMAKE_CXX_CPPCHECK_BACKUP ${CMAKE_CXX_CPPCHECK}) + unset(CMAKE_CXX_CPPCHECK) + endif() + + FetchContent_MakeAvailable(pybind11) + + # Restore static analysis settings + if(DEFINED CMAKE_CXX_CLANG_TIDY_BACKUP) + set(CMAKE_CXX_CLANG_TIDY ${CMAKE_CXX_CLANG_TIDY_BACKUP} PARENT_SCOPE) + unset(CMAKE_CXX_CLANG_TIDY_BACKUP) + endif() + if(DEFINED CMAKE_CXX_CPPCHECK_BACKUP) + set(CMAKE_CXX_CPPCHECK ${CMAKE_CXX_CPPCHECK_BACKUP} PARENT_SCOPE) + unset(CMAKE_CXX_CPPCHECK_BACKUP) + endif() + + message(STATUS "pybind11 fetched successfully") + else() + message(STATUS "Found pybind11: ${pybind11_VERSION}") + endif() +endfunction() + #[=======================================================================[.rst: setup_project_dependencies --------------------------- @@ -114,6 +200,7 @@ to configure the complete dependency environment for the project. Currently sets up: - Catch2 testing framework (if BUILD_TESTS is enabled) + - pybind11 Python binding library (if BUILD_PYTHON_BINDINGS is enabled) This function can be extended to handle additional dependencies as the project grows. 
@@ -124,4 +211,5 @@ Example: #]=======================================================================] function(setup_project_dependencies) setup_catch2_dependency() + setup_pybind11_dependency() endfunction() diff --git a/cmake/README.md b/cmake/README.md index ff5ca6e..1e2bb8c 100644 --- a/cmake/README.md +++ b/cmake/README.md @@ -6,12 +6,15 @@ This directory contains custom CMake modules and utilities for the project. The project provides several configuration options: -- `BUILD_TESTS`: Build test suite (default: ON for main project) -- `BUILD_EXAMPLES`: Build example programs (default: ON for main project) -- `ENABLE_WARNINGS`: Enable compiler warnings (default: ON) -- `WARNINGS_AS_ERRORS`: Treat warnings as errors (default: OFF) -- `ENABLE_CLANG_TIDY`: Enable clang-tidy static analysis (default: OFF) -- `ENABLE_CPPCHECK`: Enable cppcheck static analysis (default: OFF) +| Option | Description | Default | +| ----------------------- | ----------------------------------- | ------------------- | +| `BUILD_TESTS` | Build test suite | `ON` (main project) | +| `BUILD_EXAMPLES` | Build example applications | `ON` (main project) | +| `BUILD_PYTHON_BINDINGS` | Build Python bindings with pybind11 | `OFF` | +| `ENABLE_WARNINGS` | Enable compiler warnings | `ON` | +| `WARNINGS_AS_ERRORS` | Treat warnings as errors | `OFF` | +| `ENABLE_CLANG_TIDY` | Enable clang-tidy static analysis | `OFF` | +| `ENABLE_CPPCHECK` | Enable cppcheck static analysis | `OFF` | ## Files @@ -48,7 +51,8 @@ setup_project_dependencies() Currently handles: -- **Catch2 v3.8.1**: Modern C++ testing framework (only when `BUILD_TESTS` is enabled) +- **Catch2**: Modern C++ testing framework (only when `BUILD_TESTS` is enabled) +- **pybind11**: Python bindings (only when `BUILD_PYTHON_BINDINGS` is enabled) Features: diff --git a/include/algorithms/stl.hpp b/include/algorithms/stl.hpp index ca9ea95..cac769a 100644 --- a/include/algorithms/stl.hpp +++ b/include/algorithms/stl.hpp @@ -53,8 +53,7 @@ void 
SortContainer(Container &container) { * @return The number of elements that satisfy the predicate * * Counts the number of elements in the range for which the predicate returns true. - * The predicate must be callable with elements of the range and return a value - * convertible to bool. + * The predicate must be callable with elements of the range and return a value convertible to bool. * * @code * std::vector numbers{1, 2, 3, 4, 5, 6}; @@ -77,9 +76,8 @@ auto CountIf(Range &&range, Predicate predicate) -> std::size_t { * @param transform The transformation function to apply to each element * @return A vector containing the transformed elements * - * Applies the transformation function to each element in the input range and - * collects the results in a new vector. The transformation function must be - * callable with elements of the range. + * Applies the transformation function to each element in the input range and collects the results + * in a new vector. The transformation function must be callable with elements of the range. * * @code * std::vector numbers{1, 2, 3, 4, 5}; diff --git a/include/concepts/callable_concepts.hpp b/include/concepts/callable_concepts.hpp index 268e607..30983c1 100644 --- a/include/concepts/callable_concepts.hpp +++ b/include/concepts/callable_concepts.hpp @@ -93,6 +93,30 @@ template concept TimerCallback = std::invocable && std::same_as, void>; +/** + * @brief Concept for transformation functions that can be used with container transformations + * + * @tparam Func The type of the transformation function + * @tparam Input The type of the input elements + * @tparam Output The type of the output elements + * + * This concept ensures that a type can be used as a transformation function for container + * operations. The function must be invocable with an Input type and return an Output type. 
+ * + * @code + * template Func> + * auto transform_elements(const std::vector &vec, Func func) { + * return vec | std::views::transform(func); + * } + * + * auto square = [](int n) { return n * n; }; + * auto squared_numbers = transform_elements(numbers, square); + * @endcode + */ +template +concept TransformFunction = + std::invocable && std::convertible_to, Output>; + /** * @brief Concept for predicate functions that can be used with container filtering * @@ -101,13 +125,11 @@ concept TimerCallback = std::invocable && * * This concept ensures that a type can be used as a predicate for filtering operations. * The predicate must be invocable with a const reference to T and return a type that - * is convertible to bool. This allows for flexible usage with lambdas, function pointers, - * functors, and other callable objects. + * is convertible to bool. * * @code * template Predicate> * auto filter_elements(const std::vector &vec, Predicate predicate) { - * // Use predicate to filter elements * return vec | std::views::filter(predicate); * } * @@ -116,7 +138,6 @@ concept TimerCallback = std::invocable && * @endcode */ template -concept PredicateFor = std::invocable && - std::convertible_to, bool>; +concept PredicateFor = TransformFunction; } // namespace cpp_features::concepts diff --git a/include/containers/container.hpp b/include/containers/container.hpp index 71f9f07..47eedae 100644 --- a/include/containers/container.hpp +++ b/include/containers/container.hpp @@ -276,8 +276,9 @@ class Container { /** * @brief Get a transformed view of elements * - * @tparam Func Type of the transformation function - * @param transform_func Function to apply to each element + * @tparam U The type of the output elements + * @tparam Func The type of the transformation function. 
Must satisfy TransformFunction + * @param transform_func The function to apply to each element * @return A ranges view containing transformed elements * * Returns a lazy-evaluated view where each element is transformed by the provided function. @@ -286,7 +287,7 @@ class Container { * auto doubled = container.GetTransformedView([](int n) { return n * 2; }); * @endcode */ - template + template Func> [[nodiscard]] auto GetTransformedView(Func transform_func) const { return data_ | std::views::transform(transform_func); } diff --git a/include/random/random_gen.hpp b/include/random/random_gen.hpp index 579fcc4..3df49c9 100644 --- a/include/random/random_gen.hpp +++ b/include/random/random_gen.hpp @@ -281,7 +281,7 @@ class RandomGenerator { * @endcode */ template -void ShuffleContainer(Range &&range) { +void ShuffleContainer(Range &range) { thread_local std::random_device rd; thread_local std::mt19937 gen{rd()}; std::shuffle(std::ranges::begin(range), std::ranges::end(range), gen); diff --git a/python/examples/algorithms_example.py b/python/examples/algorithms_example.py new file mode 100644 index 0000000..46d0082 --- /dev/null +++ b/python/examples/algorithms_example.py @@ -0,0 +1,67 @@ +"""Example demonstrating the usage of the algorithms module.""" + +from demo.algorithms import ( + count_if, + find_min_max, + pipeline, + sort_inplace, + transform_to_list, +) +from demo.containers import Container + + +def main() -> None: + """Run all algorithm examples.""" + print('=== Algorithms Module Example ===') + + # Demonstrate sort_inplace with integers + numbers = Container(int, [42, 17, 89, 3, 56, 23, 78, 12, 95, 34]) + print(f'Original numbers: {numbers}') + + sort_inplace(numbers) + print(f'Sorted numbers: {numbers}') + + # Demonstrate count_if + even_count = count_if(numbers, lambda n: n % 2 == 0) + print(f'Count of even numbers: {even_count}') + + large_count = count_if(numbers, lambda n: n > 50) + print(f'Count of numbers > 50: {large_count}') + + # Demonstrate 
transform_to_list + squared = transform_to_list(numbers, lambda n: n * n) + print(f'Squared numbers: {squared}') + + # Demonstrate find_min_max + min_val, max_val = find_min_max(numbers) + print(f'Min: {min_val}, Max: {max_val}') + + # Demonstrate with strings + words = Container(str, ['cherry', 'banana', 'elderberry', 'date', 'apple']) + print(f'Original words: {words}') + + sort_inplace(words) + print(f'Sorted words: {words}') + + long_words = count_if(words, lambda word: len(word) > 5) + print(f'Count of words > 5 characters: {long_words}') + + uppercased = transform_to_list(words, lambda word: word.upper()) + print(f'Uppercased words: {uppercased}') + + min_word, max_word = find_min_max(words) + print(f'Min: {min_word}, Max: {max_word} (Lexicographically)') + + # Demonstrate pipeline + process = pipeline( + lambda data: transform_to_list(data, lambda x: x * x), + find_min_max, + ) + pipeline_result = process(numbers) + print(f'Pipeline result: {pipeline_result}') + + print('=== Algorithms Module Example Completed ===') + + +if __name__ == '__main__': + main() diff --git a/python/examples/containers_example.py b/python/examples/containers_example.py new file mode 100644 index 0000000..1960ecf --- /dev/null +++ b/python/examples/containers_example.py @@ -0,0 +1,349 @@ +#!/usr/bin/env python3 +"""Containers module example demonstrating type-safe container operations. + +This example shows how to use the container wrapper for type-safe operations +including filtering, transformation, and collection management. +""" + +import sys +from pathlib import Path +from typing import Any, Callable, List + +# Add the python module to the path +sys.path.insert(0, str(Path(__file__).parent.parent / 'src')) + +from containers import Container, create_container + + +def basic_container_demo() -> None: + """Demonstrate basic container creation and operations. + + Shows fundamental container operations including creation, addition, + removal, and basic iteration. 
+ """ + print('=== Basic Container Demo ===') + + # Create containers with different types + int_container = create_container([1, 3, 2, 5, 4]) + str_container = Container(str, ['apple', 'banana', 'cherry']) + + print(f'Integer container: {list(int_container)}') + print(f'String container: {list(str_container)}') + + # Add and remove items + int_container.add(6) + removed = int_container.remove(3) + print(f'After adding 6 and removing 3 (removed {removed}): {list(int_container)}') + + # Container properties + print(f'Integer container size: {len(int_container)}') + print(f'Is empty: {int_container.is_empty()}') + print(f'Contains 5: {int_container.contains(5)}') + + print() + + +def filtering_demo() -> None: + """Demonstrate container filtering capabilities. + + Shows various filtering patterns and how to chain filter operations + for complex data selection. + """ + print('=== Filtering Demo ===') + + # Create test data + numbers = create_container(list(range(1, 21))) # 1 to 20 + words = Container(str, ['apple', 'banana', 'cherry', 'date', 'elderberry', 'fig']) + + # Numeric filtering + even_numbers = numbers.filter(lambda x: x % 2 == 0) + large_numbers = numbers.filter(lambda x: x > 15) + divisible_by_three = numbers.filter(lambda x: x % 3 == 0) + + print(f'Original numbers: {list(numbers)}') + print(f'Even numbers: {even_numbers}') + print(f'Large numbers (>15): {large_numbers}') + print(f'Divisible by 3: {divisible_by_three}') + + # String filtering + long_words = words.filter(lambda s: len(s) > 5) + words_with_e = words.filter(lambda s: 'e' in s) + words_starting_with_c = words.filter(lambda s: s.startswith('c')) + + print(f'\nOriginal words: {list(words)}') + print(f'Long words (>5 chars): {long_words}') + print(f'Words containing "e": {words_with_e}') + print(f'Words starting with "c": {words_starting_with_c}') + + # Complex filtering with multiple conditions + complex_filter = numbers.filter(lambda x: x % 2 == 0 and x > 10) + print(f'Even numbers > 10: 
{complex_filter}') + + print() + + +def transformation_demo() -> None: + """Demonstrate container transformation capabilities. + + Shows how to transform container elements using mapping functions + and create new containers with modified data. + """ + print('=== Transformation Demo ===') + + # Numeric transformations + numbers = create_container([1, 2, 3, 4, 5]) + + squared = numbers.transform(lambda x: x * x) + doubled = numbers.transform(lambda x: x * 2) + negated = numbers.transform(lambda x: -x) + + print(f'Original: {list(numbers)}') + print(f'Squared: {squared}') + print(f'Doubled: {doubled}') + print(f'Negated: {negated}') + + # String transformations + words = Container(str, ['hello', 'world', 'python', 'container']) + + lengths = words.transform(len) + uppercase = words.transform(str.upper) + reversed_words = words.transform(lambda s: s[::-1]) + first_chars = words.transform(lambda s: s[0] if s else '') + + print(f'\nOriginal words: {list(words)}') + print(f'Word lengths: {lengths}') + print(f'Uppercase: {uppercase}') + print(f'Reversed: {reversed_words}') + print(f'First characters: {first_chars}') + + # Complex transformations + numbers_to_strings = numbers.transform(lambda x: f'Number: {x}') + print(f'Complex transformation: {numbers_to_strings}') + + print() + + +def chained_operations_demo() -> None: + """Demonstrate chaining of container operations. + + Shows how to combine filtering and transformation operations + to create complex data processing pipelines. 
+ """ + print('=== Chained Operations Demo ===') + + # Create sample data + data = create_container(list(range(1, 11))) + print(f'Original data: {list(data)}') + + # Chain operations step by step + step1 = data.filter(lambda x: x % 2 == 1) # Keep odd numbers + step2 = step1.transform(lambda x: x * x) # Square them + step3 = Container(int, step2).filter(lambda x: x < 50) # Keep those < 50 + + print(f'Step 1 - Odd numbers: {step1}') + print(f'Step 2 - Squared: {step2}') + print(f'Step 3 - Squares < 50: {step3}') + + # Working with strings - pipeline processing + text_data = Container( + str, ['The', 'quick', 'brown', 'fox', 'jumps', 'over', 'lazy', 'dog'] + ) + + # Process: keep words > 3 chars, convert to uppercase, get lengths + processed_text = text_data.filter(lambda w: len(w) > 3) + upper_text = Container(str, processed_text).transform(str.upper) + final_lengths = Container(str, upper_text).transform(len) + + print(f'\nText processing pipeline:') + print(f'Original: {list(text_data)}') + print(f'Words > 3 chars: {processed_text}') + print(f'Uppercase: {upper_text}') + print(f'Final lengths: {final_lengths}') + + print() + + +def type_safety_demo() -> None: + """Demonstrate type safety features of containers. + + Shows how containers maintain type safety and provide + compile-time and runtime type checking capabilities. 
+ """ + print('=== Type Safety Demo ===') + + # Homogeneous containers + int_container = Container(int, [1, 2, 3]) + str_container = Container(str, ['a', 'b', 'c']) + float_container = Container(float, [1.1, 2.2, 3.3]) + + print(f'Integer container: {list(int_container)}') + print(f'String container: {list(str_container)}') + print(f'Float container: {list(float_container)}') + + # Type-specific operations + print(f'\nType-specific operations:') + + # Integer operations + int_sum = sum(int_container) + int_max = max(int_container) + print(f'Integer sum: {int_sum}, max: {int_max}') + + # String operations + str_joined = ' '.join(str_container) + str_total_length = sum(len(s) for s in str_container) + print(f'Joined strings: "{str_joined}", total length: {str_total_length}') + + # Float operations with precision + float_avg = sum(float_container) / len(float_container) + print(f'Float average: {float_avg:.3f}') + + # Demonstrate type preservation in transformations + int_squared = int_container.transform(lambda x: x * x) + str_lengths = str_container.transform(len) + float_rounded = float_container.transform(lambda x: round(x, 1)) + + print(f'\nType-preserving transformations:') + print(f'Integer squared: {int_squared}') + print(f'String lengths: {str_lengths}') + print(f'Float rounded: {float_rounded}') + + print() + + +def advanced_operations_demo() -> None: + """Demonstrate advanced container operations. + + Shows complex use cases including batch processing, conditional + operations, and advanced filtering patterns. 
+ """ + print('=== Advanced Operations Demo ===') + + # Batch processing with multiple containers + datasets = [ + create_container([1, 2, 3, 4, 5]), + create_container([6, 7, 8, 9, 10]), + create_container([11, 12, 13, 14, 15]), + ] + + print('Batch processing multiple containers:') + for i, dataset in enumerate(datasets, 1): + processed = dataset.filter(lambda x: x % 2 == 0).transform(lambda x: x * 2) + print(f' Dataset {i}: {list(dataset)} -> {processed}') + + # Conditional operations + mixed_data = create_container([-3, -1, 0, 2, 5, 8, 12]) + + # Separate positive and negative numbers + positive = mixed_data.filter(lambda x: x > 0) + negative = mixed_data.filter(lambda x: x < 0) + zero_or_positive = mixed_data.filter(lambda x: x >= 0) + + print(f'\nConditional separation:') + print(f'Original: {list(mixed_data)}') + print(f'Positive: {positive}') + print(f'Negative: {negative}') + print(f'Zero or positive: {zero_or_positive}') + + # Statistical operations + stats_data = create_container([1, 2, 2, 3, 4, 4, 4, 5, 6]) + + unique_values = list(set(stats_data)) + value_counts = {val: list(stats_data).count(val) for val in unique_values} + + print(f'\nStatistical analysis:') + print(f'Data: {list(stats_data)}') + print(f'Unique values: {sorted(unique_values)}') + print(f'Value counts: {value_counts}') + + # Find most frequent value + most_frequent = max(value_counts.items(), key=lambda x: x[1]) + print(f'Most frequent value: {most_frequent[0]} (appears {most_frequent[1]} times)') + + print() + + +def performance_demo() -> None: + """Demonstrate performance characteristics of container operations. + + Shows how different operations scale and provides insights into + performance considerations for large datasets. 
+ """ + print('=== Performance Demo ===') + + # Create larger datasets for performance testing + small_data = create_container(list(range(100))) + medium_data = create_container(list(range(1000))) + large_data = create_container(list(range(10000))) + + datasets = [ + ('Small (100 elements)', small_data), + ('Medium (1000 elements)', medium_data), + ('Large (10000 elements)', large_data), + ] + + print('Performance comparison across dataset sizes:') + + for name, dataset in datasets: + # Time filtering operation + import time + + start_time = time.perf_counter() + filtered = dataset.filter(lambda x: x % 10 == 0) + filter_time = time.perf_counter() - start_time + + start_time = time.perf_counter() + transformed = dataset.transform(lambda x: x * 2) + transform_time = time.perf_counter() - start_time + + print(f' {name}:') + print(f' Filter time: {filter_time:.6f}s, result size: {len(filtered)}') + print( + f' Transform time: {transform_time:.6f}s, result size: {len(transformed)}' + ) + + # Memory efficiency demonstration + print(f'\nMemory efficiency:') + efficient_chain = large_data.filter(lambda x: x % 100 == 0).transform( + lambda x: x // 100 + ) + print( + f'Chained operations on large dataset: {len(efficient_chain)} elements processed' + ) + + print() + + +def main() -> int: + """Run all container examples. 
+ + Returns + ------- + int + Exit code (0 for success, 1 for error) + """ + print('Containers Module Example') + print('========================') + print() + + try: + basic_container_demo() + filtering_demo() + transformation_demo() + chained_operations_demo() + type_safety_demo() + advanced_operations_demo() + performance_demo() + + print('All container examples completed successfully!') + return 0 + + except Exception as e: + print(f'Error running container examples: {e}') + import traceback + + traceback.print_exc() + return 1 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/python/examples/exceptions_example.py b/python/examples/exceptions_example.py new file mode 100644 index 0000000..8c4f820 --- /dev/null +++ b/python/examples/exceptions_example.py @@ -0,0 +1,521 @@ +#!/usr/bin/env python3 +"""Exceptions module example demonstrating safe error handling with Result types. + +This example shows how to use the exceptions module for functional error +handling, avoiding exceptions in favor of explicit Result types. +""" + +import math +import sys +from pathlib import Path +from typing import Callable, List, Optional + +# Add the python module to the path +sys.path.insert(0, str(Path(__file__).parent.parent / 'src')) + +from exceptions import Result, chain_operations, safe_divide, safe_sqrt + + +def basic_result_demo() -> None: + """Demonstrate basic Result type usage. + + Shows how to create, check, and unwrap Result values for + safe error handling without exceptions. 
+ """ + print('=== Basic Result Demo ===') + + # Creating successful Results + success_result = Result.ok(42) + print(f'Success result: {success_result}') + print(f'Is success: {success_result.is_ok}') + print(f'Value: {success_result.unwrap()}') + + # Creating error Results + error_result = Result.error('Something went wrong') + print(f'\nError result: {error_result}') + print(f'Is error: {error_result.is_error}') + print(f'Error message: {error_result.error_message}') + + # Safe unwrapping with defaults + safe_value = error_result.unwrap_or(-1) + print(f'Error result with default: {safe_value}') + + # Pattern matching style checking + results = [success_result, error_result] + + print(f'\nProcessing multiple results:') + for i, result in enumerate(results): + if result.is_ok: + print(f' Result {i}: Success with value {result.unwrap()}') + else: + print(f' Result {i}: Error - {result.error_message}') + + print() + + +def safe_operations_demo() -> None: + """Demonstrate safe mathematical operations. + + Shows how to perform mathematical operations that can fail + safely using Result types instead of raising exceptions. 
+ """ + print('=== Safe Operations Demo ===') + + # Safe division examples + division_tests = [ + (10.0, 2.0), + (15.0, 3.0), + (7.0, 0.0), # Division by zero + (-8.0, 2.0), + (0.0, 5.0), + ] + + print('Safe division tests:') + for dividend, divisor in division_tests: + result = safe_divide(dividend, divisor) + if result.is_ok: + print(f' {dividend} / {divisor} = {result.unwrap():.3f}') + else: + print(f' {dividend} / {divisor} = Error: {result.error_message}') + + # Safe square root examples + sqrt_tests = [16.0, 25.0, -4.0, 0.0, 2.5] + + print(f'\nSafe square root tests:') + for value in sqrt_tests: + result = safe_sqrt(value) + if result.is_ok: + print(f' sqrt({value}) = {result.unwrap():.4f}') + else: + print(f' sqrt({value}) = Error: {result.error_message}') + + print() + + +def result_chaining_demo() -> None: + """Demonstrate Result chaining and transformation. + + Shows how to chain operations on Results and transform + values while preserving error states. + """ + print('=== Result Chaining Demo ===') + + # Basic chaining with map + print('Result transformation with map:') + + success_num = Result.ok(5) + doubled = success_num.map(lambda x: x * 2) + squared = doubled.map(lambda x: x * x) + + print(f'Original: {success_num.unwrap()}') + print(f'Doubled: {doubled.unwrap()}') + print(f'Squared: {squared.unwrap()}') + + # Chaining with error propagation + print(f'\nError propagation in chains:') + + error_num = Result.error('Invalid input') + error_doubled = error_num.map(lambda x: x * 2) + error_squared = error_doubled.map(lambda x: x * x) + + print(f'Error original: {error_num.error_message}') + print(f'Error doubled: {error_doubled.error_message}') + print(f'Error squared: {error_squared.error_message}') + + # Complex mathematical chains + print(f'\nComplex mathematical chains:') + + test_values = [4.0, 16.0, -1.0, 0.0, 9.0] + + for value in test_values: + # Chain: square root -> divide by 2 -> multiply by 3 + result = ( + safe_sqrt(value) + .and_then(lambda 
x: safe_divide(x, 2.0)) + .map(lambda x: x * 3.0) + ) + + if result.is_ok: + print(f' Process({value}): sqrt -> /2 -> *3 = {result.unwrap():.4f}') + else: + print(f' Process({value}): Error - {result.error_message}') + + print() + + +def operation_chaining_demo() -> None: + """Demonstrate chaining of multiple operations. + + Shows how to compose multiple fallible operations into + safe pipelines using the chain_operations function. + """ + print('=== Operation Chaining Demo ===') + + # Define safe operations + def safe_add_one(x: float) -> Result[float]: + """Safely add one to a number.""" + return Result.ok(x + 1.0) + + def safe_multiply_by_two(x: float) -> Result[float]: + """Safely multiply by two.""" + return Result.ok(x * 2.0) + + def safe_subtract_ten(x: float) -> Result[float]: + """Safely subtract ten.""" + return Result.ok(x - 10.0) + + def safe_validate_positive(x: float) -> Result[float]: + """Validate that a number is positive.""" + if x > 0: + return Result.ok(x) + else: + return Result.error(f'Value {x} is not positive') + + # Simple chain + print('Simple operation chain:') + simple_chain = chain_operations(safe_add_one, safe_multiply_by_two) + + test_values = [5.0, 10.0, 0.0, -3.0] + for value in test_values: + result = simple_chain(value) + if result.is_ok: + print(f' {value} -> +1 -> *2 = {result.unwrap()}') + else: + print(f' {value} -> Error: {result.error_message}') + + # Complex chain with validation + print(f'\nComplex chain with validation:') + complex_chain = chain_operations( + safe_add_one, safe_multiply_by_two, safe_subtract_ten, safe_validate_positive + ) + + for value in test_values: + result = complex_chain(value) + if result.is_ok: + print(f' {value} -> +1 -> *2 -> -10 -> validate = {result.unwrap()}') + else: + print(f' {value} -> Error: {result.error_message}') + + # Mathematical chain with potential failures + print(f'\nMathematical chain with potential failures:') + + def safe_reciprocal(x: float) -> Result[float]: + """Safely 
calculate reciprocal.""" + if x == 0: + return Result.error('Cannot calculate reciprocal of zero') + return Result.ok(1.0 / x) + + def safe_logarithm(x: float) -> Result[float]: + """Safely calculate natural logarithm.""" + if x <= 0: + return Result.error(f'Cannot calculate log of {x}') + return Result.ok(math.log(x)) + + math_chain = chain_operations(safe_reciprocal, safe_logarithm) + + math_test_values = [1.0, 2.0, 0.5, 0.0, -1.0] + for value in math_test_values: + result = math_chain(value) + if result.is_ok: + print(f' {value} -> 1/x -> ln(x) = {result.unwrap():.4f}') + else: + print(f' {value} -> Error: {result.error_message}') + + print() + + +def error_accumulation_demo() -> None: + """Demonstrate error accumulation and handling patterns. + + Shows different strategies for handling multiple potential + errors and accumulating results or errors. + """ + print('=== Error Accumulation Demo ===') + + # Processing lists with potential errors + def safe_parse_number(s: str) -> Result[float]: + """Safely parse a string as a number.""" + try: + value = float(s) + return Result.ok(value) + except ValueError: + return Result.error(f'Cannot parse "{s}" as number') + + def safe_sqrt_wrapper(x: float) -> Result[float]: + """Wrapper for safe square root.""" + return safe_sqrt(x) + + # Test data with some invalid entries + input_strings = ['4.0', '16.0', 'invalid', '25.0', '-1.0', '9.0', 'bad'] + + print('Processing list with error accumulation:') + + # Collect successful results and errors separately + successful_results = [] + errors = [] + + for input_str in input_strings: + parse_result = safe_parse_number(input_str) + if parse_result.is_ok: + sqrt_result = safe_sqrt_wrapper(parse_result.unwrap()) + if sqrt_result.is_ok: + successful_results.append((input_str, sqrt_result.unwrap())) + else: + errors.append((input_str, sqrt_result.error_message)) + else: + errors.append((input_str, parse_result.error_message)) + + print(f'Input data: {input_strings}') + 
print(f'Successful results: {successful_results}') + print(f'Errors: {errors}') + print( + f'Success rate: {len(successful_results)}/{len(input_strings)} ({len(successful_results)/len(input_strings)*100:.1f}%)' + ) + + # Alternative: fail-fast approach + print(f'\nFail-fast approach:') + + def process_all_or_fail(strings: List[str]) -> Result[List[float]]: + """Process all strings or fail on first error.""" + results = [] + for s in strings: + parse_result = safe_parse_number(s) + if parse_result.is_error: + return Result.error(f'Failed on "{s}": {parse_result.error_message}') + + sqrt_result = safe_sqrt_wrapper(parse_result.unwrap()) + if sqrt_result.is_error: + return Result.error(f'Failed on "{s}": {sqrt_result.error_message}') + + results.append(sqrt_result.unwrap()) + + return Result.ok(results) + + fail_fast_result = process_all_or_fail(input_strings) + if fail_fast_result.is_ok: + print(f'All processed successfully: {fail_fast_result.unwrap()}') + else: + print(f'Processing failed: {fail_fast_result.error_message}') + + # Process only valid inputs + valid_inputs = ['4.0', '16.0', '25.0', '9.0'] + valid_result = process_all_or_fail(valid_inputs) + print( + f'Valid inputs result: {valid_result.unwrap() if valid_result.is_ok else valid_result.error_message}' + ) + + print() + + +def practical_patterns_demo() -> None: + """Demonstrate practical error handling patterns. + + Shows real-world scenarios where Result types provide + cleaner error handling than traditional exceptions. 
+ """ + print('=== Practical Patterns Demo ===') + + # Configuration parsing pattern + def parse_config_value(key: str, value: str, value_type: str) -> Result[any]: + """Parse a configuration value based on its expected type.""" + try: + if value_type == 'int': + return Result.ok(int(value)) + elif value_type == 'float': + return Result.ok(float(value)) + elif value_type == 'bool': + if value.lower() in ('true', '1', 'yes'): + return Result.ok(True) + elif value.lower() in ('false', '0', 'no'): + return Result.ok(False) + else: + return Result.error(f'Invalid boolean value: {value}') + else: + return Result.ok(value) # String + except ValueError: + return Result.error(f'Cannot parse {key}="{value}" as {value_type}') + + print('Configuration parsing:') + config_data = [ + ('port', '8080', 'int'), + ('debug', 'true', 'bool'), + ('timeout', '5.5', 'float'), + ('host', 'localhost', 'string'), + ('workers', 'invalid', 'int'), # Error case + ('ssl', 'maybe', 'bool'), # Error case + ] + + parsed_config = {} + config_errors = [] + + for key, value, value_type in config_data: + result = parse_config_value(key, value, value_type) + if result.is_ok: + parsed_config[key] = result.unwrap() + print( + f' {key}: {value} -> {parsed_config[key]} ({type(parsed_config[key]).__name__})' + ) + else: + config_errors.append((key, result.error_message)) + print(f' {key}: Error - {result.error_message}') + + print(f'Parsed config: {parsed_config}') + print(f'Config errors: {config_errors}') + + # Data validation pipeline + print(f'\nData validation pipeline:') + + def validate_email(email: str) -> Result[str]: + """Basic email validation.""" + if '@' in email and '.' 
in email.split('@')[1]: + return Result.ok(email) + return Result.error(f'Invalid email format: {email}') + + def validate_age(age: int) -> Result[int]: + """Age validation.""" + if 0 <= age <= 150: + return Result.ok(age) + return Result.error(f'Invalid age: {age}') + + def validate_name(name: str) -> Result[str]: + """Name validation.""" + if len(name.strip()) >= 2: + return Result.ok(name.strip()) + return Result.error(f'Name too short: "{name}"') + + # Test user data + users_data = [ + ('john.doe@email.com', 25, 'John Doe'), + ('invalid-email', 30, 'Jane Smith'), + ('bob@test.com', -5, 'Bob Wilson'), + ('alice@example.org', 35, 'A'), # Name too short + ('valid@user.com', 28, 'Valid User'), + ] + + print('User validation results:') + for email, age, name in users_data: + email_result = validate_email(email) + age_result = validate_age(age) + name_result = validate_name(name) + + if email_result.is_ok and age_result.is_ok and name_result.is_ok: + print( + f' ✓ Valid user: {name_result.unwrap()} ({age_result.unwrap()}) - {email_result.unwrap()}' + ) + else: + errors = [] + if email_result.is_error: + errors.append(email_result.error_message) + if age_result.is_error: + errors.append(age_result.error_message) + if name_result.is_error: + errors.append(name_result.error_message) + print(f' ✗ Invalid user: {"; ".join(errors)}') + + print() + + +def advanced_result_patterns_demo() -> None: + """Demonstrate advanced Result usage patterns. + + Shows sophisticated error handling techniques including + result combination, conditional processing, and recovery. 
+ """ + print('=== Advanced Result Patterns Demo ===') + + # Result combination + def combine_results(r1: Result[float], r2: Result[float]) -> Result[float]: + """Combine two results by adding their values.""" + if r1.is_ok and r2.is_ok: + return Result.ok(r1.unwrap() + r2.unwrap()) + elif r1.is_error and r2.is_error: + return Result.error(f'Both failed: {r1.error_message}; {r2.error_message}') + elif r1.is_error: + return Result.error(f'First failed: {r1.error_message}') + else: + return Result.error(f'Second failed: {r2.error_message}') + + print('Result combination:') + test_pairs = [ + (Result.ok(5.0), Result.ok(3.0)), + (Result.ok(2.0), Result.error('Second error')), + (Result.error('First error'), Result.ok(4.0)), + (Result.error('First error'), Result.error('Second error')), + ] + + for i, (r1, r2) in enumerate(test_pairs): + combined = combine_results(r1, r2) + if combined.is_ok: + print(f' Pair {i+1}: {combined.unwrap()}') + else: + print(f' Pair {i+1}: Error - {combined.error_message}') + + # Conditional processing + print(f'\nConditional processing:') + + def process_if_positive(x: float) -> Result[float]: + """Process number only if positive.""" + if x > 0: + return safe_sqrt(x).map(lambda y: y * 2) + else: + return Result.ok(0.0) # Default for non-positive + + conditional_tests = [4.0, -2.0, 0.0, 16.0, -9.0] + for value in conditional_tests: + result = process_if_positive(value) + print( + f' Process({value}): {result.unwrap() if result.is_ok else result.error_message}' + ) + + # Error recovery + print(f'\nError recovery patterns:') + + def divide_with_fallback(a: float, b: float, fallback: float) -> Result[float]: + """Divide with fallback value on error.""" + result = safe_divide(a, b) + if result.is_error: + return Result.ok(fallback) + return result + + recovery_tests = [(10.0, 2.0, 1.0), (8.0, 0.0, 1.0), (15.0, 3.0, 1.0)] + for a, b, fallback in recovery_tests: + result = divide_with_fallback(a, b, fallback) + print(f' {a}/{b} with fallback 
{fallback}: {result.unwrap()}') + + print() + + +def main() -> int: + """Run all exception handling examples. + + Returns + ------- + int + Exit code (0 for success, 1 for error) + """ + print('Exceptions Module Example') + print('========================') + print() + + try: + basic_result_demo() + safe_operations_demo() + result_chaining_demo() + operation_chaining_demo() + error_accumulation_demo() + practical_patterns_demo() + advanced_result_patterns_demo() + + print('All exception handling examples completed successfully!') + return 0 + + except Exception as e: + print(f'Error running exception examples: {e}') + import traceback + + traceback.print_exc() + return 1 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/python/examples/integration_example.py b/python/examples/integration_example.py new file mode 100644 index 0000000..e8e5133 --- /dev/null +++ b/python/examples/integration_example.py @@ -0,0 +1,573 @@ +#!/usr/bin/env python3 +"""Integration example demonstrating cross-module usage patterns. + +This example shows how to combine multiple modules to create complex +applications and demonstrates real-world integration scenarios. +""" + +import sys +from pathlib import Path +from typing import Any, Dict, List, Optional, Tuple + +# Add the python module to the path +sys.path.insert(0, str(Path(__file__).parent.parent / 'src')) + +from random import RandomGenerator + +from algorithms import count_if, find_min_max, sort_inplace, transform +from containers import Container, create_container +from exceptions import Result, safe_divide, safe_sqrt +from memory import managed_resources +from shapes import analyze_shape, compare_shapes, create_shape +from timing import Timer, benchmark_function, measure_time + + +def geometric_data_analysis() -> None: + """Demonstrate geometric data analysis with multiple modules. + + Shows how to generate random geometric data, analyze it using + containers and algorithms, and manage resources safely. 
+ """ + print('=== Geometric Data Analysis ===') + + # Generate random geometric data + gen = RandomGenerator(seed=123) + + with managed_resources() as manager: + print('Generating random geometric shapes...') + + # Create random shapes + shapes = [] + for i in range(20): + shape_type = gen.choice(0.6) # 60% circles, 40% rectangles + + if shape_type: # Circle + radius = gen.uniform(1.0, 10.0) + shape = manager.create_circle(radius) + else: # Rectangle + width = gen.uniform(2.0, 8.0) + height = gen.uniform(2.0, 8.0) + shape = manager.create_rectangle(width, height) + + shapes.append(shape) + + print(f'Created {len(shapes)} random shapes') + + # Analyze shapes using containers and algorithms + with measure_time('Shape analysis'): + # Extract metrics + areas = [shape.get_area() for shape in shapes] + perimeters = [shape.get_perimeter() for shape in shapes] + + # Create containers for analysis + area_container = create_container(areas) + perimeter_container = create_container(perimeters) + + # Sort for analysis + sort_inplace(areas) + sort_inplace(perimeters) + + # Find extrema + min_area, max_area = find_min_max(areas) + min_perim, max_perim = find_min_max(perimeters) + + # Statistical analysis + large_shapes = count_if(areas, lambda x: x > 25.0) + complex_shapes = count_if(perimeters, lambda x: x > 15.0) + + # Transform for efficiency analysis + efficiency_ratios = [] + for area, perim in zip(areas, perimeters): + if perim > 0: + efficiency_ratios.append(area / perim) + else: + efficiency_ratios.append(0.0) + + sort_inplace(efficiency_ratios) + + # Display results + print(f'\nAnalysis Results:') + print(f' Area statistics:') + print(f' Range: {min_area:.2f} - {max_area:.2f}') + print(f' Large shapes (area > 25): {large_shapes}') + print(f' Perimeter statistics:') + print(f' Range: {min_perim:.2f} - {max_perim:.2f}') + print(f' Complex shapes (perimeter > 15): {complex_shapes}') + + print() + + +def data_processing_pipeline() -> None: + """Demonstrate a complete data 
processing pipeline. + + Shows error handling, data transformation, statistical analysis, + and performance monitoring in an integrated workflow. + """ + print('=== Data Processing Pipeline ===') + + # Generate test dataset + gen = RandomGenerator(seed=456) + raw_data = gen.integers(-50, 150, 100) + + print(f'Processing {len(raw_data)} data points...') + + with measure_time('Complete pipeline'): + # Stage 1: Data validation and cleaning + with measure_time(' Data validation'): + valid_data = [] + errors = [] + + for i, value in enumerate(raw_data): + if 0 <= value <= 100: + valid_data.append(value) + else: + errors.append((i, value, 'out_of_range')) + + print(f' Valid data points: {len(valid_data)}/{len(raw_data)}') + print(f' Errors detected: {len(errors)}') + + # Stage 2: Data transformation with error handling + with measure_time(' Data transformation'): + transformed_results = [] + transformation_errors = [] + + for value in valid_data: + # Safe square root transformation + sqrt_result = safe_sqrt(float(value)) + if sqrt_result.is_ok: + # Scale and add noise + scaled = sqrt_result.unwrap() * 2.5 + noise = gen.normal(0.0, 0.1) + final_value = scaled + noise + transformed_results.append(final_value) + else: + transformation_errors.append((value, sqrt_result.error_message)) + + print(f' Transformed values: {len(transformed_results)}') + print(f' Transformation errors: {len(transformation_errors)}') + + # Stage 3: Statistical analysis + with measure_time(' Statistical analysis'): + if transformed_results: + # Use containers for analysis + data_container = create_container(transformed_results) + + # Basic statistics + sorted_data = sorted(transformed_results) + mean = sum(sorted_data) / len(sorted_data) + median = sorted_data[len(sorted_data) // 2] + min_val, max_val = find_min_max(sorted_data) + + # Quartiles + q1_idx = len(sorted_data) // 4 + q3_idx = 3 * len(sorted_data) // 4 + q1 = sorted_data[q1_idx] + q3 = sorted_data[q3_idx] + + # Count values in different 
ranges + low_values = count_if(transformed_results, lambda x: x < mean - 1.0) + high_values = count_if(transformed_results, lambda x: x > mean + 1.0) + + stats = { + 'count': len(sorted_data), + 'mean': mean, + 'median': median, + 'min': min_val, + 'max': max_val, + 'q1': q1, + 'q3': q3, + 'range': max_val - min_val, + 'iqr': q3 - q1, + 'low_outliers': low_values, + 'high_outliers': high_values, + } + + print(f' Statistical summary:') + print(f' Count: {stats["count"]}') + print(f' Mean: {stats["mean"]:.3f}') + print(f' Median: {stats["median"]:.3f}') + print(f' Range: {stats["min"]:.3f} - {stats["max"]:.3f}') + print(f' IQR: {stats["iqr"]:.3f}') + print( + f' Outliers: {stats["low_outliers"]} low, {stats["high_outliers"]} high' + ) + + # Stage 4: Results validation + with measure_time(' Results validation'): + validation_results = [] + + for value in transformed_results: + # Validate transformed values are reasonable + if 0 <= value <= 50: # Expected range after transformation + validation_results.append(('valid', value)) + else: + validation_results.append(('suspicious', value)) + + valid_count = count_if(validation_results, lambda x: x[0] == 'valid') + suspicious_count = len(validation_results) - valid_count + + print(f' Validation results:') + print(f' Valid: {valid_count}') + print(f' Suspicious: {suspicious_count}') + + print() + + +def scientific_simulation() -> None: + """Demonstrate a scientific simulation with error handling. + + Shows Monte Carlo simulation with statistical analysis, + memory management, and comprehensive error handling. 
+ """ + print('=== Scientific Simulation ===') + + # Monte Carlo simulation parameters + num_experiments = 5 + samples_per_experiment = 10000 + + print(f'Running {num_experiments} Monte Carlo experiments...') + print(f'Samples per experiment: {samples_per_experiment}') + + with managed_resources() as manager: + experiment_results = [] + + for exp_id in range(num_experiments): + print(f'\nExperiment {exp_id + 1}:') + + # Create random generator for this experiment + gen = RandomGenerator(seed=exp_id * 1000) + + with measure_time(f' Experiment {exp_id + 1}'): + # Monte Carlo π estimation + inside_circle = 0 + sample_points = [] + + for _ in range(samples_per_experiment): + x = gen.uniform(-1.0, 1.0) + y = gen.uniform(-1.0, 1.0) + + distance_squared = x * x + y * y + sample_points.append((x, y, distance_squared)) + + if distance_squared <= 1.0: + inside_circle += 1 + + # Calculate π estimate + pi_estimate = 4.0 * inside_circle / samples_per_experiment + error = abs(pi_estimate - 3.14159265359) + + # Analyze sample distribution + distances = [point[2] for point in sample_points] + sort_inplace(distances) + + min_dist, max_dist = find_min_max(distances) + close_points = count_if(distances, lambda d: d < 0.1) + far_points = count_if(distances, lambda d: d > 0.9) + + experiment_result = { + 'experiment_id': exp_id + 1, + 'pi_estimate': pi_estimate, + 'error': error, + 'inside_circle': inside_circle, + 'total_samples': samples_per_experiment, + 'min_distance': min_dist, + 'max_distance': max_dist, + 'close_points': close_points, + 'far_points': far_points, + } + + experiment_results.append(experiment_result) + + print(f' π estimate: {pi_estimate:.6f}') + print(f' Error: {error:.6f}') + print(f' Points inside circle: {inside_circle}') + print(f' Distance range: {min_dist:.4f} - {max_dist:.4f}') + + # Aggregate analysis across experiments + print(f'\nAggregate Analysis:') + + pi_estimates = [result['pi_estimate'] for result in experiment_results] + errors = [result['error'] 
for result in experiment_results] + + sort_inplace(pi_estimates) + sort_inplace(errors) + + mean_pi = sum(pi_estimates) / len(pi_estimates) + mean_error = sum(errors) / len(errors) + best_estimate = min(errors) + worst_estimate = max(errors) + + print(f' Mean π estimate: {mean_pi:.6f}') + print(f' Mean error: {mean_error:.6f}') + print(f' Best error: {best_estimate:.6f}') + print(f' Worst error: {worst_estimate:.6f}') + + # Consistency analysis + pi_range = max(pi_estimates) - min(pi_estimates) + print(f' Estimate range: {pi_range:.6f}') + + consistent_experiments = count_if(errors, lambda e: e < 0.01) + print( + f' Consistent experiments (error < 0.01): {consistent_experiments}/{num_experiments}' + ) + + print() + + +def performance_benchmark_suite() -> None: + """Demonstrate comprehensive performance benchmarking. + + Shows how to benchmark different approaches using timing, + containers, algorithms, and statistical analysis. + """ + print('=== Performance Benchmark Suite ===') + + # Test different sorting algorithms + test_sizes = [100, 500, 1000] + algorithms_to_test = ['python_sort', 'manual_sort', 'container_sort'] + + print('Benchmarking sorting algorithms:') + + benchmark_results = {} + + for size in test_sizes: + print(f'\nTest size: {size} elements') + + # Generate test data + gen = RandomGenerator(seed=42) + test_data = gen.integers(1, 1000, size) + + size_results = {} + + # Python's built-in sort + def python_sort_test(): + data = test_data.copy() + data.sort() + return data + + stats = benchmark_function( + python_sort_test, iterations=50, name=f'Python sort ({size})' + ) + size_results['python_sort'] = stats + print(f' Python sort: {stats["human_readable"]["mean"]}') + + # Manual implementation + def manual_sort_test(): + data = test_data.copy() + sort_inplace(data) + return data + + stats = benchmark_function( + manual_sort_test, iterations=50, name=f'Manual sort ({size})' + ) + size_results['manual_sort'] = stats + print(f' Manual sort: 
{stats["human_readable"]["mean"]}') + + # Container-based approach + def container_sort_test(): + container = create_container(test_data.copy()) + data = list(container) + sort_inplace(data) + return data + + stats = benchmark_function( + container_sort_test, iterations=50, name=f'Container sort ({size})' + ) + size_results['container_sort'] = stats + print(f' Container sort: {stats["human_readable"]["mean"]}') + + benchmark_results[size] = size_results + + # Analyze scaling behavior + print(f'\nScaling Analysis:') + + for algorithm in algorithms_to_test: + print(f' {algorithm}:') + + prev_time = None + for size in test_sizes: + current_time = benchmark_results[size][algorithm]['time_ns'] + + if prev_time is not None: + ratio = current_time / prev_time + size_ratio = size / prev_size + print( + f' {prev_size} -> {size}: {ratio:.2f}x slower ({size_ratio:.1f}x data)' + ) + + prev_time = current_time + prev_size = size + + print() + + +def real_world_application_demo() -> None: + """Demonstrate a real-world application scenario. + + Shows a complete application that processes user data, + validates inputs, performs calculations, and handles errors gracefully. 
+ """ + print('=== Real-World Application Demo ===') + + # Simulate a geometric calculator application + print('Geometric Calculator Application') + + with managed_resources() as manager: + # Simulate user input data (normally from forms/files) + user_inputs = [ + {'type': 'circle', 'params': [5.0], 'user_id': 'user_001'}, + {'type': 'rectangle', 'params': [4.0, 6.0], 'user_id': 'user_002'}, + {'type': 'circle', 'params': [-2.0], 'user_id': 'user_003'}, # Invalid + {'type': 'rectangle', 'params': [3.0, 8.0], 'user_id': 'user_004'}, + {'type': 'square', 'params': [4.0], 'user_id': 'user_005'}, + { + 'type': 'invalid_shape', + 'params': [1.0], + 'user_id': 'user_006', + }, # Invalid + ] + + print(f'Processing {len(user_inputs)} user requests...') + + # Process each user input + successful_calculations = [] + failed_calculations = [] + + with measure_time('User request processing'): + for request in user_inputs: + user_id = request['user_id'] + shape_type = request['type'] + params = request['params'] + + print(f'\nProcessing request from {user_id}:') + print(f' Requested: {shape_type} with params {params}') + + try: + # Validate and create shape + if shape_type == 'circle': + if len(params) == 1 and params[0] > 0: + shape = manager.create_circle(params[0]) + else: + raise ValueError('Invalid circle parameters') + elif shape_type == 'rectangle': + if len(params) == 2 and all(p > 0 for p in params): + shape = manager.create_rectangle(params[0], params[1]) + else: + raise ValueError('Invalid rectangle parameters') + elif shape_type == 'square': + if len(params) == 1 and params[0] > 0: + shape = manager.create_rectangle(params[0], params[0]) + else: + raise ValueError('Invalid square parameters') + else: + raise ValueError(f'Unknown shape type: {shape_type}') + + # Calculate properties + area = shape.get_area() + perimeter = shape.get_perimeter() + metrics = analyze_shape(shape) + + # Store results + calculation_result = { + 'user_id': user_id, + 'shape_type': 
shape_type, + 'params': params, + 'area': area, + 'perimeter': perimeter, + 'efficiency': metrics.efficiency, + 'aspect_ratio': metrics.aspect_ratio, + 'status': 'success', + } + + successful_calculations.append(calculation_result) + + print(f' ✓ Success: area={area:.2f}, perimeter={perimeter:.2f}') + print(f' Efficiency: {metrics.efficiency:.4f}') + + except Exception as e: + error_result = { + 'user_id': user_id, + 'shape_type': shape_type, + 'params': params, + 'error': str(e), + 'status': 'error', + } + + failed_calculations.append(error_result) + print(f' ✗ Error: {e}') + + # Generate summary report + print(f'\n=== Summary Report ===') + print(f'Total requests: {len(user_inputs)}') + print(f'Successful: {len(successful_calculations)}') + print(f'Failed: {len(failed_calculations)}') + + if successful_calculations: + # Analyze successful calculations + areas = [calc['area'] for calc in successful_calculations] + perimeters = [calc['perimeter'] for calc in successful_calculations] + efficiencies = [calc['efficiency'] for calc in successful_calculations] + + sort_inplace(areas) + sort_inplace(efficiencies) + + print(f'\nSuccessful Calculations Analysis:') + print(f' Area range: {min(areas):.2f} - {max(areas):.2f}') + print(f' Average efficiency: {sum(efficiencies)/len(efficiencies):.4f}') + print(f' Most efficient: {max(efficiencies):.4f}') + + # Group by shape type + shape_types = {} + for calc in successful_calculations: + shape_type = calc['shape_type'] + if shape_type not in shape_types: + shape_types[shape_type] = [] + shape_types[shape_type].append(calc) + + print(f' By shape type:') + for shape_type, calcs in shape_types.items(): + avg_area = sum(c['area'] for c in calcs) / len(calcs) + print( + f' {shape_type}: {len(calcs)} shapes, avg area: {avg_area:.2f}' + ) + + if failed_calculations: + print(f'\nFailed Calculations:') + for failure in failed_calculations: + print(f' {failure["user_id"]}: {failure["error"]}') + + print() + + +def main() -> int: + 
"""Run all integration examples. + + Returns + ------- + int + Exit code (0 for success, 1 for error) + """ + print('Integration Example') + print('==================') + print() + + try: + geometric_data_analysis() + data_processing_pipeline() + scientific_simulation() + performance_benchmark_suite() + real_world_application_demo() + + print('All integration examples completed successfully!') + return 0 + + except Exception as e: + print(f'Error running integration examples: {e}') + import traceback + + traceback.print_exc() + return 1 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/python/examples/random_example.py b/python/examples/random_example.py new file mode 100644 index 0000000..dd5ebf6 --- /dev/null +++ b/python/examples/random_example.py @@ -0,0 +1,520 @@ +#!/usr/bin/env python3 +"""Random module example demonstrating type-safe random number generation. + +This example shows how to use the random module for generating random numbers, +working with distributions, and creating reproducible random sequences. +""" + +import math +import sys +from pathlib import Path +from typing import Dict, List, Tuple + +# Add the python module to the path +sys.path.insert(0, str(Path(__file__).parent.parent / 'src')) + +from random import Normal, RandomGenerator, UniformInt, randint +from random import random as random_float +from random import sample, shuffle + + +def basic_random_demo() -> None: + """Demonstrate basic random number generation. + + Shows how to create random generators and generate different + types of random values with proper seeding for reproducibility. 
+ """ + print('=== Basic Random Demo ===') + + # Create random generator with seed for reproducible results + gen = RandomGenerator(seed=42) + + print('Basic random value generation:') + print(f'Random integer (1-100): {gen.randint(1, 100)}') + print(f'Random float (0-1): {gen.random():.6f}') + print(f'Random uniform (5-10): {gen.uniform(5.0, 10.0):.4f}') + print(f'Random boolean: {gen.choice()}') + print(f'Random boolean (70% chance): {gen.choice(0.7)}') + print(f'Random normal (μ=0, σ=1): {gen.normal(mean=0.0, stddev=1.0):.4f}') + + # Generate with different seeds + print(f'\nReproducibility with seeds:') + + generators = [ + RandomGenerator(seed=123), + RandomGenerator(seed=123), + RandomGenerator(seed=456), + ] + + for i, generator in enumerate(generators): + values = [generator.randint(1, 10) for _ in range(5)] + print(f' Generator {i+1} (seed {[123, 123, 456][i]}): {values}') + + # Unseeded generator (different each time) + print(f'\nUnseeded generator (non-reproducible):') + unseeded = RandomGenerator() + unseeded_values = [unseeded.randint(1, 10) for _ in range(5)] + print(f' Unseeded values: {unseeded_values}') + + print() + + +def vector_generation_demo() -> None: + """Demonstrate random vector generation. + + Shows how to generate arrays of random values efficiently + for statistical analysis and data generation. 
+ """ + print('=== Vector Generation Demo ===') + + gen = RandomGenerator(seed=789) + + # Integer vectors + print('Integer vector generation:') + + small_ints = gen.integers(1, 10, 8) + large_ints = gen.integers(100, 1000, 5) + negative_ints = gen.integers(-50, 50, 10) + + print(f'Small integers (1-10): {small_ints}') + print(f'Large integers (100-1000): {large_ints}') + print(f'Mixed integers (-50 to 50): {negative_ints}') + + # Float vectors + print(f'\nFloat vector generation:') + + unit_floats = gen.floats(0.0, 1.0, 6) + scaled_floats = gen.floats(10.0, 20.0, 5) + precise_floats = gen.floats(-1.0, 1.0, 8) + + print(f'Unit floats (0-1): {[f"{x:.4f}" for x in unit_floats]}') + print(f'Scaled floats (10-20): {[f"{x:.2f}" for x in scaled_floats]}') + print(f'Precise floats (-1 to 1): {[f"{x:.6f}" for x in precise_floats]}') + + # Statistical properties + print(f'\nStatistical properties of generated vectors:') + + large_sample = gen.floats(0.0, 1.0, 1000) + + mean = sum(large_sample) / len(large_sample) + variance = sum((x - mean) ** 2 for x in large_sample) / len(large_sample) + std_dev = math.sqrt(variance) + + print(f'Large sample (n=1000):') + print(f' Mean: {mean:.4f} (expected ~0.5)') + print(f' Std dev: {std_dev:.4f} (expected ~0.289)') + print(f' Min: {min(large_sample):.4f}') + print(f' Max: {max(large_sample):.4f}') + + print() + + +def distributions_demo() -> None: + """Demonstrate different probability distributions. + + Shows how to use various distribution classes for generating + random numbers from specific probability distributions. 
+ """ + print('=== Distributions Demo ===') + + gen = RandomGenerator(seed=456) + + # Uniform integer distribution + print('Uniform integer distribution:') + + uniform_dist = UniformInt(gen, 10, 20) + uniform_samples = uniform_dist.samples(10) + + print(f' Range: 10-20') + print(f' Samples: {uniform_samples}') + print(f' Single sample: {uniform_dist.sample()}') + + # Normal distribution + print(f'\nNormal distribution:') + + # Standard normal (μ=0, σ=1) + standard_normal = Normal(gen, mean=0.0, stddev=1.0) + std_samples = standard_normal.samples(8) + + print(f' Standard normal (μ=0, σ=1):') + print(f' Samples: {[f"{x:.3f}" for x in std_samples]}') + + # Custom normal distribution + custom_normal = Normal(gen, mean=100.0, stddev=15.0) + custom_samples = custom_normal.samples(8) + + print(f' Custom normal (μ=100, σ=15):') + print(f' Samples: {[f"{x:.1f}" for x in custom_samples]}') + + # Compare distribution properties + print(f'\nDistribution properties analysis:') + + # Generate large samples for analysis + large_uniform = uniform_dist.samples(1000) + large_normal = standard_normal.samples(1000) + + # Uniform distribution analysis + uniform_mean = sum(large_uniform) / len(large_uniform) + uniform_range = max(large_uniform) - min(large_uniform) + + print(f' Uniform distribution (n=1000):') + print(f' Mean: {uniform_mean:.2f} (expected: 15.0)') + print(f' Range: {uniform_range} (expected: 10)') + print(f' Min: {min(large_uniform)}, Max: {max(large_uniform)}') + + # Normal distribution analysis + normal_mean = sum(large_normal) / len(large_normal) + normal_var = sum((x - normal_mean) ** 2 for x in large_normal) / len(large_normal) + normal_std = math.sqrt(normal_var) + + print(f' Normal distribution (n=1000):') + print(f' Mean: {normal_mean:.4f} (expected: 0.0)') + print(f' Std dev: {normal_std:.4f} (expected: 1.0)') + + # Count values within 1, 2, 3 standard deviations + within_1std = sum(1 for x in large_normal if abs(x) <= 1.0) + within_2std = sum(1 for x in 
large_normal if abs(x) <= 2.0) + within_3std = sum(1 for x in large_normal if abs(x) <= 3.0) + + print(f' Within 1σ: {within_1std/10:.1f}% (expected: ~68%)') + print(f' Within 2σ: {within_2std/10:.1f}% (expected: ~95%)') + print(f' Within 3σ: {within_3std/10:.1f}% (expected: ~99.7%)') + + print() + + +def sampling_and_shuffling_demo() -> None: + """Demonstrate sampling and shuffling operations. + + Shows how to randomly sample from collections and shuffle + data while maintaining randomness properties. + """ + print('=== Sampling and Shuffling Demo ===') + + # Create test data + data = list(range(1, 21)) # 1 to 20 + names = ['Alice', 'Bob', 'Charlie', 'Diana', 'Eve', 'Frank', 'Grace', 'Henry'] + + print(f'Original data: {data}') + print(f'Original names: {names}') + + # Shuffling + print(f'\nShuffling operations:') + + shuffled_data = data.copy() + shuffle(shuffled_data) + print(f'Shuffled numbers: {shuffled_data}') + + shuffled_names = names.copy() + shuffle(shuffled_names) + print(f'Shuffled names: {shuffled_names}') + + # Verify shuffling doesn't lose elements + print(f'Data integrity check: {sorted(shuffled_data) == sorted(data)}') + + # Sampling without replacement + print(f'\nSampling without replacement:') + + sample_sizes = [3, 5, 8] + for size in sample_sizes: + sampled = sample(data, size) + print(f' Sample {size} from numbers: {sorted(sampled)}') + + name_sample = sample(names, 4) + print(f' Sample 4 names: {name_sample}') + + # Multiple shuffles to show randomness + print(f'\nMultiple shuffles (demonstrating randomness):') + + original = list(range(1, 11)) + shuffles = [] + + for i in range(5): + shuffled = original.copy() + shuffle(shuffled) + shuffles.append(shuffled) + print(f' Shuffle {i+1}: {shuffled}') + + # Check if all shuffles are different + unique_shuffles = len(set(tuple(s) for s in shuffles)) + print(f' Unique shuffles: {unique_shuffles}/{len(shuffles)}') + + print() + + +def statistical_simulation_demo() -> None: + """Demonstrate 
statistical simulations and Monte Carlo methods. + + Shows how to use random generation for statistical simulations + and Monte Carlo estimation techniques. + """ + print('=== Statistical Simulation Demo ===') + + gen = RandomGenerator(seed=12345) + + # Dice rolling simulation + print('Dice rolling simulation:') + + num_rolls = 10000 + dice_results = gen.integers(1, 6, num_rolls) + + # Count frequencies + frequencies = {i: dice_results.count(i) for i in range(1, 7)} + + print(f' {num_rolls} dice rolls:') + for face, count in frequencies.items(): + percentage = (count / num_rolls) * 100 + print(f' Face {face}: {count} times ({percentage:.1f}%)') + + # Expected is ~16.67% for fair dice + expected_freq = num_rolls / 6 + chi_squared = sum( + (count - expected_freq) ** 2 / expected_freq for count in frequencies.values() + ) + print(f' Chi-squared statistic: {chi_squared:.2f}') + + # Coin flipping simulation + print(f'\nCoin flipping simulation:') + + num_flips = 5000 + heads_probability = 0.5 + + heads_count = 0 + for _ in range(num_flips): + if gen.choice(heads_probability): + heads_count += 1 + + heads_percentage = (heads_count / num_flips) * 100 + tails_count = num_flips - heads_count + + print(f' {num_flips} coin flips:') + print(f' Heads: {heads_count} ({heads_percentage:.1f}%)') + print(f' Tails: {tails_count} ({100-heads_percentage:.1f}%)') + + # Monte Carlo π estimation + print(f'\nMonte Carlo π estimation:') + + num_points = 100000 + inside_circle = 0 + + # Generate random points in unit square and count those inside unit circle + for _ in range(num_points): + x = gen.uniform(-1.0, 1.0) + y = gen.uniform(-1.0, 1.0) + if x * x + y * y <= 1.0: + inside_circle += 1 + + pi_estimate = 4.0 * inside_circle / num_points + error = abs(pi_estimate - math.pi) + + print(f' Points tested: {num_points}') + print(f' Points inside circle: {inside_circle}') + print(f' Estimated π: {pi_estimate:.6f}') + print(f' Actual π: {math.pi:.6f}') + print(f' Error: {error:.6f}') + + # 
Random walk simulation + print(f'\nRandom walk simulation:') + + steps = 1000 + position = 0 + positions = [position] + + for _ in range(steps): + step = 1 if gen.choice() else -1 + position += step + positions.append(position) + + max_pos = max(positions) + min_pos = min(positions) + final_pos = positions[-1] + + print(f' Steps: {steps}') + print(f' Final position: {final_pos}') + print(f' Max position: {max_pos}') + print(f' Min position: {min_pos}') + print(f' Range: {max_pos - min_pos}') + + print() + + +def data_generation_demo() -> None: + """Demonstrate random data generation for testing. + + Shows how to generate realistic test data for various + applications including names, coordinates, and datasets. + """ + print('=== Data Generation Demo ===') + + gen = RandomGenerator(seed=999) + + # Generate test coordinates + print('Random coordinate generation:') + + # 2D coordinates + num_points = 8 + coordinates_2d = [ + (gen.uniform(-10.0, 10.0), gen.uniform(-10.0, 10.0)) for _ in range(num_points) + ] + + print(f' 2D coordinates:') + for i, (x, y) in enumerate(coordinates_2d): + print(f' Point {i+1}: ({x:.2f}, {y:.2f})') + + # 3D coordinates + coordinates_3d = [ + (gen.uniform(-5.0, 5.0), gen.uniform(-5.0, 5.0), gen.uniform(-5.0, 5.0)) + for _ in range(5) + ] + + print(f' 3D coordinates:') + for i, (x, y, z) in enumerate(coordinates_3d): + print(f' Point {i+1}: ({x:.2f}, {y:.2f}, {z:.2f})') + + # Generate test datasets + print(f'\nTest dataset generation:') + + # Student grades dataset + num_students = 10 + subjects = ['Math', 'Science', 'English', 'History'] + + print(f' Student grades dataset:') + for i in range(num_students): + student_id = f'S{i+1:03d}' + grades = {subject: gen.randint(60, 100) for subject in subjects} + avg_grade = sum(grades.values()) / len(grades) + print(f' {student_id}: {grades} (avg: {avg_grade:.1f})') + + # Sales data simulation + print(f'\nSales data simulation:') + + months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun'] + products = 
['Widget A', 'Widget B', 'Widget C'] + + for month in months: + monthly_sales = {} + for product in products: + # Simulate seasonal variation + base_sales = gen.randint(100, 500) + seasonal_factor = gen.uniform(0.8, 1.2) + sales = int(base_sales * seasonal_factor) + monthly_sales[product] = sales + + total_sales = sum(monthly_sales.values()) + print(f' {month}: {monthly_sales} (total: {total_sales})') + + # Random configuration generation + print(f'\nRandom configuration generation:') + + config_templates = [ + {'name': 'timeout', 'type': 'int', 'range': (1, 60)}, + {'name': 'retry_count', 'type': 'int', 'range': (1, 5)}, + {'name': 'success_rate', 'type': 'float', 'range': (0.8, 1.0)}, + {'name': 'buffer_size', 'type': 'int', 'range': (1024, 8192)}, + {'name': 'compression', 'type': 'bool', 'range': None}, + ] + + num_configs = 3 + for i in range(num_configs): + config = {} + for template in config_templates: + name = template['name'] + if template['type'] == 'int': + min_val, max_val = template['range'] + config[name] = gen.randint(min_val, max_val) + elif template['type'] == 'float': + min_val, max_val = template['range'] + config[name] = gen.uniform(min_val, max_val) + elif template['type'] == 'bool': + config[name] = gen.choice() + + print(f' Config {i+1}: {config}') + + print() + + +def convenience_functions_demo() -> None: + """Demonstrate convenience functions for quick random generation. + + Shows how to use module-level functions for simple random + generation tasks without creating generator instances. 
+ """ + print('=== Convenience Functions Demo ===') + + print('Module-level convenience functions:') + + # Quick random generation without explicit generator + print(f'Quick randint (1-10): {randint(1, 10)}') + print(f'Quick random float: {random_float():.6f}') + + # Generate multiple values quickly + quick_ints = [randint(1, 100) for _ in range(5)] + quick_floats = [random_float() for _ in range(5)] + + print(f'Quick integers: {quick_ints}') + print(f'Quick floats: {[f"{x:.4f}" for x in quick_floats]}') + + # Quick shuffling and sampling + test_list = list(range(1, 11)) + print(f'Original list: {test_list}') + + shuffled_copy = test_list.copy() + shuffle(shuffled_copy) + print(f'Shuffled: {shuffled_copy}') + + sampled = sample(test_list, 4) + print(f'Sample of 4: {sampled}') + + # Compare with generator approach + print(f'\nComparison with explicit generator:') + + gen = RandomGenerator(seed=42) + + print('Convenience functions (different each time):') + for i in range(3): + values = [randint(1, 6) for _ in range(3)] + print(f' Run {i+1}: {values}') + + print('Generator with fixed seed (reproducible):') + for i in range(3): + gen = RandomGenerator(seed=42) # Reset with same seed + values = [gen.randint(1, 6) for _ in range(3)] + print(f' Run {i+1}: {values}') + + print() + + +def main() -> int: + """Run all random generation examples. 
+ + Returns + ------- + int + Exit code (0 for success, 1 for error) + """ + print('Random Module Example') + print('====================') + print() + + try: + basic_random_demo() + vector_generation_demo() + distributions_demo() + sampling_and_shuffling_demo() + statistical_simulation_demo() + data_generation_demo() + convenience_functions_demo() + + print('All random generation examples completed successfully!') + return 0 + + except Exception as e: + print(f'Error running random examples: {e}') + import traceback + + traceback.print_exc() + return 1 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/python/examples/shapes_example.py b/python/examples/shapes_example.py new file mode 100644 index 0000000..3c7c6ed --- /dev/null +++ b/python/examples/shapes_example.py @@ -0,0 +1,228 @@ +#!/usr/bin/env python3 +"""Shapes module example demonstrating geometric shape creation and analysis. + +This example shows how to create different shapes, calculate their properties, +and perform analysis using the C++ Python bindings. +""" + +import sys +from pathlib import Path +from typing import List + +# Add the python module to the path +sys.path.insert(0, str(Path(__file__).parent.parent / 'src')) + +from shapes import Circle, Rectangle, analyze_shape, compare_shapes, create_shape + + +def basic_shapes_demo() -> None: + """Demonstrate basic shape creation and property calculation. + + Creates different types of shapes and displays their basic properties + like area and perimeter. 
+ """ + print('=== Basic Shapes Demo ===') + + # Create different shapes using factory functions + circle = create_shape('circle', 5.0) + rectangle = create_shape('rectangle', 4.0, 3.0) + square = create_shape('square', 6.0) + + print( + f'Circle: area={circle.get_area():.2f}, perimeter={circle.get_perimeter():.2f}' + ) + print( + f'Rectangle: area={rectangle.get_area():.2f}, perimeter={rectangle.get_perimeter():.2f}' + ) + print( + f'Square: area={square.get_area():.2f}, perimeter={square.get_perimeter():.2f}' + ) + + print() + + +def shape_analysis_demo() -> None: + """Demonstrate advanced shape analysis capabilities. + + Shows how to analyze individual shapes and extract detailed metrics + including aspect ratios and other derived properties. + """ + print('=== Shape Analysis Demo ===') + + # Create shapes for analysis + circle = create_shape('circle', 3.5) + rectangle = create_shape('rectangle', 8.0, 2.5) + + # Analyze individual shapes + circle_metrics = analyze_shape(circle) + rectangle_metrics = analyze_shape(rectangle) + + print(f'Circle analysis:') + print(f' - Area: {circle_metrics.area:.2f}') + print(f' - Perimeter: {circle_metrics.perimeter:.2f}') + print(f' - Aspect ratio: {circle_metrics.aspect_ratio:.4f}') + + print(f'Rectangle analysis:') + print(f' - Area: {rectangle_metrics.area:.2f}') + print(f' - Perimeter: {rectangle_metrics.perimeter:.2f}') + print(f' - Aspect ratio: {rectangle_metrics.aspect_ratio:.4f}') + + print() + + +def shape_comparison_demo() -> None: + """Demonstrate shape comparison and collection analysis. + + Shows how to compare multiple shapes and extract aggregate statistics + from collections of geometric objects. 
+ """ + print('=== Shape Comparison Demo ===') + + # Create a collection of shapes + shapes = [ + create_shape('circle', 4.0), + create_shape('rectangle', 5.0, 3.0), + create_shape('square', 4.5), + create_shape('circle', 2.5), + create_shape('rectangle', 6.0, 2.0), + ] + + # Compare all shapes + comparison = compare_shapes(*shapes) + + print(f'Shape Collection Analysis:') + print(f' - Total shapes: {len(shapes)}') + print(f' - Total area: {comparison["total_area"]:.2f}') + print(f' - Total perimeter: {comparison["total_perimeter"]:.2f}') + print(f' - Average area: {comparison["average_area"]:.2f}') + print(f' - Largest shape by area: {comparison["largest_by_area"].get_name()}') + print(f' - Smallest shape by area: {comparison["smallest_by_area"].get_name()}') + + # Show individual shape details + print(f'\nIndividual Shape Details:') + for i, shape in enumerate(shapes, 1): + metrics = analyze_shape(shape) + print(f' {i}. {shape.get_name()}: area={metrics.area:.2f}') + + print() + + +def advanced_shape_operations() -> None: + """Demonstrate advanced shape operations and edge cases. + + Shows handling of edge cases, validation, and advanced geometric + calculations with error handling. 
+ """ + print('=== Advanced Shape Operations ===') + + # Test edge cases and validation + test_cases = [ + ('Very small circle', 'circle', 0.1), + ('Large rectangle', 'rectangle', 100.0, 50.0), + ('Square', 'square', 7.5), + ('Thin rectangle', 'rectangle', 20.0, 0.5), + ] + + print('Testing various shape configurations:') + for description, shape_type, *params in test_cases: + try: + shape = create_shape(shape_type, *params) + metrics = analyze_shape(shape) + + print(f' {description}:') + print(f' - Type: {shape.get_name()}') + print(f' - Area: {metrics.area:.4f}') + print(f' - Perimeter: {metrics.perimeter:.4f}') + print(f' - Aspect ratio: {metrics.aspect_ratio:.4f}') + + # Check for interesting properties + if metrics.aspect_ratio > 5.0: + print( + f' - Note: Very elongated shape (aspect ratio: {metrics.aspect_ratio:.2f})' + ) + if metrics.efficiency > 0.5: + print( + f' - Note: Highly efficient shape (efficiency: {metrics.efficiency:.4f})' + ) + + except Exception as e: + print(f' {description}: Error - {e}') + + print() + + +def shape_factory_patterns() -> None: + """Demonstrate various shape creation patterns. + + Shows different ways to create shapes using factory methods and + direct constructors with parameter validation. 
+ """ + print('=== Shape Factory Patterns ===') + + # Direct constructor usage + print('Direct constructors:') + direct_circle = Circle(radius=3.0) + direct_rectangle = Rectangle(width=4.0, height=6.0) + + print(f' Circle (direct): area={direct_circle.get_area():.2f}') + print(f' Rectangle (direct): area={direct_rectangle.get_area():.2f}') + + # Factory function usage with different patterns + print('\nFactory function patterns:') + + # Standard shapes + unit_circle = create_shape('circle', 1.0) + unit_square = create_shape('square', 1.0) + + print(f' Unit circle: area={unit_circle.get_area():.4f}') + print(f' Unit square: area={unit_square.get_area():.4f}') + + # Golden ratio rectangle + golden_ratio = 1.618033988749 + golden_rectangle = create_shape('rectangle', golden_ratio, 1.0) + golden_metrics = analyze_shape(golden_rectangle) + + print(f' Golden rectangle: aspect_ratio={golden_metrics.aspect_ratio:.6f}') + + # Array of similar shapes for batch processing + radii = [1.0, 1.5, 2.0, 2.5, 3.0] + circles = [create_shape('circle', r) for r in radii] + areas = [c.get_area() for c in circles] + + print(f' Circle areas for radii {radii}: {[f"{a:.2f}" for a in areas]}') + + print() + + +def main() -> int: + """Run all shape examples. 
+ + Returns + ------- + int + Exit code (0 for success, 1 for error) + """ + print('Shapes Module Example') + print('====================') + print() + + try: + basic_shapes_demo() + shape_analysis_demo() + shape_comparison_demo() + advanced_shape_operations() + shape_factory_patterns() + + print('All shape examples completed successfully!') + return 0 + + except Exception as e: + print(f'Error running shape examples: {e}') + import traceback + + traceback.print_exc() + return 1 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/python/examples/timing_example.py b/python/examples/timing_example.py new file mode 100644 index 0000000..859d80e --- /dev/null +++ b/python/examples/timing_example.py @@ -0,0 +1,577 @@ +#!/usr/bin/env python3 +"""Timing module example demonstrating high-resolution timing and benchmarking. + +This example shows how to use the timing module for performance measurement, +benchmarking, and timing analysis with nanosecond precision. +""" + +import random +import sys +import time +from pathlib import Path +from typing import Any, Callable, Dict, List + +# Add the python module to the path +sys.path.insert(0, str(Path(__file__).parent.parent / 'src')) + +from timing import Benchmark, Timer, benchmark_function, measure_time, time_function + + +def basic_timer_demo() -> None: + """Demonstrate basic timer usage. + + Shows how to use the Timer class for measuring execution time + of code blocks and operations. 
+ """ + print('=== Basic Timer Demo ===') + + # Basic timer usage + print('Basic timer with context manager:') + + with Timer() as timer: + # Simulate some work + total = sum(i * i for i in range(1000)) + time.sleep(0.01) # Add small delay + + print(f'Operation completed in: {timer.elapsed_string}') + print(f'Raw elapsed time: {timer.elapsed_ns} nanoseconds') + print(f'Result: {total}') + + # Manual timer control + print(f'\nManual timer control:') + + timer = Timer() + timer.start() + + # Simulate multiple operations + results = [] + for i in range(5): + operation_result = sum(j for j in range(100 * (i + 1))) + results.append(operation_result) + print( + f' Operation {i+1}: result={operation_result}, time={timer.elapsed_string}' + ) + + timer.stop() + print(f'Total time for all operations: {timer.elapsed_string}') + + # Timer with reset functionality + print(f'\nTimer with reset:') + + timer.reset() + timer.start() + + quick_calc = 2**10 + timer.stop() + print(f'Quick calculation (2^10 = {quick_calc}): {timer.elapsed_string}') + + print() + + +def measure_time_demo() -> None: + """Demonstrate the measure_time context manager. + + Shows how to use measure_time for convenient timing + with automatic logging and labeling. 
+ """ + print('=== Measure Time Demo ===') + + # Basic measure_time usage + print('Measuring time for different operations:') + + with measure_time('List comprehension'): + squares = [x**2 for x in range(1000)] + + with measure_time('Generator expression'): + squares_gen = list(x**2 for x in range(1000)) + + with measure_time('Traditional loop'): + squares_loop = [] + for x in range(1000): + squares_loop.append(x**2) + + # Verify results are the same + print(f'Results match: {squares == squares_gen == squares_loop}') + + # Nested timing + print(f'\nNested timing measurements:') + + with measure_time('Data processing pipeline'): + data = list(range(500)) + + with measure_time(' Data filtering'): + filtered = [x for x in data if x % 2 == 0] + + with measure_time(' Data transformation'): + transformed = [x * 3 + 1 for x in filtered] + + with measure_time(' Data sorting'): + sorted_data = sorted(transformed, reverse=True) + + print(f'Final result size: {len(sorted_data)}') + + # Measuring different algorithms + print(f'\nComparing sorting algorithms:') + + # Generate test data + test_data = [random.randint(1, 1000) for _ in range(500)] + + with measure_time('Built-in sorted()'): + result1 = sorted(test_data.copy()) + + with measure_time('List.sort() method'): + data_copy = test_data.copy() + data_copy.sort() + result2 = data_copy + + print(f'Results identical: {result1 == result2}') + + print() + + +def benchmark_function_demo() -> None: + """Demonstrate function benchmarking capabilities. + + Shows how to benchmark functions with multiple iterations + and statistical analysis of performance. 
+ """ + print('=== Benchmark Function Demo ===') + + # Define test functions + def bubble_sort(arr: List[int]) -> List[int]: + """Simple bubble sort implementation.""" + arr = arr.copy() + n = len(arr) + for i in range(n): + for j in range(0, n - i - 1): + if arr[j] > arr[j + 1]: + arr[j], arr[j + 1] = arr[j + 1], arr[j] + return arr + + def python_sort(arr: List[int]) -> List[int]: + """Python's built-in sort.""" + return sorted(arr) + + def manual_sort(arr: List[int]) -> List[int]: + """Manual implementation using list.sort().""" + arr = arr.copy() + arr.sort() + return arr + + # Test data + small_data = [random.randint(1, 100) for _ in range(50)] + medium_data = [random.randint(1, 1000) for _ in range(200)] + + print('Benchmarking different sorting algorithms:') + print(f'Small dataset ({len(small_data)} elements):') + + # Benchmark each function + algorithms = [ + ('Bubble Sort', bubble_sort), + ('Python sorted()', python_sort), + ('List.sort()', manual_sort), + ] + + small_results = {} + for name, func in algorithms: + stats = benchmark_function( + lambda: func(small_data), iterations=10, name=f'{name} (small)' + ) + small_results[name] = stats + print(f' {name}:') + print(f' Mean: {stats["human_readable"]["mean"]}') + print(f' Min: {stats["human_readable"]["min"]}') + print(f' Max: {stats["human_readable"]["max"]}') + + # Benchmark with larger dataset (skip bubble sort - too slow) + print(f'\nMedium dataset ({len(medium_data)} elements):') + + fast_algorithms = [('Python sorted()', python_sort), ('List.sort()', manual_sort)] + medium_results = {} + + for name, func in fast_algorithms: + stats = benchmark_function( + lambda f=func: f(medium_data), iterations=50, name=f'{name} (medium)' + ) + medium_results[name] = stats + print(f' {name}:') + print(f' Mean: {stats["human_readable"]["mean"]}') + print(f' Std: {stats["human_readable"]["std"]}') + + print() + + +def time_function_demo() -> None: + """Demonstrate single function timing. 
+ + Shows how to time individual function calls for + quick performance measurements. + """ + print('=== Time Function Demo ===') + + # Time different operations + operations = [ + ('List creation', lambda: list(range(1000))), + ('List comprehension', lambda: [x**2 for x in range(1000)]), + ('String operations', lambda: ''.join(str(i) for i in range(100))), + ('Dictionary creation', lambda: {i: i**2 for i in range(500)}), + ('Set operations', lambda: set(range(1000)) & set(range(500, 1500))), + ] + + print('Single function timing:') + + for name, operation in operations: + elapsed = time_function(operation) + print(f' {name}: {elapsed}') + + # Time mathematical operations + print(f'\nMathematical operations:') + + math_ops = [ + ('Sum 1-10000', lambda: sum(range(10000))), + ( + 'Factorial 20', + lambda: eval('1*2*3*4*5*6*7*8*9*10*11*12*13*14*15*16*17*18*19*20'), + ), + ('Power operations', lambda: [2**i for i in range(20)]), + ('Square roots', lambda: [i**0.5 for i in range(1000)]), + ] + + for name, operation in math_ops: + elapsed = time_function(operation) + print(f' {name}: {elapsed}') + + print() + + +def benchmark_class_demo() -> None: + """Demonstrate the Benchmark class for complex benchmarking. + + Shows how to use the Benchmark class for organizing + and managing multiple related performance tests. 
+ """ + print('=== Benchmark Class Demo ===') + + # Create benchmark suite + bench = Benchmark('Data Structure Operations') + + # Test different data structures + test_size = 1000 + test_data = list(range(test_size)) + + print(f'Benchmarking data structure operations ({test_size} elements):') + + # List operations + def list_operations(): + """Benchmark list operations.""" + data = test_data.copy() + data.append(test_size) + data.insert(0, -1) + data.remove(-1) + data.pop() + return len(data) + + list_stats = bench.run(list_operations, iterations=100, name='List operations') + print(f' List operations: {list_stats["human_readable"]["mean"]}') + + # Dictionary operations + def dict_operations(): + """Benchmark dictionary operations.""" + data = {i: i**2 for i in test_data} + data[test_size] = test_size**2 + del data[0] + return len(data) + + dict_stats = bench.run(dict_operations, iterations=100, name='Dict operations') + print(f' Dict operations: {dict_stats["human_readable"]["mean"]}') + + # Set operations + def set_operations(): + """Benchmark set operations.""" + data = set(test_data) + data.add(test_size) + data.discard(0) + return len(data) + + set_stats = bench.run(set_operations, iterations=100, name='Set operations') + print(f' Set operations: {set_stats["human_readable"]["mean"]}') + + # Compare results + print(f'\nPerformance comparison:') + results = [ + ('List', list_stats['time_ns']), + ('Dict', dict_stats['time_ns']), + ('Set', set_stats['time_ns']), + ] + + # Sort by performance + results.sort(key=lambda x: x[1]) + fastest = results[0][1] + + for name, time_ns in results: + ratio = time_ns / fastest + print( + f' {name}: {ratio:.2f}x slower than fastest' + if ratio > 1 + else f' {name}: fastest' + ) + + print() + + +def real_world_benchmarking_demo() -> None: + """Demonstrate real-world benchmarking scenarios. + + Shows practical examples of benchmarking actual algorithms + and data processing tasks that might be found in applications. 
+ """ + print('=== Real-World Benchmarking Demo ===') + + # File processing simulation + print('File processing simulation:') + + # Simulate file content + file_content = [] + for i in range(1000): + line = f'data_{i}:value_{i**2}:timestamp_{i*1000}' + file_content.append(line) + + def csv_like_parsing(): + """Parse CSV-like data.""" + parsed = [] + for line in file_content: + parts = line.split(':') + if len(parts) == 3: + parsed.append( + { + 'data': parts[0], + 'value': int(parts[1].split('_')[1]), + 'timestamp': int(parts[2].split('_')[1]), + } + ) + return len(parsed) + + def regex_parsing(): + """Parse using string operations.""" + import re + + pattern = r'data_(\d+):value_(\d+):timestamp_(\d+)' + parsed = [] + for line in file_content: + match = re.match(pattern, line) + if match: + parsed.append( + { + 'data': int(match.group(1)), + 'value': int(match.group(2)), + 'timestamp': int(match.group(3)), + } + ) + return len(parsed) + + # Benchmark parsing methods + csv_time = time_function(csv_like_parsing) + regex_time = time_function(regex_parsing) + + print(f' CSV-like parsing: {csv_time}') + print(f' Regex parsing: {regex_time}') + + # Data aggregation simulation + print(f'\nData aggregation methods:') + + numbers = [random.randint(1, 100) for _ in range(5000)] + + def groupby_aggregation(): + """Group and aggregate using itertools.""" + from itertools import groupby + + sorted_nums = sorted(numbers) + groups = {} + for key, group in groupby(sorted_nums): + groups[key] = len(list(group)) + return len(groups) + + def dict_aggregation(): + """Aggregate using dictionary.""" + groups = {} + for num in numbers: + groups[num] = groups.get(num, 0) + 1 + return len(groups) + + def counter_aggregation(): + """Aggregate using Counter.""" + from collections import Counter + + groups = Counter(numbers) + return len(groups) + + # Time each approach + groupby_time = time_function(groupby_aggregation) + dict_time = time_function(dict_aggregation) + counter_time = 
time_function(counter_aggregation) + + print(f' Groupby method: {groupby_time}') + print(f' Dictionary method: {dict_time}') + print(f' Counter method: {counter_time}') + + # Algorithm comparison + print(f'\nAlgorithm optimization:') + + search_data = list(range(10000)) + target = 7500 + + def linear_search(): + """Linear search implementation.""" + for i, value in enumerate(search_data): + if value == target: + return i + return -1 + + def binary_search(): + """Binary search implementation.""" + left, right = 0, len(search_data) - 1 + while left <= right: + mid = (left + right) // 2 + if search_data[mid] == target: + return mid + elif search_data[mid] < target: + left = mid + 1 + else: + right = mid - 1 + return -1 + + def builtin_search(): + """Using built-in index method.""" + try: + return search_data.index(target) + except ValueError: + return -1 + + # Benchmark search algorithms + search_algorithms = [ + ('Linear search', linear_search), + ('Binary search', binary_search), + ('Built-in index', builtin_search), + ] + + print(f'Searching for {target} in {len(search_data)} elements:') + + search_results = {} + for name, func in search_algorithms: + stats = benchmark_function(func, iterations=1000, name=name) + search_results[name] = stats + print(f' {name}: {stats["human_readable"]["mean"]}') + + print() + + +def performance_analysis_demo() -> None: + """Demonstrate performance analysis and profiling. + + Shows how to analyze timing results and identify + performance bottlenecks in code. 
+ """ + print('=== Performance Analysis Demo ===') + + # Analyze performance patterns + print('Performance pattern analysis:') + + def create_test_function(size_factor: int): + """Create a test function with different computational complexity.""" + + def test_func(): + n = 100 * size_factor + return sum(i * j for i in range(n) for j in range(10)) + + return test_func + + # Test with different sizes + size_factors = [1, 2, 3, 4, 5] + timing_results = [] + + for factor in size_factors: + test_func = create_test_function(factor) + stats = benchmark_function(test_func, iterations=10, name=f'Size {factor}') + mean_time = stats['time_ns'] / 1_000_000 # Convert to milliseconds + timing_results.append((factor, mean_time)) + print(f' Size factor {factor}: {stats["human_readable"]["mean"]}') + + # Analyze scaling + print(f'\nScaling analysis:') + for i in range(1, len(timing_results)): + prev_factor, prev_time = timing_results[i - 1] + curr_factor, curr_time = timing_results[i] + + time_ratio = curr_time / prev_time + size_ratio = curr_factor / prev_factor + + print( + f' Factor {prev_factor} -> {curr_factor}: {time_ratio:.2f}x slower ({size_ratio:.1f}x larger)' + ) + + # Memory vs. computation tradeoff + print(f'\nMemory vs. 
computation tradeoff:') + + def compute_heavy(): + """Computation-heavy approach.""" + total = 0 + for i in range(1000): + total += sum(j**2 for j in range(50)) + return total + + def memory_heavy(): + """Memory-heavy approach.""" + precomputed = [j**2 for j in range(50)] + total = 0 + for i in range(1000): + total += sum(precomputed) + return total + + compute_time = time_function(compute_heavy) + memory_time = time_function(memory_heavy) + + print(f' Compute-heavy approach: {compute_time}') + print(f' Memory-heavy approach: {memory_time}') + + if float(memory_time.split()[0]) < float(compute_time.split()[0]): + print(' Memory optimization wins!') + else: + print(' Computation optimization wins!') + + print() + + +def main() -> int: + """Run all timing examples. + + Returns + ------- + int + Exit code (0 for success, 1 for error) + """ + print('Timing Module Example') + print('====================') + print() + + try: + basic_timer_demo() + measure_time_demo() + benchmark_function_demo() + time_function_demo() + benchmark_class_demo() + real_world_benchmarking_demo() + performance_analysis_demo() + + print('All timing examples completed successfully!') + return 0 + + except Exception as e: + print(f'Error running timing examples: {e}') + import traceback + + traceback.print_exc() + return 1 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/python/src/demo/__init__.py b/python/src/demo/__init__.py new file mode 100644 index 0000000..5030652 --- /dev/null +++ b/python/src/demo/__init__.py @@ -0,0 +1,14 @@ +"""Python wrappers for the C++ demo project.""" + +from cpp_features import __version__ + +from . 
import algorithms, containers, exceptions, random, shapes, timing + +__all__ = [ + 'algorithms', + 'containers', + 'exceptions', + 'random', + 'shapes', + 'timing', +] diff --git a/python/src/demo/algorithms.py b/python/src/demo/algorithms.py new file mode 100644 index 0000000..a813630 --- /dev/null +++ b/python/src/demo/algorithms.py @@ -0,0 +1,176 @@ +"""Python wrapper for the algorithms module.""" + +from __future__ import annotations + +from collections.abc import Iterable +from typing import TYPE_CHECKING, Any, Callable, TypeVar + +if TYPE_CHECKING: + from _typeshed import SupportsRichComparisonT + +import cpp_features.algorithms as _algorithms + +from .containers import Container + +T = TypeVar('T') +U = TypeVar('U') + + +def sort_inplace(data: list[T] | Container[T]) -> None: + """Sort a container in-place. + + Sorts the elements in the container in ascending order. + + Parameters + ---------- + data : list[T] | Container[T] + The container to sort in-place + + Examples + -------- + >>> sort_inplace([3, 1, 4, 1, 5, 9, 2, 6, 5, 3]) + [1, 1, 2, 3, 3, 4, 5, 5, 6, 9] + >>> sort_inplace(Container(str, ['cherry', 'banana', 'elderberry', 'date', 'apple'])) + ['apple', 'banana', 'cherry', 'date', 'elderberry'] + """ + match data: + case list(): + _algorithms.sort(data) + case Container(): + _algorithms.sort(data._container) + + +def count_if(data: Iterable[T], predicate: Callable[[T], bool]) -> int: + """Count elements in a sequence that satisfy a predicate. + + Counts the number of elements in the sequence for which the predicate + returns true. 
+ + Parameters + ---------- + data : Iterable[T] + The input sequence to examine + predicate : Callable[[T], bool] + The predicate function to test each element + + Returns + ------- + int + The number of elements that satisfy the predicate + + Examples + -------- + >>> count_if([1, 2, 3, 4, 5], lambda x: x % 2 == 1) + 3 + >>> count_if(Container(str, ['apple', 'banana', 'cherry']), lambda x: len(x) > 5) + 2 + """ + match data: + case list() | tuple(): + return _algorithms.count_if(list(data), predicate) + case Container(): + return _algorithms.count_if(data._container, predicate) + case _: + return sum(1 for item in data if predicate(item)) + + +def transform_to_list(data: Iterable[T], func: Callable[[T], U]) -> list[U]: + """Transform a sequence into a list using a transformation function. + + Applies the transformation function to each element in the input sequence + and collects the results in a new list. + + Parameters + ---------- + data : Iterable[T] + The input sequence to transform + func : Callable[[T], U] + The transformation function to apply to each element + + Returns + ------- + list[U] + A list containing the transformed elements + + Examples + -------- + >>> transform_to_list([1, 2, 3, 4, 5], lambda x: x * 2) + [2, 4, 6, 8, 10] + >>> transform_to_list(Container(str, ['apple', 'banana', 'cherry']), lambda x: x.upper()) + ['APPLE', 'BANANA', 'CHERRY'] + """ + match data: + case list() | tuple(): + return _algorithms.transform_to_list(list(data), func) + case Container(): + return _algorithms.transform_to_list(data._container, func) + case _: + return [func(item) for item in data] + + +def find_min_max( + data: Iterable[SupportsRichComparisonT], +) -> tuple[SupportsRichComparisonT, SupportsRichComparisonT]: + """Find the minimum and maximum elements in a sequence. 
+ + Parameters + ---------- + data : Iterable[SupportsRichComparisonT] + The input sequence to search for minimum and maximum elements + + Returns + ------- + tuple[SupportsRichComparisonT, SupportsRichComparisonT] + A tuple containing the minimum and maximum elements (min, max) + """ + match data: + case list() | tuple(): + return _algorithms.find_min_max(list(data)) + case Container(): + return _algorithms.find_min_max(data._container) + case _: + return min(data), max(data) + + +def pipeline(*functions: Callable[[Any], Any]) -> Callable[[Any], Any]: + """Create a function pipeline. + + Creates a function that applies a sequence of functions in a pipeline. + + Parameters + ---------- + *functions : Callable[[Any], Any] + The functions to compose in pipeline order + + Returns + ------- + Callable[[Any], Any] + A composed function that applies all functions in sequence + + Examples + -------- + >>> process = pipeline( + ... lambda data: [x * 2 for x in data], + ... lambda data: [x for x in data if x > 5], + ... sum, + ... 
) + >>> process([1, 2, 3, 4, 5]) + 24 + """ + + def composed(data: Any) -> Any: + result = data + for f in functions: + result = f(result) + return result + + return composed + + +__all__ = [ + 'sort_inplace', + 'count_if', + 'transform_to_list', + 'find_min_max', + 'pipeline', +] diff --git a/python/src/demo/containers.py b/python/src/demo/containers.py new file mode 100644 index 0000000..18f3d1a --- /dev/null +++ b/python/src/demo/containers.py @@ -0,0 +1,249 @@ +"""Python wrapper for the containers module.""" + +import builtins +from collections.abc import Iterable +from typing import Any, Callable, Generic, Iterator, TypeVar + +import cpp_features.containers as _containers + +T = TypeVar('T') +U = TypeVar('U') + + +class Container(Generic[T]): + """A generic container wrapper with enhanced functionality.""" + + def __init__( + self, container_type: type[T], data: Iterable[T] | None = None + ) -> None: + """Initialize container with specific type. + + Parameters + ---------- + container_type : type[T] + The type of elements stored in the container + data : Iterable[T], optional + Initial data to populate the container + + Examples + -------- + >>> Container(int, [1, 2, 3]) + + >>> Container(float, [1.1, 2.2, 3.3]) + + >>> Container(str, ['a', 'b', 'c']) + + """ + self._type = container_type + + match container_type: + case builtins.int: + cls = _containers.IntContainer + case builtins.float: + cls = _containers.FloatContainer + case builtins.str: + cls = _containers.StringContainer + case _: + raise ValueError(f'Unsupported container type: {container_type}') + + self._container = cls(list(data)) if data else cls() + + def add(self, item: T) -> None: + """Add an element to the container. + + Adds a copy of the specified element to the end of the container. 
+ + Parameters + ---------- + item : T + The element to add + + Examples + -------- + >>> container = Container(int, [1, 2, 3]) + >>> container.add(4) + >>> list(container) + [1, 2, 3, 4] + """ + self._container.add(item) + + def remove(self, item: T) -> int: + """Remove all occurrences of a specific item. + + Removes all elements that compare equal to the specified item. + The container size is reduced by the number of removed elements. + + Parameters + ---------- + item : T + The item to remove from the container + + Returns + ------- + int + The number of elements that were removed + + Examples + -------- + >>> container = Container(int, [1, 2, 3, 2, 4, 2]) + >>> container.remove(2) + 3 + >>> list(container) + [1, 3, 4] + """ + return self._container.remove(item) + + def __len__(self) -> int: + """Get the number of elements in the container. + + Returns + ------- + int + The number of elements currently stored in the container + + Examples + -------- + >>> container = Container(int, [1, 2, 3]) + >>> len(container) + 3 + """ + return len(self._container) + + def __bool__(self) -> bool: + """Check if the container is not empty. + + Returns + ------- + bool + True if the container contains elements, False otherwise + + Examples + -------- + >>> container = Container(int, [1, 2, 3]) + >>> bool(container) + True + >>> container = Container(int, []) + >>> bool(container) + False + """ + return bool(self._container) + + def __iter__(self) -> Iterator[T]: + """Iterate over the container. + + Iterates over the elements in the container in the order they were added. + + Returns + ------- + Iterator[T] + Iterator over container elements + + Examples + -------- + >>> container = Container(int, [1, 2, 3]) + >>> for item in container: + ... print(item) + 1 + 2 + 3 + """ + return iter(self._container) + + def __getitem__(self, index: int) -> T: + """Access the element at specified index. + + Returns the element at the specified index. 
+ + Parameters + ---------- + index : int + The index of the element to access + + Returns + ------- + T + The element at the specified index + + Raises + ------ + IndexError + If the index is out of bounds + + Examples + -------- + >>> container = Container(int, [1, 2, 3]) + >>> container[1] + 2 + """ + return self._container[index] + + def filter(self, predicate: Callable[[T], bool]) -> list[T]: + """Filter container elements. + + Returns a list of elements that satisfy the predicate. + + Parameters + ---------- + predicate : Callable[[T], bool] + The predicate function that returns true for elements to include. + + Returns + ------- + list[T] + A list of elements that satisfy the predicate + + Examples + -------- + >>> container = Container(int, [1, 2, 3, 4, 5]) + >>> container.filter(lambda x: x % 2 == 0) + [2, 4] + """ + return self._container.filter(predicate) + + def transform(self, func: Callable[[T], U]) -> list[U]: + """Transform container elements. + + Returns a list of elements that have been transformed by the provided function. + + Parameters + ---------- + func : Callable[[T], U] + The function to apply to each element + + Returns + ------- + list[U] + A list of transformed elements + + Examples + -------- + >>> container = Container(int, [1, 2, 3, 4, 5]) + >>> container.transform(lambda x: x * 2) + [2, 4, 6, 8, 10] + """ + return self._container.transform(func) + + def __str__(self) -> str: + """String representation. + + Returns + ------- + str + String representation of the container + """ + return str(self._container) + + def __repr__(self) -> str: + """String representation (for debugging). 
+ + Returns + ------- + str + String representation of the container + """ + return repr(self._container) + + +__all__ = [ + 'Container', +] diff --git a/python/src/demo/exceptions.py b/python/src/demo/exceptions.py new file mode 100644 index 0000000..3f06a6e --- /dev/null +++ b/python/src/demo/exceptions.py @@ -0,0 +1,141 @@ +"""Modern Python wrapper for the exceptions module. + +Enhanced error handling with Result types and custom exceptions. +""" + +from enum import Enum +from typing import Any, Callable, Generic, TypeVar + +T = TypeVar('T') +U = TypeVar('U') + + +class ErrorSeverity(Enum): + TRACE = 0 + DEBUG = 1 + INFO = 2 + WARNING = 3 + ERROR = 4 + FATAL = 5 + + +class _PyResult(Generic[T]): + def __init__(self, value: T | None = None, error: Exception | None = None): + self._value = value + self._error = error + + def has_value(self) -> bool: + return self._error is None + + def get_value(self) -> T: + if self._error is not None: + raise self._error + return self._value # type: ignore[return-value] + + def get_exception(self) -> Exception: + if self._error is None: + raise RuntimeError('No exception in successful result') + return self._error + + def __str__(self) -> str: + return f'Ok({self._value})' if self.has_value() else f'Err({self._error})' + + +class Result(Generic[T]): + def __init__(self, inner: _PyResult[T]): + self._inner = inner + + @property + def is_ok(self) -> bool: + return self._inner.has_value() + + @property + def is_err(self) -> bool: + return not self.is_ok + + def unwrap(self) -> T: + return self._inner.get_value() + + def unwrap_or(self, default: T) -> T: + return self._inner.get_value() if self.is_ok else default + + def unwrap_or_else(self, func: Callable[[], T]) -> T: + return self._inner.get_value() if self.is_ok else func() + + def map(self, func: Callable[[T], U]) -> 'Result[U]': + if self.is_ok: + try: + return Result.ok(func(self.unwrap())) + except Exception as e: + return Result.error(str(e)) + return 
Result(_PyResult(error=self._inner.get_exception())) + + def and_then(self, func: Callable[[T], 'Result[U]']) -> 'Result[U]': + if self.is_ok: + return func(self.unwrap()) + return Result(_PyResult(error=self._inner.get_exception())) + + def or_else(self, func: Callable[[Exception], 'Result[T]']) -> 'Result[T]': + if self.is_err: + return func(self._inner.get_exception()) + return self + + @staticmethod + def ok(value: T) -> 'Result[T]': + return Result(_PyResult(value=value)) + + @staticmethod + def error( + message: str, severity: ErrorSeverity = ErrorSeverity.ERROR + ) -> 'Result[Any]': + # Attach severity information via a simple Exception subclass + class _Error(Exception): + def __init__(self, msg: str, sev: ErrorSeverity): + super().__init__(msg) + self.severity = sev + + return Result(_PyResult(error=_Error(message, severity))) + + def __bool__(self) -> bool: + return self.is_ok + + def __str__(self) -> str: + return str(self._inner) + + def __repr__(self) -> str: + return f'Result({self._inner})' + + +def safe_divide(a: float, b: float) -> Result[float]: + if b == 0.0: + return Result.error('Division by zero') + return Result.ok(a / b) + + +def safe_sqrt(x: float) -> Result[float]: + if x < 0: + return Result.error('Square root of negative number') + return Result.ok(x**0.5) + + +def chain_operations( + *operations: Callable[[Any], Result[Any]] +) -> Callable[[Any], Result[Any]]: + def chained(initial_value: Any) -> Result[Any]: + result = Result.ok(initial_value) + for operation in operations: + if result.is_err: + break + result = result.and_then(operation) + return result + + return chained + + +__all__ = [ + 'ErrorSeverity', + 'Result', + 'safe_divide', + 'safe_sqrt', + 'chain_operations', +] diff --git a/python/src/demo/random.py b/python/src/demo/random.py new file mode 100644 index 0000000..1865375 --- /dev/null +++ b/python/src/demo/random.py @@ -0,0 +1,507 @@ +"""Modern Python wrapper for the random module. 
+ +Type-safe random number generation with enhanced Python integration. +""" + +import random as _random +from typing import Generic, TypeVar + +import cpp_features.random as _random_cpp + +T = TypeVar('T') + + +class RandomGenerator: + """Enhanced random generator with additional Python functionality. + + Parameters + ---------- + seed : int, optional + Seed value for reproducible results + """ + + def __init__(self, seed: int | None = None): + if seed is not None: + self._generator = _random_cpp.RandomGenerator(seed) + else: + self._generator = _random_cpp.RandomGenerator() + + def randint(self, min_val: int, max_val: int) -> int: + """Generate random integer in range [min_val, max_val]. + + Parameters + ---------- + min_val : int + Minimum value (inclusive) + max_val : int + Maximum value (inclusive) + + Returns + ------- + int + Random integer in the specified range + """ + return self._generator.generate_int(min_val, max_val) + + def randlong(self, min_val: int, max_val: int) -> int: + """Generate random long integer in range [min_val, max_val]. + + Parameters + ---------- + min_val : int + Minimum value (inclusive) + max_val : int + Maximum value (inclusive) + + Returns + ------- + int + Random long integer in the specified range + """ + # Use 64-bit integer generation via bound template + return self._generator.generate_int(min_val, max_val) + + def random(self) -> float: + """Generate random float in range [0.0, 1.0). + + Returns + ------- + float + Random float between 0.0 and 1.0 + """ + return self._generator.generate_real(0.0, 1.0) + + def uniform(self, min_val: float, max_val: float) -> float: + """Generate random float in range [min_val, max_val). 
+ + Parameters + ---------- + min_val : float + Minimum value (inclusive) + max_val : float + Maximum value (exclusive) + + Returns + ------- + float + Random float in the specified range + """ + return self._generator.generate_real(min_val, max_val) + + def randfloat(self, min_val: float, max_val: float) -> float: + """Generate random float32 value. + + Parameters + ---------- + min_val : float + Minimum value (inclusive) + max_val : float + Maximum value (exclusive) + + Returns + ------- + float + Random float32 in the specified range + """ + # No dedicated float32; reuse double precision + return float(self._generator.generate_real(min_val, max_val)) + + def choice(self, probability: float = 0.5) -> bool: + """Generate random boolean with given probability. + + Parameters + ---------- + probability : float, default=0.5 + Probability of returning True + + Returns + ------- + bool + Random boolean value + """ + return self._generator.generate_bool(probability) + + def normal(self, mean: float = 0.0, stddev: float = 1.0) -> float: + """Generate random value from normal distribution. + + Parameters + ---------- + mean : float, default=0.0 + Mean of the distribution + stddev : float, default=1.0 + Standard deviation of the distribution + + Returns + ------- + float + Random value from normal distribution + """ + return self._generator.generate_normal(mean, stddev) + + def normal_float(self, mean: float = 0.0, stddev: float = 1.0) -> float: + """Generate random float32 from normal distribution. + + Parameters + ---------- + mean : float, default=0.0 + Mean of the distribution + stddev : float, default=1.0 + Standard deviation of the distribution + + Returns + ------- + float + Random float32 from normal distribution + """ + # No dedicated float32 normal; reuse double precision + return float(self._generator.generate_normal(mean, stddev)) + + def integers(self, min_val: int, max_val: int, count: int) -> list[int]: + """Generate list of random integers. 
+ + Parameters + ---------- + min_val : int + Minimum value (inclusive) + max_val : int + Maximum value (inclusive) + count : int + Number of integers to generate + + Returns + ------- + list[int] + List of random integers + """ + return self._generator.generate_int_list(min_val, max_val, count) + + def floats(self, min_val: float, max_val: float, count: int) -> list[float]: + """Generate list of random floats. + + Parameters + ---------- + min_val : float + Minimum value (inclusive) + max_val : float + Maximum value (exclusive) + count : int + Number of floats to generate + + Returns + ------- + list[float] + List of random floats + """ + return self._generator.generate_real_list(min_val, max_val, count) + + def seed(self, seed: int) -> None: + """Set seed for reproducible results. + + Parameters + ---------- + seed : int + Seed value for the generator + """ + self._generator.seed(seed) + + def seed_with_time(self) -> None: + """Seed with current time for non-deterministic results.""" + self._generator.seed_with_time() + + +def shuffle(container: list[T]) -> None: + """Shuffle container in-place. + + Parameters + ---------- + container : list[T] + List to shuffle in place + """ + match container: + case list() if all(isinstance(x, int) for x in container): + _random_cpp.shuffle_container(container) + case list() if all(isinstance(x, float) for x in container): + _random_cpp.shuffle_container(container) + case list() if all(isinstance(x, str) for x in container): + _random_cpp.shuffle_container(container) + case _: + _random.shuffle(container) + + +def sample(population: list[T], k: int) -> list[T]: + """Sample k elements from population without replacement. 
+ + Parameters + ---------- + population : list[T] + Population to sample from + k : int + Number of elements to sample + + Returns + ------- + list[T] + Sampled elements + """ + if k > len(population): + raise ValueError('Sample size cannot exceed population size') + match population: + case list() if all(isinstance(x, int) for x in population): + return _random_cpp.sample_from_range(population, k) + case list() if all(isinstance(x, float) for x in population): + return _random_cpp.sample_from_range(population, k) + case list() if all(isinstance(x, str) for x in population): + return _random_cpp.sample_from_range(population, k) + case _: + return _random.sample(population, k) + + +def sample_string(text: str, k: int) -> list[str]: + """Sample k characters from string. + + Parameters + ---------- + text : str + String to sample from + k : int + Number of characters to sample + + Returns + ------- + list[str] + List of sampled characters + """ + return _random_cpp.sample_from_range(text, k) + + +class Distribution(Generic[T]): + """Base class for probability distributions. + + Parameters + ---------- + generator : RandomGenerator + Random generator to use for sampling + """ + + def __init__(self, generator: RandomGenerator): + self.generator = generator + + def sample(self) -> T: + """Generate a single sample. + + Returns + ------- + T + Single sample from the distribution + """ + raise NotImplementedError + + def samples(self, count: int) -> list[T]: + """Generate multiple samples. + + Parameters + ---------- + count : int + Number of samples to generate + + Returns + ------- + list[T] + List of samples from the distribution + """ + return [self.sample() for _ in range(count)] + + +class UniformInt(Distribution[int]): + """Uniform integer distribution. 
+ + Parameters + ---------- + generator : RandomGenerator + Random generator to use + min_val : int + Minimum value (inclusive) + max_val : int + Maximum value (inclusive) + """ + + def __init__(self, generator: RandomGenerator, min_val: int, max_val: int): + super().__init__(generator) + self.min_val = min_val + self.max_val = max_val + + def sample(self) -> int: + """Generate random integer. + + Returns + ------- + int + Random integer from uniform distribution + """ + return self.generator.randint(self.min_val, self.max_val) + + +class UniformFloat(Distribution[float]): + """Uniform float distribution. + + Parameters + ---------- + generator : RandomGenerator + Random generator to use + min_val : float + Minimum value (inclusive) + max_val : float + Maximum value (exclusive) + """ + + def __init__(self, generator: RandomGenerator, min_val: float, max_val: float): + super().__init__(generator) + self.min_val = min_val + self.max_val = max_val + + def sample(self) -> float: + """Generate random float. + + Returns + ------- + float + Random float from uniform distribution + """ + return self.generator.uniform(self.min_val, self.max_val) + + +class Normal(Distribution[float]): + """Normal (Gaussian) distribution. + + Parameters + ---------- + generator : RandomGenerator + Random generator to use + mean : float, default=0.0 + Mean of the distribution + stddev : float, default=1.0 + Standard deviation of the distribution + """ + + def __init__( + self, generator: RandomGenerator, mean: float = 0.0, stddev: float = 1.0 + ): + super().__init__(generator) + self.mean = mean + self.stddev = stddev + + def sample(self) -> float: + """Generate random value from normal distribution. 
+ + Returns + ------- + float + Random value from normal distribution + """ + return self.generator.normal(self.mean, self.stddev) + + +# Convenience functions +_default_generator = RandomGenerator() + + +def randint(min_val: int, max_val: int) -> int: + """Generate random integer using default generator. + + Parameters + ---------- + min_val : int + Minimum value (inclusive) + max_val : int + Maximum value (inclusive) + + Returns + ------- + int + Random integer + """ + return _default_generator.randint(min_val, max_val) + + +def random() -> float: + """Generate random float using default generator. + + Returns + ------- + float + Random float between 0.0 and 1.0 + """ + return _default_generator.random() + + +def uniform(min_val: float, max_val: float) -> float: + """Generate random float in range using default generator. + + Parameters + ---------- + min_val : float + Minimum value (inclusive) + max_val : float + Maximum value (exclusive) + + Returns + ------- + float + Random float in range + """ + return _default_generator.uniform(min_val, max_val) + + +def choice(probability: float = 0.5) -> bool: + """Generate random boolean using default generator. + + Parameters + ---------- + probability : float, default=0.5 + Probability of returning True + + Returns + ------- + bool + Random boolean value + """ + return _default_generator.choice(probability) + + +def normal(mean: float = 0.0, stddev: float = 1.0) -> float: + """Generate normal random value using default generator. 
+ + Parameters + ---------- + mean : float, default=0.0 + Mean of the distribution + stddev : float, default=1.0 + Standard deviation of the distribution + + Returns + ------- + float + Random value from normal distribution + """ + return _default_generator.normal(mean, stddev) + + +# Re-export C++ class +CppRandomGenerator = _random_cpp.RandomGenerator + +__all__ = [ + 'RandomGenerator', + 'Distribution', + 'UniformInt', + 'UniformFloat', + 'Normal', + 'CppRandomGenerator', + 'shuffle', + 'sample', + 'sample_string', + 'randint', + 'random', + 'uniform', + 'choice', + 'normal', +] diff --git a/python/src/demo/shapes.py b/python/src/demo/shapes.py new file mode 100644 index 0000000..29f079e --- /dev/null +++ b/python/src/demo/shapes.py @@ -0,0 +1,183 @@ +"""Modern Python wrapper for the shapes module. + +Demonstrates Python 3.13 features like pattern matching, enhanced error handling, +and modern type hints. +""" + +from dataclasses import dataclass +from enum import Enum, auto +from typing import Any, Protocol + +import cpp_features.shapes as _shapes + + +class ShapeProtocol(Protocol): + """Protocol defining the shape interface.""" + + def get_area(self) -> float: + """Get the area of the shape.""" + ... + + def get_perimeter(self) -> float: + """Get the perimeter of the shape.""" + ... + + def draw(self) -> None: + """Draw the shape.""" + ... + + def get_name(self) -> str: + """Get the name of the shape.""" + ... + + +class ShapeType(Enum): + """Enumeration of available shape types.""" + + CIRCLE = auto() + RECTANGLE = auto() + SQUARE = auto() + + +@dataclass(frozen=True) +class ShapeMetrics: + """Immutable data class for shape measurements. + + Parameters + ---------- + area : float + The area of the shape + perimeter : float + The perimeter of the shape + name : str + The name of the shape + """ + + area: float + perimeter: float + name: str + + @property + def aspect_ratio(self) -> float: + """Calculate aspect ratio for applicable shapes. 
+ + Returns + ------- + float + The aspect ratio (area / perimeter^2) + """ + return self.area / (self.perimeter**2) if self.perimeter > 0 else 0.0 + + +def create_shape(shape_type: str | ShapeType, *args: float) -> _shapes.Shape: + """Factory function using modern Python 3.13 pattern matching. + + Parameters + ---------- + shape_type : str or ShapeType + Type of shape to create + *args : float + Arguments for shape construction + + Returns + ------- + Shape + Created shape instance + + Raises + ------ + ValueError + For invalid shape types or arguments + """ + match shape_type: + case ShapeType.CIRCLE | 'circle': + if len(args) != 1: + raise ValueError('Circle requires exactly 1 argument (radius)') + return _shapes.Circle(args[0]) + + case ShapeType.RECTANGLE | 'rectangle': + if len(args) != 2: + raise ValueError( + 'Rectangle requires exactly 2 arguments (width, height)' + ) + return _shapes.Rectangle(args[0], args[1]) + + case ShapeType.SQUARE | 'square': + if len(args) != 1: + raise ValueError('Square requires exactly 1 argument (side)') + return _shapes.Rectangle(args[0]) + + case _: + raise ValueError(f'Unknown shape type: {shape_type}') + + +def analyze_shape(shape: ShapeProtocol) -> ShapeMetrics: + """Analyze a shape and return comprehensive metrics. + + Parameters + ---------- + shape : ShapeProtocol + The shape to analyze + + Returns + ------- + ShapeMetrics + Comprehensive metrics for the shape + """ + return ShapeMetrics( + area=shape.get_area(), perimeter=shape.get_perimeter(), name=shape.get_name() + ) + + +def compare_shapes(*shapes: ShapeProtocol) -> dict[str, Any]: + """Compare multiple shapes using modern Python features. 
+ + Parameters + ---------- + *shapes : ShapeProtocol + Variable number of shapes to compare + + Returns + ------- + dict[str, Any] + Dictionary with comparison results and statistics + + Raises + ------ + ValueError + If no shapes are provided + """ + if not shapes: + raise ValueError('At least one shape is required') + + metrics = [analyze_shape(shape) for shape in shapes] + + return { + 'count': len(shapes), + 'total_area': sum(m.area for m in metrics), + 'total_perimeter': sum(m.perimeter for m in metrics), + 'largest_by_area': max(metrics, key=lambda m: m.area), + 'smallest_by_area': min(metrics, key=lambda m: m.area), + 'average_area': sum(m.area for m in metrics) / len(metrics), + 'metrics': metrics, + } + + +# Re-export the C++ classes for direct use +Circle = _shapes.Circle +Rectangle = _shapes.Rectangle +RectangleDimensions = _shapes.RectangleDimensions +Shape = _shapes.Shape + +__all__ = [ + 'Shape', + 'Circle', + 'Rectangle', + 'RectangleDimensions', + 'ShapeProtocol', + 'ShapeType', + 'ShapeMetrics', + 'create_shape', + 'analyze_shape', + 'compare_shapes', +] diff --git a/python/src/demo/timing.py b/python/src/demo/timing.py new file mode 100644 index 0000000..37cdf11 --- /dev/null +++ b/python/src/demo/timing.py @@ -0,0 +1,307 @@ +"""Modern Python wrapper for the timing module. + +High-resolution timing and benchmarking utilities. +""" + +import statistics +from contextlib import contextmanager +from typing import Any, Callable, ContextManager + +import cpp_features.timing as _timing + + +class Timer: + """Enhanced timer with additional Python functionality.""" + + def __init__(self): + self._timer = _timing.Timer() + + def start(self) -> None: + """Start the timer.""" + self._timer.start() + + def stop(self) -> None: + """Stop the timer.""" + self._timer.stop() + + def reset(self) -> None: + """Reset the timer.""" + self._timer.reset() + + @property + def elapsed_ns(self) -> int: + """Get elapsed time in nanoseconds. 
+ + Returns + ------- + int + Elapsed time in nanoseconds + """ + return self._timer.get_elapsed_ns() + + @property + def elapsed_us(self) -> int: + """Get elapsed time in microseconds. + + Returns + ------- + int + Elapsed time in microseconds + """ + return self._timer.get_elapsed_us() + + @property + def elapsed_ms(self) -> int: + """Get elapsed time in milliseconds. + + Returns + ------- + int + Elapsed time in milliseconds + """ + return self._timer.get_elapsed_ms() + + @property + def elapsed_s(self) -> int: + """Get elapsed time in seconds. + + Returns + ------- + int + Elapsed time in seconds + """ + return self._timer.get_elapsed_s() + + @property + def elapsed_string(self) -> str: + """Get human-readable elapsed time. + + Returns + ------- + str + Human-readable elapsed time string + """ + return self._timer.get_elapsed_string() + + def __str__(self) -> str: + """String representation. + + Returns + ------- + str + Human-readable elapsed time + """ + return self.elapsed_string + + def __enter__(self) -> 'Timer': + """Context manager entry. + + Returns + ------- + Timer + This timer instance + """ + self.start() + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + """Context manager exit. + + Parameters + ---------- + exc_type : Any + Exception type (if any) + exc_val : Any + Exception value (if any) + exc_tb : Any + Exception traceback (if any) + """ + self.stop() + + +@contextmanager +def measure_time(name: str | None = None) -> ContextManager[Timer]: + """Context manager for measuring execution time. + + Parameters + ---------- + name : str, optional + Name to display during timing + + Yields + ------ + Timer + Timer instance for the measurement + + Examples + -------- + >>> with measure_time('database query') as timer: + ... # Do work + ... 
pass + >>> print(f'Elapsed: {timer.elapsed_string}') + """ + timer = Timer() + if name: + print(f'Starting: {name}') + try: + yield timer + finally: + if name: + print(f'Finished {name}: {timer.elapsed_string}') + + +class Benchmark: + """Benchmarking utilities with statistical analysis. + + Parameters + ---------- + name : str, default='Benchmark' + Name of the benchmark + """ + + def __init__(self, name: str = 'Benchmark'): + self.name = name + self.measurements = [] + + def run(self, func: Callable[[], Any], iterations: int = 10) -> dict[str, Any]: + """Run benchmark for specified iterations. + + Parameters + ---------- + func : Callable[[], Any] + Function to benchmark + iterations : int, default=10 + Number of iterations to run + + Returns + ------- + dict[str, Any] + Statistical summary of measurements + """ + self.measurements.clear() + + for _ in range(iterations): + elapsed_ns = _timing.time_function(func) + self.measurements.append(elapsed_ns) + + return self.get_statistics() + + def get_statistics(self) -> dict[str, Any]: + """Get statistical summary of measurements. 
+ + Returns + ------- + dict[str, Any] + Dictionary containing statistical measures + """ + if not self.measurements: + return {} + + measurements_ms = [ns / 1_000_000 for ns in self.measurements] + + return { + 'name': self.name, + 'iterations': len(self.measurements), + 'total_time_ms': sum(measurements_ms), + 'mean_ms': statistics.mean(measurements_ms), + 'median_ms': statistics.median(measurements_ms), + 'min_ms': min(measurements_ms), + 'max_ms': max(measurements_ms), + 'stdev_ms': ( + statistics.stdev(measurements_ms) if len(measurements_ms) > 1 else 0.0 + ), + 'measurements_ns': self.measurements.copy(), + 'human_readable': { + 'mean': _timing.to_human_readable( + int(statistics.mean(self.measurements)) + ), + 'min': _timing.to_human_readable(min(self.measurements)), + 'max': _timing.to_human_readable(max(self.measurements)), + }, + } + + def compare_with(self, other: 'Benchmark') -> dict[str, Any]: + """Compare this benchmark with another. + + Parameters + ---------- + other : Benchmark + Other benchmark to compare with + + Returns + ------- + dict[str, Any] + Comparison results + + Raises + ------ + ValueError + If either benchmark has no measurements + """ + if not self.measurements or not other.measurements: + raise ValueError('Both benchmarks must have measurements') + + self_mean = statistics.mean(self.measurements) + other_mean = statistics.mean(other.measurements) + + return { + 'benchmarks': [self.name, other.name], + 'ratio': self_mean / other_mean, + 'faster': other.name if self_mean > other_mean else self.name, + 'speedup': max(self_mean, other_mean) / min(self_mean, other_mean), + 'difference_ms': abs(self_mean - other_mean) / 1_000_000, + } + + +def benchmark_function( + func: Callable[[], Any], iterations: int = 10, name: str | None = None +) -> dict[str, Any]: + """Benchmark a function and return statistics. 
+ + Parameters + ---------- + func : Callable[[], Any] + Function to benchmark + iterations : int, default=10 + Number of iterations to run + name : str, optional + Name for the benchmark + + Returns + ------- + dict[str, Any] + Statistical summary of the benchmark + + Examples + -------- + >>> stats = benchmark_function(lambda: expensive_operation(), iterations=5) + >>> print(f"Mean time: {stats['human_readable']['mean']}") + """ + bench = Benchmark(name or func.__name__) + return bench.run(func, iterations) + + +def time_function(func: Callable[[], Any]) -> str: + """Time a single function execution. + + Parameters + ---------- + func : Callable[[], Any] + Function to time + + Returns + ------- + str + Human-readable elapsed time + """ + elapsed_ns = _timing.time_function(func) + return _timing.to_human_readable(elapsed_ns) + + +__all__ = [ + 'Timer', + 'Benchmark', + 'measure_time', + 'benchmark_function', + 'time_function', +] diff --git a/python/tests/test_algorithms.py b/python/tests/test_algorithms.py new file mode 100644 index 0000000..5914209 --- /dev/null +++ b/python/tests/test_algorithms.py @@ -0,0 +1,332 @@ +"""Tests for the algorithms module.""" + +from demo.algorithms import ( + count_if, + find_min_max, + pipeline, + sort_inplace, + transform_to_list, +) +from demo.containers import Container + + +class TestSortInplace: + """Test in-place sorting functionality.""" + + def test_sort_empty(self) -> None: + """Test sorting empty list.""" + data: list[int] = [] + sort_inplace(data) + assert data == [] + + def test_sort_single_element(self) -> None: + """Test sorting single element list.""" + data = [42] + sort_inplace(data) + assert data == [42] + + def test_sort_integers(self) -> None: + """Test sorting integers in ascending order.""" + data = [42, 17, 89, 3, 56, 23, 78, 12, 95, 34] + sort_inplace(data) + assert data == [3, 12, 17, 23, 34, 42, 56, 78, 89, 95] + + def test_sort_strings(self) -> None: + """Test sorting strings lexicographically.""" + 
data = ['cherry', 'banana', 'elderberry', 'date', 'apple'] + sort_inplace(data) + assert data == ['apple', 'banana', 'cherry', 'date', 'elderberry'] + + def test_sort_already_sorted(self) -> None: + """Test sorting already sorted list.""" + data = [1, 2, 3, 4, 5] + sort_inplace(data) + assert data == [1, 2, 3, 4, 5] + + def test_sort_reverse_sorted(self) -> None: + """Test sorting reverse sorted list.""" + data = [5, 4, 3, 2, 1] + sort_inplace(data) + assert data == [1, 2, 3, 4, 5] + + def test_sort_with_duplicates(self) -> None: + """Test sorting list with duplicates.""" + data = [3, 1, 4, 1, 5, 9, 2, 6, 5, 3] + sort_inplace(data) + assert data == [1, 1, 2, 3, 3, 4, 5, 5, 6, 9] + + def test_sort_container(self) -> None: + """Test sorting container.""" + data = Container(int, [3, 1, 4, 1, 5, 9, 2, 6, 5, 3]) + sort_inplace(data) + assert list(data) == [1, 1, 2, 3, 3, 4, 5, 5, 6, 9] + + +class TestCountIf: + """Test counting with predicates.""" + + def test_count_empty(self) -> None: + """Test counting in empty list.""" + data: list[int] = [] + result = count_if(data, lambda x: x > 0) + assert result == 0 + + def test_count_even_numbers(self) -> None: + """Test counting even numbers.""" + data = [1, 2, 3, 4, 5, 6, 7, 8, 9] + even_count = count_if(data, lambda x: x % 2 == 0) + assert even_count == 4 + + def test_count_odd_numbers(self) -> None: + """Test counting odd numbers.""" + data = [1, 2, 3, 4, 5, 6, 7, 8, 9] + odd_count = count_if(data, lambda x: x % 2 == 1) + assert odd_count == 5 + + def test_count_greater_than_threshold(self) -> None: + """Test counting numbers greater than threshold.""" + data = [10, 25, 30, 45, 50, 75, 80, 95] + large_count = count_if(data, lambda x: x > 50) + assert large_count == 3 + + def test_count_no_matches(self) -> None: + """Test counting with no matches.""" + data = [1, 3, 5, 7, 9] + even_count = count_if(data, lambda x: x % 2 == 0) + assert even_count == 0 + + def test_count_all_matches(self) -> None: + """Test counting with all 
matches.""" + data = [2, 4, 6, 8, 10] + even_count = count_if(data, lambda x: x % 2 == 0) + assert even_count == 5 + + def test_count_strings_by_length(self) -> None: + """Test counting strings by length.""" + data = ['a', 'hello', 'world', 'test', 'algorithm'] + long_words = count_if(data, lambda word: len(word) > 4) + assert long_words == 3 + + def test_count_container(self) -> None: + """Test counting in container.""" + container = Container(int, [1, 2, 3, 4, 5, 6, 7, 8, 9]) + even_count = count_if(container, lambda x: x % 2 == 0) + assert even_count == 4 + + def test_count_tuple(self) -> None: + """Test counting in tuple.""" + data = (1, 2, 3, 4, 5, 6, 7, 8, 9) + even_count = count_if(data, lambda x: x % 2 == 0) + assert even_count == 4 + + def test_count_generator(self) -> None: + """Test counting in generator.""" + data = (x for x in range(1, 10)) + even_count = count_if(data, lambda x: x % 2 == 0) + assert even_count == 4 + + +class TestTransformToList: + """Test transforming to list.""" + + def test_transform_empty(self) -> None: + """Test transforming empty list.""" + data: list[int] = [] + result = transform_to_list(data, lambda x: x * 2) + assert result == [] + + def test_square_numbers(self) -> None: + """Test squaring numbers.""" + data = [1, 2, 3, 4, 5] + squares = transform_to_list(data, lambda x: x * x) + assert squares == [1, 4, 9, 16, 25] + + def test_double_numbers(self) -> None: + """Test doubling numbers.""" + data = [10, 20, 30, 40, 50] + doubled = transform_to_list(data, lambda x: x * 2) + assert doubled == [20, 40, 60, 80, 100] + + def test_convert_to_string(self) -> None: + """Test converting numbers to strings.""" + data = [1, 2, 3, 4, 5] + strings = transform_to_list(data, str) + assert strings == ['1', '2', '3', '4', '5'] + + def test_convert_to_int(self) -> None: + """Test converting strings to ints.""" + data = ['1', '2', '3', '4', '5'] + ints = transform_to_list(data, int) + assert ints == [1, 2, 3, 4, 5] + + def 
test_convert_to_float(self) -> None: + """Test converting strings to floats.""" + data = ['1.1', '2.2', '3.3', '4.4', '5.5'] + floats = transform_to_list(data, float) + assert floats == [1.1, 2.2, 3.3, 4.4, 5.5] + + def test_string_to_uppercase(self) -> None: + """Test converting strings to uppercase.""" + data = ['hello', 'world', 'test'] + uppercased = transform_to_list(data, str.upper) + assert uppercased == ['HELLO', 'WORLD', 'TEST'] + + def test_transform_container(self) -> None: + """Test transforming container.""" + container = Container(int, [1, 2, 3, 4, 5]) + square_strings = transform_to_list(container, lambda x: str(x * x)) + assert square_strings == ['1', '4', '9', '16', '25'] + + def test_transform_tuple(self) -> None: + """Test transforming tuple.""" + data = (1.1, 2.2, 3.3, 4.4, 5.5) + doubled_ints = transform_to_list(data, lambda x: int(x * 2)) + assert doubled_ints == [2, 4, 6, 8, 11] + + def test_transform_generator(self) -> None: + """Test transforming generator.""" + data = ['apple', 'banana', 'cherry', 'date', 'elderberry'] + lengths = transform_to_list(data, len) + assert lengths == [5, 6, 6, 4, 10] + + +class TestFindMinMax: + """Test finding minimum and maximum values.""" + + def test_find_min_max_integers(self) -> None: + """Test finding min/max in integer sequence.""" + data = [3, 1, 4, 1, 5, 9, 2, 6] + min_val, max_val = find_min_max(data) + assert min_val == 1 + assert max_val == 9 + + def test_find_min_max_single_element(self) -> None: + """Test finding min/max in single element sequence.""" + data = [42] + min_val, max_val = find_min_max(data) + assert min_val == 42 + assert max_val == 42 + + def test_find_min_max_same_elements(self) -> None: + """Test finding min/max with all same elements.""" + data = [5, 5, 5, 5, 5] + min_val, max_val = find_min_max(data) + assert min_val == 5 + assert max_val == 5 + + def test_find_min_max_negative_numbers(self) -> None: + """Test finding min/max in negative numbers.""" + data = [-10, -5, -20, -1, 
-15] + min_val, max_val = find_min_max(data) + assert min_val == -20 + assert max_val == -1 + + def test_find_min_max_mixed_numbers(self) -> None: + """Test finding min/max in mixed positive/negative.""" + data = [-5, 10, -15, 20, 0] + min_val, max_val = find_min_max(data) + assert min_val == -15 + assert max_val == 20 + + def test_find_min_max_strings(self) -> None: + """Test finding min/max in string sequence.""" + data = ['cherry', 'banana', 'elderberry', 'date', 'apple'] + min_word, max_word = find_min_max(data) + assert min_word == 'apple' + assert max_word == 'elderberry' + + def test_find_min_max_container(self) -> None: + """Test finding min/max in container.""" + container = Container(int, [3, 1, 4, 1, 5, 9, 2, 6]) + min_val, max_val = find_min_max(container) + assert min_val == 1 + assert max_val == 9 + + def test_find_min_max_tuple(self) -> None: + """Test finding min/max in tuple.""" + data = (3, 1, 4, 1, 5, 9, 2, 6) + min_val, max_val = find_min_max(data) + assert min_val == 1 + assert max_val == 9 + + +class TestPipeline: + """Test function pipeline functionality.""" + + def test_simple_pipeline(self) -> None: + """Test simple function pipeline.""" + process = pipeline( + lambda data: [x * 2 for x in data], + lambda data: [x for x in data if x > 5], + sum, + ) + + result = process([1, 2, 3, 4, 5]) + # [1, 2, 3, 4, 5] -> [2, 4, 6, 8, 10] -> [6, 8, 10] -> 24 + assert result == 24 + + def test_string_pipeline(self) -> None: + """Test pipeline with string operations.""" + process = pipeline( + str.upper, + lambda s: s.replace(' ', '_'), + lambda s: f'PREFIX_{s}', + ) + + result = process('hello world') + # 'hello world' -> 'HELLO WORLD' -> 'HELLO_WORLD' -> 'PREFIX_HELLO_WORLD' + assert result == 'PREFIX_HELLO_WORLD' + + def test_empty_pipeline(self) -> None: + """Test empty pipeline returns input unchanged.""" + process = pipeline() + assert process([1, 2, 3, 4, 5]) == [1, 2, 3, 4, 5] + + def test_single_function_pipeline(self) -> None: + """Test pipeline 
with single function.""" + process = pipeline(lambda x: x * 2) + assert process(5) == 10 + + +class TestAlgorithmCombinations: + """Test combining different algorithms.""" + + def test_transform_then_find_min_max(self) -> None: + """Test transforming then finding min/max.""" + data = [-3, 2, -1, 4, -5] + + process = pipeline( + lambda data: [x * x for x in data], + find_min_max, + ) + min_val, max_val = process(data) + # [-3, 2, -1, 4, -5] -> [9, 4, 1, 16, 25] -> (1, 25) + assert min_val == 1 + assert max_val == 25 + + def test_sort_transform_count_pipeline(self) -> None: + """Test full pipeline: sort, transform, count.""" + data = [3, 1, 4, 1, 5, 9, 2, 6, 5, 3] + + sort_inplace(data) + # [1, 1, 2, 3, 3, 4, 5, 5, 6, 9] + squares = transform_to_list(data, lambda x: x * x) + # [1, 1, 4, 9, 9, 16, 25, 25, 36, 81] + large_squares = count_if(squares, lambda x: x > 10) + assert large_squares == 5 # 16, 25, 25, 36, 81 + + def test_multiple_containers_algorithms(self) -> None: + """Test algorithms on multiple containers.""" + container1 = Container(int, [1, 3, 5, 7, 9]) + container2 = Container(int, [2, 4, 6, 8, 10]) + + squares1 = transform_to_list(container1, lambda x: x * x) + # [1, 9, 25, 49, 81] + squares2 = transform_to_list(container2, lambda x: x * x) + # [4, 16, 36, 64, 100] + combined = squares1 + squares2 + # [1, 9, 25, 49, 81, 4, 16, 36, 64, 100] + min_val, max_val = find_min_max(combined) + # (1, 100) + assert min_val == 1 + assert max_val == 100 diff --git a/python/tests/test_containers.py b/python/tests/test_containers.py new file mode 100644 index 0000000..d608747 --- /dev/null +++ b/python/tests/test_containers.py @@ -0,0 +1,279 @@ +"""Tests for Python containers module. + +Comprehensive tests following C++ test patterns for container functionality. 
+""" + +import pytest + +from python import containers + + +class TestContainer: + """Test Container creation and basic operations.""" + + def test_int_container_creation(self) -> None: + """Test creating integer container.""" + container = containers.Container(int, [1, 2, 3, 4, 5]) + + assert len(container) == 5 + assert bool(container) is True + assert container[0] == 1 + assert container[4] == 5 + + def test_float_container_creation(self) -> None: + """Test creating float container.""" + container = containers.Container(float, [1.5, 2.5, 3.5]) + + assert len(container) == 3 + assert container[0] == 1.5 + assert container[2] == 3.5 + + def test_string_container_creation(self) -> None: + """Test creating string container.""" + container = containers.Container(str, ['hello', 'world']) + + assert len(container) == 2 + assert container[0] == 'hello' + assert container[1] == 'world' + + def test_empty_container(self) -> None: + """Test empty container behavior.""" + container = containers.Container(int) + + assert len(container) == 0 + assert bool(container) is False + + def test_unsupported_type(self) -> None: + """Test creating container with unsupported type.""" + with pytest.raises(ValueError, match='Unsupported container type'): + containers.Container(list, []) + + +class TestContainerOperations: + """Test container operations.""" + + def test_add_items(self) -> None: + """Test adding items to container.""" + container = containers.Container(int, [1, 2, 3]) + container.add(4) + container.add(5) + + assert len(container) == 5 + assert 4 in list(container) + assert 5 in list(container) + + def test_remove_items(self) -> None: + """Test removing items from container.""" + container = containers.Container(int, [1, 2, 3, 2, 4]) + removed_count = container.remove(2) + + assert removed_count == 2 # Should remove all occurrences + assert 2 not in list(container) + assert len(container) == 3 + + def test_iteration(self) -> None: + """Test container iteration.""" + data = 
[1, 3, 5, 7, 9] + container = containers.Container(int, data) + + result = list(container) + assert result == data + + def test_indexing(self) -> None: + """Test container indexing.""" + container = containers.Container(str, ['a', 'b', 'c', 'd']) + + assert container[0] == 'a' + assert container[1] == 'b' + assert container[3] == 'd' + + def test_filter_operation(self) -> None: + """Test filtering container elements.""" + container = containers.Container(int, [1, 2, 3, 4, 5, 6, 7, 8, 9]) + + even_numbers = container.filter(lambda x: x % 2 == 0) + assert even_numbers == [2, 4, 6, 8] + + large_numbers = container.filter(lambda x: x > 5) + assert large_numbers == [6, 7, 8, 9] + + def test_transform_operation(self) -> None: + """Test transforming container elements.""" + container = containers.Container(int, [1, 2, 3, 4, 5]) + + squares = container.transform(lambda x: x * x) + assert squares == [1, 4, 9, 16, 25] + + strings = container.transform(str) + assert strings == ['1', '2', '3', '4', '5'] + + def test_string_representation(self) -> None: + """Test container string representation.""" + container = containers.Container(int, [1, 2, 3]) + repr_str = repr(container) + + assert 'Container[int]' in repr_str + assert '[1, 2, 3]' in repr_str + + +class TestContainerFactory: + """Test container factory function.""" + + def test_create_int_container(self) -> None: + """Test creating integer container via factory.""" + container = containers.create_container([1, 2, 3, 4]) + + assert len(container) == 4 + assert container[0] == 1 + assert repr(container).startswith('Container[int]') + + def test_create_float_container(self) -> None: + """Test creating float container via factory.""" + container = containers.create_container([1.5, 2.5, 3.5]) + + assert len(container) == 3 + assert container[0] == 1.5 + assert repr(container).startswith('Container[float]') + + def test_create_string_container(self) -> None: + """Test creating string container via factory.""" + container = 
containers.create_container(['hello', 'world']) + + assert len(container) == 2 + assert container[0] == 'hello' + assert repr(container).startswith('Container[str]') + + def test_empty_data_error(self) -> None: + """Test factory with empty data raises error.""" + with pytest.raises(ValueError, match='Cannot determine type from empty data'): + containers.create_container([]) + + def test_unsupported_type_error(self) -> None: + """Test factory with unsupported type raises error.""" + with pytest.raises(ValueError, match='Unsupported data type'): + containers.create_container([{'key': 'value'}]) + + +class TestContainerTypes: + """Test different container types.""" + + def test_int_container_operations(self) -> None: + """Test operations on integer container.""" + container = containers.Container(int, [5, 2, 8, 1, 9]) + + # Test filtering + large_nums = container.filter(lambda x: x > 5) + assert set(large_nums) == {8, 9} + + # Test transformation + doubled = container.transform(lambda x: x * 2) + assert doubled == [10, 4, 16, 2, 18] + + def test_float_container_operations(self) -> None: + """Test operations on float container.""" + container = containers.Container(float, [1.1, 2.2, 3.3, 4.4]) + + # Test filtering + large_nums = container.filter(lambda x: x > 2.5) + assert large_nums == [3.3, 4.4] + + # Test transformation + rounded = container.transform(lambda x: round(x)) + assert rounded == [1, 2, 3, 4] + + def test_string_container_operations(self) -> None: + """Test operations on string container.""" + container = containers.Container(str, ['apple', 'banana', 'cherry', 'date']) + + # Test filtering + long_words = container.filter(lambda x: len(x) > 5) + assert long_words == ['banana', 'cherry'] + + # Test transformation + lengths = container.transform(len) + assert lengths == [5, 6, 6, 4] + + uppercase = container.transform(str.upper) + assert uppercase == ['APPLE', 'BANANA', 'CHERRY', 'DATE'] + + +class TestContainerEdgeCases: + """Test container edge cases and 
error conditions.""" + + def test_container_with_duplicates(self) -> None: + """Test container behavior with duplicate values.""" + container = containers.Container(int, [1, 2, 2, 3, 2, 4]) + + assert len(container) == 6 + removed = container.remove(2) + assert removed == 3 # All occurrences removed + assert len(container) == 3 + + def test_container_boundary_operations(self) -> None: + """Test container operations at boundaries.""" + container = containers.Container(int, [42]) + + # Single element container + assert len(container) == 1 + assert container[0] == 42 + assert list(container) == [42] + + # Remove the only element + removed = container.remove(42) + assert removed == 1 + assert len(container) == 0 + assert bool(container) is False + + def test_filter_no_matches(self) -> None: + """Test filtering with no matches.""" + container = containers.Container(int, [1, 3, 5, 7, 9]) + + even_numbers = container.filter(lambda x: x % 2 == 0) + assert even_numbers == [] + + def test_filter_all_matches(self) -> None: + """Test filtering where all elements match.""" + container = containers.Container(int, [2, 4, 6, 8, 10]) + + even_numbers = container.filter(lambda x: x % 2 == 0) + assert even_numbers == [2, 4, 6, 8, 10] + + +class TestContainerIntegration: + """Integration tests for container functionality.""" + + def test_container_pipeline(self) -> None: + """Test chaining container operations.""" + container = containers.Container(int, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + + # Filter even numbers, then square them + even_numbers = container.filter(lambda x: x % 2 == 0) + squares = [x * x for x in even_numbers] + + assert even_numbers == [2, 4, 6, 8, 10] + assert squares == [4, 16, 36, 64, 100] + + def test_multiple_containers_interaction(self) -> None: + """Test interaction between multiple containers.""" + int_container = containers.Container(int, [1, 2, 3]) + str_container = containers.Container(str, ['a', 'b', 'c']) + + # Transform int container to strings and 
compare + int_as_strings = int_container.transform(str) + str_lengths = str_container.transform(len) + + assert int_as_strings == ['1', '2', '3'] + assert str_lengths == [1, 1, 1] + + def test_container_factory_vs_direct_creation(self) -> None: + """Test that factory and direct creation produce equivalent results.""" + data = [1, 2, 3, 4, 5] + + factory_container = containers.create_container(data) + direct_container = containers.Container(int, data) + + assert len(factory_container) == len(direct_container) + assert list(factory_container) == list(direct_container) + assert factory_container.filter(lambda x: x > 3) == direct_container.filter( + lambda x: x > 3 + ) diff --git a/python/tests/test_exceptions.py b/python/tests/test_exceptions.py new file mode 100644 index 0000000..931c84f --- /dev/null +++ b/python/tests/test_exceptions.py @@ -0,0 +1,410 @@ +"""Tests for Python exceptions module. + +Comprehensive tests following C++ test patterns for exception handling functionality. +""" + +import pytest + +from python import exceptions + + +class TestErrorSeverity: + """Test ErrorSeverity enum.""" + + def test_error_severity_values(self) -> None: + """Test all error severity levels.""" + assert exceptions.ErrorSeverity.TRACE is not None + assert exceptions.ErrorSeverity.DEBUG is not None + assert exceptions.ErrorSeverity.INFO is not None + assert exceptions.ErrorSeverity.WARNING is not None + assert exceptions.ErrorSeverity.ERROR is not None + assert exceptions.ErrorSeverity.FATAL is not None + + def test_error_severity_ordering(self) -> None: + """Test error severity ordering if applicable.""" + # Test that we can access all values + severities = [ + exceptions.ErrorSeverity.TRACE, + exceptions.ErrorSeverity.DEBUG, + exceptions.ErrorSeverity.INFO, + exceptions.ErrorSeverity.WARNING, + exceptions.ErrorSeverity.ERROR, + exceptions.ErrorSeverity.FATAL, + ] + assert len(severities) == 6 + + +class TestResult: + """Test Result wrapper functionality.""" + + def 
test_result_ok_creation(self) -> None: + """Test creating successful Result.""" + result = exceptions.Result.ok(42) + + assert result.is_ok + assert not result.is_err + assert result.unwrap() == 42 + + def test_result_ok_different_types(self) -> None: + """Test creating successful Results with different types.""" + int_result = exceptions.Result.ok(123) + float_result = exceptions.Result.ok(3.14) + str_result = exceptions.Result.ok('hello') + + assert int_result.unwrap() == 123 + assert float_result.unwrap() == 3.14 + assert str_result.unwrap() == 'hello' + + def test_result_error_creation(self) -> None: + """Test creating error Result.""" + result = exceptions.Result.error('Something went wrong') + + assert not result.is_ok + assert result.is_err + + def test_result_unwrap_error(self) -> None: + """Test unwrapping error Result raises exception.""" + result = exceptions.Result.error('Error message') + + with pytest.raises(Exception): + result.unwrap() + + def test_result_unwrap_or(self) -> None: + """Test unwrap_or with default values.""" + ok_result = exceptions.Result.ok(42) + err_result = exceptions.Result.error('Error') + + assert ok_result.unwrap_or(0) == 42 + assert err_result.unwrap_or(0) == 0 + + def test_result_unwrap_or_else(self) -> None: + """Test unwrap_or_else with function.""" + ok_result = exceptions.Result.ok(42) + err_result = exceptions.Result.error('Error') + + assert ok_result.unwrap_or_else(lambda: 0) == 42 + assert err_result.unwrap_or_else(lambda: -1) == -1 + + def test_result_map_success(self) -> None: + """Test mapping successful Result.""" + result = exceptions.Result.ok(5) + mapped = result.map(lambda x: x * 2) + + assert mapped.is_ok + assert mapped.unwrap() == 10 + + def test_result_map_error(self) -> None: + """Test mapping error Result.""" + result = exceptions.Result.error('Error') + mapped = result.map(lambda x: x * 2) + + assert mapped.is_err + + def test_result_and_then_success(self) -> None: + """Test chaining successful 
Results.""" + + def double_if_positive(x: int) -> exceptions.Result[int]: + if x > 0: + return exceptions.Result.ok(x * 2) + else: + return exceptions.Result.error('Negative number') + + result = exceptions.Result.ok(5) + chained = result.and_then(double_if_positive) + + assert chained.is_ok + assert chained.unwrap() == 10 + + def test_result_and_then_error(self) -> None: + """Test chaining with error Result.""" + + def double_if_positive(x: int) -> exceptions.Result[int]: + return exceptions.Result.ok(x * 2) + + result = exceptions.Result.error('Initial error') + chained = result.and_then(double_if_positive) + + assert chained.is_err + + def test_result_or_else_success(self) -> None: + """Test or_else with successful Result.""" + + def handle_error(e: Exception) -> exceptions.Result[int]: + return exceptions.Result.ok(0) + + result = exceptions.Result.ok(42) + handled = result.or_else(handle_error) + + assert handled.is_ok + assert handled.unwrap() == 42 + + def test_result_or_else_error(self) -> None: + """Test or_else with error Result.""" + + def handle_error(e: Exception) -> exceptions.Result[int]: + return exceptions.Result.ok(-1) + + result = exceptions.Result.error('Error') + handled = result.or_else(handle_error) + + assert handled.is_ok + assert handled.unwrap() == -1 + + def test_result_boolean_conversion(self) -> None: + """Test Result boolean conversion.""" + ok_result = exceptions.Result.ok(42) + err_result = exceptions.Result.error('Error') + + assert bool(ok_result) is True + assert bool(err_result) is False + + def test_result_string_representation(self) -> None: + """Test Result string representation.""" + ok_result = exceptions.Result.ok(42) + err_result = exceptions.Result.error('Error') + + assert str(ok_result) is not None + assert str(err_result) is not None + assert repr(ok_result).startswith('Result(') + assert repr(err_result).startswith('Result(') + + +class TestSafeDivide: + """Test safe division function.""" + + def 
test_safe_divide_normal(self) -> None: + """Test normal division.""" + result = exceptions.safe_divide(10.0, 2.0) + + assert result.is_ok + assert result.unwrap() == 5.0 + + def test_safe_divide_by_zero(self) -> None: + """Test division by zero.""" + result = exceptions.safe_divide(10.0, 0.0) + + assert result.is_err + + def test_safe_divide_negative_numbers(self) -> None: + """Test division with negative numbers.""" + result = exceptions.safe_divide(-10.0, 2.0) + + assert result.is_ok + assert result.unwrap() == -5.0 + + def test_safe_divide_float_precision(self) -> None: + """Test division with float precision.""" + result = exceptions.safe_divide(1.0, 3.0) + + assert result.is_ok + assert result.unwrap() == pytest.approx(0.3333333333333333) + + +class TestSafeSqrt: + """Test safe square root function.""" + + def test_safe_sqrt_positive(self) -> None: + """Test square root of positive number.""" + result = exceptions.safe_sqrt(16.0) + + assert result.is_ok + assert result.unwrap() == 4.0 + + def test_safe_sqrt_zero(self) -> None: + """Test square root of zero.""" + result = exceptions.safe_sqrt(0.0) + + assert result.is_ok + assert result.unwrap() == 0.0 + + def test_safe_sqrt_negative(self) -> None: + """Test square root of negative number.""" + result = exceptions.safe_sqrt(-1.0) + + assert result.is_err + + def test_safe_sqrt_float_precision(self) -> None: + """Test square root with float precision.""" + result = exceptions.safe_sqrt(2.0) + + assert result.is_ok + assert result.unwrap() == pytest.approx(1.4142135623730951) + + +class TestChainOperations: + """Test chaining Result operations.""" + + def test_chain_operations_success(self) -> None: + """Test successful chain of operations.""" + + def add_one(x: float) -> exceptions.Result[float]: + return exceptions.Result.ok(x + 1.0) + + def multiply_by_two(x: float) -> exceptions.Result[float]: + return exceptions.Result.ok(x * 2.0) + + chained = exceptions.chain_operations(add_one, multiply_by_two) + result = 
chained(5.0) + + assert result.is_ok + assert result.unwrap() == 12.0 # (5 + 1) * 2 + + def test_chain_operations_early_error(self) -> None: + """Test chain stops at first error.""" + + def fail_operation(x: float) -> exceptions.Result[float]: + return exceptions.Result.error('Operation failed') + + def never_called(x: float) -> exceptions.Result[float]: + return exceptions.Result.ok(x * 100.0) + + chained = exceptions.chain_operations(fail_operation, never_called) + result = chained(5.0) + + assert result.is_err + + def test_chain_operations_complex(self) -> None: + """Test complex chain with safe operations.""" + + def safe_divide_by_two(x: float) -> exceptions.Result[float]: + return exceptions.safe_divide(x, 2.0) + + def safe_sqrt_result(x: float) -> exceptions.Result[float]: + return exceptions.safe_sqrt(x) + + # Chain: divide by 2, then square root + chained = exceptions.chain_operations(safe_divide_by_two, safe_sqrt_result) + + # Test with 8.0: 8.0 / 2.0 = 4.0, sqrt(4.0) = 2.0 + result = chained(8.0) + assert result.is_ok + assert result.unwrap() == 2.0 + + # Test with negative number + result_negative = chained(-8.0) + assert result_negative.is_err # Will fail at sqrt step + + def test_empty_chain_operations(self) -> None: + """Test empty chain of operations.""" + chained = exceptions.chain_operations() + result = chained(42.0) + + assert result.is_ok + assert result.unwrap() == 42.0 + + +class TestResultMapChaining: + """Test Result map method chaining.""" + + def test_result_map_chain_success(self) -> None: + """Test chaining map operations on successful Result.""" + result = ( + exceptions.Result.ok(5) + .map(lambda x: x * 2) + .map(lambda x: x + 3) + .map(lambda x: x // 2) + ) + + assert result.is_ok + assert result.unwrap() == 6 # ((5 * 2) + 3) // 2 = 6 + + def test_result_map_chain_with_error(self) -> None: + """Test map chaining when intermediate operation fails.""" + + def might_fail(x: int) -> int: + if x > 10: + raise ValueError('Too large') + 
return x * 2 + + result = ( + exceptions.Result.ok(5).map(lambda x: x * 3).map(might_fail) + ) # 15 # Should fail + + assert result.is_err + + def test_result_type_transformations(self) -> None: + """Test Result with different type transformations.""" + # Start with int, transform to string, then to length + result = exceptions.Result.ok(42).map(str).map(len) # '42' # 2 + + assert result.is_ok + assert result.unwrap() == 2 + + +class TestExceptionIntegration: + """Integration tests for exception handling.""" + + def test_complex_calculation_pipeline(self) -> None: + """Test complex calculation with error handling.""" + + def complex_calculation(x: float) -> exceptions.Result[float]: + # Chain: sqrt -> divide by 2 -> add 1 + return ( + exceptions.safe_sqrt(x) + .and_then(lambda v: exceptions.safe_divide(v, 2.0)) + .map(lambda v: v + 1.0) + ) + + # Test with valid input + result_valid = complex_calculation(16.0) + assert result_valid.is_ok + assert result_valid.unwrap() == 3.0 # sqrt(16)/2 + 1 = 4/2 + 1 = 3 + + # Test with invalid input + result_invalid = complex_calculation(-4.0) + assert result_invalid.is_err + + def test_result_error_recovery(self) -> None: + """Test error recovery patterns.""" + + def might_fail(x: int) -> exceptions.Result[int]: + if x < 0: + return exceptions.Result.error('Negative number') + return exceptions.Result.ok(x * 2) + + def recover_from_error(e: Exception) -> exceptions.Result[int]: + return exceptions.Result.ok(0) # Default value + + # Test successful case + success_result = might_fail(5).or_else(recover_from_error) + assert success_result.is_ok + assert success_result.unwrap() == 10 + + # Test error recovery + error_result = might_fail(-5).or_else(recover_from_error) + assert error_result.is_ok + assert error_result.unwrap() == 0 + + def test_multiple_result_combinations(self) -> None: + """Test combining multiple Result values.""" + result1 = exceptions.Result.ok(10) + result2 = exceptions.Result.ok(20) + result3 = 
exceptions.Result.error('Error') + + # Combine successful results + if result1.is_ok and result2.is_ok: + combined = exceptions.Result.ok(result1.unwrap() + result2.unwrap()) + assert combined.unwrap() == 30 + + # Handle error in combination + if result1.is_ok and result3.is_err: + # Can't combine due to error + assert result3.is_err + + def test_result_with_unsupported_type(self) -> None: + """Test Result creation with unsupported type.""" + with pytest.raises(ValueError, match='Unsupported result type'): + exceptions.Result.ok({'key': 'value'}) + + def test_exception_hierarchy_integration(self) -> None: + """Test integration with C++ exception hierarchy.""" + # Test that we can access C++ exception classes + assert exceptions.BaseException is not None + assert exceptions.ValidationException is not None + assert exceptions.ResourceException is not None + assert exceptions.CalculationException is not None + + # Test that we can access C++ Result types + assert exceptions.IntResult is not None + assert exceptions.DoubleResult is not None + assert exceptions.StringResult is not None diff --git a/python/tests/test_random.py b/python/tests/test_random.py new file mode 100644 index 0000000..64be0a6 --- /dev/null +++ b/python/tests/test_random.py @@ -0,0 +1,476 @@ +"""Tests for Python random module. + +Comprehensive tests following C++ test patterns for random functionality. 
+""" + +import pytest + +from python import random as cpp_random + + +class TestRandomGenerator: + """Test RandomGenerator functionality.""" + + def test_random_generator_creation(self) -> None: + """Test basic random generator creation.""" + gen = cpp_random.RandomGenerator() + assert gen is not None + + def test_random_generator_with_seed(self) -> None: + """Test random generator with seed.""" + gen = cpp_random.RandomGenerator(seed=12345) + assert gen is not None + + def test_randint_basic(self) -> None: + """Test basic random integer generation.""" + gen = cpp_random.RandomGenerator() + value = gen.randint(1, 10) + + assert isinstance(value, int) + assert 1 <= value <= 10 + + def test_randint_range(self) -> None: + """Test random integer generation in various ranges.""" + gen = cpp_random.RandomGenerator(seed=42) + + # Test different ranges + for min_val, max_val in [(1, 5), (10, 20), (-5, 5), (100, 200)]: + value = gen.randint(min_val, max_val) + assert min_val <= value <= max_val + + def test_randlong(self) -> None: + """Test random long integer generation.""" + gen = cpp_random.RandomGenerator() + value = gen.randlong(1000000, 2000000) + + assert isinstance(value, int) + assert 1000000 <= value <= 2000000 + + def test_random_float(self) -> None: + """Test random float generation.""" + gen = cpp_random.RandomGenerator() + value = gen.random() + + assert isinstance(value, float) + assert 0.0 <= value < 1.0 + + def test_uniform(self) -> None: + """Test uniform distribution.""" + gen = cpp_random.RandomGenerator() + value = gen.uniform(2.0, 8.0) + + assert isinstance(value, float) + assert 2.0 <= value < 8.0 + + def test_randfloat(self) -> None: + """Test random float32 generation.""" + gen = cpp_random.RandomGenerator() + value = gen.randfloat(1.0, 5.0) + + assert isinstance(value, float) + assert 1.0 <= value < 5.0 + + def test_choice(self) -> None: + """Test random boolean generation.""" + gen = cpp_random.RandomGenerator() + + # Test with default probability 
+ value = gen.choice() + assert isinstance(value, bool) + + # Test with custom probability + value_high = gen.choice(0.9) + value_low = gen.choice(0.1) + assert isinstance(value_high, bool) + assert isinstance(value_low, bool) + + def test_normal_distribution(self) -> None: + """Test normal distribution.""" + gen = cpp_random.RandomGenerator() + + # Test with default parameters + value = gen.normal() + assert isinstance(value, float) + + # Test with custom parameters + value_custom = gen.normal(mean=10.0, stddev=2.0) + assert isinstance(value_custom, float) + + def test_normal_float(self) -> None: + """Test normal distribution with float32.""" + gen = cpp_random.RandomGenerator() + value = gen.normal_float(5.0, 1.5) + + assert isinstance(value, float) + + def test_integers_vector(self) -> None: + """Test generating vector of integers.""" + gen = cpp_random.RandomGenerator() + values = gen.integers(1, 100, 10) + + assert isinstance(values, list) + assert len(values) == 10 + assert all(isinstance(v, int) for v in values) + assert all(1 <= v <= 100 for v in values) + + def test_floats_vector(self) -> None: + """Test generating vector of floats.""" + gen = cpp_random.RandomGenerator() + values = gen.floats(0.0, 1.0, 5) + + assert isinstance(values, list) + assert len(values) == 5 + assert all(isinstance(v, float) for v in values) + assert all(0.0 <= v < 1.0 for v in values) + + def test_seed_reproducibility(self) -> None: + """Test seed reproducibility.""" + gen1 = cpp_random.RandomGenerator(seed=123) + gen2 = cpp_random.RandomGenerator(seed=123) + + # Same seed should produce same sequence + values1 = [gen1.randint(1, 1000) for _ in range(5)] + values2 = [gen2.randint(1, 1000) for _ in range(5)] + + assert values1 == values2 + + def test_seed_method(self) -> None: + """Test seeding after creation.""" + gen = cpp_random.RandomGenerator() + + # Set seed and generate values + gen.seed(456) + values1 = [gen.randint(1, 100) for _ in range(3)] + + # Reset with same seed + 
gen.seed(456) + values2 = [gen.randint(1, 100) for _ in range(3)] + + assert values1 == values2 + + def test_seed_with_time(self) -> None: + """Test seeding with current time.""" + gen = cpp_random.RandomGenerator() + gen.seed_with_time() + + # Should be able to generate values + value = gen.randint(1, 100) + assert 1 <= value <= 100 + + +class TestGlobalFunctions: + """Test global random functions.""" + + def test_shuffle_int_list(self) -> None: + """Test shuffling integer list.""" + original = [1, 2, 3, 4, 5] + data = original.copy() + + cpp_random.shuffle(data) + + # List should still contain same elements + assert sorted(data) == sorted(original) + assert len(data) == len(original) + + def test_shuffle_float_list(self) -> None: + """Test shuffling float list.""" + original = [1.1, 2.2, 3.3, 4.4, 5.5] + data = original.copy() + + cpp_random.shuffle(data) + + assert sorted(data) == sorted(original) + assert len(data) == len(original) + + def test_shuffle_string_list(self) -> None: + """Test shuffling string list.""" + original = ['apple', 'banana', 'cherry', 'date'] + data = original.copy() + + cpp_random.shuffle(data) + + assert sorted(data) == sorted(original) + assert len(data) == len(original) + + def test_shuffle_unsupported_type(self) -> None: + """Test shuffling unsupported type falls back to Python random.""" + data = [{'a': 1}, {'b': 2}, {'c': 3}] + original = data.copy() + + # Should not raise exception + cpp_random.shuffle(data) + + # Should contain same elements (though order may change) + assert len(data) == len(original) + + def test_sample_int_list(self) -> None: + """Test sampling from integer list.""" + population = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + sample = cpp_random.sample(population, 3) + + assert isinstance(sample, list) + assert len(sample) == 3 + assert all(item in population for item in sample) + assert len(set(sample)) == 3 # No duplicates + + def test_sample_float_list(self) -> None: + """Test sampling from float list.""" + population = 
[1.1, 2.2, 3.3, 4.4, 5.5] + sample = cpp_random.sample(population, 2) + + assert len(sample) == 2 + assert all(item in population for item in sample) + + def test_sample_string_list(self) -> None: + """Test sampling from string list.""" + population = ['a', 'b', 'c', 'd', 'e'] + sample = cpp_random.sample(population, 3) + + assert len(sample) == 3 + assert all(item in population for item in sample) + + def test_sample_string_characters(self) -> None: + """Test sampling characters from string.""" + text = 'hello world' + sample = cpp_random.sample_string(text, 5) + + assert isinstance(sample, list) + assert len(sample) == 5 + assert all(char in text for char in sample) + + def test_sample_unsupported_type(self) -> None: + """Test sampling unsupported type falls back to Python random.""" + population = [{'a': 1}, {'b': 2}, {'c': 3}, {'d': 4}] + sample = cpp_random.sample(population, 2) + + assert len(sample) == 2 + + +class TestDistributions: + """Test probability distributions.""" + + def test_uniform_int_distribution(self) -> None: + """Test uniform integer distribution.""" + gen = cpp_random.RandomGenerator(seed=42) + dist = cpp_random.UniformInt(gen, 10, 20) + + value = dist.sample() + assert isinstance(value, int) + assert 10 <= value <= 20 + + samples = dist.samples(5) + assert len(samples) == 5 + assert all(10 <= v <= 20 for v in samples) + + def test_uniform_float_distribution(self) -> None: + """Test uniform float distribution.""" + gen = cpp_random.RandomGenerator(seed=42) + dist = cpp_random.UniformFloat(gen, 1.0, 5.0) + + value = dist.sample() + assert isinstance(value, float) + assert 1.0 <= value < 5.0 + + samples = dist.samples(10) + assert len(samples) == 10 + assert all(1.0 <= v < 5.0 for v in samples) + + def test_normal_distribution_class(self) -> None: + """Test normal distribution class.""" + gen = cpp_random.RandomGenerator(seed=42) + + # Standard normal distribution + std_normal = cpp_random.Normal(gen) + value = std_normal.sample() + assert 
isinstance(value, float) + + # Custom normal distribution + custom_normal = cpp_random.Normal(gen, mean=100.0, stddev=15.0) + samples = custom_normal.samples(100) + + # Statistical tests (approximate) + mean_sample = sum(samples) / len(samples) + assert 85.0 < mean_sample < 115.0 # Should be around 100 ± 15 + + def test_distribution_inheritance(self) -> None: + """Test that distributions follow inheritance pattern.""" + gen = cpp_random.RandomGenerator() + + # All distributions should be instances of Distribution + uniform_int = cpp_random.UniformInt(gen, 1, 10) + uniform_float = cpp_random.UniformFloat(gen, 1.0, 10.0) + normal = cpp_random.Normal(gen) + + # Test that they all have sample and samples methods + assert hasattr(uniform_int, 'sample') + assert hasattr(uniform_int, 'samples') + assert hasattr(uniform_float, 'sample') + assert hasattr(uniform_float, 'samples') + assert hasattr(normal, 'sample') + assert hasattr(normal, 'samples') + + +class TestConvenienceFunctions: + """Test convenience functions using default generator.""" + + def test_randint_convenience(self) -> None: + """Test randint convenience function.""" + value = cpp_random.randint(1, 100) + assert isinstance(value, int) + assert 1 <= value <= 100 + + def test_random_convenience(self) -> None: + """Test random convenience function.""" + value = cpp_random.random() + assert isinstance(value, float) + assert 0.0 <= value < 1.0 + + def test_uniform_convenience(self) -> None: + """Test uniform convenience function.""" + value = cpp_random.uniform(5.0, 10.0) + assert isinstance(value, float) + assert 5.0 <= value < 10.0 + + def test_choice_convenience(self) -> None: + """Test choice convenience function.""" + value = cpp_random.choice() + assert isinstance(value, bool) + + value_biased = cpp_random.choice(0.8) + assert isinstance(value_biased, bool) + + def test_normal_convenience(self) -> None: + """Test normal convenience function.""" + value = cpp_random.normal() + assert isinstance(value, 
float) + + value_custom = cpp_random.normal(mean=50.0, stddev=10.0) + assert isinstance(value_custom, float) + + +class TestRandomEdgeCases: + """Test edge cases and error conditions.""" + + def test_randint_same_bounds(self) -> None: + """Test randint with same min and max.""" + gen = cpp_random.RandomGenerator() + value = gen.randint(5, 5) + assert value == 5 + + def test_uniform_zero_range(self) -> None: + """Test uniform with minimal range.""" + gen = cpp_random.RandomGenerator() + value = gen.uniform(1.0, 1.0) + assert value == 1.0 + + def test_sample_larger_than_population(self) -> None: + """Test sampling more items than available.""" + population = [1, 2, 3] + + with pytest.raises(Exception): # Should raise an error + cpp_random.sample(population, 5) + + def test_empty_vector_generation(self) -> None: + """Test generating empty vectors.""" + gen = cpp_random.RandomGenerator() + + int_vec = gen.integers(1, 10, 0) + float_vec = gen.floats(0.0, 1.0, 0) + + assert int_vec == [] + assert float_vec == [] + + def test_negative_count_vector(self) -> None: + """Test generating vectors with negative count.""" + gen = cpp_random.RandomGenerator() + + # Should handle gracefully or raise appropriate error + try: + gen.integers(1, 10, -1) + except Exception: + pass # Expected to fail + + +class TestRandomIntegration: + """Integration tests for random functionality.""" + + def test_multiple_generators_independence(self) -> None: + """Test that multiple generators are independent.""" + gen1 = cpp_random.RandomGenerator(seed=111) + gen2 = cpp_random.RandomGenerator(seed=222) + + values1 = [gen1.randint(1, 1000) for _ in range(10)] + values2 = [gen2.randint(1, 1000) for _ in range(10)] + + # Different seeds should produce different sequences + assert values1 != values2 + + def test_random_data_pipeline(self) -> None: + """Test complete random data generation pipeline.""" + gen = cpp_random.RandomGenerator(seed=42) + + # Generate random data + int_data = gen.integers(1, 100, 
20) + + # Shuffle the data + cpp_random.shuffle(int_data) + + # Sample from the shuffled data + sample_data = cpp_random.sample(int_data, 5) + + assert len(int_data) == 20 + assert len(sample_data) == 5 + assert all(item in int_data for item in sample_data) + + def test_statistical_properties(self) -> None: + """Test statistical properties of distributions.""" + gen = cpp_random.RandomGenerator(seed=12345) + + # Generate large sample from uniform distribution + uniform_samples = gen.floats(0.0, 1.0, 1000) + + # Basic statistical checks + mean = sum(uniform_samples) / len(uniform_samples) + assert 0.4 < mean < 0.6 # Should be around 0.5 + + min_val = min(uniform_samples) + max_val = max(uniform_samples) + assert 0.0 <= min_val < 1.0 + assert 0.0 <= max_val < 1.0 + + def test_cross_type_operations(self) -> None: + """Test operations across different random types.""" + gen = cpp_random.RandomGenerator(seed=999) + + # Generate different types of random data + integers = gen.integers(1, 10, 5) + floats = gen.floats(1.0, 10.0, 5) + booleans = [gen.choice() for _ in range(5)] + + # Verify types and ranges + assert all(isinstance(x, int) and 1 <= x <= 10 for x in integers) + assert all(isinstance(x, float) and 1.0 <= x < 10.0 for x in floats) + assert all(isinstance(x, bool) for x in booleans) + + # Test shuffling mixed data + mixed_strings = [str(x) for x in integers + floats] + cpp_random.shuffle(mixed_strings) + assert len(mixed_strings) == 10 + + def test_performance_consistency(self) -> None: + """Test that random generation performance is consistent.""" + gen = cpp_random.RandomGenerator() + + # Generate multiple large batches + batch1 = gen.integers(1, 1000000, 1000) + batch2 = gen.integers(1, 1000000, 1000) + batch3 = gen.integers(1, 1000000, 1000) + + # All batches should be complete + assert len(batch1) == 1000 + assert len(batch2) == 1000 + assert len(batch3) == 1000 + + # Values should be in range + for batch in [batch1, batch2, batch3]: + assert all(1 <= x <= 
1000000 for x in batch) diff --git a/python/tests/test_shapes.py b/python/tests/test_shapes.py new file mode 100644 index 0000000..18b871c --- /dev/null +++ b/python/tests/test_shapes.py @@ -0,0 +1,232 @@ +"""Tests for Python shapes module. + +Comprehensive tests following C++ test patterns for shapes functionality. +""" + +import math + +import pytest + +from python import shapes + + +class TestCircle: + """Test Circle creation and basic properties.""" + + def test_circle_creation(self) -> None: + """Test basic circle creation.""" + circle = shapes.create_shape('circle', 5.0) + + assert circle.get_name() == 'Circle' + assert circle.get_area() == pytest.approx(math.pi * 25.0) + assert circle.get_perimeter() == pytest.approx(math.pi * 10.0) + + def test_circle_factory_function(self) -> None: + """Test circle factory function.""" + circle = shapes.create_shape(shapes.ShapeType.CIRCLE, 3.0) + + assert circle.get_name() == 'Circle' + assert circle.get_area() == pytest.approx(math.pi * 9.0) + + def test_circle_invalid_radius(self) -> None: + """Test circle with invalid radius throws exception.""" + with pytest.raises(Exception): # C++ ValidationException + shapes.create_shape('circle', 0.0) + + with pytest.raises(Exception): + shapes.create_shape('circle', -3.0) + + def test_circle_comparison(self) -> None: + """Test circle comparison operations.""" + circle1 = shapes.Circle(5.0) + circle2 = shapes.Circle(5.0) + circle3 = shapes.Circle(3.0) + + assert circle1 == circle2 + assert not (circle1 == circle3) + assert circle3 < circle1 + assert circle1 > circle3 + + +class TestRectangle: + """Test Rectangle creation and basic properties.""" + + def test_rectangle_creation(self) -> None: + """Test basic rectangle creation.""" + rect = shapes.create_shape('rectangle', 4.0, 6.0) + + assert rect.get_name() == 'Rectangle' + assert rect.get_area() == 24.0 + assert rect.get_perimeter() == 20.0 + assert not rect.is_square() + + def test_square_creation(self) -> None: + """Test square 
creation.""" + square = shapes.create_shape('square', 5.0) + + assert square.get_name() == 'Rectangle' + assert square.get_area() == 25.0 + assert square.get_perimeter() == 20.0 + assert square.is_square() + + def test_rectangle_invalid_dimensions(self) -> None: + """Test rectangle with invalid dimensions throws exception.""" + with pytest.raises(Exception): + shapes.create_shape('rectangle', 0.0, 5.0) + + with pytest.raises(Exception): + shapes.create_shape('rectangle', 5.0, -3.0) + + def test_rectangle_comparison(self) -> None: + """Test rectangle comparison operations.""" + rect1 = shapes.Rectangle(4.0, 3.0) + rect2 = shapes.Rectangle(4.0, 3.0) + rect3 = shapes.Rectangle(2.0, 3.0) + + assert rect1 == rect2 + assert not (rect1 == rect3) + assert rect3 < rect1 + + +class TestShapeAnalysis: + """Test shape analysis functions.""" + + def test_analyze_shape(self) -> None: + """Test shape analysis function.""" + circle = shapes.create_shape('circle', 5.0) + metrics = shapes.analyze_shape(circle) + + assert metrics.name == 'Circle' + assert metrics.area == pytest.approx(math.pi * 25.0) + assert metrics.perimeter == pytest.approx(math.pi * 10.0) + assert metrics.aspect_ratio > 0 + + def test_compare_shapes(self) -> None: + """Test shape comparison function.""" + circle = shapes.create_shape('circle', 3.0) + rectangle = shapes.create_shape('rectangle', 4.0, 5.0) + square = shapes.create_shape('square', 2.0) + + comparison = shapes.compare_shapes(circle, rectangle, square) + + assert comparison['count'] == 3 + assert comparison['total_area'] > 0 + assert comparison['total_perimeter'] > 0 + assert 'largest_by_area' in comparison + assert 'smallest_by_area' in comparison + assert 'average_area' in comparison + assert len(comparison['metrics']) == 3 + + def test_compare_shapes_empty(self) -> None: + """Test comparison with no shapes raises error.""" + with pytest.raises(ValueError, match='At least one shape is required'): + shapes.compare_shapes() + + +class 
TestShapeFactory: + """Test shape factory functions.""" + + def test_factory_with_string_types(self) -> None: + """Test factory with string shape types.""" + circle = shapes.create_shape('circle', 5.0) + rectangle = shapes.create_shape('rectangle', 3.0, 4.0) + square = shapes.create_shape('square', 6.0) + + assert circle.get_name() == 'Circle' + assert rectangle.get_name() == 'Rectangle' + assert square.get_name() == 'Rectangle' + assert square.is_square() + + def test_factory_with_enum_types(self) -> None: + """Test factory with enum shape types.""" + circle = shapes.create_shape(shapes.ShapeType.CIRCLE, 5.0) + rectangle = shapes.create_shape(shapes.ShapeType.RECTANGLE, 3.0, 4.0) + square = shapes.create_shape(shapes.ShapeType.SQUARE, 6.0) + + assert circle.get_name() == 'Circle' + assert rectangle.get_name() == 'Rectangle' + assert square.is_square() + + def test_factory_invalid_arguments(self) -> None: + """Test factory with invalid arguments.""" + with pytest.raises(ValueError, match='Circle requires exactly 1 argument'): + shapes.create_shape('circle', 1.0, 2.0) + + with pytest.raises(ValueError, match='Rectangle requires exactly 2 arguments'): + shapes.create_shape('rectangle', 1.0) + + with pytest.raises(ValueError, match='Unknown shape type'): + shapes.create_shape('triangle', 1.0) + + +class TestShapeMetrics: + """Test ShapeMetrics dataclass.""" + + def test_shape_metrics_creation(self) -> None: + """Test creating ShapeMetrics.""" + metrics = shapes.ShapeMetrics(area=25.0, perimeter=20.0, name='Square') + + assert metrics.area == 25.0 + assert metrics.perimeter == 20.0 + assert metrics.name == 'Square' + assert metrics.aspect_ratio == pytest.approx(25.0 / 400.0) + + def test_shape_metrics_immutable(self) -> None: + """Test that ShapeMetrics is immutable.""" + metrics = shapes.ShapeMetrics(area=25.0, perimeter=20.0, name='Square') + + with pytest.raises(AttributeError): + metrics.area = 30.0 # Should fail - frozen dataclass + + +class TestShapeIntegration: 
+ """Integration tests for shape functionality.""" + + def test_polymorphic_behavior(self) -> None: + """Test polymorphic behavior with different shapes.""" + shapes_list = [ + shapes.create_shape('circle', 3.0), + shapes.create_shape('rectangle', 4.0, 5.0), + shapes.create_shape('square', 2.0), + ] + + expected_names = ['Circle', 'Rectangle', 'Rectangle'] + expected_areas = [math.pi * 9.0, 20.0, 4.0] + + for shape, name, area in zip(shapes_list, expected_names, expected_areas): + assert shape.get_name() == name + assert shape.get_area() == pytest.approx(area) + + def test_shape_collection_analysis(self) -> None: + """Test analyzing a collection of shapes.""" + test_shapes = [ + shapes.create_shape('circle', 1.0), + shapes.create_shape('circle', 2.0), + shapes.create_shape('rectangle', 2.0, 3.0), + shapes.create_shape('square', 2.0), + ] + + total_area = sum(shape.get_area() for shape in test_shapes) + circle_area_1 = math.pi * 1.0 + circle_area_2 = math.pi * 4.0 + rect_area = 6.0 + square_area = 4.0 + + expected_total = circle_area_1 + circle_area_2 + rect_area + square_area + assert total_area == pytest.approx(expected_total) + + def test_shape_sorting_by_area(self) -> None: + """Test sorting shapes by area.""" + test_shapes = [ + shapes.create_shape('circle', 2.0), # π * 4 ≈ 12.57 + shapes.create_shape('rectangle', 2.0, 3.0), # 6.0 + shapes.create_shape('square', 4.0), # 16.0 + shapes.create_shape('circle', 1.0), # π ≈ 3.14 + ] + + sorted_shapes = sorted(test_shapes, key=lambda s: s.get_area()) + areas = [shape.get_area() for shape in sorted_shapes] + + # Should be in ascending order + for i in range(len(areas) - 1): + assert areas[i] <= areas[i + 1] diff --git a/python/tests/test_timing.py b/python/tests/test_timing.py new file mode 100644 index 0000000..5740a01 --- /dev/null +++ b/python/tests/test_timing.py @@ -0,0 +1,450 @@ +"""Tests for Python timing module. + +Comprehensive tests following C++ test patterns for timing functionality. 
+""" + +import time + +import pytest + +from python import timing + + +class TestTimer: + """Test Timer functionality.""" + + def test_timer_creation(self) -> None: + """Test basic timer creation.""" + timer = timing.Timer() + assert timer is not None + + def test_timer_start_stop(self) -> None: + """Test timer start and stop.""" + timer = timing.Timer() + timer.start() + time.sleep(0.01) # Sleep for 10ms + timer.stop() + + assert timer.elapsed_ns > 0 + assert timer.elapsed_us > 0 + assert timer.elapsed_ms >= 0 # Might be 0 for very short times + + def test_timer_reset(self) -> None: + """Test timer reset functionality.""" + timer = timing.Timer() + timer.start() + time.sleep(0.01) + timer.stop() + + initial_elapsed = timer.elapsed_ns + assert initial_elapsed > 0 + + timer.reset() + # After reset, elapsed time should be minimal + assert timer.elapsed_ns < initial_elapsed + + def test_timer_properties(self) -> None: + """Test timer property accessors.""" + timer = timing.Timer() + timer.start() + time.sleep(0.01) + timer.stop() + + # Test all time unit properties + ns = timer.elapsed_ns + us = timer.elapsed_us + ms = timer.elapsed_ms + s = timer.elapsed_s + + assert ns > 0 + assert us > 0 + assert ns >= us # ns should be >= us (1000x conversion) + assert us >= ms # us should be >= ms (1000x conversion) + assert ms >= s # ms should be >= s (1000x conversion) + + def test_timer_string_representation(self) -> None: + """Test timer string representation.""" + timer = timing.Timer() + timer.start() + time.sleep(0.001) + timer.stop() + + elapsed_str = timer.elapsed_string + str_repr = str(timer) + + assert isinstance(elapsed_str, str) + assert isinstance(str_repr, str) + assert elapsed_str == str_repr + + def test_timer_context_manager(self) -> None: + """Test timer as context manager.""" + with timing.Timer() as timer: + time.sleep(0.01) + + assert timer.elapsed_ns > 0 + + def test_timer_multiple_measurements(self) -> None: + """Test multiple measurements with same 
timer.""" + timer = timing.Timer() + + # First measurement + timer.start() + time.sleep(0.005) + timer.stop() + first_elapsed = timer.elapsed_ns + + # Reset and second measurement + timer.reset() + timer.start() + time.sleep(0.01) + timer.stop() + second_elapsed = timer.elapsed_ns + + assert first_elapsed > 0 + assert second_elapsed > 0 + assert second_elapsed > first_elapsed + + +class TestMeasureTime: + """Test measure_time context manager.""" + + def test_measure_time_basic(self, capsys) -> None: + """Test basic measure_time usage.""" + with timing.measure_time('test operation') as timer: + time.sleep(0.01) + + assert timer.elapsed_ns > 0 + + # Check that start/finish messages were printed + captured = capsys.readouterr() + assert 'Starting: test operation' in captured.out + assert 'Finished test operation:' in captured.out + + def test_measure_time_no_name(self) -> None: + """Test measure_time without name.""" + with timing.measure_time() as timer: + time.sleep(0.005) + + assert timer.elapsed_ns > 0 + + def test_measure_time_with_exception(self, capsys) -> None: + """Test measure_time with exception.""" + try: + with timing.measure_time('failing operation') as timer: + time.sleep(0.005) + raise ValueError('Test error') + except ValueError: + pass + + # Timer should still have measured time + assert timer.elapsed_ns > 0 + + # Finish message should still be printed + captured = capsys.readouterr() + assert 'Finished failing operation:' in captured.out + + +class TestBenchmark: + """Test Benchmark functionality.""" + + def test_benchmark_creation(self) -> None: + """Test benchmark creation.""" + benchmark = timing.Benchmark('test_benchmark') + assert benchmark.name == 'test_benchmark' + assert benchmark.measurements == [] + + def test_benchmark_default_name(self) -> None: + """Test benchmark with default name.""" + benchmark = timing.Benchmark() + assert benchmark.name == 'Benchmark' + + def test_benchmark_run_simple(self) -> None: + """Test running simple 
benchmark.""" + + def simple_function() -> int: + return sum(range(100)) + + benchmark = timing.Benchmark('sum_test') + stats = benchmark.run(simple_function, iterations=5) + + assert stats['name'] == 'sum_test' + assert stats['iterations'] == 5 + assert stats['total_time_ms'] > 0 + assert stats['mean_ms'] > 0 + assert stats['min_ms'] >= 0 + assert stats['max_ms'] >= stats['min_ms'] + assert len(stats['measurements_ns']) == 5 + + def test_benchmark_statistics(self) -> None: + """Test benchmark statistics calculation.""" + + def test_function() -> None: + time.sleep(0.001) # 1ms sleep + + benchmark = timing.Benchmark('sleep_test') + stats = benchmark.run(test_function, iterations=3) + + # Verify statistical measures + assert 'mean_ms' in stats + assert 'median_ms' in stats + assert 'stdev_ms' in stats + assert 'human_readable' in stats + + # Check human readable format + human = stats['human_readable'] + assert 'mean' in human + assert 'min' in human + assert 'max' in human + + def test_benchmark_get_statistics_empty(self) -> None: + """Test getting statistics from empty benchmark.""" + benchmark = timing.Benchmark() + stats = benchmark.get_statistics() + + assert stats == {} + + def test_benchmark_multiple_runs(self) -> None: + """Test multiple benchmark runs.""" + + def test_function() -> int: + return len([x for x in range(50)]) + + benchmark = timing.Benchmark('multi_test') + + # First run + stats1 = benchmark.run(test_function, iterations=3) + assert len(stats1['measurements_ns']) == 3 + + # Second run (should clear previous measurements) + stats2 = benchmark.run(test_function, iterations=2) + assert len(stats2['measurements_ns']) == 2 + assert len(benchmark.measurements) == 2 + + def test_benchmark_compare_with(self) -> None: + """Test comparing benchmarks.""" + + def fast_function() -> int: + return 42 + + def slow_function() -> int: + return sum(range(1000)) + + fast_bench = timing.Benchmark('fast') + slow_bench = timing.Benchmark('slow') + + 
fast_bench.run(fast_function, iterations=5) + slow_bench.run(slow_function, iterations=5) + + comparison = fast_bench.compare_with(slow_bench) + + assert 'benchmarks' in comparison + assert comparison['benchmarks'] == ['fast', 'slow'] + assert 'ratio' in comparison + assert 'faster' in comparison + assert 'speedup' in comparison + assert 'difference_ms' in comparison + + # Fast function should be faster + assert comparison['faster'] == 'fast' + assert comparison['speedup'] >= 1.0 + + def test_benchmark_compare_error(self) -> None: + """Test benchmark comparison with no measurements.""" + benchmark1 = timing.Benchmark('empty1') + benchmark2 = timing.Benchmark('empty2') + + with pytest.raises(ValueError, match='Both benchmarks must have measurements'): + benchmark1.compare_with(benchmark2) + + +class TestBenchmarkFunction: + """Test benchmark_function utility.""" + + def test_benchmark_function_basic(self) -> None: + """Test basic benchmark_function usage.""" + + def test_func() -> list[int]: + return [x * x for x in range(10)] + + stats = timing.benchmark_function(test_func, iterations=3) + + assert stats['name'] == 'test_func' + assert stats['iterations'] == 3 + assert stats['mean_ms'] >= 0 + + def test_benchmark_function_with_name(self) -> None: + """Test benchmark_function with custom name.""" + + def test_func() -> int: + return 42 + + stats = timing.benchmark_function(test_func, iterations=2, name='custom_name') + + assert stats['name'] == 'custom_name' + assert stats['iterations'] == 2 + + def test_benchmark_function_lambda(self) -> None: + """Test benchmark_function with lambda.""" + stats = timing.benchmark_function(lambda: sum(range(50)), iterations=3) + + assert stats['name'] == '' + assert stats['iterations'] == 3 + + +class TestTimeFunction: + """Test time_function utility.""" + + def test_time_function_basic(self) -> None: + """Test basic time_function usage.""" + + def test_func() -> str: + return 'hello' * 100 + + elapsed_str = 
timing.time_function(test_func) + + assert isinstance(elapsed_str, str) + # Should contain time unit (ns, us, ms, or s) + assert any(unit in elapsed_str for unit in ['ns', 'us', 'ms', 's']) + + def test_time_function_lambda(self) -> None: + """Test time_function with lambda.""" + elapsed_str = timing.time_function(lambda: [x for x in range(100)]) + + assert isinstance(elapsed_str, str) + + def test_time_function_different_complexities(self) -> None: + """Test time_function with different complexity functions.""" + + def simple_func() -> int: + return 1 + 1 + + def complex_func() -> int: + return sum(x * x for x in range(1000)) + + simple_time = timing.time_function(simple_func) + complex_time = timing.time_function(complex_func) + + # Both should return valid time strings + assert isinstance(simple_time, str) + assert isinstance(complex_time, str) + + +class TestTimingIntegration: + """Integration tests for timing functionality.""" + + def test_timer_benchmark_integration(self) -> None: + """Test using Timer within benchmark context.""" + + def timed_operation() -> int: + with timing.Timer() as timer: + time.sleep(0.001) + return timer.elapsed_ns + + benchmark = timing.Benchmark('timer_integration') + stats = benchmark.run(timed_operation, iterations=3) + + assert stats['iterations'] == 3 + assert stats['mean_ms'] > 0 + + def test_multiple_timing_methods_comparison(self) -> None: + """Test comparing different timing methods.""" + + def test_operation() -> list[int]: + return sorted([x for x in range(100, 0, -1)]) + + # Method 1: Direct timing + elapsed_direct = timing.time_function(test_operation) + + # Method 2: Benchmark with single iteration + benchmark_stats = timing.benchmark_function(test_operation, iterations=1) + + # Method 3: Manual timer + with timing.Timer() as manual_timer: + test_operation() + + # All methods should return valid results + assert isinstance(elapsed_direct, str) + assert benchmark_stats['iterations'] == 1 + assert 
manual_timer.elapsed_ns > 0 + + def test_performance_measurement_workflow(self) -> None: + """Test complete performance measurement workflow.""" + + # Define test functions with different performance characteristics + def linear_operation(n: int = 100) -> int: + return sum(range(n)) + + def quadratic_operation(n: int = 50) -> int: + return sum(i * j for i in range(n) for j in range(n)) + + # Benchmark both operations + linear_bench = timing.Benchmark('linear') + quadratic_bench = timing.Benchmark('quadratic') + + linear_stats = linear_bench.run(linear_operation, iterations=5) + quadratic_stats = quadratic_bench.run(quadratic_operation, iterations=5) + + # Compare performance + comparison = linear_bench.compare_with(quadratic_bench) + + # Linear should generally be faster than quadratic + assert comparison['faster'] == 'linear' + assert comparison['speedup'] > 1.0 + + # Verify statistics structure + for stats in [linear_stats, quadratic_stats]: + assert 'mean_ms' in stats + assert 'human_readable' in stats + assert len(stats['measurements_ns']) == 5 + + def test_timing_with_exceptions(self) -> None: + """Test timing behavior with exceptions.""" + + def failing_function() -> None: + time.sleep(0.001) + raise RuntimeError('Intentional failure') + + # Timer should still measure time even if function fails + try: + with timing.Timer() as timer: + failing_function() + except RuntimeError: + pass + + assert timer.elapsed_ns > 0 + + # measure_time should also handle exceptions + try: + with timing.measure_time('failing_op') as timer: + failing_function() + except RuntimeError: + pass + + assert timer.elapsed_ns > 0 + + def test_precision_and_accuracy(self) -> None: + """Test timing precision and accuracy.""" + + # Test very short operations + def micro_operation() -> int: + return 1 + 1 + + # Test multiple measurements for consistency + measurements = [] + for _ in range(10): + with timing.Timer() as timer: + micro_operation() + measurements.append(timer.elapsed_ns) + + # 
All measurements should be positive + assert all(m > 0 for m in measurements) + + # Test longer operation for comparison + def longer_operation() -> list[int]: + return [x * x for x in range(1000)] + + with timing.Timer() as long_timer: + longer_operation() + + # Longer operation should take more time than micro operation + assert long_timer.elapsed_ns > min(measurements) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index c78d224..dd939b6 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,6 +1,6 @@ # Tests CMakeLists.txt -# Check if Catch2 is available +# Ensure Catch2 is available if(NOT TARGET Catch2::Catch2WithMain) if(NOT COMMAND catch_discover_tests) message( diff --git a/tests/test_concepts.cpp b/tests/test_concepts.cpp index a92c6de..5c41ad9 100644 --- a/tests/test_concepts.cpp +++ b/tests/test_concepts.cpp @@ -198,6 +198,75 @@ TEST_CASE("TimerCallback concept", "[concepts][callable][timer]") { } } +template Func> +auto TestTransformFunction(const Input &input, const Func &transform) -> Output { + return transform(input); +} + +TEST_CASE("TransformFunction concept", "[concepts][callable][transform]") { + SECTION("Valid transform functions") { + // Lambda functions + REQUIRE(TransformFunction); + REQUIRE(TransformFunction(n); }), int, double>); + REQUIRE(TransformFunction); + REQUIRE(TransformFunction); + + // Function pointers + REQUIRE(TransformFunction); + REQUIRE(TransformFunction); + REQUIRE(TransformFunction); + + // std::function + REQUIRE(TransformFunction, int, int>); + REQUIRE(TransformFunction, int, double>); + REQUIRE(TransformFunction, int, std::string>); + + // Test actual usage + auto square = [](int n) { return n * n; }; + auto to_double = [](int n) { return static_cast(n); }; + auto to_string = [](int n) { return std::to_string(n); }; + auto string_length = [](const std::string &s) { return s.length(); }; + + std::function func_square = [](int n) { return n * n; }; + std::function func_to_string = [](int n) { 
return std::to_string(n); }; + + REQUIRE(TestTransformFunction(5, square) == 25); + REQUIRE(TestTransformFunction(42, to_double) == 42.0); + REQUIRE(TestTransformFunction(123, to_string) == "123"); + REQUIRE(TestTransformFunction("hello", string_length) == 5); + REQUIRE(TestTransformFunction(7, func_square) == 49); + REQUIRE(TestTransformFunction(456, func_to_string) == "456"); + } + + SECTION("Invalid transform functions") { + // Wrong input type + REQUIRE_FALSE(TransformFunction); + REQUIRE_FALSE(TransformFunction); + + // Wrong output type (not convertible) + struct NonConvertible {}; + REQUIRE_FALSE(TransformFunction); + REQUIRE_FALSE(TransformFunction{}; }), int, int>); + + // Wrong arity + REQUIRE_FALSE(TransformFunction); + REQUIRE_FALSE(TransformFunction); + + // Not callable + REQUIRE_FALSE(TransformFunction); + REQUIRE_FALSE(TransformFunction); + REQUIRE_FALSE(TransformFunction); + + // Function pointers with wrong signatures + REQUIRE_FALSE(TransformFunction); + REQUIRE_FALSE(TransformFunction); + REQUIRE_FALSE(TransformFunction); + } +} + template Predicate> auto TestPredicateFor(const T &value, Predicate predicate) -> bool { return predicate(value);