diff --git a/.github/workflows/ci-develop.yml b/.github/workflows/ci-develop.yml index edcd143..480f075 100644 --- a/.github/workflows/ci-develop.yml +++ b/.github/workflows/ci-develop.yml @@ -55,6 +55,8 @@ jobs: # Unit-tests only (exclude integration markers) - name: Run unit tests + env: + ENVIRONMENT: test run: poetry run pytest -m "not integration" --disable-warnings diff --git a/.gitignore b/.gitignore index 56d248a..58026c9 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,8 @@ pip-wheel-metadata/ venv/ ENV/ env/ +docker_fs/.env +docker_fs/.env.* # Poetry-specific .cache/pypoetry/ diff --git a/docker_fs/.env.dev b/docker_fs/.env.dev deleted file mode 100644 index 911bc85..0000000 --- a/docker_fs/.env.dev +++ /dev/null @@ -1,11 +0,0 @@ -DB_HOST=db -DB_PORT=5432 -DB_NAME=project_backend_dev -DB_USER=dev_user -DB_PASSWORD=dev_pass -DB_URL=postgresql+asyncpg://dev_user:dev_pass@db:5432/project_backend_dev - -PGADMIN_DEFAULT_EMAIL=admin@example.com -PGADMIN_DEFAULT_PASSWORD=secret - -ENVIRONMENT=development \ No newline at end of file diff --git a/docker_fs/.env.test b/docker_fs/.env.test deleted file mode 100644 index 2ae2f2e..0000000 --- a/docker_fs/.env.test +++ /dev/null @@ -1,12 +0,0 @@ - -ENVIRONMENT=test - - -DB_HOST=db -DB_PORT=5432 -DB_USER=postgres -DB_PASSWORD=testpassword -DB_NAME=test_db - - -DB_URL=postgresql+asyncpg://postgres:testpassword@db:5432/test_db diff --git a/DEV_WORKFLOW_GUIDE.md b/documentation/DEV_WORKFLOW_GUIDE.md similarity index 70% rename from DEV_WORKFLOW_GUIDE.md rename to documentation/DEV_WORKFLOW_GUIDE.md index 3ae74de..c08cea4 100644 --- a/DEV_WORKFLOW_GUIDE.md +++ b/documentation/DEV_WORKFLOW_GUIDE.md @@ -1,6 +1,6 @@ # **Development Workflow & Architecture Guide** -This document outlines the standardized development workflow, repository architecture, and branching strategy for this project. Adhering to these guidelines ensures consistency, maintainability, and a scalable development process. +This document outlines the standardized development workflow, repository architecture, and branching strategy for the backend of the FastSim project. Adhering to these guidelines ensures consistency, maintainability, and a scalable development process. ## 1. Technology Stack @@ -13,13 +13,7 @@ The project is built upon the following core technologies: - **Caching**: Redis - **Containerization**: Docker -## 2. Architectural Overview: A Multi-Repo Strategy - -To promote scalability, team autonomy, and clear separation of concerns, this project adopts a **multi-repo architecture**. Each core component of the system resides in its own dedicated repository. This approach allows for independent development cycles, testing, and deployment. - -Our architecture is composed of three main repositories: - -### 2.1. Backend Service (`project-backend`) +### 2.1. Backend Service (`FastSim-backend`) This repository contains all code related to the FastAPI backend service. Its primary responsibility is to handle business logic, interact with the database, and expose a RESTful API. @@ -61,69 +55,6 @@ project-backend/ * To be testable in isolation. * To produce a versioned Docker image (`backend:`) as its main artifact. -### 2.2. Frontend Service (`project-frontend`) - -This repository contains all code for the React web application. Its responsibility is to provide the user interface and interact with the backend via its API. 
- -**Folder Structure:** -``` -project-frontend/ -├── .github/ -│ └── workflows/ -│ └── main.yml # CI: Tests and publishes the frontend Docker image -├── public/ # Static assets (index.html, favicon, etc.) -├── src/ # Application source code -│ ├── api/ # Functions for backend API calls -│ ├── components/ # Reusable UI components -│ ├── hooks/ # Custom React hooks -│ ├── mocks/ # Service worker mocks for API (for isolated testing) -│ ├── pages/ # Page components -│ └── index.js # React application entrypoint -├── .env.example -├── .gitignore -├── docker-compose.yml # Base local development definition -├── Dockerfile # Multi-stage build for a lean production image -├── package.json -├── package-lock.json -└── README.md # Setup instructions for the frontend service -``` - -**Key Responsibilities:** -* To be testable in isolation (using a mocked API). -* To produce a versioned, production-ready Docker image (`frontend:`), typically served by Nginx. - -### 2.3. Infrastructure & E2E Tests (`project-master`) - -This repository is the "glue" that holds the system together. It does not contain application code but rather the configuration to orchestrate, test, and deploy the entire system. - -**Folder Structure:** -``` -project-master/ -├── .github/ -│ └── workflows/ -│ ├── e2e-tests.yml # CI: Runs End-to-End tests on a complete stack -│ └── deploy.yml # CD: Handles deployment to environments -├── e2e-tests/ # End-to-End test suite (e.g., Cypress, Playwright) -│ ├── cypress/ # Test code -│ └── cypress.config.js -├── environments/ # Environment-specific configurations -│ ├── staging/ -│ │ ├── docker-compose.yml # Docker Compose file for the Staging environment -│ │ └── .env.example -│ └── production/ -│ ├── docker-compose.yml # Docker Compose file for the Production environment -│ └── .env.example -├── scripts/ # Deployment and utility scripts -│ ├── deploy-staging.sh -│ └── deploy-prod.sh -└── README.md # Main project README: explains the overall architecture -``` - -**Key Responsibilities:** -* To define the composition of services for each environment (Staging, Production). -* To run End-to-End tests that validate the integration between services. -* To manage the Continuous Deployment (CD) process. - ## 3. Branching Strategy: Git Flow To manage code development and releases in a structured manner, we use the **Git Flow** branching model. diff --git a/documentation/tests_documentation/integration_tests/test_sampler_helper.md b/documentation/tests_documentation/integration_tests/test_sampler_helper.md new file mode 100644 index 0000000..9084ed8 --- /dev/null +++ b/documentation/tests_documentation/integration_tests/test_sampler_helper.md @@ -0,0 +1,50 @@ +Below is a guided walkthrough of **`tests/unit/simulation/test_sampler_helper.py`**, explaining core ideas and each test’s intent. + +--- + +## File purpose + +This file verifies that your three helper functions— + +* `uniform_variable_generator` +* `poisson_variable_generator` +* `truncated_gaussian_generator` + +—correctly delegate to whatever RNG you pass in, and fall back to NumPy’s default RNG when you don’t provide one. + +--- + +## Key testing patterns + +1. **Dependency injection via `rng`** + Each helper takes an `rng` parameter. In production you’ll pass a `np.random.Generator`; in tests we inject a **`DummyRNG`** with predictable outputs to make our tests **deterministic**. + +2. 
**Duck typing** + Python doesn’t require `rng` to be a specific class—only that it implements the required methods (`random()`, `poisson(mean)`, `normal(mean, sigma)`). Our `DummyRNG` simply implements those three methods. + +3. **`typing.cast` for static typing** + We wrap `DummyRNG` instances in `cast("np.random.Generator", DummyRNG(...))` so mypy sees them as satisfying the generator type, but at runtime they remain our dummy. + +--- + +## Test-by-test breakdown + +| Test name | What it checks | +| -------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| **`test_uniform_variable_generator_with_dummy_rng`** | Passing a `DummyRNG(uniform_value=0.75)`, `rng.random()` returns 0.75 → helper must return exactly 0.75. | +| **`test_uniform_variable_generator_default_rng_range`** | Without supplying `rng`, the helper uses `default_rng()`. We call it 100× to ensure it always returns a `float` in \[0.0, 1.0). | +| **`test_poisson_variable_generator_with_dummy_rng`** | With `DummyRNG(poisson_value=3)`, `rng.poisson(mean)` yields 3 → helper returns 3. | +| **`test_poisson_variable_generator_reproducible`** | Two NumPy generators created with the same seed (`12345`) must produce the same Poisson sample for `mean=10.0`. | +| **`test_truncated_gaussian_generator_truncates_negative`** | `DummyRNG(normal_value=-2.7)` forces a negative draw: helper must clamp it to **0**. | +| **`test_truncated_gaussian_generator_truncates_toward_zero`** | `DummyRNG(normal_value=3.9)` forces a positive draw: helper must cast/round toward zero (int(3.9) → **3**). | +| **`test_truncated_gaussian_generator_default_rng_non_negative_int`** | With a real seeded RNG, helper must produce **some** non-negative `int` (verifies default fallback path is valid). | + +--- + +## Why this matters + +* **Deterministic behavior**: by forcing the RNG’s output via `DummyRNG`, we can assert exactly how our helpers transform that value (clamping, rounding, type conversion). +* **Fallbacks work**: tests with **no** `rng` verify that calling `default_rng()` still gives valid outputs of the correct type and range. +* **Type safety**: using `cast(...)` silences mypy errors while still executing our dummy logic at runtime—ensuring we meet both static‐typing and functional correctness goals. + +With this suite, you have **full confidence** that your sampling helpers behave correctly under both controlled (dummy) and uncontrolled (default) RNG conditions. diff --git a/documentation/tests_documentation/integration_tests/test_simulation_input.md b/documentation/tests_documentation/integration_tests/test_simulation_input.md new file mode 100644 index 0000000..f555571 --- /dev/null +++ b/documentation/tests_documentation/integration_tests/test_simulation_input.md @@ -0,0 +1,93 @@ +Below an explanation for the unit tests in the file `tests/unit/simulation/test_simulation_input.py` + +### 1. `test_normal_sets_variance_to_mean` + +**Purpose:** +Checks that when you create an `RVConfig` with `distribution="normal"` and omit the `variance` field, the model automatically sets `variance = mean`. + +* Verifies the “default variance” logic in the post‐init validator. + +--- + +### 2. `test_poisson_keeps_variance_none` + +**Purpose:** +Ensures that if you choose the Poisson distribution (`distribution="poisson"`) and do **not** supply a variance, the model **does not** fill in any default variance (keeps it `None`). 
+ +* Confirms that defaulting only applies to “normal”/“gaussian,” not to Poisson. + +--- + +### 3. `test_explicit_variance_is_preserved` + +**Purpose:** +Validates that if you explicitly pass a `variance` value—even for a distribution that would normally default—it remains exactly what you provided, and is coerced to float. + +* Guards against accidental overwriting of user‐supplied variance. + +--- + +### 4. `test_mean_must_be_numeric` + +**Purpose:** +Verifies that giving a non‐numeric `mean` (e.g. a string) raises a `ValidationError` with our custom message `"mean must be a number"`. + +* Tests the “before” validator on the `mean` field for type checking and coercion. + +--- + +### 5. `test_missing_mean_field` + +**Purpose:** +Ensures that completely omitting the `mean` key triggers a standard “field required” error. + +* Confirms that `mean` is mandatory in the schema. + +--- + +### 6. `test_gaussian_sets_variance_to_mean` + +**Purpose:** +Exactly like the “normal” test above, but for `distribution="gaussian"`. + +* Demonstrates that “gaussian” is treated as an alias for “normal” in the default‐variance logic. + +--- + +### 7. `test_default_distribution_is_poisson` + +**Purpose:** +Checks two things simultaneously: + +1. When you omit `distribution`, it defaults to `"poisson"`. +2. In that default‐poisson case, `variance` remains `None`. + +* Validates both the default distribution and its variance behavior in one test. + +--- + +### 8. `test_explicit_variance_kept_for_poisson` + +**Purpose:** +Confirms that even if you supply a `variance` when `distribution="poisson"`, the model preserves it rather than discarding it or forcing it back to `None`. + +* Provides symmetry to the “explicit variance” test for non‐Poisson cases. + +--- + +### 9. `test_invalid_distribution_raises` + +**Purpose:** +Ensures that passing a value for `distribution` outside of the allowed literals (`"poisson"`, `"normal"`, `"gaussian"`) results in a `ValidationError`. + +* Confirms that the `Literal[...]` constraint on `distribution` is enforced. + +--- + +With these nine tests you fully cover: + +1. **Defaulting behavior** for both “normal” and “gaussian.” +2. **No‐op behavior** for Poisson defaults. +3. **Preservation** of explicit user input. +4. **Type‐checking** on required fields. +5. **Literal‐constraint** enforcement. 
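+
+---
+
+As a quick reference, here is a minimal sketch (using the `RVConfig` model from
+`src/app/schemas/simulation_input.py`) of the behaviors the nine tests above pin down:
+
+```python
+from app.schemas.simulation_input import RVConfig
+
+# 1-2. Defaulting: "normal"/"gaussian" copy the mean into variance; Poisson does not.
+assert RVConfig(mean=10, distribution="normal").variance == 10.0
+assert RVConfig(mean=5, distribution="poisson").variance is None
+
+# 3. An explicitly supplied variance is preserved.
+assert RVConfig(mean=8, distribution="normal", variance=4).variance == 4.0
+
+# 7. Omitting distribution falls back to "poisson", with variance left as None.
+assert RVConfig(mean=3.3).distribution == "poisson"
+```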
diff --git a/poetry.lock b/poetry.lock index de1465a..b959392 100644 --- a/poetry.lock +++ b/poetry.lock @@ -661,6 +661,66 @@ files = [ {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, ] +[[package]] +name = "numpy" +version = "2.3.1" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.11" +files = [ + {file = "numpy-2.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6ea9e48336a402551f52cd8f593343699003d2353daa4b72ce8d34f66b722070"}, + {file = "numpy-2.3.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5ccb7336eaf0e77c1635b232c141846493a588ec9ea777a7c24d7166bb8533ae"}, + {file = "numpy-2.3.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:0bb3a4a61e1d327e035275d2a993c96fa786e4913aa089843e6a2d9dd205c66a"}, + {file = "numpy-2.3.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:e344eb79dab01f1e838ebb67aab09965fb271d6da6b00adda26328ac27d4a66e"}, + {file = "numpy-2.3.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:467db865b392168ceb1ef1ffa6f5a86e62468c43e0cfb4ab6da667ede10e58db"}, + {file = "numpy-2.3.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:afed2ce4a84f6b0fc6c1ce734ff368cbf5a5e24e8954a338f3bdffa0718adffb"}, + {file = "numpy-2.3.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0025048b3c1557a20bc80d06fdeb8cc7fc193721484cca82b2cfa072fec71a93"}, + {file = "numpy-2.3.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a5ee121b60aa509679b682819c602579e1df14a5b07fe95671c8849aad8f2115"}, + {file = "numpy-2.3.1-cp311-cp311-win32.whl", hash = "sha256:a8b740f5579ae4585831b3cf0e3b0425c667274f82a484866d2adf9570539369"}, + {file = "numpy-2.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:d4580adadc53311b163444f877e0789f1c8861e2698f6b2a4ca852fda154f3ff"}, + {file = "numpy-2.3.1-cp311-cp311-win_arm64.whl", hash = "sha256:ec0bdafa906f95adc9a0c6f26a4871fa753f25caaa0e032578a30457bff0af6a"}, + {file = "numpy-2.3.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2959d8f268f3d8ee402b04a9ec4bb7604555aeacf78b360dc4ec27f1d508177d"}, + {file = "numpy-2.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:762e0c0c6b56bdedfef9a8e1d4538556438288c4276901ea008ae44091954e29"}, + {file = "numpy-2.3.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:867ef172a0976aaa1f1d1b63cf2090de8b636a7674607d514505fb7276ab08fc"}, + {file = "numpy-2.3.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:4e602e1b8682c2b833af89ba641ad4176053aaa50f5cacda1a27004352dde943"}, + {file = "numpy-2.3.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:8e333040d069eba1652fb08962ec5b76af7f2c7bce1df7e1418c8055cf776f25"}, + {file = "numpy-2.3.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:e7cbf5a5eafd8d230a3ce356d892512185230e4781a361229bd902ff403bc660"}, + {file = "numpy-2.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5f1b8f26d1086835f442286c1d9b64bb3974b0b1e41bb105358fd07d20872952"}, + {file = "numpy-2.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ee8340cb48c9b7a5899d1149eece41ca535513a9698098edbade2a8e7a84da77"}, + {file = "numpy-2.3.1-cp312-cp312-win32.whl", hash = "sha256:e772dda20a6002ef7061713dc1e2585bc1b534e7909b2030b5a46dae8ff077ab"}, + {file = "numpy-2.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:cfecc7822543abdea6de08758091da655ea2210b8ffa1faf116b940693d3df76"}, + {file = "numpy-2.3.1-cp312-cp312-win_arm64.whl", hash = "sha256:7be91b2239af2658653c5bb6f1b8bccafaf08226a258caf78ce44710a0160d30"}, + {file = 
"numpy-2.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:25a1992b0a3fdcdaec9f552ef10d8103186f5397ab45e2d25f8ac51b1a6b97e8"}, + {file = "numpy-2.3.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7dea630156d39b02a63c18f508f85010230409db5b2927ba59c8ba4ab3e8272e"}, + {file = "numpy-2.3.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:bada6058dd886061f10ea15f230ccf7dfff40572e99fef440a4a857c8728c9c0"}, + {file = "numpy-2.3.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:a894f3816eb17b29e4783e5873f92faf55b710c2519e5c351767c51f79d8526d"}, + {file = "numpy-2.3.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:18703df6c4a4fee55fd3d6e5a253d01c5d33a295409b03fda0c86b3ca2ff41a1"}, + {file = "numpy-2.3.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:5902660491bd7a48b2ec16c23ccb9124b8abfd9583c5fdfa123fe6b421e03de1"}, + {file = "numpy-2.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:36890eb9e9d2081137bd78d29050ba63b8dab95dff7912eadf1185e80074b2a0"}, + {file = "numpy-2.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a780033466159c2270531e2b8ac063704592a0bc62ec4a1b991c7c40705eb0e8"}, + {file = "numpy-2.3.1-cp313-cp313-win32.whl", hash = "sha256:39bff12c076812595c3a306f22bfe49919c5513aa1e0e70fac756a0be7c2a2b8"}, + {file = "numpy-2.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:8d5ee6eec45f08ce507a6570e06f2f879b374a552087a4179ea7838edbcbfa42"}, + {file = "numpy-2.3.1-cp313-cp313-win_arm64.whl", hash = "sha256:0c4d9e0a8368db90f93bd192bfa771ace63137c3488d198ee21dfb8e7771916e"}, + {file = "numpy-2.3.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:b0b5397374f32ec0649dd98c652a1798192042e715df918c20672c62fb52d4b8"}, + {file = "numpy-2.3.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c5bdf2015ccfcee8253fb8be695516ac4457c743473a43290fd36eba6a1777eb"}, + {file = "numpy-2.3.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d70f20df7f08b90a2062c1f07737dd340adccf2068d0f1b9b3d56e2038979fee"}, + {file = "numpy-2.3.1-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:2fb86b7e58f9ac50e1e9dd1290154107e47d1eef23a0ae9145ded06ea606f992"}, + {file = "numpy-2.3.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:23ab05b2d241f76cb883ce8b9a93a680752fbfcbd51c50eff0b88b979e471d8c"}, + {file = "numpy-2.3.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:ce2ce9e5de4703a673e705183f64fd5da5bf36e7beddcb63a25ee2286e71ca48"}, + {file = "numpy-2.3.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c4913079974eeb5c16ccfd2b1f09354b8fed7e0d6f2cab933104a09a6419b1ee"}, + {file = "numpy-2.3.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:010ce9b4f00d5c036053ca684c77441f2f2c934fd23bee058b4d6f196efd8280"}, + {file = "numpy-2.3.1-cp313-cp313t-win32.whl", hash = "sha256:6269b9edfe32912584ec496d91b00b6d34282ca1d07eb10e82dfc780907d6c2e"}, + {file = "numpy-2.3.1-cp313-cp313t-win_amd64.whl", hash = "sha256:2a809637460e88a113e186e87f228d74ae2852a2e0c44de275263376f17b5bdc"}, + {file = "numpy-2.3.1-cp313-cp313t-win_arm64.whl", hash = "sha256:eccb9a159db9aed60800187bc47a6d3451553f0e1b08b068d8b277ddfbb9b244"}, + {file = "numpy-2.3.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ad506d4b09e684394c42c966ec1527f6ebc25da7f4da4b1b056606ffe446b8a3"}, + {file = "numpy-2.3.1-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:ebb8603d45bc86bbd5edb0d63e52c5fd9e7945d3a503b77e486bd88dde67a19b"}, + {file = "numpy-2.3.1-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = 
"sha256:15aa4c392ac396e2ad3d0a2680c0f0dee420f9fed14eef09bdb9450ee6dcb7b7"}, + {file = "numpy-2.3.1-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c6e0bf9d1a2f50d2b65a7cf56db37c095af17b59f6c132396f7c6d5dd76484df"}, + {file = "numpy-2.3.1-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:eabd7e8740d494ce2b4ea0ff05afa1b7b291e978c0ae075487c51e8bd93c0c68"}, + {file = "numpy-2.3.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:e610832418a2bc09d974cc9fecebfa51e9532d6190223bc5ef6a7402ebf3b5cb"}, + {file = "numpy-2.3.1.tar.gz", hash = "sha256:1ec9ae20a4226da374362cca3c62cd753faf2f951440b0e3b98e93c235441d2b"}, +] + [[package]] name = "packaging" version = "25.0" @@ -1127,6 +1187,17 @@ files = [ {file = "ruff-0.12.1.tar.gz", hash = "sha256:806bbc17f1104fd57451a98a58df35388ee3ab422e029e8f5cf30aa4af2c138c"}, ] +[[package]] +name = "simpy" +version = "4.1.1" +description = "Event discrete, process based simulation for Python." +optional = false +python-versions = ">=3.8" +files = [ + {file = "simpy-4.1.1-py3-none-any.whl", hash = "sha256:7c5ae380240fd2238671160e4830956f8055830a8317edf5c05e495b3823cd88"}, + {file = "simpy-4.1.1.tar.gz", hash = "sha256:06d0750a7884b11e0e8e20ce0bc7c6d4ed5f1743d456695340d13fdff95001a6"}, +] + [[package]] name = "sniffio" version = "1.3.1" @@ -1589,4 +1660,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = "^3.12" -content-hash = "387c837941fad4e69a6a11d22f94c0b0ac3f5d2688fe196b31a9beb3a00b3dbe" +content-hash = "71f5d3bb002c33ab3bad9d5e9efadc144b033c272b4319c2686728026dd5d849" diff --git a/pyproject.toml b/pyproject.toml index 2d76c41..f5fc34a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [tool.poetry] -name = "project-backend" +name = "FastSim-backend" version = "0.1.0" -description = "solo professional project" +description = "Simulate fastapi event loop to manage resources" authors = ["Gioele Botta"] readme = "README.md" @@ -20,6 +20,8 @@ pydantic-settings = "^2.10.1" pydantic = {extras = ["email"], version = "^2.11.7"} asyncpg = "^0.30.0" sqlalchemy-utils = "^0.41.2" +numpy = "^2.3.1" +simpy = "^4.1.1" [tool.poetry.group.dev.dependencies] pytest = "^8.4.1" diff --git a/scripts/quality-check.sh b/scripts/quality-check.sh new file mode 100644 index 0000000..eb2a66f --- /dev/null +++ b/scripts/quality-check.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Lint & format with ruff, automatic corrections applied (--fix) +poetry run ruff check src tests --fix + +# Type‐check with mypy +poetry run mypy src tests + +echo "✅ Linting and type‐checking completed SUCCESSFULLY" \ No newline at end of file diff --git a/src/app/api/simulation.py b/src/app/api/simulation.py new file mode 100644 index 0000000..55081c6 --- /dev/null +++ b/src/app/api/simulation.py @@ -0,0 +1,18 @@ +""""Api to simulate the process""" + +import numpy as np +from fastapi import APIRouter + +from app.core.simulation.simulation_run import run_simulation +from app.schemas.simulation_input import SimulationInput +from app.schemas.simulation_output import SimulationOutput + +router = APIRouter() + +@router.post("/simulation") +async def event_loop_simulation(input_data: SimulationInput) -> SimulationOutput: + """Run the simulation and return aggregate KPIs.""" + rng = np.random.default_rng() + return run_simulation(input_data, rng=rng) + + diff --git a/src/app/config/constants.py b/src/app/config/constants.py index 14ed238..fb0c231 100644 --- a/src/app/config/constants.py +++ b/src/app/config/constants.py @@ -1,4 +1,11 @@ 
"""Application constants and configuration values.""" +from enum import IntEnum +class TimeDefaults(IntEnum): + """Default time-related constants (all in seconds).""" + + MIN_TO_SEC = 60 # 1 minute → 60 s + SAMPLING_WINDOW = 60 # keep U(t) constant for 60 s + SIMULATION_HORIZON = 3_600 # run 1 h if user gives no other value diff --git a/src/app/core/auth_helpers.py b/src/app/core/auth_helpers.py deleted file mode 100644 index 4a3437a..0000000 --- a/src/app/core/auth_helpers.py +++ /dev/null @@ -1,62 +0,0 @@ -"""helper functions for the auth process""" - -import string -from typing import cast - -from passlib.context import CryptContext - -MAX_PASSWORD_LENGTH = 128 # prevent DoS -MIN_PASSWORD_LENGTH = 8 -SPECIAL_CHARS = set(string.punctuation) - -pwd_context = CryptContext( - schemes=["argon2"], - deprecated="auto", - -) - -def is_password_safe(password: str) -> bool: - """Function to verify if a password is safe""" - has_upper = False - has_lower = False - has_digit = False - has_special = False - - if len(password) < MIN_PASSWORD_LENGTH or len(password) > MAX_PASSWORD_LENGTH: - return False - - for char in password: - if char.isupper(): - has_upper = True - elif char.islower(): - has_lower = True - elif char.isdigit(): - has_digit = True - elif char in SPECIAL_CHARS: - has_special = True - - if has_upper and has_lower and has_digit and has_special: - return True - - return False - - -def verify_passwords_equality(password: str, confirmed_password: str) -> bool: - """Verify if the passwords given in the registration form are equivalent.""" - return password == confirmed_password - - -def hash_password(password: str) -> str: - """ - Receive the password - give back the hashed password - """ - return cast("str", pwd_context.hash(password)) - - -def verify_hashed_pwd_plain_pwd_equality(plain_pwd: str, hashed_pwd: str) -> bool: - """ - Verify during the login if the plain password - correspond to the hashed password - """ - return cast("bool", pwd_context.verify(plain_pwd, hashed_pwd)) diff --git a/src/app/core/event_samplers/common_helpers.py b/src/app/core/event_samplers/common_helpers.py new file mode 100644 index 0000000..0f80870 --- /dev/null +++ b/src/app/core/event_samplers/common_helpers.py @@ -0,0 +1,33 @@ +"""Helpers function for the request generator""" + + +import numpy as np + + +def uniform_variable_generator(rng: np.random.Generator | None = None) -> float: + """Return U~Uniform(0, 1).""" + rng = rng or np.random.default_rng() + return float(rng.random()) + + +def poisson_variable_generator( + mean: float, + rng: np.random.Generator | None = None, +) -> int: + """Return a Poisson-distributed integer with expectation *mean*.""" + rng = rng or np.random.default_rng() + return int(rng.poisson(mean)) + + +def truncated_gaussian_generator( + mean: float, + variance: float, + rng: np.random.Generator, +) -> int: + """ + Generate a Normal-distributed variable + with mean and variance + """ + rng = rng or np.random.default_rng() + value = rng.normal(mean, variance) + return max(0, int(value)) diff --git a/src/app/core/event_samplers/gaussian_poisson.py b/src/app/core/event_samplers/gaussian_poisson.py new file mode 100644 index 0000000..2239626 --- /dev/null +++ b/src/app/core/event_samplers/gaussian_poisson.py @@ -0,0 +1,98 @@ +""" +event sampler in the case of gaussian distribution +for concurrent user and poisson distribution for rqs per minute per user. 
+The rationale behind this choice is about considering scenario +with variance bigger or smaller w.r.t the one inherited from +the Poisson distribution +""" + +import math +from collections.abc import Generator + +import numpy as np + +from app.config.constants import TimeDefaults +from app.core.event_samplers.common_helpers import ( + truncated_gaussian_generator, + uniform_variable_generator, +) +from app.schemas.simulation_input import SimulationInput + + +def gaussian_poisson_sampling( + input_data: SimulationInput, + *, + sampling_window_s: int = TimeDefaults.SAMPLING_WINDOW.value, + rng: np.random.Generator | None = None, +) -> Generator[float, None, None]: + """ + Yield inter-arrival gaps (seconds) for the compound Gaussian-Poisson process. + + Algorithm + --------- + 1. Every *sampling_window_s* seconds, draw + U ~ Gaussian(mean_concurrent_user, variance). + 2. Compute the aggregate rate + Λ = U * (mean_req_per_minute_per_user / 60) [req/s]. + 3. While inside the current window, draw gaps + Δt ~ Exponential(Λ) using inverse-CDF. + 4. Stop once the virtual clock exceeds *simulation_time*. + """ + rng = rng or np.random.default_rng() + + simulation_time = input_data.total_simulation_time + # pydantic in the validation assign a value and mypy is not + # complaining because a None cannot be compared in the loop + # to a float + assert simulation_time is not None + + # λ_u : mean concurrent users per window + mean_concurrent_user = float(input_data.avg_active_users.mean) + + # Let's be sure that the variance is not None (guaranteed from pydantic) + variance_concurrent_user = input_data.avg_active_users.variance + assert variance_concurrent_user is not None + variance_concurrent_user = float(variance_concurrent_user) + + # λ_r / 60 : mean req/s per user + mean_req_per_sec_per_user = ( + float( + input_data.avg_request_per_minute_per_user.mean) + / TimeDefaults.MIN_TO_SEC.value + ) + + now = 0.0 # virtual clock (s) + window_end = 0.0 # end of the current user window + lam = 0.0 # aggregate rate Λ (req/s) + + while now < simulation_time: + # (Re)sample U at the start of each window + if now >= window_end: + window_end = now + float(sampling_window_s) + users = truncated_gaussian_generator( + mean_concurrent_user, + variance_concurrent_user, + rng, + ) + lam = users * mean_req_per_sec_per_user + + # No users → fast-forward to next window + if lam <= 0.0: + now = window_end + continue + + # Exponential gap from a protected uniform value + u_raw = max(uniform_variable_generator(rng), 1e-15) + delta_t = -math.log(1.0 - u_raw) / lam + + # End simulation if the next event exceeds the horizon + if now + delta_t > simulation_time: + break + + # If the gap crosses the window boundary, jump to it + if now + delta_t >= window_end: + now = window_end + continue + + now += delta_t + yield delta_t diff --git a/src/app/core/event_samplers/poisson_poisson.py b/src/app/core/event_samplers/poisson_poisson.py new file mode 100644 index 0000000..37c2da5 --- /dev/null +++ b/src/app/core/event_samplers/poisson_poisson.py @@ -0,0 +1,86 @@ +""" +event sampler in the case of poisson distribution +both for concurrent user and rqs per minute per user +""" + +import math +from collections.abc import Generator + +import numpy as np + +from app.config.constants import TimeDefaults +from app.core.event_samplers.common_helpers import ( + poisson_variable_generator, + uniform_variable_generator, +) +from app.schemas.simulation_input import SimulationInput + + +def poisson_poisson_sampling( + input_data: SimulationInput, + 
*, + sampling_window_s: int = TimeDefaults.SAMPLING_WINDOW.value, + rng: np.random.Generator | None = None, +) -> Generator[float, None, None]: + """ + Yield inter-arrival gaps (seconds) for the compound Poisson-Poisson process. + + Algorithm + --------- + 1. Every *sampling_window_s* seconds, draw + U ~ Poisson(mean_concurrent_user). + 2. Compute the aggregate rate + Λ = U * (mean_req_per_minute_per_user / 60) [req/s]. + 3. While inside the current window, draw gaps + Δt ~ Exponential(Λ) using inverse-CDF. + 4. Stop once the virtual clock exceeds *simulation_time*. + """ + rng = rng or np.random.default_rng() + + simulation_time = input_data.total_simulation_time + # pydantic in the validation assign a value and mypy is not + # complaining because a None cannot be compared in the loop + # to a float + assert simulation_time is not None + + # λ_u : mean concurrent users per window + mean_concurrent_user = float(input_data.avg_active_users.mean) + + # λ_r / 60 : mean req/s per user + mean_req_per_sec_per_user = ( + float( + input_data.avg_request_per_minute_per_user.mean) + / TimeDefaults.MIN_TO_SEC.value + ) + + now = 0.0 # virtual clock (s) + window_end = 0.0 # end of the current user window + lam = 0.0 # aggregate rate Λ (req/s) + + while now < simulation_time: + # (Re)sample U at the start of each window + if now >= window_end: + window_end = now + float(sampling_window_s) + users = poisson_variable_generator(mean_concurrent_user, rng) + lam = users * mean_req_per_sec_per_user + + # No users → fast-forward to next window + if lam <= 0.0: + now = window_end + continue + + # Exponential gap from a protected uniform value + u_raw = max(uniform_variable_generator(rng), 1e-15) + delta_t = -math.log(1.0 - u_raw) / lam + + # End simulation if the next event exceeds the horizon + if now + delta_t > simulation_time: + break + + # If the gap crosses the window boundary, jump to it + if now + delta_t >= window_end: + now = window_end + continue + + now += delta_t + yield delta_t diff --git a/src/app/core/simulation/requests_generator.py b/src/app/core/simulation/requests_generator.py new file mode 100644 index 0000000..0da1c1c --- /dev/null +++ b/src/app/core/simulation/requests_generator.py @@ -0,0 +1,51 @@ +""" +Continuous-time event sampling for the Poisson-Poisson +and Gaussian-Poisson workload model. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from app.core.event_samplers.gaussian_poisson import gaussian_poisson_sampling +from app.core.event_samplers.poisson_poisson import poisson_poisson_sampling + +if TYPE_CHECKING: + from collections.abc import Generator + + import numpy as np + + from app.schemas.simulation_input import SimulationInput + + +def requests_generator( + input_data: SimulationInput, + *, + rng: np.random.Generator | None = None, +) -> Generator[float, None, None]: + """ + Return an iterator of inter-arrival gaps (seconds) according to the model + chosen in *input_data*. + + Notes + ----- + * If ``avg_active_users.distribution`` is ``"gaussian"`` or ``"normal"``, + the Gaussian-Poisson sampler is used. + * Otherwise the default Poisson-Poisson sampler is returned. 
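+
+    Example
+    -------
+    A minimal illustrative call (``cfg`` stands for any valid
+    ``SimulationInput``; shown only as a usage sketch)::
+
+        gen = requests_generator(cfg, rng=np.random.default_rng(0))
+        first_gap = next(gen)  # seconds until the first simulated request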
+ + """ + dist = input_data.avg_active_users.distribution.lower() + + if dist in {"gaussian", "normal"}: + #Gaussian-Poisson model + return gaussian_poisson_sampling( + input_data=input_data, + rng=rng, + + ) + + # Poisson + Poisson + return poisson_poisson_sampling( + input_data=input_data, + rng=rng, + ) diff --git a/src/app/core/simulation/simulation_run.py b/src/app/core/simulation/simulation_run.py new file mode 100644 index 0000000..b2f72f3 --- /dev/null +++ b/src/app/core/simulation/simulation_run.py @@ -0,0 +1,56 @@ +"""simulation of the server""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import simpy + +from app.core.simulation.requests_generator import requests_generator +from app.schemas.simulation_output import SimulationOutput + +if TYPE_CHECKING: + from collections.abc import Generator + + import numpy as np + + from app.schemas.simulation_input import SimulationInput + + + + +def run_simulation( + data: SimulationInput, + *, + rng: np.random.Generator, +) -> SimulationOutput: + """Simulation executor in Simpy""" + gaps: Generator[float, None, None] = requests_generator(data, rng=rng) + env = simpy.Environment() + + simulation_time = data.total_simulation_time + # pydantic in the validation assign a value and mypy is not + # complaining because a None cannot be compared in the loop + # to a float + assert simulation_time is not None + + total_request_per_time_period = { + "simulation_time": simulation_time, + "total_requests": 0, + } + + def arrival_process( + env: simpy.Environment, + ) -> Generator[simpy.events.Event, None, None]: + for gap in gaps: + yield env.timeout(gap) + total_request_per_time_period["total_requests"] += 1 + + env.process(arrival_process(env)) + env.run(until=simulation_time) + + return SimulationOutput( + total_requests=total_request_per_time_period, + metric_2=str(data.avg_request_per_minute_per_user.mean), + metric_n=str(data.avg_active_users.mean), + ) diff --git a/src/app/main.py b/src/app/main.py index 4a78144..b5ef876 100644 --- a/src/app/main.py +++ b/src/app/main.py @@ -5,7 +5,6 @@ from fastapi import FastAPI - from app.api.health_check import router as health_router from app.config.settings import settings from app.db.init_db import close_engine, init_models diff --git a/src/app/schemas/simulation_input.py b/src/app/schemas/simulation_input.py new file mode 100644 index 0000000..2197be1 --- /dev/null +++ b/src/app/schemas/simulation_input.py @@ -0,0 +1,58 @@ +"""Define the schemas for the simulator""" + +from typing import Literal + +from pydantic import BaseModel, field_validator, model_validator + +from app.config.constants import TimeDefaults + + +class RVConfig(BaseModel): + """class to configure random variables""" + + mean: float + distribution: Literal["poisson", "normal", "gaussian"] = "poisson" + variance: float | None = None + + @field_validator("mean", mode="before") + def check_mean_is_number( + cls, # noqa: N805 + v: object, + ) -> float: + """Ensure `mean` is numeric, then coerce to float.""" + err_msg = "mean must be a number (int or float)" + if not isinstance(v, (float, int)): + raise ValueError(err_msg) # noqa: TRY004 + return float(v) + + @model_validator(mode="after") # type: ignore[arg-type] + def default_variance(cls, model: "RVConfig") -> "RVConfig": # noqa: N805 + """Set variance = mean when distribution == 'normal' and variance is missing.""" + if model.variance is None and model.distribution in {"normal", "gaussian"}: + model.variance = model.mean + return model + +class 
SimulationInput(BaseModel): + """Define the expected variables for the simulation""" + + avg_active_users: RVConfig + avg_request_per_minute_per_user: RVConfig + total_simulation_time: int | None = None + + @field_validator("total_simulation_time", mode="before") + def check_simulation_time(cls, v: object) -> int: # noqa: N805 + """ + Assign constant value to total sim time if is None + check if it is of the right type + impose a lower boundary for the simulation + """ + if v is None: + v = TimeDefaults.SIMULATION_HORIZON.value + if not isinstance(v, int): + err_msg_type = "the simulation time must be an integer" + raise ValueError(err_msg_type) # noqa: TRY004 + if v <= 60: + err_msg_val = "the simulation must be at least 60 seconds" + raise ValueError(err_msg_val) + return v + diff --git a/src/app/schemas/simulation_output.py b/src/app/schemas/simulation_output.py new file mode 100644 index 0000000..b02c172 --- /dev/null +++ b/src/app/schemas/simulation_output.py @@ -0,0 +1,12 @@ +"""Define the output of the simulation""" + +from pydantic import BaseModel + + +class SimulationOutput(BaseModel): + """Define the output of the simulation""" + + total_requests: dict[str, int | float] + metric_2: str + #...... + metric_n: str diff --git a/tests/integration/db_initialization/test_init_models.py b/tests/integration/db_initialization/test_init_models.py index 05721eb..204f478 100644 --- a/tests/integration/db_initialization/test_init_models.py +++ b/tests/integration/db_initialization/test_init_models.py @@ -14,4 +14,4 @@ async def test_users_table_exists_after_migrations() -> None: await conn.execute(text("SELECT 1")) except SQLAlchemyError: pytest.fail("Database connection or Alembic setup failed.") - + diff --git a/tests/unit/sampler/test_sampler_helper.py b/tests/unit/sampler/test_sampler_helper.py new file mode 100644 index 0000000..0222196 --- /dev/null +++ b/tests/unit/sampler/test_sampler_helper.py @@ -0,0 +1,130 @@ +from typing import cast + +import numpy as np + +from app.core.event_samplers.common_helpers import ( + poisson_variable_generator, + truncated_gaussian_generator, + uniform_variable_generator, +) + + +class DummyRNG: + """Dummy RNG for testing: returns fixed values for random(), poisson(), normal().""" + + def __init__( + self, + uniform_value: float | None = None, + poisson_value: int | None = None, + normal_value: float | None = None, + ) -> None: + """ + Initialize the dummy RNG with optional preset outputs. + + Args: + uniform_value: value to return from random(), if not None. + poisson_value: value to return from poisson(), if not None. + normal_value: value to return from normal(), if not None. + + """ + self.uniform_value = uniform_value + self.poisson_value = poisson_value + self.normal_value = normal_value + + def random(self) -> float: + """ + Return the preset uniform_value or fall back to a real RNG. + + Returns: + A float in [0.0, 1.0). + + """ + if self.uniform_value is not None: + return self.uniform_value + return np.random.default_rng().random() + + def poisson(self, mean: float) -> int: + """ + Return the preset poisson_value or fall back to a real RNG. + + Args: + mean: the λ parameter for a Poisson draw (ignored if poisson_value is set). + + Returns: + An integer sample from a Poisson distribution. + + """ + if self.poisson_value is not None: + return self.poisson_value + return int(np.random.default_rng().poisson(mean)) + + def normal(self, mean: float, sigma: float) -> float: + """ + Return the preset normal_value or fall back to a real RNG. 
+ + Args: + mean: the mean of the Normal distribution. + sigma: the standard deviation of the Normal distribution. + + Returns: + A float sample from a Normal distribution. + + """ + if self.normal_value is not None: + return self.normal_value + return float(np.random.default_rng().normal(mean, sigma)) + + +def test_uniform_variable_generator_with_dummy_rng() -> None: + """Ensure uniform_variable_generator returns the dummy RNGs uniform_value.""" + dummy = cast("np.random.Generator", DummyRNG(uniform_value=0.75)) + assert uniform_variable_generator(dummy) == 0.75 + + +def test_uniform_variable_generator_default_rng_range() -> None: + """Ensure the default RNG produces a float in [0.0, 1.0).""" + for _ in range(100): + val = uniform_variable_generator() + assert isinstance(val, float) + assert 0.0 <= val < 1.0 + + +def test_poisson_variable_generator_with_dummy_rng() -> None: + """Ensure poisson_variable_generator returns the dummy RNGs poisson_value.""" + dummy = cast("np.random.Generator", DummyRNG(poisson_value=3)) + assert poisson_variable_generator(mean=5.0, rng=dummy) == 3 + + +def test_poisson_variable_generator_reproducible() -> None: + """Ensure two generators with the same seed produce the same Poisson sample.""" + rng1 = np.random.default_rng(12345) + rng2 = np.random.default_rng(12345) + v1 = poisson_variable_generator(mean=10.0, rng=rng1) + v2 = poisson_variable_generator(mean=10.0, rng=rng2) + assert v1 == v2 + + +def test_truncated_gaussian_generator_truncates_negative() -> None: + """Ensure truncated_gaussian_generator clamps negative draws to zero.""" + dummy = cast("np.random.Generator", DummyRNG(normal_value=-2.7)) + result = truncated_gaussian_generator(mean=10.0, variance=5.0, rng=dummy) + assert result == 0 + + +def test_truncated_gaussian_generator_truncates_toward_zero() -> None: + """Ensure truncated_gaussian_generator rounds toward zero for positive draws.""" + dummy = cast("np.random.Generator", DummyRNG(normal_value=3.9)) + result = truncated_gaussian_generator(mean=10.0, variance=5.0, rng=dummy) + assert isinstance(result, int) + assert result == 3 + + +def test_truncated_gaussian_generator_default_rng_non_negative_int() -> None: + """ + Ensure the default RNG produces + a non-negative integer from the truncated Gaussian. 
+ """ + rng = np.random.default_rng(321) + val = truncated_gaussian_generator(mean=10.0, variance=2.0, rng=rng) + assert isinstance(val, int) + assert val >= 0 diff --git a/tests/unit/simulation/test_requests_generator.py b/tests/unit/simulation/test_requests_generator.py new file mode 100644 index 0000000..e7fad85 --- /dev/null +++ b/tests/unit/simulation/test_requests_generator.py @@ -0,0 +1,198 @@ +"""Unit test to verify the behaviour of the rqs generator""" + +from __future__ import annotations + +from types import GeneratorType +from typing import TYPE_CHECKING + +import numpy as np +import pytest + +from app.core.simulation.requests_generator import requests_generator +from app.core.simulation.simulation_run import run_simulation +from app.schemas.simulation_input import SimulationInput + +if TYPE_CHECKING: + + from collections.abc import Iterator + + from app.schemas.simulation_output import SimulationOutput + +# -------------------------------------------------------------- +# TESTS INPUT +# -------------------------------------------------------------- + +@pytest.fixture +def base_input() -> SimulationInput: + """Return a SimulationInput with a 120-second simulation horizon.""" + return SimulationInput( + avg_active_users={"mean": 1.0}, + avg_request_per_minute_per_user={"mean": 2.0}, + total_simulation_time=120, + ) + +# -------------------------------------------------------------- +# REQUESTS GENERATOR FUNCTION TESTS +# -------------------------------------------------------------- + +def test_default_requests_generator_uses_poisson_poisson_sampling( + base_input: SimulationInput, +) -> None: + """ + Verify that when avg_active_users.distribution is the default 'poisson', + requests_generator returns an iterator whose code object is from + poisson_poisson_sampling. + """ + rng = np.random.default_rng(0) + gen = requests_generator(base_input, rng=rng) + # It must be a generator. + assert isinstance(gen, GeneratorType) + + # Internally, it should call poisson_poisson_sampling. + assert gen.gi_code.co_name == "poisson_poisson_sampling" + +@pytest.mark.parametrize( + ("dist", "expected_sampler"), + [ + ("poisson", "poisson_poisson_sampling"), + ("normal", "gaussian_poisson_sampling"), + ("gaussian", "gaussian_poisson_sampling"), + ], +) +def test_requests_generator_dispatches_to_correct_sampler( + dist: str, + expected_sampler: str, +) -> None: + """ + Verify that requests_generator returns a generator whose code object + comes from the appropriate sampler function based on distribution: + - 'poisson' → poisson_poisson_sampling + - 'normal' → gaussian_poisson_sampling + - 'gaussian'→ gaussian_poisson_sampling + """ + input_data = SimulationInput( + avg_active_users={"mean": 1.0, "distribution": dist}, + avg_request_per_minute_per_user={"mean": 1.0}, + total_simulation_time=120, # Must be > 60 to pass schema validation + ) + rng = np.random.default_rng(0) + gen = requests_generator(input_data, rng=rng) + + # It must be a generator object. + assert isinstance(gen, GeneratorType) + # Check which underlying sampler function produced it. 
+ assert gen.gi_code.co_name == expected_sampler + +# -------------------------------------------------------------- +# REQUESTS GENERATOR INSIDE SIMULATION TESTS +# -------------------------------------------------------------- + +def test_run_simulation_counts_events_up_to_horizon( + monkeypatch: pytest.MonkeyPatch, base_input: SimulationInput, +) -> None: + """ + Verify that all events whose cumulative inter-arrival times + fall within the simulation horizon are counted. + For gaps [1, 2, 3, 4], cumulative times [1, 3, 6, 10] + yield 4 events by t=10. + """ + def fake_requests_generator_fixed( + data: SimulationInput, *, rng: np.random.Generator, + ) -> Iterator[float]: + # Replace the complex Poisson-Poisson sampler with a deterministic sequence. + yield from [1.0, 2.0, 3.0, 4.0] + + # Monkeypatch the internal requests_generator to use our simple generator. + monkeypatch.setattr( + "app.core.simulation.simulation_run.requests_generator", + fake_requests_generator_fixed, + ) + + # The rng argument is unused in this deterministic test. + rng = np.random.default_rng(42) + output: SimulationOutput = run_simulation(base_input, rng=rng) + + assert output.total_requests["total_requests"] == 4 + # The returned metrics should reflect the input means as strings. + assert output.metric_2 == str(base_input.avg_request_per_minute_per_user.mean) + assert output.metric_n == str(base_input.avg_active_users.mean) + + +def test_run_simulation_includes_event_at_exact_horizon( + monkeypatch: pytest.MonkeyPatch, base_input: SimulationInput, +) -> None: + """ + Confirm that an event scheduled exactly at the simulation horizon + is not processed, since SimPy stops at t == horizon. + """ + def fake_generator_at_horizon( + data: SimulationInput, *, rng: np.random.Generator, + ) -> Iterator[float]: + + # mypy assertion, pydantic guaranteed + assert base_input.total_simulation_time is not None + # Yield a single event at exactly t == simulation_time. + yield float(base_input.total_simulation_time) + + monkeypatch.setattr( + "app.core.simulation.simulation_run.requests_generator", + fake_generator_at_horizon, + ) + + rng = np.random.default_rng(123) + output: SimulationOutput = run_simulation(base_input, rng=rng) + + # SimPy does not execute events scheduled exactly at the stop time. + assert output.total_requests["total_requests"] == 0 + + +def test_run_simulation_excludes_event_beyond_horizon( + monkeypatch: pytest.MonkeyPatch, base_input: SimulationInput, +) -> None: + """ + Ensure that events scheduled after the simulation horizon + are not counted. + """ + def fake_generator_beyond_horizon( + data: SimulationInput, *, rng: np.random.Generator, + ) -> Iterator[float]: + + # mypy assertion, pydantic guaranteed + assert base_input.total_simulation_time is not None + # Yield a single event just beyond the horizon. + yield float(base_input.total_simulation_time) + 0.1 + + monkeypatch.setattr( + "app.core.simulation.simulation_run.requests_generator", + fake_generator_beyond_horizon, + ) + + rng = np.random.default_rng(999) + output: SimulationOutput = run_simulation(base_input, rng=rng) + + assert output.total_requests["total_requests"] == 0 + + +def test_run_simulation_zero_events_when_generator_empty( + monkeypatch: pytest.MonkeyPatch, base_input: SimulationInput, +) -> None: + """ + Check that run_simulation reports zero requests when no + inter-arrival times are yielded. 
+ """ + def fake_generator_empty( + data: SimulationInput, *, rng: np.random.Generator, + ) -> Iterator[float]: + # Empty generator yields nothing. + if False: + yield # pragma: no cover + + monkeypatch.setattr( + "app.core.simulation.simulation_run.requests_generator", + fake_generator_empty, + ) + + rng = np.random.default_rng(2025) + output: SimulationOutput = run_simulation(base_input, rng=rng) + + assert output.total_requests["total_requests"] == 0 diff --git a/tests/unit/simulation/test_simulation_input.py b/tests/unit/simulation/test_simulation_input.py new file mode 100644 index 0000000..20a32d9 --- /dev/null +++ b/tests/unit/simulation/test_simulation_input.py @@ -0,0 +1,93 @@ +import pytest +from pydantic import ValidationError + +from app.schemas.simulation_input import RVConfig, SimulationInput + + +def test_normal_sets_variance_to_mean() -> None: + """When distribution='normal' and variance is omitted, variance == mean.""" + cfg = RVConfig(mean=10, distribution="normal") + assert cfg.variance == 10.0 + + +def test_poisson_keeps_variance_none() -> None: + """When distribution='poisson' and variance is omitted, variance stays None.""" + cfg = RVConfig(mean=5, distribution="poisson") + assert cfg.variance is None + + +def test_explicit_variance_is_preserved() -> None: + """If the user supplies variance explicitly, it is preserved unchanged.""" + cfg = RVConfig(mean=8, distribution="normal", variance=4) + assert cfg.variance == 4.0 + + +def test_mean_must_be_numeric() -> None: + """A non-numeric mean raises a ValidationError with our custom message.""" + with pytest.raises(ValidationError) as excinfo: + RVConfig(mean="not a number", distribution="poisson") + + # Check that at least one error refers to the 'mean' field + assert any(err["loc"] == ("mean",) for err in excinfo.value.errors()) + assert "mean must be a number" in excinfo.value.errors()[0]["msg"] + + +def test_missing_mean_field() -> None: + """Omitting the mean field raises a 'field required' ValidationError.""" + with pytest.raises(ValidationError) as excinfo: + # Using model_validate avoids the constructor signature check + RVConfig.model_validate({"distribution": "normal"}) + + assert any( + err["loc"] == ("mean",) and err["type"] == "missing" + for err in excinfo.value.errors() + ) + +def test_gaussian_sets_variance_to_mean() -> None: + """When distribution='gaussian' and variance is omitted, variance == mean.""" + cfg = RVConfig(mean=12.5, distribution="gaussian") + assert cfg.variance == pytest.approx(12.5) + + +def test_default_distribution_is_poisson() -> None: + """ + When distribution is omitted, it defaults to 'poisson' and + variance stays None. 
+ """ + cfg = RVConfig(mean=3.3) + assert cfg.distribution == "poisson" + assert cfg.variance is None + + +def test_explicit_variance_kept_for_poisson() -> None: + """If the user supplies variance even for poisson, it is preserved.""" + cfg = RVConfig(mean=4.0, distribution="poisson", variance=2.2) + assert cfg.variance == pytest.approx(2.2) + + +def test_invalid_distribution_raises() -> None: + """Supplying a non-supported distribution literal raises ValidationError.""" + with pytest.raises(ValidationError) as excinfo: + RVConfig(mean=5.0, distribution="not_a_dist") + + errors = excinfo.value.errors() + # Only assert there is at least one error for the 'distribution' field: + assert any(e["loc"] == ("distribution",) for e in errors) + + +def test_simulation_time_below_minimum_raises() -> None: + """ + Passing total_simulation_time <= 60 must raise a ValidationError, + because the minimum allowed simulation time is 61 seconds. + """ + with pytest.raises(ValidationError) as excinfo: + SimulationInput( + avg_active_users={"mean": 1.0}, + avg_request_per_minute_per_user={"mean": 1.0}, + total_simulation_time=60, # exactly at the boundary + ) + errors = excinfo.value.errors() + assert any( + err["loc"] == ("total_simulation_time",) and "at least 60 seconds" in err["msg"] + for err in errors + )