-
Notifications
You must be signed in to change notification settings - Fork 24
Api unit tests #266
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
dbutenhof
merged 7 commits into
cloud-bulldozer:main
from
khandrew-redhat:api-unit-tests
Sep 17, 2025
Merged
Api unit tests #266
Changes from all commits
Commits
Show all changes
7 commits
Select commit
Hold shift + click to select a range
4be3282
api.py test coverage 100%
khandrew-redhat 19a5751
Merge branch 'cloud-bulldozer:main' into api-unit-tests
khandrew-redhat 5181e0c
Merge branch 'cloud-bulldozer:main' into api-unit-tests
khandrew-redhat f7794e1
full commons tests
khandrew-redhat 4c9d59f
linting changes
khandrew-redhat 98452c9
Elastic Service separation and DS_Store removal
khandrew-redhat 31cdae5
error response and total count change
khandrew-redhat File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -143,3 +143,6 @@ dmypy.json | |
|
|
||
| # Cython debug symbols | ||
| cython_debug/ | ||
|
|
||
| # DS_Store | ||
| **/.DS_Store | ||
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,146 @@ | ||
| from collections import defaultdict | ||
| from typing import Any, Optional | ||
|
|
||
| from app.services.search import ElasticService | ||
|
|
||
|
|
||
class FakeElasticService(ElasticService):
    """Fake ElasticService for testing commons functions without a live Elasticsearch.

    Provides canned responses for the ElasticService methods used in commons
    modules:
    - post(): used by getData() methods
    - filterPost(): used by getFilterData() methods
    """

    def __init__(self, configpath: str = "TEST", index: str = ""):
        # Deliberately does NOT call super().__init__(), so no real
        # Elasticsearch connection is ever opened.
        self.configpath = configpath
        self.index = index
        # Queues of canned responses, keyed "commons_post"/"commons_filterPost".
        self.data = defaultdict(list)
        # When set, the corresponding mocked method raises instead of returning.
        self.post_error = None
        self.filterPost_error = None

    # Testing helper to manage fake post/filterPost responses
    def set_post_response(
        self,
        response_type: str,
        data_list: Optional[list[dict[str, Any]]] = None,
        filter_data: Optional[list[dict[str, Any]]] = None,
        summary: Optional[dict[str, Any]] = None,
        upstream_list: Optional[list[str]] = None,
        total: Optional[int] = None,
        repeat: int = 1,
        error: Optional[Exception] = None,
    ):
        """Set a canned response or error for ElasticService methods (post/filterPost).

        Args:
            response_type: "post" for getData responses, "filterPost" for
                getFilterData responses
            data_list: list of source data objects (for post responses; the
                total is auto-calculated from its length)
            filter_data: filter aggregation data (for filterPost responses)
            summary: summary data (for filterPost responses)
            upstream_list: list of upstream job names (for filterPost responses)
            total: total count (required for filterPost responses only;
                auto-calculated for post responses)
            repeat: how many times to return this response
            error: Exception to raise instead of returning response data

        Raises:
            ValueError: if response_type is invalid, or total is omitted for
                a filterPost response.
        """
        # Validate response_type once, up front, for both the error and the
        # normal-response paths.
        if response_type not in ("post", "filterPost"):
            raise ValueError(
                f"Invalid response_type: {response_type}. Must be 'post' or 'filterPost'"
            )

        if error is not None:
            # Arm the error instead of queueing response data.
            if response_type == "post":
                self.post_error = error
            else:
                self.filterPost_error = error
            return

        # Build the normal response payload.
        if response_type == "post":
            # Shape matches what getData() expects from ElasticService.post();
            # total is derived from the number of hits.
            hits = [{"_source": d} for d in (data_list or [])]
            response = {"data": hits, "total": len(hits)}
            # Queuing a normal response supersedes any previously armed error
            # (consistent with FakeSplunkService.set_query_response()).
            self.post_error = None
        else:
            # Shape matches what getFilterData() expects from filterPost().
            if total is None:
                raise ValueError("total parameter is required for filterPost responses")
            response = {
                "total": total,
                "filterData": filter_data or [],
                "summary": summary or {},
            }
            if upstream_list:
                response["upstreamList"] = upstream_list
            self.filterPost_error = None

        # Queue an independent shallow copy per repeat so a test that mutates
        # one returned response cannot corrupt the remaining queued ones.
        # (self.data is a defaultdict(list), so no existence check is needed.)
        queue = self.data[f"commons_{response_type}"]
        for _ in range(repeat):
            queue.append(dict(response))

    # Mocked ElasticService methods
    async def post(
        self,
        query,
        indice=None,
        size=10000,
        start_date=None,
        end_date=None,
        timestamp_field=None,
        **kwargs,
    ):
        """Mock the ElasticService.post method.

        Returns the next queued "post" response, raises the armed post_error
        if one was set, or raises RuntimeError when no response was queued
        (which indicates a broken test setup).
        """
        if self.post_error:
            raise self.post_error

        # Return the oldest registered response, FIFO. Use .get() so we don't
        # create an empty defaultdict entry as a side effect of the lookup.
        queue = self.data.get("commons_post")
        if queue:
            return queue.pop(0)

        # No response registered - indicates a broken test.
        raise RuntimeError(
            "No mock data was defined for ElasticService.post() - call set_post_response() first"
        )

    async def filterPost(
        self,
        start_datetime,
        end_datetime,
        aggregate,
        refiner,
        timestamp_field="timestamp",
        indice=None,
        **kwargs,
    ):
        """Mock the ElasticService.filterPost method.

        Returns the next queued "filterPost" response, raises the armed
        filterPost_error if one was set, or raises RuntimeError when no
        response was queued (which indicates a broken test setup).
        """
        if self.filterPost_error:
            raise self.filterPost_error

        # Return the oldest registered response, FIFO.
        queue = self.data.get("commons_filterPost")
        if queue:
            return queue.pop(0)

        # No response registered - indicates a broken test.
        raise RuntimeError(
            "No mock data was defined for ElasticService.filterPost() - call set_post_response() first"
        )

    async def close(self):
        """Mock the ElasticService.close method - no-op for testing."""
        pass
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,157 @@ | ||
| from collections import defaultdict | ||
| from dataclasses import dataclass | ||
| from typing import Any, Optional | ||
|
|
||
| from app.services.splunk import SplunkService | ||
|
|
||
|
|
||
@dataclass
class SplunkRequest:
    """Represents a request made to the fake Splunk service for testing verification.

    Equality is field-wise via the @dataclass-generated __eq__. The previous
    hand-rolled __eq__ duplicated that behavior but raised AttributeError when
    compared against an object of another type; the generated one correctly
    returns NotImplemented (so ``==`` evaluates to False) in that case.
    """

    query: Optional[str]  # raw search string passed to query(), if any
    size: int  # requested page size
    offset: int  # pagination offset
    sort: Optional[str]  # sort specification, if any
    searchList: Optional[str]  # pre-built search list, if any
    configpath: str  # config section the request was issued against
|
|
||
|
|
||
class FakeSplunkService(SplunkService):
    """
    Fake SplunkService for testing telco functions without actual Splunk connections.

    This fake implementation follows the same pattern as FakeAsyncElasticsearch,
    providing canned responses for SplunkService methods used in telco.py:
    - query(): Used by telco.getData()
    - filterPost(): Used by telco.getFilterData()

    Usage:
        fake_splunk = FakeSplunkService()

        # For getData() tests
        fake_splunk.set_query_response(data_list=[...], total=100)

        # For getFilterData() tests
        fake_splunk.set_filter_response(data_list=[...], summary={...}, total=50)

        # For error testing:
        fake_splunk.set_query_response(error=Exception("Connection failed"))
    """

    # NOTE: the docstring above must be the *first* statement in the class
    # body. The original placed these annotations before it, which silently
    # turned the string into a discarded expression and left the class with
    # no __doc__. Values are assigned per-instance in __init__.
    configpath: str
    data: dict[str, Any]
    query_error: Optional[Exception]
    filterPost_error: Optional[Exception]

    def __init__(self, configpath: str = "TEST"):
        # Deliberately does NOT call super().__init__(), so no real Splunk
        # connection is ever opened.
        self.configpath = configpath
        # Queues of canned responses, keyed "query_responses"/"filter_responses".
        self.data = defaultdict(list)
        # When set, the corresponding mocked method raises instead of returning.
        self.query_error = None
        self.filterPost_error = None

    # Testing helpers to manage fake searches
    def set_query_response(
        self,
        data_list: Optional[list[dict[str, Any]]] = None,
        total: int = 0,
        error: Optional[Exception] = None,
        return_none: bool = False,
    ):
        """
        Set a canned response for SplunkService.query() method.

        This method is used by telco.getData() to retrieve time-series data.

        Args:
            data_list: List of telco data objects to return
            total: Total count of results
            error: Exception to raise instead of returning response data
            return_none: If True, return None instead of a response dict
        """
        if error is not None:
            self.query_error = error
            return

        # Clear any previous error before queueing a normal response.
        self.query_error = None

        if return_none:
            response = None
        else:
            response = {"data": data_list or [], "total": total}

        self.data["query_responses"].append(response)

    def set_filter_response(
        self,
        data_list: Optional[list[dict[str, Any]]] = None,
        summary: Optional[dict[str, Any]] = None,
        total: int = 0,
        error: Optional[Exception] = None,
    ):
        """
        Set a canned response for SplunkService.filterPost() method.

        This method is used by telco.getFilterData() to retrieve aggregation data.

        Args:
            data_list: List of aggregation data objects to return
            summary: Summary statistics (e.g., {"success": 50, "failure": 10})
            total: Total count of results
            error: Exception to raise instead of returning response data
        """
        if error is not None:
            self.filterPost_error = error
            return

        # Clear any previous error before queueing a normal response.
        self.filterPost_error = None

        response = {"data": data_list or [], "summary": summary or {}, "total": total}

        self.data["filter_responses"].append(response)

    # Mocked SplunkService methods
    async def query(
        self,
        query: Optional[str] = None,
        size: int = 10,
        offset: int = 0,
        sort: Optional[str] = None,
        searchList: Optional[str] = None,
    ):
        """Mock SplunkService.query: return the next queued response (FIFO),
        raise the armed query_error, or fall back to an empty response."""
        if self.query_error:
            raise self.query_error

        # Return the oldest canned response, if any was registered.
        if self.data["query_responses"]:
            return self.data["query_responses"].pop(0)

        # Default empty response when nothing was queued.
        return {"data": [], "total": 0}

    async def filterPost(
        self,
        query: Optional[str] = None,
        searchList: Optional[str] = None,
    ):
        """Mock SplunkService.filterPost: return the next queued response
        (FIFO), raise the armed filterPost_error, or fall back to an empty
        response."""
        if self.filterPost_error:
            raise self.filterPost_error

        # Return the oldest canned response, if any was registered.
        if self.data["filter_responses"]:
            return self.data["filter_responses"].pop(0)

        # Default empty response when nothing was queued.
        return {"data": [], "summary": {}, "total": 0}
Oops, something went wrong.
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Uh oh!
There was an error while loading. Please reload this page.