From 72604becad8cdb688f59447720688b0a2be18259 Mon Sep 17 00:00:00 2001 From: goldhaxx <1616671+goldhaxx@users.noreply.github.com> Date: Fri, 20 Dec 2024 20:46:19 -0800 Subject: [PATCH 01/10] Enhance asset-liability summary generation with error handling and data validation. Added try-except for missing market data and improved column checks in the asset-liab matrix display. This ensures robustness against KeyErrors and provides user feedback when data is unavailable. --- src/page/asset_liability.py | 92 ++++++++++++++++++++++++------------- 1 file changed, 59 insertions(+), 33 deletions(-) diff --git a/src/page/asset_liability.py b/src/page/asset_liability.py index 9c29aff..b2fef48 100644 --- a/src/page/asset_liability.py +++ b/src/page/asset_liability.py @@ -30,30 +30,35 @@ def generate_summary_data( df: pd.DataFrame, mode: int, perp_market_index: int ) -> pd.DataFrame: summary_data = {} - for i in range(len(mainnet_spot_market_configs)): + for market in mainnet_spot_market_configs: + i = market.market_index prefix = f"spot_{i}" - assets = df[f"{prefix}_all_assets"].sum() - liabilities = df[f"{prefix}_all"].sum() - - summary_data[f"spot{i}"] = { - "all_assets": assets, - "all_liabilities": format_metric( - liabilities, 0 < liabilities < 1_000_000, mode, financial=True - ), - "effective_leverage": format_metric( - calculate_effective_leverage(assets, liabilities), - 0 < calculate_effective_leverage(assets, liabilities) < 2, - mode, - ), - "all_spot": df[f"{prefix}_all_spot"].sum(), - "all_perp": df[f"{prefix}_all_perp"].sum(), - f"perp_{perp_market_index}_long": df[ - f"{prefix}_perp_{perp_market_index}_long" - ].sum(), - f"perp_{perp_market_index}_short": df[ - f"{prefix}_perp_{perp_market_index}_short" - ].sum(), - } + try: + assets = df[f"{prefix}_all_assets"].sum() + liabilities = df[f"{prefix}_all"].sum() + + summary_data[f"spot{i}"] = { + "all_assets": assets, + "all_liabilities": format_metric( + liabilities, 0 < liabilities < 1_000_000, mode, financial=True 
+ ), + "effective_leverage": format_metric( + calculate_effective_leverage(assets, liabilities), + 0 < calculate_effective_leverage(assets, liabilities) < 2, + mode, + ), + "all_spot": df[f"{prefix}_all_spot"].sum(), + "all_perp": df[f"{prefix}_all_perp"].sum(), + f"perp_{perp_market_index}_long": df[ + f"{prefix}_perp_{perp_market_index}_long" + ].sum(), + f"perp_{perp_market_index}_short": df[ + f"{prefix}_perp_{perp_market_index}_short" + ].sum(), + } + except KeyError as e: + print(f"Warning: Missing data for market {i} ({market.symbol}): {e}") + continue return pd.DataFrame(summary_data).T @@ -132,16 +137,37 @@ def asset_liab_matrix_cached_page(): st.write(f"Total liabilities: **{filtered_df['spot_liability'].sum():,.2f}**") st.dataframe(filtered_df, hide_index=True) - for idx, tab in enumerate(tabs[1:]): - important_cols = [x for x in filtered_df.columns if "spot_" + str(idx) in x] - + for idx, tab in enumerate(tabs[1:], 1): + market = mainnet_spot_market_configs[idx - 1] + market_index = market.market_index + prefix = f"spot_{market_index}" + + # Check if the required columns exist + required_cols = [f"{prefix}_all", f"{prefix}_all_assets", f"{prefix}_all_perp", f"{prefix}_all_spot"] + if not all(col in filtered_df.columns for col in required_cols): + tab.warning(f"No data available for {market.symbol} (market index {market_index})") + continue + + important_cols = [x for x in filtered_df.columns if prefix in x] + toshow = filtered_df[ ["user_key", "spot_asset", "net_usd_value"] + important_cols ] - toshow = toshow[toshow[important_cols].abs().sum(axis=1) != 0].sort_values( - by="spot_" + str(idx) + "_all", ascending=False - ) - tab.write( - f"{len(toshow)} users with this asset to cover liabilities (with {st.session_state.min_leverage}x leverage or more)" - ) - tab.dataframe(toshow, hide_index=True) + + # Filter rows where any of the market-specific columns have non-zero values + non_zero_mask = toshow[important_cols].abs().sum(axis=1) != 0 + toshow = 
toshow[non_zero_mask] + + if len(toshow) > 0: + # Sort by the 'all' column if it exists, otherwise don't sort + try: + toshow = toshow.sort_values(by=f"{prefix}_all", ascending=False) + except KeyError: + pass # Skip sorting if column doesn't exist + + tab.write( + f"{len(toshow)} users with this asset to cover liabilities (with {st.session_state.min_leverage}x leverage or more)" + ) + tab.dataframe(toshow, hide_index=True) + else: + tab.info("No users found with non-zero positions in this market") From a49733be3b524eab471adc777ea6a0cc0e130bb5 Mon Sep 17 00:00:00 2001 From: goldhaxx <1616671+goldhaxx@users.noreply.github.com> Date: Fri, 20 Dec 2024 21:04:41 -0800 Subject: [PATCH 02/10] Improve error handling and data validation in asset-liability matrix page. Added try-except blocks for API calls, checks for valid response format, and warnings for empty data. Enhanced slot information retrieval with error handling. --- src/page/asset_liability.py | 39 +++++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/src/page/asset_liability.py b/src/page/asset_liability.py index b2fef48..7376de3 100644 --- a/src/page/asset_liability.py +++ b/src/page/asset_liability.py @@ -87,12 +87,24 @@ def asset_liab_matrix_cached_page(): ) st.query_params.update({"perp_market_index": str(perp_market_index)}) - result = api2( - "asset-liability/matrix", - _params={"mode": mode, "perp_market_index": perp_market_index}, - key=f"asset-liability/matrix_{mode}_{perp_market_index}", - ) + try: + result = api2( + "asset-liability/matrix", + _params={"mode": mode, "perp_market_index": perp_market_index}, + key=f"asset-liability/matrix_{mode}_{perp_market_index}", + ) + except Exception as e: + st.error(f"Failed to fetch data: {str(e)}") + return + + if not isinstance(result, dict) or "df" not in result: + st.error("Invalid response format from API") + return + df = pd.DataFrame(result["df"]) + if df.empty: + st.warning("No data available for the 
selected parameters") + return if st.session_state.only_high_leverage_mode_users: df = df[df["is_high_leverage"]] @@ -102,12 +114,19 @@ def asset_liab_matrix_cached_page(): ) summary_df = generate_summary_data(filtered_df, mode, perp_market_index) - slot = result["slot"] - current_slot = get_current_slot() + + # Get slot information if available + slot = result.get("slot") + if slot is not None: + try: + current_slot = get_current_slot() + st.info( + f"This data is for slot {slot}, which is now {int(current_slot) - int(slot)} slots old" + ) + except Exception as e: + print(f"Error getting current slot: {e}") + # Continue without showing slot information - st.info( - f"This data is for slot {slot}, which is now {int(current_slot) - int(slot)} slots old" - ) st.write(f"{df.shape[0]} users") st.checkbox( "Only show high leverage mode users", key="only_high_leverage_mode_users" From 342567fff59f160a35fe75049134ff238f13e0b6 Mon Sep 17 00:00:00 2001 From: goldhaxx <1616671+goldhaxx@users.noreply.github.com> Date: Fri, 20 Dec 2024 21:39:33 -0800 Subject: [PATCH 03/10] Enhance error handling in asset-liability matrix page by checking for the presence of 'is_high_leverage' column. Added user feedback for unavailable data, improving robustness and user experience. 
--- src/page/asset_liability.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/page/asset_liability.py b/src/page/asset_liability.py index 7376de3..f2c5e8a 100644 --- a/src/page/asset_liability.py +++ b/src/page/asset_liability.py @@ -107,7 +107,11 @@ def asset_liab_matrix_cached_page(): return if st.session_state.only_high_leverage_mode_users: - df = df[df["is_high_leverage"]] + if "is_high_leverage" not in df.columns: + st.error("High leverage mode data is not available") + st.session_state.only_high_leverage_mode_users = False + else: + df = df[df["is_high_leverage"]] filtered_df = df[df["leverage"] >= st.session_state.min_leverage].sort_values( "leverage", ascending=False From 0557b5d6fe1d01d4b012602e98861a3fa3a6356d Mon Sep 17 00:00:00 2001 From: goldhaxx <1616671+goldhaxx@users.noreply.github.com> Date: Sat, 21 Dec 2024 18:47:10 -0800 Subject: [PATCH 04/10] Add Docker Compose configuration for backend and frontend services --- docker-compose.yml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 docker-compose.yml diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..f768c99 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,28 @@ +services: + backend: + build: + context: . + dockerfile: Dockerfile-backend + ports: + - "8000:8000" + container_name: app-backend + networks: + - app-network + + frontend: + build: + context: . 
+ dockerfile: Dockerfile-frontend + ports: + - "8501:8501" + container_name: app-frontend + depends_on: + - backend + environment: + - BACKEND_URL=http://app-backend:8000 + networks: + - app-network + +networks: + app-network: + driver: bridge \ No newline at end of file From 2d07b52541b5c33b010477104fff376d23649963 Mon Sep 17 00:00:00 2001 From: goldhaxx <1616671+goldhaxx@users.noreply.github.com> Date: Sat, 21 Dec 2024 20:50:46 -0800 Subject: [PATCH 05/10] Enhance Docker setup and logging for backend services - Added volume mapping for logs in docker-compose.yml to persist log data. - Updated Dockerfile to create and set permissions for the /logs directory. - Replaced print statements with logging in asset_liability.py and matrix.py for better traceability and debugging. - Introduced timing metrics in asset_liability.py to monitor loading times and improve user feedback during data fetching and processing. --- Dockerfile-backend | 4 +- backend/api/asset_liability.py | 12 ++- backend/utils/matrix.py | 14 +++- docker-compose.yml | 2 + src/page/asset_liability.py | 146 +++++++++++++++++++++++++++------ 5 files changed, 146 insertions(+), 32 deletions(-) diff --git a/Dockerfile-backend b/Dockerfile-backend index dd13c0d..75bd910 100644 --- a/Dockerfile-backend +++ b/Dockerfile-backend @@ -5,6 +5,8 @@ COPY . 
/app RUN apt-get update && apt-get install -y gcc python3-dev RUN pip install --trusted-host pypi.python.org -r requirements.txt +RUN mkdir -p /logs && chmod 777 /logs + EXPOSE 8000 -CMD ["uvicorn", "backend.app:app", "--host", "0.0.0.0", "--port", "8000"] +CMD ["gunicorn", "backend.app:app", "-c", "gunicorn_config.py"] diff --git a/backend/api/asset_liability.py b/backend/api/asset_liability.py index e745ad4..b06474f 100644 --- a/backend/api/asset_liability.py +++ b/backend/api/asset_liability.py @@ -1,10 +1,12 @@ from driftpy.pickle.vat import Vat from fastapi import APIRouter +import logging from backend.state import BackendRequest from backend.utils.matrix import get_matrix router = APIRouter() +logger = logging.getLogger(__name__) async def _get_asset_liability_matrix( @@ -13,10 +15,16 @@ async def _get_asset_liability_matrix( mode: int, perp_market_index: int, ) -> dict: - print("==> Getting asset liability matrix...") + logger.info("==> Starting asset liability matrix calculation...") + logger.info(f"Mode: {mode}, Perp Market Index: {perp_market_index}") + + logger.info("==> Processing user data and calculating metrics...") df = await get_matrix(vat, mode, perp_market_index) + + logger.info("==> Converting DataFrame to dictionary...") df_dict = df.to_dict() - print("==> Asset liability matrix fetched") + + logger.info("==> Asset liability matrix calculation complete") return { "slot": slot, diff --git a/backend/utils/matrix.py b/backend/utils/matrix.py index 50e44dc..7c9f257 100644 --- a/backend/utils/matrix.py +++ b/backend/utils/matrix.py @@ -1,9 +1,12 @@ import pandas as pd +import logging from driftpy.constants.spot_markets import mainnet_spot_market_configs from driftpy.pickle.vat import Vat from backend.utils.user_metrics import get_user_leverages_for_asset_liability +logger = logging.getLogger(__name__) + def calculate_effective_leverage(assets: float, liabilities: float) -> float: return liabilities / assets if assets != 0 else 0 @@ -18,7 +21,9 @@ 
def format_metric( async def get_matrix(vat: Vat, mode: int = 0, perp_market_index: int = 0): NUMBER_OF_SPOT = len(mainnet_spot_market_configs) + logger.info(f"Processing data for {NUMBER_OF_SPOT} spot markets...") + logger.info("Calculating user leverages...") res = get_user_leverages_for_asset_liability(vat.users) leverage_data = { 0: res["leverages_none"], @@ -32,9 +37,11 @@ async def get_matrix(vat: Vat, mode: int = 0, perp_market_index: int = 0): if mode in [2, 3] else res["user_keys"] ) + logger.info(f"Processing data for {len(user_keys)} users...") df = pd.DataFrame(leverage_data[mode], index=user_keys) + logger.info("Initializing market columns...") new_columns = {} for i in range(NUMBER_OF_SPOT): prefix = f"spot_{i}" @@ -49,6 +56,7 @@ async def get_matrix(vat: Vat, mode: int = 0, perp_market_index: int = 0): for col in column_names: new_columns[col] = pd.Series(0.0, index=df.index) + logger.info("Calculating market metrics for each user...") for idx, row in df.iterrows(): spot_asset = row["spot_asset"] @@ -67,15 +75,12 @@ async def get_matrix(vat: Vat, mode: int = 0, perp_market_index: int = 0): } net_perp = float(row["net_p"][perp_market_index]) - print(f"net_perp value: {net_perp}, type: {type(net_perp)}") if net_perp > 0: - print("Net perp above 0") metrics[f"{base_name}_perp_{perp_market_index}_long"] = ( value / spot_asset * net_perp ) if net_perp < 0: - print("Net perp below 0") metrics[f"{base_name}_perp_{perp_market_index}_short"] = ( value / spot_asset * net_perp ) @@ -83,5 +88,8 @@ async def get_matrix(vat: Vat, mode: int = 0, perp_market_index: int = 0): for col, val in metrics.items(): new_columns[col][idx] = val + logger.info("Finalizing DataFrame...") df = pd.concat([df, pd.DataFrame(new_columns)], axis=1) + logger.info("Matrix calculation complete") + return df diff --git a/docker-compose.yml b/docker-compose.yml index f768c99..754accc 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -6,6 +6,8 @@ services: ports: - "8000:8000" 
container_name: app-backend + volumes: + - ./logs:/logs networks: - app-network diff --git a/src/page/asset_liability.py b/src/page/asset_liability.py index f2c5e8a..59d6ee7 100644 --- a/src/page/asset_liability.py +++ b/src/page/asset_liability.py @@ -1,11 +1,13 @@ import pandas as pd import streamlit as st +import time from driftpy.constants.perp_markets import mainnet_perp_market_configs from driftpy.constants.spot_markets import mainnet_spot_market_configs from lib.api import api2 from utils import get_current_slot + options = [0, 1, 2, 3] labels = [ "none", @@ -26,6 +28,15 @@ def format_metric( return f"{formatted} ✅" if should_highlight and mode > 0 else formatted +def format_time(seconds: float) -> str: + """Format time duration in a human-readable format.""" + if seconds < 60: + return f"{seconds:.1f}s" + minutes = int(seconds // 60) + seconds = seconds % 60 + return f"{minutes}m {seconds:.1f}s" + + def generate_summary_data( df: pd.DataFrame, mode: int, perp_market_index: int ) -> pd.DataFrame: @@ -67,6 +78,8 @@ def asset_liab_matrix_cached_page(): st.session_state.min_leverage = 0.0 if "only_high_leverage_mode_users" not in st.session_state: st.session_state.only_high_leverage_mode_users = False + if "load_times" not in st.session_state: + st.session_state.load_times = {} params = st.query_params mode = int(params.get("mode", 0)) @@ -87,38 +100,119 @@ def asset_liab_matrix_cached_page(): ) st.query_params.update({"perp_market_index": str(perp_market_index)}) + # Create containers for timing information + timing_container = st.container() + with timing_container: + #st.write("### Loading Times") + timing_table = st.empty() + + # Create a container for the loading spinner and status + loading_container = st.container() + try: - result = api2( - "asset-liability/matrix", - _params={"mode": mode, "perp_market_index": perp_market_index}, - key=f"asset-liability/matrix_{mode}_{perp_market_index}", - ) - except Exception as e: - st.error(f"Failed to fetch data: 
{str(e)}") - return + start_time = time.time() + load_times = {} + + # Create a placeholder for the timer + timer_placeholder = st.empty() + status_container = st.empty() + + while True: + # Update timer + elapsed = time.time() - start_time + timer_placeholder.markdown(f"### Loading asset-liability matrix... ({format_time(elapsed)})") + + try: + # Fetching data from backend + step_start = time.time() + status_container.info("⏳ Fetching data from backend...") + + result = api2( + "asset-liability/matrix", + _params={"mode": mode, "perp_market_index": perp_market_index}, + key=f"asset-liability/matrix_{mode}_{perp_market_index}_{int(time.time())}", + ) + + step_duration = time.time() - step_start + load_times["Fetch Data"] = step_duration + break # Exit the loop once data is fetched + + except Exception as e: + if "timeout" not in str(e).lower(): # If it's not a timeout error, raise it + raise e + time.sleep(0.1) # Small delay before retrying + continue + + # Processing market data + step_start = time.time() + status_container.info("⚡ Processing market data...") + + if not isinstance(result, dict) or "df" not in result: + st.error("Invalid response format from API") + return - if not isinstance(result, dict) or "df" not in result: - st.error("Invalid response format from API") - return + df = pd.DataFrame(result["df"]) + if df.empty: + st.warning("No data available for the selected parameters") + return + + step_duration = time.time() - step_start + load_times["Process Data"] = step_duration - df = pd.DataFrame(result["df"]) - if df.empty: - st.warning("No data available for the selected parameters") - return + # Calculating metrics + step_start = time.time() + status_container.info("📊 Calculating market metrics...") + + if st.session_state.only_high_leverage_mode_users: + if "is_high_leverage" not in df.columns: + st.error("High leverage mode data is not available") + st.session_state.only_high_leverage_mode_users = False + else: + df = df[df["is_high_leverage"]] - 
if st.session_state.only_high_leverage_mode_users: - if "is_high_leverage" not in df.columns: - st.error("High leverage mode data is not available") - st.session_state.only_high_leverage_mode_users = False - else: - df = df[df["is_high_leverage"]] + filtered_df = df[df["leverage"] >= st.session_state.min_leverage].sort_values( + "leverage", ascending=False + ) + + step_duration = time.time() - step_start + load_times["Calculate Metrics"] = step_duration - filtered_df = df[df["leverage"] >= st.session_state.min_leverage].sort_values( - "leverage", ascending=False - ) + # Generating summary + step_start = time.time() + status_container.info("🔄 Generating summary data...") + + summary_df = generate_summary_data(filtered_df, mode, perp_market_index) + + step_duration = time.time() - step_start + load_times["Generate Summary"] = step_duration + + # Calculate total time + total_time = time.time() - start_time + load_times["Total Time"] = total_time + + # Update final timer display + timer_placeholder.markdown(f"### Loading Times") + + # Store the load times in session state + st.session_state.load_times = load_times + + # Clear the status container after loading is complete + status_container.empty() + + # Display timing information + timing_df = pd.DataFrame( + { + "Duration": [format_time(t) for t in load_times.values()], + "Percentage": [f"{(t/load_times['Total Time'])*100:.1f}%" for t in load_times.values()] + }, + index=load_times.keys() + ) + st.table(timing_df) + + except Exception as e: + st.error(f"Failed to fetch data: {str(e)}") + return - summary_df = generate_summary_data(filtered_df, mode, perp_market_index) - # Get slot information if available slot = result.get("slot") if slot is not None: From bccb6224172d81b9f50a3f27d3771587bdfe125f Mon Sep 17 00:00:00 2001 From: goldhaxx <1616671+goldhaxx@users.noreply.github.com> Date: Sat, 21 Dec 2024 21:44:58 -0800 Subject: [PATCH 06/10] Update logging configuration and enhance .gitignore for better file management 
- Changed logging configuration in gunicorn_config.py to direct logs to /logs/access.log and /logs/error.log, and set log level to debug for improved debugging. - Added matrix_response.json and logs/* to .gitignore to prevent unnecessary files from being tracked. - Updated Streamlit config to suppress callback and resource warnings, enhancing user experience. --- .gitignore | 2 ++ .streamlit/config.toml | 3 +++ gunicorn_config.py | 6 +++--- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 0299b5b..891e909 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,5 @@ pickles/* cache ignore ucache +matrix_response.json +logs/* \ No newline at end of file diff --git a/.streamlit/config.toml b/.streamlit/config.toml index a6f1de6..2899e4d 100644 --- a/.streamlit/config.toml +++ b/.streamlit/config.toml @@ -9,3 +9,6 @@ gatherUsageStats = false [client] toolbarMode = "minimal" +showCallbackWarning = false +showCachedResourceWarning = false +showStatusIndicator = false diff --git a/gunicorn_config.py b/gunicorn_config.py index 27ded59..69845d7 100644 --- a/gunicorn_config.py +++ b/gunicorn_config.py @@ -5,9 +5,9 @@ keepalive = 65 max_requests = 1000 max_requests_jitter = 50 -accesslog = "-" -errorlog = "-" -loglevel = "info" +accesslog = "/logs/access.log" +errorlog = "/logs/error.log" +loglevel = "debug" # Restart workers that die unexpectedly worker_exit_on_restart = True From 7ed03d7b25fd6affc6ebe0532b69ca9dd0bf4618 Mon Sep 17 00:00:00 2001 From: goldhaxx <1616671+goldhaxx@users.noreply.github.com> Date: Sat, 21 Dec 2024 21:46:30 -0800 Subject: [PATCH 07/10] Add last_oracle_slot attribute to BackendState class - Introduced a new attribute `last_oracle_slot` initialized to 0 in the BackendState class within state.py. This addition enhances the state management capabilities of the backend, potentially improving tracking of oracle data. 
--- backend/state.py | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/state.py b/backend/state.py index 750ebe2..b8066c4 100644 --- a/backend/state.py +++ b/backend/state.py @@ -75,6 +75,7 @@ def initialize( ) self.ready = False self.current_pickle_path = "bootstrap" + self.last_oracle_slot = 0 async def bootstrap(self): with waiting_for("drift client"): From 58fe6b30a803bf0a550a384857cbf28f425ed950 Mon Sep 17 00:00:00 2001 From: goldhaxx <1616671+goldhaxx@users.noreply.github.com> Date: Thu, 26 Dec 2024 17:43:18 -0800 Subject: [PATCH 08/10] pre-RepoPrompt checkin --- .DS_Store | Bin 0 -> 10244 bytes .gitignore | 4 +- .repo_ignore | 27 ++ backend/.DS_Store | Bin 0 -> 6148 bytes backend/api/debug.py | 123 ++++++++ backend/api/risk_metrics.py | 513 ++++++++++++++++++++++++++++++++ backend/app.py | 91 ++++-- backend/middleware/readiness.py | 53 +++- backend/state.py | 299 +++++++++++++++---- backend/utils/matrix.py | 187 +++++++----- backend/utils/risk_metrics.py | 118 ++++++++ backend/utils/user_metrics.py | 107 +++++-- constraints.txt | 4 + requirements-minimal.txt | 6 + requirements.txt | 7 +- src/.DS_Store | Bin 0 -> 6148 bytes 16 files changed, 1349 insertions(+), 190 deletions(-) create mode 100644 .DS_Store create mode 100644 .repo_ignore create mode 100644 backend/.DS_Store create mode 100644 backend/api/debug.py create mode 100644 backend/api/risk_metrics.py create mode 100644 backend/utils/risk_metrics.py create mode 100644 constraints.txt create mode 100644 requirements-minimal.txt create mode 100644 src/.DS_Store diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..276c807d9c5a31441a03860b23f712bed9a63183 GIT binary patch literal 10244 zcmeHMYi!(97Cy(BG&e`iWTuE8 z2!wBBAK!boXD0AxCt)Rxd`fyVUgnAaq4WE)8&e@KqE^~bGL zdY+{};4|Pe;4|Pe;4|Pe@M~d!-r20lDwqD*XTWE`XJC#2_WKZ~gUN^|XSj5KI_TtX z0Z0aD*ll#Ju?v!6PewdB!zF2`A$KLoUD4ZOAa|$nuxA(X$49sVM?cD>g4#vR%N8G;$mVdWS)TOMRo> 
z)+Z|J(-Vbn1A!*DN^A0QVFI!c6Qctx`GjUKu@Eu5wqy6=EqA#X=2;bxl(!P`c)wYT z36Bj9WI%@~q##+!2_bryjs@*RZ0m>Su$8ebAUv1t0ywJ)XO1wMkP|uUc&H{s=mf2Z zY2(M2o&KEjRb){>vap31f9v}Z3gF(q7auMowhUrzQbbbUc=MTZb|R*Rw?Df5@B>0QmENmgh@2$U|#a{{uun0qzYLR8TL+m(|Z5EiZDLd`` z8a*FNCk@+aZT%3XszCLk8ft3Ib^R08RKF8-vUXQAbBOf@EGrqyUq@quhCVf>HFR61 z6E#dd<2o1#olFi78D_$o>awzC!ghW8xk8~frmZ-ByuE!xOSr9V!*omd_~!Q3mhctr z?bFkW+}ON+>;AFHWXiM{N7T?;u+ZHUx4WoY;SNQ{J;zGccCMTAQ5DYfamM3ARu-#E zf`fxY!|Ir}JV&?Pmv#)xv#_IW)nTT;+DBDqciCf)zWsw*6lqz_wJjmU8dD7uUFL(qT6(g>kU0~V9H2169=PqTsISjIX;v#Y$KM` z2h3m_C%J=q7DG)*0-UwKwHa7I<&ecKXt~nTOOF+#$;& z_Cz$J_f1iz<~{bvvit04oVoNS49CiFHp0O^Sw6@n>r5nQp4t@`oPX{9V33}QVde~~ z3wrRpJhb;@S#FN3;Sa~9wK>wtdkU?}o1hiCU^k3XIVK|F+qF^V_fB&N_r8~=>c zcr(u69e5w!j}PKQcnTlENAWRy8lT4(@E`b3d<$ps1N;!r;(zc9{1ShVY9v`wq|}$@^uIjxShAp{uXnvGZ3~NoSVw+G%J! z?~fN>x~^m6w#ZzD@r2ChpqSfkIu{jzo6c?$l%^t8S+HrP8ahDvt{*L}v3%`mHLNCO zmW9Fag=$Evri$R*KV3rIMN|#EyJl@Dq)AkSyt}?Nq-s@EVs1SLH>{;DdYw(1RaFa6 zu_?1bN@R7Vx?NE;aD#Mq(KM`depV8{3~#{O@By5qB>on@gYR((C2kX5NJ-m)ms8?y z#x0b%y|@E+;x62e12~97l*UnvqmI|(IHoa!4rVb&Nqh_5inrn2crPXL1C+|A@UQqc zd>9|c)A%I*9iMVj`Xyv9b82%*x|{d&XMBUMDkQ1+!kyiA8-(t27f}^G3X6cB<{`e2 zJzN#$&-G`_q-BniB`dr|^kb2t*Y!BPxb5Os+316%dhu@l^~&7Rxo8e?0a4DT7;&0` z4$varA`~SU(nmy{82|PYFKz!fUnbA_im?~c7ss8UscE5>CI;d#N%2b&qxJ^QW+-ZrUX1T{00TY!JzaDY`6YAd{jhKCVLFb`!5Xuh;nH=XllwWIr29FZI_z`&YC4#$ jx0!ISBi`5ym##b6xb7r>p??NU4fy+ifB*lx-v9pySs;6( literal 0 HcmV?d00001 diff --git a/.gitignore b/.gitignore index 891e909..8829ad9 100644 --- a/.gitignore +++ b/.gitignore @@ -3,9 +3,11 @@ venv **/__pycache__/ .env .venv +.venv-risk pickles/* cache ignore ucache matrix_response.json -logs/* \ No newline at end of file +logs/* +documentation/* \ No newline at end of file diff --git a/.repo_ignore b/.repo_ignore new file mode 100644 index 0000000..55d3643 --- /dev/null +++ b/.repo_ignore @@ -0,0 +1,27 @@ +# Package manager caches +**/node_modules/ +**/.npm/ +**/__pycache__/ +**/.pytest_cache/ +**/.mypy_cache/ + +# Build caches +**/.gradle/ +**/.nuget/ 
+**/.cargo/ +**/.stack-work/ +**/.ccache/ + +# IDE and Editor caches +**/.idea/ +**/.vscode/ +**/*.swp +**/*~ + +# Temp files +**/*.tmp +**/*.temp +**/*.bak + +**/*.meta +**/package-lock.json \ No newline at end of file diff --git a/backend/.DS_Store b/backend/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..b7aa957b846fc687d12d28172459cb9d61a95048 GIT binary patch literal 6148 zcmeHKJxBvF6n@c)rCmBWbUa)H7ZIFvIZIs}?JWHjTWIOgTEy*kMLIgU2u?1#>g3?y z?(Xa&&Mtm03B60Nt+43@!Z|M*PM1jAi0I%IFR%@98I>FkF)^E9exL&GvJAt8Gt4{aqbc@AW zyIX@t`r&+e?fUF#dzN?O2i`$5&O(!!2g`t34nK8wgN$}0&gXsO zfoniV)OQ)?S)VbFIJcMKZf4VPOL#@BrhB=3Jm#}N1Mp};wJ|QA@9t=v$J_nw)rI+9 zvG}+$lRcdLs>1ojW-EX*n=PF+D77dc3Wx$D1$ck(P#ArSp+UWMpwd?WU<%#Z(9UrK z=vnkFh6XVLQzjH>LY2K@C=-r;&-(cmLxU!ql+KKKY-VL|C`xBXzo*?v`39vH1w;W? zfqC6!=#Ps8q33EZ~*w*_wDcK5IRc3JM$hg$8v9DtjF31|P+F b6m1yuxB>Jnh6d3ClRpAl2B}1WUsd1>{0g`K literal 0 HcmV?d00001 diff --git a/backend/api/debug.py b/backend/api/debug.py new file mode 100644 index 0000000..4c5f95e --- /dev/null +++ b/backend/api/debug.py @@ -0,0 +1,123 @@ +from fastapi import APIRouter +from backend.state import BackendRequest +import logging +from typing import Dict, Any + +router = APIRouter() +logger = logging.getLogger(__name__) + + +@router.get("/debug") +async def debug_state(request: BackendRequest) -> Dict[str, Any]: + """ + Debug endpoint to check state details. 
+ """ + state = request.backend_state + + if not state: + return { + "status": "error", + "error": "no_state", + "details": "No backend state found in request" + } + + components = { + "ready": state.ready, + "vat_exists": state.vat is not None, + "dc_exists": state.dc is not None, + "connection_exists": state.connection is not None, + "spot_map_exists": state.spot_markets is not None, + "perp_map_exists": state.perp_markets is not None, + "user_map_exists": state.user_map is not None, + "stats_map_exists": state.stats_map is not None + } + + try: + if not state.is_ready: + components["state_error"] = "State is not ready" + return { + "status": "error", + "components": components, + "is_ready": False + } + + if state.vat: + vat = state.vat + + # Check spot markets + if hasattr(vat, "spot_markets"): + spot_markets = vat.spot_markets + if hasattr(spot_markets, 'market_map'): + market_map = spot_markets.market_map + elif hasattr(spot_markets, 'markets'): + market_map = spot_markets.markets + + if market_map: + try: + # Get market keys safely + market_keys = [] + for key in market_map: + market_keys.append(key) + components["spot_markets_count"] = len(market_keys) + components["spot_markets_indices"] = market_keys[:5] + except Exception as e: + logger.error(f"Error getting spot markets count: {str(e)}") + components["spot_markets_error"] = str(e) + else: + components["spot_markets_error"] = "No market map found" + + # Check perp markets + if hasattr(vat, "perp_markets"): + perp_markets = vat.perp_markets + if hasattr(perp_markets, 'market_map'): + market_map = perp_markets.market_map + elif hasattr(perp_markets, 'markets'): + market_map = perp_markets.markets + + if market_map: + try: + # Get market keys safely + market_keys = [] + for key in market_map: + market_keys.append(key) + components["perp_markets_count"] = len(market_keys) + components["perp_markets_indices"] = market_keys[:5] + except Exception as e: + logger.error(f"Error getting perp markets count: 
{str(e)}") + components["perp_markets_error"] = str(e) + else: + components["perp_markets_error"] = "No market map found" + + # Check users + if hasattr(vat, "users"): + users = vat.users + if hasattr(users, 'users'): + user_map = users.users + elif hasattr(users, 'user_map'): + user_map = users.user_map + + if user_map: + try: + # Get user keys safely + user_keys = [] + for key in user_map: + user_keys.append(key) + components["users_count"] = len(user_keys) + components["sample_users"] = [str(key) for key in user_keys[:5]] + except Exception as e: + logger.error(f"Error getting users count: {str(e)}") + components["users_error"] = str(e) + else: + components["users_error"] = "No users found" + else: + components["vat_error"] = "VAT is not initialized" + + except Exception as e: + logger.error(f"Error in debug endpoint: {str(e)}") + components["error"] = str(e) + + return { + "status": "success", + "components": components, + "is_ready": state.is_ready + } \ No newline at end of file diff --git a/backend/api/risk_metrics.py b/backend/api/risk_metrics.py new file mode 100644 index 0000000..7c9ab8d --- /dev/null +++ b/backend/api/risk_metrics.py @@ -0,0 +1,513 @@ +from typing import Dict, List, Any +from fastapi import APIRouter, HTTPException, Request +import httpx +import logging +from driftpy.math.margin import MarginCategory +import asyncio + +from backend.state import BackendRequest +from backend.utils.risk_metrics import calculate_target_scale_iaw + +router = APIRouter() +logger = logging.getLogger(__name__) + +async def get_jupiter_price_impact( + input_mint: str, + output_mint: str, + amount: int +) -> float: + """ + Get price impact from Jupiter API for a given swap. 
+ """ + url = "https://quote-api.jup.ag/v6/quote" + params = { + "inputMint": input_mint, + "outputMint": output_mint, + "amount": str(amount) # Convert to string for older httpx + } + + client = httpx.AsyncClient( + timeout=httpx.Timeout(30.0), + http2=False # Disable HTTP/2 for older httpx + ) + try: + logger.info(f"Requesting Jupiter quote for {amount} {input_mint} -> {output_mint}") + response = await client.get(url, params=params) + if response.status_code != 200: + error_msg = f"Jupiter API error: {response.text}" + logger.error(error_msg) + raise HTTPException(status_code=502, detail=error_msg) + data = response.json() + price_impact = float(data.get("priceImpactPct", 0)) + logger.info(f"Got price impact: {price_impact}") + return price_impact + except Exception as e: + logger.error(f"Error getting Jupiter price impact: {str(e)}") + raise HTTPException(status_code=502, detail=str(e)) + finally: + await client.aclose() + +async def wait_for_backend_state(request: Request, max_retries: int = 5, delay: float = 1.0) -> bool: + """ + Wait for the backend state to be ready. + + Args: + request: FastAPI request object + max_retries: Maximum number of retries + delay: Delay between retries in seconds + + Returns: + bool: True if backend state is ready, False otherwise + """ + for i in range(max_retries): + if hasattr(request.state, "backend_state") and request.state.backend_state.is_ready: + return True + logger.info(f"Waiting for backend state (attempt {i+1}/{max_retries})") + await asyncio.sleep(delay) + return False + +@router.get("/target_scale_iaw") +async def get_target_scale_iaw( + request: BackendRequest, + market_index: int +) -> Dict[str, Any]: + """ + Get Target Scale IAW and criteria results for all users in a market. 
+ + Returns: + Dict with user public keys as keys and their Target Scale IAW metrics as values + """ + logger.info(f"Processing target_scale_iaw request for market {market_index}") + + try: + # Wait for backend state to be ready + if not await wait_for_backend_state(request): + logger.error("Backend state not ready after retries") + return {"result": "miss", "reason": "state_not_ready_after_retries"} + + state = request.backend_state + vat = state.vat + + if not hasattr(vat, "spot_markets"): + logger.warning("Spot markets not available") + return { + "result": "miss", + "reason": "spot_markets_not_available", + } + + results = {} + + # Get market configuration + try: + market = None + market_map = vat.spot_markets.market_map if hasattr(vat.spot_markets, 'market_map') else vat.spot_markets.markets + + if not market_map: + logger.error("Market map is empty") + return {"result": "miss", "reason": "market_map_empty"} + + # Find market by index + for m in market_map.values(): + if hasattr(m, 'market_index') and m.market_index == market_index: + market = m + break + elif hasattr(m, 'data') and m.data.market_index == market_index: + market = m.data + break + + if not market: + logger.error(f"Market index {market_index} not found") + return {"result": "miss", "reason": f"market_index_{market_index}_not_found"} + + # Get insurance fund balance safely + insurance_fund_balance = 0 + if hasattr(market, 'insurance_fund'): + if hasattr(market.insurance_fund, 'total_shares'): + insurance_fund_balance = market.insurance_fund.total_shares + elif hasattr(market.insurance_fund, 'shares'): + insurance_fund_balance = market.insurance_fund.shares + + logger.info(f"Processing market {market_index} with insurance fund balance: {insurance_fund_balance}") + + # Get market mint safely + market_mint = None + if hasattr(market, 'mint'): + market_mint = str(market.mint) + elif hasattr(market, 'oracle_source'): + market_mint = str(market.oracle_source) + + if not market_mint: + 
logger.error(f"Market {market_index} mint not found") + return {"result": "miss", "reason": f"market_{market_index}_mint_not_found"} + + except Exception as e: + logger.error(f"Error getting market {market_index}: {str(e)}") + return {"result": "miss", "reason": f"market_error: {str(e)}"} + + user_count = 0 + if state.user_map and hasattr(state.user_map, 'users'): + users = state.user_map.users + for user in users.values(): + try: + # Get largest position for price impact calculation + position_value = user.get_spot_market_asset_value(market_index, MarginCategory.MAINTENANCE) + logger.debug(f"User {user.user_public_key} position value: {position_value}") + + if position_value <= 0: + continue + + # Get price impact from Jupiter for the position + try: + price_impact = await get_jupiter_price_impact( + input_mint=market_mint, + output_mint="EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", # USDC mint + amount=int(position_value) + ) + except Exception as e: + logger.error(f"Jupiter API error for user {user.user_public_key}: {e}") + continue + + # Calculate Target Scale IAW and get criteria results + target_scale, criteria = calculate_target_scale_iaw( + user=user, + market_index=market_index, + price_impact=price_impact, + insurance_fund_balance=insurance_fund_balance + ) + + results[str(user.user_public_key)] = { + "target_scale_iaw": target_scale, + "criteria_results": criteria + } + user_count += 1 + + except Exception as e: + logger.error(f"Error calculating Target Scale IAW for user {user.user_public_key}: {e}") + continue + + logger.info(f"Processed {user_count} users for market {market_index}") + + if not results: + logger.warning(f"No results found for market {market_index}") + return {"result": "miss", "reason": "no_results_found"} + + return {"result": "success", "data": results} + + except Exception as e: + logger.error(f"Unexpected error in get_target_scale_iaw: {str(e)}") + return {"result": "miss", "reason": f"unexpected_error: {str(e)}"} + 
+@router.get("/health") +async def health_check(request: Request) -> Dict[str, Any]: + """ + Check the health of the risk metrics service. + """ + state_ready = hasattr(request.state, "backend_state") and request.state.backend_state.is_ready + vat_ready = hasattr(request.state, "backend_state") and request.state.backend_state.vat is not None + + logger.info(f"Health check - State ready: {state_ready}, VAT ready: {vat_ready}") + + return { + "status": "healthy" if state_ready and vat_ready else "initializing", + "state_ready": state_ready, + "vat_ready": vat_ready + } + +@router.get("/debug") +async def debug_state(request: Request) -> Dict[str, Any]: + """ + Debug endpoint to check state details. + """ + state = getattr(request.state, "backend_state", None) + + if not state: + return { + "status": "error", + "error": "no_state", + "details": "No backend state found in request" + } + + components = { + "ready": state.ready, + "vat_exists": state.vat is not None, + "dc_exists": state.dc is not None, + "connection_exists": state.connection is not None, + "spot_markets_exists": hasattr(state.vat, "spot_markets") and state.vat.spot_markets is not None, + "perp_map_exists": state.perp_map is not None, + "user_map_exists": state.user_map is not None, + "stats_map_exists": state.stats_map is not None, + "current_pickle_path": state.current_pickle_path, + "last_oracle_slot": state.last_oracle_slot + } + + # Check if state is initialized + try: + logger.info("Checking state initialization") + if not state.is_ready: + logger.warning("State is not ready") + components["state_error"] = "State is not ready" + return { + "status": "error", + "components": components, + "is_ready": False + } + + # Check VAT details + if state.vat: + logger.info("Checking VAT details") + vat = state.vat + + # Debug VAT structure + logger.info(f"VAT type: {type(vat)}") + logger.info(f"VAT dir: {dir(vat)}") + + # Check spot markets + if vat.spot_markets: + logger.info("Checking spot markets") + 
logger.info(f"Spot markets type: {type(vat.spot_markets)}") + logger.info(f"Spot markets dir: {dir(vat.spot_markets)}") + try: + # Try to get spot markets count + if hasattr(vat.spot_markets, 'size'): + spot_markets_count = vat.spot_markets.size() + components["spot_markets_count"] = spot_markets_count + elif hasattr(vat.spot_markets, 'markets'): + spot_markets_count = len(vat.spot_markets.markets) + components["spot_markets_count"] = spot_markets_count + else: + logger.warning("Could not determine spot markets count") + components["spot_markets_error"] = "Could not determine count" + + # Try to get market details + spot_markets = [] + if hasattr(vat.spot_markets, 'markets'): + for market_data in list(vat.spot_markets.markets.values())[:5]: + if hasattr(market_data, 'data'): + spot_markets.append({ + "market_index": market_data.data.market_index, + "oracle_source": str(market_data.data.oracle_source) + }) + elif hasattr(vat.spot_markets, 'values'): + for market_data in list(vat.spot_markets.values())[:5]: + if hasattr(market_data, 'data'): + spot_markets.append({ + "market_index": market_data.data.market_index, + "oracle_source": str(market_data.data.oracle_source) + }) + + if spot_markets: + components["spot_markets"] = spot_markets + else: + logger.warning("No spot markets found") + components["spot_markets_error"] = "No markets found" + + except Exception as e: + logger.error(f"Error getting spot markets: {str(e)}") + components["spot_markets_error"] = str(e) + + # Check perp markets + if vat.perp_markets: + logger.info("Checking perp markets") + logger.info(f"Perp markets type: {type(vat.perp_markets)}") + logger.info(f"Perp markets dir: {dir(vat.perp_markets)}") + try: + # Try to get perp markets count + if hasattr(vat.perp_markets, 'size'): + perp_markets_count = vat.perp_markets.size() + components["perp_markets_count"] = perp_markets_count + elif hasattr(vat.perp_markets, 'markets'): + perp_markets_count = len(vat.perp_markets.markets) + 
components["perp_markets_count"] = perp_markets_count + else: + logger.warning("Could not determine perp markets count") + components["perp_markets_error"] = "Could not determine count" + + # Try to get market details + perp_markets = [] + if hasattr(vat.perp_markets, 'markets'): + for market_data in list(vat.perp_markets.markets.values())[:5]: + if hasattr(market_data, 'data'): + perp_markets.append({ + "market_index": market_data.data.market_index, + "oracle_source": str(market_data.data.oracle_source) + }) + elif hasattr(vat.perp_markets, 'values'): + for market_data in list(vat.perp_markets.values())[:5]: + if hasattr(market_data, 'data'): + perp_markets.append({ + "market_index": market_data.data.market_index, + "oracle_source": str(market_data.data.oracle_source) + }) + + if perp_markets: + components["perp_markets"] = perp_markets + else: + logger.warning("No perp markets found") + components["perp_markets_error"] = "No markets found" + + except Exception as e: + logger.error(f"Error getting perp markets: {str(e)}") + components["perp_markets_error"] = str(e) + + # Check users + if vat.users: + logger.info("Checking users") + logger.info(f"Users type: {type(vat.users)}") + logger.info(f"Users dir: {dir(vat.users)}") + try: + # Try to get users count + if hasattr(vat.users, 'size'): + users_count = vat.users.size() + components["users_count"] = users_count + elif hasattr(vat.users, 'user_map'): + users_count = len(vat.users.user_map) + components["users_count"] = users_count + else: + logger.warning("Could not determine users count") + components["users_error"] = "Could not determine count" + + # Try to get user details + users = [] + if hasattr(vat.users, 'user_map'): + for user_data in list(vat.users.user_map.values())[:5]: + if hasattr(user_data, 'user_public_key'): + users.append({ + "public_key": user_data.user_public_key + }) + elif hasattr(vat.users, 'values'): + for user_data in list(vat.users.values())[:5]: + if hasattr(user_data, 'user_public_key'): 
+ users.append({ + "public_key": user_data.user_public_key + }) + + if users: + components["users"] = users + else: + logger.warning("No users found") + components["users_error"] = "No users found" + + except Exception as e: + logger.error(f"Error getting users: {str(e)}") + components["users_error"] = str(e) + + # Check user stats + if vat.user_stats: + logger.info("Checking user stats") + logger.info(f"User stats type: {type(vat.user_stats)}") + logger.info(f"User stats dir: {dir(vat.user_stats)}") + try: + # Try to get stats count + if hasattr(vat.user_stats, 'size'): + stats_count = vat.user_stats.size() + components["stats_count"] = stats_count + elif hasattr(vat.user_stats, 'stats'): + stats_count = len(vat.user_stats.stats) + components["stats_count"] = stats_count + else: + logger.warning("Could not determine stats count") + components["stats_error"] = "Could not determine count" + except Exception as e: + logger.error(f"Error getting user stats: {str(e)}") + components["stats_error"] = str(e) + + else: + logger.warning("VAT is not initialized") + components["vat_error"] = "VAT is not initialized" + + except Exception as e: + logger.error(f"Error in debug endpoint: {str(e)}") + components["error"] = str(e) + + logger.info(f"Debug state check: {components}") + + return { + "status": "success", + "components": components, + "is_ready": state.is_ready + } + +@router.get("/vat-state") +async def get_vat_state(request: BackendRequest) -> Dict[str, Any]: + """ + Get the current state of the VAT object. 
+ """ + logger.info("Processing vat-state request") + + try: + # Wait for backend state to be ready + if not await wait_for_backend_state(request): + logger.error("Backend state not ready after retries") + return {"result": "miss", "reason": "state_not_ready_after_retries"} + + state = request.backend_state + vat = state.vat + + if not hasattr(vat, "spot_markets"): + logger.warning("Spot markets not available") + return { + "result": "miss", + "reason": "spot_markets_not_available", + } + + # Get spot markets count safely + spot_markets = {} + if hasattr(vat.spot_markets, 'markets'): + market_map = vat.spot_markets.markets + for market_index, market in market_map.items(): + market_info = { + "market_index": market_index, + "insurance_fund_balance": 0, # Default value + "mint": None # Default value + } + + # Get insurance fund balance safely + if hasattr(market, 'insurance_fund'): + if hasattr(market.insurance_fund, 'total_shares'): + market_info["insurance_fund_balance"] = market.insurance_fund.total_shares / 1e6 # Convert from lamports + elif hasattr(market.insurance_fund, 'shares'): + market_info["insurance_fund_balance"] = market.insurance_fund.shares / 1e6 # Convert from lamports + elif hasattr(market.insurance_fund, 'balance'): + market_info["insurance_fund_balance"] = market.insurance_fund.balance / 1e6 # Convert from lamports + + # Get market mint safely + if hasattr(market, 'mint'): + market_info["mint"] = str(market.mint) + elif hasattr(market, 'oracle_source'): + market_info["mint"] = str(market.oracle_source) + + spot_markets[str(market_index)] = market_info + + # Get perp markets count safely + perp_markets = {} + if hasattr(vat.perp_markets, 'markets'): + market_map = vat.perp_markets.markets + for market_index, market in market_map.items(): + market_info = { + "market_index": market_index, + "insurance_fund_balance": 0 # Default value + } + + # Get insurance fund balance safely + if hasattr(market, 'insurance_fund'): + if 
hasattr(market.insurance_fund, 'total_shares'): + market_info["insurance_fund_balance"] = market.insurance_fund.total_shares / 1e6 # Convert from lamports + elif hasattr(market.insurance_fund, 'shares'): + market_info["insurance_fund_balance"] = market.insurance_fund.shares / 1e6 # Convert from lamports + elif hasattr(market.insurance_fund, 'balance'): + market_info["insurance_fund_balance"] = market.insurance_fund.balance / 1e6 # Convert from lamports + + perp_markets[str(market_index)] = market_info + + return { + "result": "success", + "data": { + "spot_markets": spot_markets, + "perp_markets": perp_markets, + "last_update": state.last_oracle_slot + } + } + + except Exception as e: + logger.error(f"Error getting VAT state: {str(e)}") + return {"result": "miss", "reason": f"error: {str(e)}"} \ No newline at end of file diff --git a/backend/app.py b/backend/app.py index 321b392..193859d 100644 --- a/backend/app.py +++ b/backend/app.py @@ -1,9 +1,11 @@ import glob import os +import logging from contextlib import asynccontextmanager from dotenv import load_dotenv -from fastapi import FastAPI +from fastapi import FastAPI, Request +from fastapi.middleware.cors import CORSMiddleware from backend.api import ( asset_liability, @@ -13,11 +15,20 @@ price_shock, snapshot, ucache, + risk_metrics, + debug, ) from backend.middleware.cache_middleware import CacheMiddleware from backend.middleware.readiness import ReadinessMiddleware from backend.state import BackendState +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' +) +logger = logging.getLogger(__name__) + load_dotenv() state = BackendState() @@ -27,35 +38,51 @@ async def lifespan(app: FastAPI): url = os.getenv("RPC_URL") if not url: raise ValueError("RPC_URL environment variable is not set.") + global state - state.initialize(url) - - print("Checking if cached vat exists") - cached_vat_path = sorted(glob.glob("pickles/*")) - if 
len(cached_vat_path) > 0: - print("Loading cached vat") - await state.load_pickle_snapshot(cached_vat_path[-1]) - else: - print("No cached vat found, bootstrapping") - await state.bootstrap() - await state.take_pickle_snapshot() - state.ready = True - import random - import time - - time.sleep(random.randint(1, 10)) - print("Starting app") - yield - - # Cleanup - state.ready = False - await state.dc.unsubscribe() - await state.connection.close() + logger.info("Initializing backend state...") + try: + state.initialize(url) + + logger.info("Checking if cached vat exists") + cached_vat_path = sorted(glob.glob("pickles/*")) + if len(cached_vat_path) > 0: + logger.info(f"Loading cached vat from {cached_vat_path[-1]}") + await state.load_pickle_snapshot(cached_vat_path[-1]) + else: + logger.info("No cached vat found, bootstrapping") + await state.bootstrap() + logger.info("Taking pickle snapshot") + await state.take_pickle_snapshot() + + state.ready = True + logger.info("Backend state initialization complete") + yield + + except Exception as e: + logger.error(f"Failed to initialize backend state: {e}") + raise + finally: + # Cleanup + logger.info("Shutting down...") + state.ready = False + if hasattr(state, 'dc'): + await state.dc.unsubscribe() + if hasattr(state, 'connection'): + await state.connection.close() + logger.info("App shutdown complete") app = FastAPI(lifespan=lifespan) app.add_middleware(ReadinessMiddleware, state=state) app.add_middleware(CacheMiddleware, state=state, cache_dir="cache") +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) app.include_router(health.router, prefix="/api/health", tags=["health"]) app.include_router(metadata.router, prefix="/api/metadata", tags=["metadata"]) @@ -66,9 +93,25 @@ async def lifespan(app: FastAPI): ) app.include_router(snapshot.router, prefix="/api/snapshot", tags=["snapshot"]) app.include_router(ucache.router, prefix="/api/ucache", 
tags=["ucache"]) +app.include_router(risk_metrics.router, prefix="/api/risk-metrics", tags=["risk-metrics"]) +app.include_router(debug.router, prefix="/api/debug", tags=["debug"]) # NOTE: All other routes should be in /api/* within the /api folder. Routes outside of /api are not exposed in k8s @app.get("/") async def root(): return {"message": "risk dashboard backend is online"} + +@app.middleware("http") +async def add_backend_state(request: Request, call_next): + """Middleware to attach backend state to request.""" + logger.debug("Adding backend state to request") + try: + if not hasattr(request.state, "backend_state"): + request.state.backend_state = state + logger.debug("Backend state attached to request") + response = await call_next(request) + return response + except Exception as e: + logger.error(f"Error in backend state middleware: {e}") + raise diff --git a/backend/middleware/readiness.py b/backend/middleware/readiness.py index 7975f29..dfcda27 100644 --- a/backend/middleware/readiness.py +++ b/backend/middleware/readiness.py @@ -1,19 +1,48 @@ -from fastapi import HTTPException -from starlette.middleware.base import BaseHTTPMiddleware -from starlette.types import ASGIApp +import logging +from typing import Callable -from backend.state import BackendRequest, BackendState +from fastapi import Request, Response +from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint +from starlette.responses import JSONResponse +from backend.state import BackendState + +logger = logging.getLogger(__name__) class ReadinessMiddleware(BaseHTTPMiddleware): - def __init__(self, app: ASGIApp, state: BackendState): + def __init__(self, app, state: BackendState): super().__init__(app) self.state = state + logger.info("ReadinessMiddleware initialized") - async def dispatch(self, request: BackendRequest, call_next): - if not self.state.ready and request.url.path != "/health": - raise HTTPException(status_code=503, detail="Service is not ready") - - 
request.state.backend_state = self.state - response = await call_next(request) - return response + async def dispatch( + self, request: Request, call_next: RequestResponseEndpoint + ) -> Response: + """ + Check if the backend state is ready before processing requests. + """ + logger.debug("Checking state readiness") + + # Health, debug, and VAT state endpoints should always be accessible + if request.url.path in [ + "/api/health", + "/api/risk-metrics/health", + "/api/risk-metrics/debug", + "/api/risk-metrics/vat-state" + ]: + return await call_next(request) + + # For all other endpoints, check state readiness + if not self.state.is_ready: + logger.warning("Backend state not ready") + return JSONResponse( + status_code=503, + content={"result": "miss", "reason": "state_not_ready"} + ) + + try: + response = await call_next(request) + return response + except Exception as e: + logger.error(f"Error in request processing: {e}") + raise diff --git a/backend/state.py b/backend/state.py index b8066c4..e89790e 100644 --- a/backend/state.py +++ b/backend/state.py @@ -1,6 +1,8 @@ import os from asyncio import create_task, gather from datetime import datetime +import logging +from typing import Optional from anchorpy.provider import Wallet from driftpy.account_subscription_config import AccountSubscriptionConfig @@ -24,12 +26,14 @@ from backend.utils.vat import load_newest_files from backend.utils.waiting_for import waiting_for +logger = logging.getLogger(__name__) + class BackendState: connection: AsyncClient dc: DriftClient - spot_map: MarketMap - perp_map: MarketMap + spot_markets: MarketMap + perp_markets: MarketMap user_map: UserMap stats_map: UserStatsMap @@ -38,56 +42,97 @@ class BackendState: vat: Vat ready: bool - def initialize( - self, url: str - ): # Not using __init__ because we need the rpc url to be passed in - self.connection = AsyncClient(url) - self.dc = DriftClient( - self.connection, - Wallet.dummy(), - "mainnet", - 
account_subscription=AccountSubscriptionConfig("cached"), - ) - self.perp_map = MarketMap( - MarketMapConfig( - self.dc.program, - MarketType.Perp(), - MarketMapWebsocketConfig(), - self.dc.connection, - ) - ) - self.spot_map = MarketMap( - MarketMapConfig( - self.dc.program, - MarketType.Spot(), - MarketMapWebsocketConfig(), - self.dc.connection, - ) - ) - self.user_map = UserMap(UserMapConfig(self.dc, UserMapWebsocketConfig())) - self.stats_map = UserStatsMap(UserStatsMapConfig(self.dc)) - self.vat = Vat( - self.dc, - self.user_map, - self.stats_map, - self.spot_map, - self.perp_map, - ) + def __init__(self): self.ready = False - self.current_pickle_path = "bootstrap" + self.vat = None + self.dc = None + self.connection = None + self.spot_markets = None + self.perp_markets = None + self.user_map = None + self.stats_map = None + self.current_pickle_path = None self.last_oracle_slot = 0 + logger.info("BackendState initialized") + + def initialize(self, url: str): + """Initialize the backend state with RPC URL.""" + logger.info(f"Initializing backend state with URL: {url}") + try: + self.connection = AsyncClient(url) + self.dc = DriftClient( + self.connection, + Wallet.dummy(), + "mainnet", + account_subscription=AccountSubscriptionConfig("cached"), + ) + logger.info("Created DriftClient") + + self.perp_markets = MarketMap( + MarketMapConfig( + self.dc.program, + MarketType.Perp(), + MarketMapWebsocketConfig(), + self.dc.connection, + ) + ) + logger.info("Created perp_markets") + + self.spot_markets = MarketMap( + MarketMapConfig( + self.dc.program, + MarketType.Spot(), + MarketMapWebsocketConfig(), + self.dc.connection, + ) + ) + logger.info("Created spot_markets") + + self.user_map = UserMap(UserMapConfig(self.dc, UserMapWebsocketConfig())) + logger.info("Created user_map") + + self.stats_map = UserStatsMap(UserStatsMapConfig(self.dc)) + logger.info("Created stats_map") + + # Don't create VAT here, it will be created after subscriptions in bootstrap + 
logger.info("Backend state initialized successfully") + except Exception as e: + logger.error(f"Failed to initialize backend state: {e}") + self.ready = False + raise async def bootstrap(self): - with waiting_for("drift client"): - await self.dc.subscribe() - with waiting_for("subscriptions"): - await gather( - create_task(self.spot_map.subscribe()), - create_task(self.perp_map.subscribe()), - create_task(self.user_map.subscribe()), - create_task(self.stats_map.subscribe()), + """Bootstrap the backend state.""" + logger.info("Starting bootstrap process") + try: + with waiting_for("drift client"): + await self.dc.subscribe() + with waiting_for("subscriptions"): + await gather( + create_task(self.spot_markets.subscribe()), + create_task(self.perp_markets.subscribe()), + create_task(self.user_map.subscribe()), + create_task(self.stats_map.subscribe()), + ) + + logger.info("Creating VAT object...") + self.vat = Vat( + self.dc, + self.user_map, + self.stats_map, + self.spot_markets, + self.perp_markets, ) - self.current_pickle_path = "bootstrap" + logger.info("VAT object created successfully") + logger.info(f"VAT attributes: {dir(self.vat)}") + + # Set ready flag only after successful bootstrap + self.ready = True + logger.info("Bootstrap completed successfully") + except Exception as e: + logger.error(f"Bootstrap failed: {e}") + self.ready = False + raise async def take_pickle_snapshot(self): now = datetime.now() @@ -104,27 +149,157 @@ async def take_pickle_snapshot(self): return result async def load_pickle_snapshot(self, directory: str): - pickle_map = load_newest_files(directory) - self.current_pickle_path = os.path.realpath(directory) - with waiting_for("unpickling"): - await self.vat.unpickle( - users_filename=pickle_map["usermap"], - user_stats_filename=pickle_map["userstats"], - spot_markets_filename=pickle_map["spot"], - perp_markets_filename=pickle_map["perp"], - spot_oracles_filename=pickle_map["spotoracles"], - 
perp_oracles_filename=pickle_map["perporacles"], - ) + """Load state from pickle snapshot.""" + logger.info(f"Loading pickle snapshot from {directory}") + try: + pickle_map = load_newest_files(directory) + self.current_pickle_path = os.path.realpath(directory) + + logger.info("Starting VAT unpickling process...") + logger.info(f"Pickle map: {pickle_map}") + + # Create VAT object before unpickling + if self.vat is None: + logger.info("Creating new VAT object...") + self.vat = Vat( + self.dc, + self.user_map, + self.stats_map, + self.spot_markets, + self.perp_markets, + ) + logger.info("VAT object created successfully") + logger.info(f"Initial VAT attributes: {dir(self.vat)}") + logger.info(f"Initial VAT type: {type(self.vat)}") + + with waiting_for("unpickling"): + logger.info("Unpickling VAT with files:") + logger.info(f"- Users: {pickle_map['usermap']}") + logger.info(f"- User Stats: {pickle_map['userstats']}") + logger.info(f"- Spot Markets: {pickle_map['spot']}") + logger.info(f"- Perp Markets: {pickle_map['perp']}") + logger.info(f"- Spot Oracles: {pickle_map['spotoracles']}") + logger.info(f"- Perp Oracles: {pickle_map['perporacles']}") + + await self.vat.unpickle( + users_filename=pickle_map["usermap"], + user_stats_filename=pickle_map["userstats"], + spot_markets_filename=pickle_map["spot"], + perp_markets_filename=pickle_map["perp"], + spot_oracles_filename=pickle_map["spotoracles"], + perp_oracles_filename=pickle_map["perporacles"], + ) + + logger.info("VAT unpickling complete") + logger.info(f"VAT attributes after unpickling: {dir(self.vat)}") + logger.info(f"VAT type after unpickling: {type(self.vat)}") + logger.info(f"VAT spot markets available: {hasattr(self.vat, 'spot_markets')}") + logger.info(f"VAT perp markets available: {hasattr(self.vat, 'perp_markets')}") + if hasattr(self.vat, 'spot_markets'): + logger.info(f"Spot markets type: {type(self.vat.spot_markets)}") + logger.info(f"Spot markets attributes: {dir(self.vat.spot_markets)}") + if 
hasattr(self.vat, 'perp_markets'): + logger.info(f"Perp markets type: {type(self.vat.perp_markets)}") + logger.info(f"Perp markets attributes: {dir(self.vat.perp_markets)}") - self.last_oracle_slot = int( - pickle_map["perporacles"].split("_")[-1].split(".")[0] - ) - return pickle_map + self.last_oracle_slot = int( + pickle_map["perporacles"].split("_")[-1].split(".")[0] + ) + + # Set ready flag only after successful load + self.ready = True + logger.info("Pickle snapshot loaded successfully") + return pickle_map + + except Exception as e: + logger.error(f"Failed to load pickle snapshot: {e}") + self.ready = False + raise async def close(self): await self.dc.unsubscribe() await self.connection.close() + @property + def is_ready(self) -> bool: + """ + Check if the backend state is ready. + """ + try: + logger.debug("Checking readiness") + + # Check if state is initialized + if not self.ready: + logger.debug("State not initialized") + return False + + # Check if VAT is initialized + if not self.vat: + logger.debug("VAT not initialized") + return False + + # Check if DC is initialized + if not self.dc: + logger.debug("DC not initialized") + return False + + # Check if connection is initialized + if not self.connection: + logger.debug("Connection not initialized") + return False + + # Get market maps + spot_market_map = None + perp_market_map = None + + if hasattr(self.vat, "spot_markets"): + if hasattr(self.vat.spot_markets, "market_map"): + spot_market_map = self.vat.spot_markets.market_map + elif hasattr(self.vat.spot_markets, "markets"): + spot_market_map = self.vat.spot_markets.markets + + if hasattr(self.vat, "perp_markets"): + if hasattr(self.vat.perp_markets, "market_map"): + perp_market_map = self.vat.perp_markets.market_map + elif hasattr(self.vat.perp_markets, "markets"): + perp_market_map = self.vat.perp_markets.markets + + if not spot_market_map or not perp_market_map: + logger.debug("Market maps not available") + return False + + # Check if market maps 
have data + try: + spot_markets_ready = False + perp_markets_ready = False + + # Check spot markets + if spot_market_map: + for _ in spot_market_map: + spot_markets_ready = True + break + + # Check perp markets + if perp_market_map: + for _ in perp_market_map: + perp_markets_ready = True + break + + if not spot_markets_ready or not perp_markets_ready: + logger.debug("Market maps empty") + return False + + except Exception as e: + logger.error(f"Error checking market maps: {e}") + return False + + logger.debug("All components ready") + return True + + except Exception as e: + logger.error(f"Error checking readiness: {e}") + return False + class BackendRequest(Request): @property diff --git a/backend/utils/matrix.py b/backend/utils/matrix.py index 7c9f257..2c9e917 100644 --- a/backend/utils/matrix.py +++ b/backend/utils/matrix.py @@ -1,7 +1,9 @@ import pandas as pd import logging from driftpy.constants.spot_markets import mainnet_spot_market_configs +from driftpy.constants import BASE_PRECISION from driftpy.pickle.vat import Vat +from driftpy.math.margin import MarginCategory from backend.utils.user_metrics import get_user_leverages_for_asset_liability @@ -19,77 +21,120 @@ def format_metric( return f"{formatted} ✅" if should_highlight and mode > 0 else formatted -async def get_matrix(vat: Vat, mode: int = 0, perp_market_index: int = 0): - NUMBER_OF_SPOT = len(mainnet_spot_market_configs) - logger.info(f"Processing data for {NUMBER_OF_SPOT} spot markets...") - - logger.info("Calculating user leverages...") - res = get_user_leverages_for_asset_liability(vat.users) - leverage_data = { - 0: res["leverages_none"], - 1: res["leverages_none"], - 2: [x for x in res["leverages_initial"] if int(x["health"]) <= 10], - 3: [x for x in res["leverages_maintenance"] if int(x["health"]) <= 10], - } - - user_keys = ( - [x["user_key"] for x in leverage_data[mode]] - if mode in [2, 3] - else res["user_keys"] - ) - logger.info(f"Processing data for {len(user_keys)} users...") +async def 
get_leverage_data(vat, mode): + """Get leverage data for the given mode.""" + try: + result = get_user_leverages_for_asset_liability(vat.users) + if not result: + return None + + leverage_data = { + 0: result["leverages_none"], + 1: result["leverages_none"], + 2: [x for x in result["leverages_initial"] if int(x["health"]) <= 10], + 3: [x for x in result["leverages_maintenance"] if int(x["health"]) <= 10], + } + return leverage_data + except Exception as e: + logger.error(f"Error getting leverage data: {str(e)}") + return None - df = pd.DataFrame(leverage_data[mode], index=user_keys) - logger.info("Initializing market columns...") - new_columns = {} - for i in range(NUMBER_OF_SPOT): - prefix = f"spot_{i}" - column_names = [ - f"{prefix}_all_assets", - f"{prefix}_all", - f"{prefix}_all_perp", - f"{prefix}_all_spot", - f"{prefix}_perp_{perp_market_index}_long", - f"{prefix}_perp_{perp_market_index}_short", - ] - for col in column_names: - new_columns[col] = pd.Series(0.0, index=df.index) - - logger.info("Calculating market metrics for each user...") - for idx, row in df.iterrows(): - spot_asset = row["spot_asset"] - - for market_id, value in row["net_v"].items(): - if value <= 0: +async def get_matrix(vat: Vat, mode: int = 0, perp_market_index: int = 0): + """ + Get the matrix of user positions and metrics. 
+ """ + try: + # Get spot markets + spot_markets = set() + if hasattr(vat, "spot_markets") and hasattr(vat.spot_markets, "markets"): + for market_index in vat.spot_markets.markets: + spot_markets.add(market_index) + + # Get perp markets + perp_markets = set() + if hasattr(vat, "perp_markets") and hasattr(vat.perp_markets, "markets"): + for market_index in vat.perp_markets.markets: + perp_markets.add(market_index) + + # Get user keys + user_keys = [] + if hasattr(vat, "users") and hasattr(vat.users, "users"): + for user_key in vat.users.users: + user_keys.append(user_key) + + # Get leverage data + leverage_data = await get_leverage_data(vat, mode) + if not leverage_data: + return None + + # Get user keys based on mode + user_keys = ( + [x["user_key"] for x in leverage_data[mode]] + if mode in [2, 3] + else user_keys + ) + logger.info(f"Processing data for {len(user_keys)} users...") + + df = pd.DataFrame(leverage_data[mode], index=user_keys) + + logger.info("Initializing market columns...") + new_columns = {} + for market_id in spot_markets: + prefix = f"spot_{market_id}" + column_names = [ + f"{prefix}_all_assets", + f"{prefix}_all", + f"{prefix}_all_perp", + f"{prefix}_all_spot", + f"{prefix}_perp_{perp_market_index}_long", + f"{prefix}_perp_{perp_market_index}_short", + ] + for col in column_names: + new_columns[col] = pd.Series(0.0, index=df.index) + + logger.info("Calculating market metrics for each user...") + for user_key in user_keys: + try: + user = vat.users.users[user_key] + for market_id in spot_markets: + prefix = f"spot_{market_id}" + + # Calculate metrics for each market + all_assets = user.get_spot_market_asset_value(market_id, MarginCategory.MAINTENANCE) + all_liabilities = user.get_spot_market_liability_value(market_id, MarginCategory.MAINTENANCE) + all_perp = user.get_perp_market_value(market_id, MarginCategory.MAINTENANCE) + all_spot = user.get_spot_market_value(market_id, MarginCategory.MAINTENANCE) + + # Get perp position details + perp_long = 
0.0 + perp_short = 0.0 + perp_position = user.get_perp_position(perp_market_index) + if perp_position: + base_asset_amount = perp_position.base_asset_amount / BASE_PRECISION + if base_asset_amount > 0: + perp_long = base_asset_amount + else: + perp_short = abs(base_asset_amount) + + # Update DataFrame + new_columns[f"{prefix}_all_assets"][user_key] = all_assets + new_columns[f"{prefix}_all"][user_key] = all_liabilities + new_columns[f"{prefix}_all_perp"][user_key] = all_perp + new_columns[f"{prefix}_all_spot"][user_key] = all_spot + new_columns[f"{prefix}_perp_{perp_market_index}_long"][user_key] = perp_long + new_columns[f"{prefix}_perp_{perp_market_index}_short"][user_key] = perp_short + + except Exception as e: + logger.error(f"Error processing user {user_key}: {str(e)}") continue - - base_name = f"spot_{market_id}" - metrics = { - f"{base_name}_all_assets": value, - f"{base_name}_all": value - / spot_asset - * (row["perp_liability"] + row["spot_liability"]), - f"{base_name}_all_perp": value / spot_asset * row["perp_liability"], - f"{base_name}_all_spot": value / spot_asset * row["spot_liability"], - } - - net_perp = float(row["net_p"][perp_market_index]) - - if net_perp > 0: - metrics[f"{base_name}_perp_{perp_market_index}_long"] = ( - value / spot_asset * net_perp - ) - if net_perp < 0: - metrics[f"{base_name}_perp_{perp_market_index}_short"] = ( - value / spot_asset * net_perp - ) - - for col, val in metrics.items(): - new_columns[col][idx] = val - - logger.info("Finalizing DataFrame...") - df = pd.concat([df, pd.DataFrame(new_columns)], axis=1) - logger.info("Matrix calculation complete") - - return df + + # Add new columns to DataFrame + for col_name, col_data in new_columns.items(): + df[col_name] = col_data + + return df + + except Exception as e: + logger.error(f"Error in get_matrix: {str(e)}") + return None diff --git a/backend/utils/risk_metrics.py b/backend/utils/risk_metrics.py new file mode 100644 index 0000000..53e2fac --- /dev/null +++ 
b/backend/utils/risk_metrics.py @@ -0,0 +1,118 @@ +from typing import Dict, Tuple +from driftpy.drift_user import DriftUser +from driftpy.math.margin import MarginCategory +from driftpy.constants.numeric_constants import MARGIN_PRECISION, QUOTE_PRECISION +from driftpy.constants.spot_markets import mainnet_spot_market_configs +from driftpy.constants.perp_markets import mainnet_perp_market_configs + +def check_liquidity(user: DriftUser, price_impact: float, market_index: int) -> bool: + """ + Check if the liquidity criteria passes for a given market. + + Args: + user: DriftUser instance + price_impact: Price impact percentage from Jupiter + market_index: Spot market index + + Returns: + bool: True if liquidity check passes + """ + market_config = mainnet_spot_market_configs[market_index] + maint_asset_weight = market_config.maintenance_asset_weight / MARGIN_PRECISION + return price_impact < (1 - maint_asset_weight) + +def check_spot_leverage(user: DriftUser, market_index: int) -> bool: + """ + Check if spot position's effective leverage is within limits. + + Args: + user: DriftUser instance + market_index: Spot market index + + Returns: + bool: True if spot leverage check passes + """ + market_config = mainnet_spot_market_configs[market_index] + maint_asset_weight = market_config.maintenance_asset_weight / MARGIN_PRECISION + + # Calculate effective leverage for spot positions + asset_value = user.get_spot_market_asset_value(market_index, MarginCategory.MAINTENANCE) + liability_value = user.get_spot_market_liability_value(market_index, MarginCategory.MAINTENANCE) + + if asset_value == 0: + return True # No position means no leverage + + effective_leverage = liability_value / asset_value + return effective_leverage < (0.5 * maint_asset_weight) + +def check_perp_leverage(user: DriftUser, market_index: int) -> bool: + """ + Check if perp position's effective leverage is within limits. 
+ + Args: + user: DriftUser instance + market_index: Perp market index + + Returns: + bool: True if perp leverage check passes + """ + # Calculate effective leverage for perp positions + perp_value = abs(user.get_perp_market_value(market_index)) + total_collateral = user.get_total_collateral(MarginCategory.MAINTENANCE) + + if total_collateral == 0: + return False # No collateral means infinite leverage + + effective_leverage = perp_value / total_collateral + return 1 <= effective_leverage <= 2 + +def check_insurance_coverage(user: DriftUser, market_index: int, insurance_fund_balance: int) -> bool: + """ + Check if high leverage positions are covered by insurance fund. + + Args: + user: DriftUser instance + market_index: Perp market index + insurance_fund_balance: Current insurance fund balance for the market + + Returns: + bool: True if insurance coverage is sufficient + """ + leverage = user.get_leverage() / MARGIN_PRECISION + if leverage <= 2: + return True + + position_notional = abs(user.get_perp_market_value(market_index)) + return position_notional <= insurance_fund_balance + +def calculate_target_scale_iaw( + user: DriftUser, + market_index: int, + price_impact: float, + insurance_fund_balance: int +) -> Tuple[float, Dict[str, bool]]: + """ + Calculate Target Scale IAW based on all safety criteria. 
+ + Args: + user: DriftUser instance + market_index: Market index + price_impact: Price impact percentage from Jupiter + insurance_fund_balance: Current insurance fund balance + + Returns: + Tuple[float, Dict[str, bool]]: Target Scale IAW value and dict of criteria results + """ + criteria_results = { + "liquidity_check": check_liquidity(user, price_impact, market_index), + "spot_leverage": check_spot_leverage(user, market_index), + "perp_leverage": check_perp_leverage(user, market_index), + "insurance_coverage": check_insurance_coverage(user, market_index, insurance_fund_balance) + } + + # If all criteria pass, set to 1.2x total deposits notional + if all(criteria_results.values()): + total_deposits = user.get_spot_market_asset_value(None, MarginCategory.INITIAL) / QUOTE_PRECISION + return (1.2 * total_deposits, criteria_results) + + return (0.0, criteria_results) \ No newline at end of file diff --git a/backend/utils/user_metrics.py b/backend/utils/user_metrics.py index bf38212..5d1fc49 100644 --- a/backend/utils/user_metrics.py +++ b/backend/utils/user_metrics.py @@ -1,17 +1,20 @@ import copy import functools -from typing import List, Optional +import logging +from typing import List, Optional, Dict, Any from driftpy.accounts.cache import DriftClientCache from driftpy.constants.numeric_constants import MARGIN_PRECISION, QUOTE_PRECISION from driftpy.constants.perp_markets import mainnet_perp_market_configs from driftpy.constants.spot_markets import mainnet_spot_market_configs +from driftpy.constants import BASE_PRECISION from driftpy.drift_client import DriftClient from driftpy.drift_user import DriftUser from driftpy.math.margin import MarginCategory from driftpy.types import OraclePriceData from driftpy.user_map.user_map import UserMap +logger = logging.getLogger(__name__) def get_init_health(user: DriftUser): """ @@ -217,23 +220,91 @@ def calculate_leverages_for_price_shock( ] -def get_user_leverages_for_asset_liability(user_map: UserMap): - user_keys = 
list(user_map.user_map.keys()) - user_values = list(user_map.values()) - - leverages_none = calculate_leverages_for_asset_liability(user_values, None) - leverages_initial = calculate_leverages_for_asset_liability( - user_values, MarginCategory.INITIAL - ) - leverages_maintenance = calculate_leverages_for_asset_liability( - user_values, MarginCategory.MAINTENANCE - ) - return { - "leverages_none": leverages_none, - "leverages_initial": leverages_initial, - "leverages_maintenance": leverages_maintenance, - "user_keys": user_keys, - } +def get_user_leverages_for_asset_liability(users: UserMap) -> Dict[str, Any]: + """ + Calculate user leverages for asset liability matrix. + """ + try: + # Get user keys safely + user_keys = [] + if hasattr(users, 'users'): + for user_key in users.users: + user_keys.append(user_key) + elif hasattr(users, 'user_map'): + for user_key in users.user_map: + user_keys.append(user_key) + + # Initialize result arrays + leverages_none = [] + leverages_initial = [] + leverages_maintenance = [] + + # Process each user + for user_key in user_keys: + try: + user = users.users[user_key] if hasattr(users, 'users') else users.user_map[user_key] + + # Get user metrics + spot_asset = user.get_total_spot_value(MarginCategory.MAINTENANCE) + spot_liability = user.get_total_spot_liability_value(MarginCategory.MAINTENANCE) + perp_liability = user.get_total_perp_liability_value(MarginCategory.MAINTENANCE) + + # Calculate health + health = user.get_health(MarginCategory.MAINTENANCE) + + # Get net values + net_v = {} + net_p = {} + + # Get spot market values + for market_index in range(100): # Assuming max 100 markets + try: + value = user.get_spot_market_asset_value(market_index, MarginCategory.MAINTENANCE) + if value > 0: + net_v[market_index] = value + except: + continue + + # Get perp market values + for market_index in range(100): # Assuming max 100 markets + try: + perp_position = user.get_perp_position(market_index) + if perp_position: + 
net_p[market_index] = perp_position.base_asset_amount / BASE_PRECISION + except: + continue + + # Create user data + user_data = { + "user_key": user_key, + "spot_asset": spot_asset, + "spot_liability": spot_liability, + "perp_liability": perp_liability, + "health": health, + "net_v": net_v, + "net_p": net_p, + } + + # Add to appropriate arrays + leverages_none.append(user_data) + if health <= 10: + leverages_initial.append(user_data) + leverages_maintenance.append(user_data) + + except Exception as e: + logger.error(f"Error processing user {user_key}: {str(e)}") + continue + + return { + "user_keys": user_keys, + "leverages_none": leverages_none, + "leverages_initial": leverages_initial, + "leverages_maintenance": leverages_maintenance, + } + + except Exception as e: + logger.error(f"Error in get_user_leverages_for_asset_liability: {str(e)}") + return None def get_user_leverages_for_price_shock( diff --git a/constraints.txt b/constraints.txt new file mode 100644 index 0000000..be9bda8 --- /dev/null +++ b/constraints.txt @@ -0,0 +1,4 @@ +httpcore==0.16.3 +httpx==0.23.1 +packaging==23.1 +driftpy==0.7.99 \ No newline at end of file diff --git a/requirements-minimal.txt b/requirements-minimal.txt new file mode 100644 index 0000000..131805f --- /dev/null +++ b/requirements-minimal.txt @@ -0,0 +1,6 @@ +driftpy==0.7.99 +fastapi==0.115.0 +httpx==0.23.1 +httpcore==0.16.3 +packaging==23.1 +uvicorn==0.31.0 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 7ed8c26..c7533f3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ +-c constraints.txt aiodns==3.0.0 aiohttp==3.8.3 aiosignal==1.3.1 @@ -47,8 +48,8 @@ GitPython==3.1.43 grpcio==1.64.1 gunicorn==23.0.0 h11==0.14.0 -httpcore==0.16.3 -httpx==0.23.1 +httpcore>=1.0.0 +httpx>=0.25.2 humanize==4.10.0 idna==3.4 iniconfig==1.1.1 @@ -115,6 +116,8 @@ pyrsistent==0.19.2 pythclient==0.1.4 python-dateutil==2.9.0.post0 python-dotenv==1.0.0 +python-dateutil==2.9.0.post0 
+python-dotenv==1.0.0 pytz==2024.1 PyYAML==6.0.1 pyyaml_env_tag==0.1 diff --git a/src/.DS_Store b/src/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..a8d6adc6374480fbc7468b43ff0d40bc2d4c0d4b GIT binary patch literal 6148 zcmeHKy-veG4ED7hiptOlG2Sb5W=X5Cv2|eVzgnbJ$-=;ZlpSV-*cf;M-l0#!%FgGr z+ayX`2?-&DY{~Z}XZw8jof6*>ksI93dqiC#YC{-dWd@ZNQ>TO-Qo4trPZ-zZu3hO=~?$+XX&4tuWCIG!fy6dvu#`^VN8{FMk-u)O<_n8&ftv{}X;I1OmY>MD zvz2p>uiqNakAZ`dy~w)HUdg-tvUs&7#(*(k4E#I;sM##-j-a*1fH7bU>=@wxgNHIk zin(C;b)X3c6<`b9LGa9X36AlKkzy_g3&cq%P(qzvF`R^>->YAwmz`tU^wc=qsz>>7K))vRL) Date: Thu, 26 Dec 2024 17:56:22 -0800 Subject: [PATCH 09/10] added documentation --- .DS_Store | Bin 10244 -> 10244 bytes .gitignore | 2 +- ...0241223-160016 Maintenance Asset Weight.md | 73 +++++++ ...223-160343 Aggregate Effective Leverage.md | 84 ++++++++ .../20241223-161359 Effective Leverage.md | 132 +++++++++++++ .../20241223-162925 Position Notional.md | 144 ++++++++++++++ documentation/20241223-164141 Collateral.md | 178 +++++++++++++++++ ...0241223-170104 Excess Leverage Coverage.md | 180 ++++++++++++++++++ .../20241223-172140 Initial Asset Weight.md | 171 +++++++++++++++++ .../20241223-173355 Target Scale IAW.md | 147 ++++++++++++++ ...20241223-174915 Total Deposits Notional.md | 137 +++++++++++++ ...Dashboard Trial Assignment (refactored).md | 112 +++++++++++ .../Drift_v2_Risk_Dashboard_Analysis.md | 168 ++++++++++++++++ documentation/debug_findings.md | 127 ++++++++++++ documentation/debug_progress.md | 61 ++++++ documentation/vat_error_analysis.md | 125 ++++++++++++ documentation/vat_market_map_issue.md | 79 ++++++++ 17 files changed, 1919 insertions(+), 1 deletion(-) create mode 100644 documentation/20241223-160016 Maintenance Asset Weight.md create mode 100644 documentation/20241223-160343 Aggregate Effective Leverage.md create mode 100644 documentation/20241223-161359 Effective Leverage.md create mode 100644 documentation/20241223-162925 Position Notional.md create mode 100644 
documentation/20241223-164141 Collateral.md create mode 100644 documentation/20241223-170104 Excess Leverage Coverage.md create mode 100644 documentation/20241223-172140 Initial Asset Weight.md create mode 100644 documentation/20241223-173355 Target Scale IAW.md create mode 100644 documentation/20241223-174915 Total Deposits Notional.md create mode 100644 documentation/Drift Risk Dashboard Trial Assignment (refactored).md create mode 100644 documentation/Drift_v2_Risk_Dashboard_Analysis.md create mode 100644 documentation/debug_findings.md create mode 100644 documentation/debug_progress.md create mode 100644 documentation/vat_error_analysis.md create mode 100644 documentation/vat_market_map_issue.md diff --git a/.DS_Store b/.DS_Store index 276c807d9c5a31441a03860b23f712bed9a63183..6161bdfb6a031313ab4fc894d5d9863aab338313 100644 GIT binary patch delta 100 zcmZn(XbISGR)lfq)>BG+bi1q)UpRmU*_04P2lBme*a delta 128 zcmZn(XbISGR)lfKJrt}Mn)z&3WkOzwK@vbmPQ6T3T8$|wY8iaqRRT#LGjr+ hxq10rlhZ}z8M`O{myqAQOf->eGn;}XD_MHunE=_xCYS&K diff --git a/.gitignore b/.gitignore index 8829ad9..9d8bba8 100644 --- a/.gitignore +++ b/.gitignore @@ -10,4 +10,4 @@ ignore ucache matrix_response.json logs/* -documentation/* \ No newline at end of file +.DS_Store diff --git a/documentation/20241223-160016 Maintenance Asset Weight.md b/documentation/20241223-160016 Maintenance Asset Weight.md new file mode 100644 index 0000000..e1ebc35 --- /dev/null +++ b/documentation/20241223-160016 Maintenance Asset Weight.md @@ -0,0 +1,73 @@ +--- +aliases: + - Maintenance Asset Weight +--- +___ +Body/Content + +The **“maint asset weight”** (short for “maintenance asset weight”) is a parameter used in risk management within trading protocols like Drift to determine the margin or capital required to maintain a position. It is typically defined as a fraction (or percentage) of the position’s notional value and represents the minimum capital a user must have in their account to avoid liquidation. 
+ + +**Context for Drift Protocol** + +• **Maint asset weight** helps the protocol evaluate the risk associated with a position and ensures that the platform remains solvent during volatile market conditions. + +• The weight is often specified in the protocol’s documentation for each market or asset class. For example: + +• A maint asset weight of 0.8 means 80% of the position’s notional value is required as collateral to avoid liquidation. + +• A maint asset weight of 1 means the full value of the position is required as collateral. + + + +**In This Assignment** + + + +The **“maint asset weight”** is part of the condition for **On-Chain Liquidity Check**: + + + + + +\text{price impact } < (1 - \text{maint asset weight}) + + + +• **Interpretation**: + +• The formula evaluates whether the price impact from a simulated swap is acceptable, compared to a threshold derived from the maint asset weight. + +• A smaller maint asset weight indicates that the protocol tolerates less price impact, as more margin is required. + + + +**Example** + +• If the maint asset weight is **0.9**, then: + + + +\text{price impact must be less than } (1 - 0.9) = 0.1 \, \text{(10%)}. + + + +• This means the swap should result in no more than a 10% price impact for the criterion to pass. + + + +**Where to Find Maint Asset Weight** + +• The value for the maint asset weight is typically specified in the protocol’s market configuration or documentation. In the context of Drift Protocol, refer to the [Market Specs](https://docs.drift.trade/trading/market-specs) for details. + + + +Let me know if you need further clarification! 
+ +___ +Footer/References + +___ +Tags + +___ \ No newline at end of file diff --git a/documentation/20241223-160343 Aggregate Effective Leverage.md b/documentation/20241223-160343 Aggregate Effective Leverage.md new file mode 100644 index 0000000..f41988a --- /dev/null +++ b/documentation/20241223-160343 Aggregate Effective Leverage.md @@ -0,0 +1,84 @@ +--- +aliases: + - Aggregate Effective Leverage +--- +___ +Body/Content + +**Aggregate Effective Leverage** is a metric used in trading systems like Drift Protocol to measure the risk exposure of an account relative to its collateral. It accounts for the combined effect of all positions (spot and derivative) in an account, scaled by their notional value, and evaluates the leverage at an aggregate level. + +**Key Components of Aggregate Effective Leverage** + +1. **[[20241223-161359 Effective Leverage|Effective Leverage]]:** + • Defined as: + [[20241223-161359 Effective Leverage|Effective Leverage]] = ([[20241223-162925 Position Notional|Position Notional]]/[[20241223-164141 Collateral|Collateral]]) + It represents the ratio of the notional value of a position (or group of positions) to the collateral backing it. + +2. **Aggregate Notional Value:** + • The combined notional value of all positions in the account. Notional value is the dollar value of the assets represented by the positions. + +3. **Aggregate Collateral:** + • The total amount of collateral available in the account to support all positions. + +--- + +**Formula for Aggregate Effective Leverage** + +Aggregate effective leverage sums up the effective leverage of individual positions, weighted by their respective notional values: + +Aggregate Effective Leverage = (Sum of All Position Notional Values/Total Collateral) + +Where: + +• **Position Notional Values**: The dollar equivalent of each position (spot and derivatives). + +• **Total Collateral**: The total funds or assets in the account used to back all positions. 
+ + +--- + +**In This Assignment** + +The **Effective Leverage (Spot Positions)** criterion specifies that the **Aggregate Effective Leverage** for spot positions must satisfy: + +Aggregate Effective Leverage < (0.5 x [[20241223-160016 Maintenance Asset Weight|Maintenance Asset Weight]]) + +This rule ensures that the user’s overall leverage remains below a risk threshold relative to the asset’s maintenance weight. It prevents over-leveraging in spot markets. + +--- +**Example Calculation** +1. **User Account Details:** + • Total collateral: **$10,000** + • Spot Position 1: Notional value **$5,000** + • Spot Position 2: Notional value **$3,000** + +2. **Aggregate Effective Leverage:** + Aggregate Effective Leverage = (Total Notional Value of Spot Positions/Total Collateral) + Aggregate Effective Leverage = ((5000 + 3000)/(10000)) = 0.8 +3. **Pass Criteria:** + • If the **maint asset weight** is **0.9**, the pass condition becomes: + Aggregate Effective Leverage < (0.5 x 0.9) = 0.45 + • In this case, **0.8 > 0.45**, so the criterion **fails**. + +--- + +**Engineering Considerations** + +1. **Data Sources:** + • Ensure accurate retrieval of **notional values** for all spot positions and total collateral from the user’s account. +2. **Edge Cases:** + • **No positions**: If the user has no active spot positions, the effective leverage should default to **0** (passing the criterion). + • **Insufficient collateral**: Handle cases where collateral is missing or zero to avoid division by zero errors. +3. **Testing:** + • Validate with accounts having: + • No positions. + • High leverage with one or multiple positions. + • Boundary cases where leverage is exactly at the threshold. 
+ +___ +Footer/References + +___ +Tags + +___ \ No newline at end of file diff --git a/documentation/20241223-161359 Effective Leverage.md b/documentation/20241223-161359 Effective Leverage.md new file mode 100644 index 0000000..04d76d0 --- /dev/null +++ b/documentation/20241223-161359 Effective Leverage.md @@ -0,0 +1,132 @@ +--- +aliases: + - Effective Leverage +--- +___ +Body/Content + +**Effective Leverage** is a financial metric used to quantify the amount of risk a trader is taking relative to their available collateral. It provides insight into how much borrowed funds (or risk exposure) a user is utilizing compared to their own funds, considering the notional value of their positions. In trading platforms like Drift Protocol, **effective leverage** is critical for assessing risk and determining a trader’s margin requirements. + +**Definition of Effective Leverage** +Effective leverage is calculated as the ratio of the notional value of a position to the collateral backing it: + +Effective Leverage = Position Notional/Collateral + +Where: + • **Position Notional**: The total value of a position in the market. For example, in a spot market, it’s the value of the asset held. In a derivatives market, it’s the value of the contract’s exposure to the underlying asset. + • **Collateral**: The funds or assets provided to support the position. It acts as a buffer to cover losses. + +--- + +**Understanding Effective Leverage** + +1. **Position Notional**: + • This represents the **market exposure** of a position. For example, if you own 1 BTC at $30,000, the notional value of your position is $30,000. + • For derivative contracts, the notional value might include leverage built into the contract. For instance, if you enter a perpetual futures contract with 5x leverage, your notional value is 5 times your initial margin. +2. **Collateral**: + • The collateral is the trader’s own funds provided to secure their position. 
This can include cash, stablecoins, or other eligible assets deposited in their account. + • Platforms typically enforce a minimum margin requirement, which is the minimum collateral required to maintain a position. +3. **Leverage Ratio**: + • Leverage magnifies both gains and losses. For example: + • **1x leverage**: No borrowed funds, the trader is fully funding their position with their own collateral. + • **2x leverage**: Half the position is funded with borrowed funds, doubling potential gains or losses. + • **10x leverage**: Only 10% of the position is funded by the trader, creating significant magnification of both profits and risks. + +--- + +## **Example Calculation** + +**Spot Position Example** + • **Position**: A user holds $20,000 worth of ETH. + • **Collateral**: The user has $10,000 in collateral. + +Effective Leverage = Position Notional/Collateral = 20,000/10,000 = 2x +![[Pasted image 20241223161731.png]] + ``` +\text{Effective Leverage} = \frac{\text{Position Notional}}{\text{Collateral}} = \frac{20,000}{10,000} = 2x +``` + +The user has an effective leverage of **2x**, meaning they are exposed to twice the amount of their available funds. + +--- + +**Perpetual Futures Example** + • **Position**: A user opens a $50,000 notional position in BTC futures using 5x leverage. + • **Initial Collateral**: The user deposits $10,000 to fund the position. + +Effective Leverage = Position Notional/Collateral = (50,000/10,000) = 5x +![[Pasted image 20241223162016.png]] + +``` +\text{Effective Leverage} = \frac{\text{Position Notional}}{\text{Collateral}} = \frac{50,000}{10,000} = 5x +``` + +Here, the trader is using borrowed funds to increase their exposure by 5 times. + +--- + +**Key Use Cases** +1. **Risk Management**: + • High effective leverage increases the risk of liquidation because even small market movements can deplete the collateral. + • Trading platforms often impose leverage limits to reduce systemic risk. +2. 
**Margin Requirements**: + • Effective leverage influences margin requirements. Platforms may require higher collateral for positions with higher leverage. +3. **Position Monitoring**: + • Effective leverage helps traders assess the sustainability of their positions under volatile market conditions. + +--- +**Comparison to Other Metrics** + • **Effective Leverage vs. Notional Leverage**: + • **Notional Leverage**: Only considers the notional value of a position and the initial collateral deposited. + • **Effective Leverage**: Accounts for all positions (spot and derivatives) and the total collateral available, making it a more comprehensive metric. + • **Effective Leverage vs. Risk Ratio**: + • **Risk Ratio**: Often measures the probability of liquidation or account solvency. + • **Effective Leverage**: Focuses on the relative risk exposure compared to collateral. + +--- +**Engineering Considerations for Drift Protocol** + +1. **Dynamic Calculation**: + • Effective leverage should be recalculated in real-time as positions and collateral change due to price movements or user actions. +2. **Integration with Margin Calls**: + • Use effective leverage thresholds to trigger margin calls or liquidation events. +3. **Edge Cases**: + • **No Collateral**: If the collateral is zero, handle gracefully to avoid division by zero errors. Assign a default or “undefined” leverage value. + • **Highly Leveraged Users**: Identify accounts exceeding platform-defined leverage limits and take preventive action. +4. **Testing**: + • Validate calculations under scenarios with varying: + • Positions (e.g., large spot holdings, small derivatives positions). + • Collateral changes (e.g., withdrawals or price fluctuations). 
+ +--- + +**Example: Calculating Effective Leverage in Code** + +*Before* +```python +# Simplified effective leverage calculation +def calculate_effective_leverage(notional, collateral): + return notional / collateral if collateral > 0 else None +``` + +*After* +```python +# Enhanced version with error handling and default value +def calculate_effective_leverage(notional, collateral): + try: + if collateral > 0: + return notional / collateral + else: + return float('inf') # Handle zero collateral by assigning infinite leverage + except Exception as e: + log_error(f"Failed to calculate leverage: {e}") + return None # Default to None for failed calculations +``` + +___ +Footer/References + +___ +Tags + +___ \ No newline at end of file diff --git a/documentation/20241223-162925 Position Notional.md b/documentation/20241223-162925 Position Notional.md new file mode 100644 index 0000000..d879c43 --- /dev/null +++ b/documentation/20241223-162925 Position Notional.md @@ -0,0 +1,144 @@ +--- +aliases: + - Position Notional +--- +___ +Body/Content + +**Position Notional** refers to the total market value or “notional value” of a position in a financial instrument, such as a spot asset or a derivative. It represents the size of the position in terms of its underlying asset’s price and quantity, regardless of the actual funds (collateral) used to establish the position. + +--- + +**Key Components of Position Notional** + +1. **Price**: + • The current market price of the asset or derivative. + • For spot assets, it’s the asset’s trading price. + • For derivatives, it’s the reference price used for the contract. +2. **Quantity**: + • The amount of the asset or contract held. + • For spot assets, it’s the number of units owned (e.g., 2 BTC). + • For derivatives, it’s the number of contracts or the equivalent exposure to the underlying asset. 
+ +--- +## **Formula** +The general formula for **Position Notional** is: + +Position Notional = Price x Quantity +![[Pasted image 20241223163100.png]] +``` +\text{Position Notional} = \text{Price} \times \text{Quantity} +``` + +• **Price**: Current market price of the asset or contract. +• **Quantity**: Number of units or contracts held. + +--- + +## **Examples of Position Notional** + +### **Spot Asset Example** +• **Position**: A trader owns **3 ETH**, and the current price of ETH is **$2,000**. + Position Notional = 3 x 2,000 = 6,000 + +The **Position Notional** is **$6,000**. + +### **Futures Contract Example** +• **Position**: A trader holds **10 BTC futures contracts**, each representing **1 BTC**, with the price of BTC at **$25,000**. + Position Notional = 10 x 25,000 = 250,000 + +The **Position Notional** is **$250,000**. + +### **Leveraged Example** + +• **Position**: A trader uses 10x leverage to take a position worth **$50,000** in BTC futures, with BTC priced at **$25,000**. + • Collateral used: **$5,000**. + • Position notional: **$50,000** (reflects the market value, not the collateral used). + +--- + +### **Significance of Position Notional** + +1. **Risk Assessment**: + • Position Notional provides an absolute measure of exposure to market movements. For example, if a position has a notional value of $100,000, a 1% price move corresponds to a $1,000 gain or loss. +2. **Leverage Calculation**: + • Effective leverage is calculated by dividing the Position Notional by the trader’s collateral. + • A high notional value relative to collateral indicates higher leverage. +3. **Margin Requirements**: + • Platforms use Position Notional to determine the minimum collateral (margin) required to open or maintain a position. + • Example: A platform may require 10% of the position notional as initial margin. +4. **Portfolio Monitoring**: + • Position Notional helps traders and platforms monitor the total exposure across different assets and instruments. 
+ +--- + +### **Relation to Different Asset Types** + +**Spot Markets** + • Position Notional is straightforward, calculated as the number of units held multiplied by the current price. + +**Derivatives Markets** + • For derivatives, Position Notional represents the equivalent exposure to the underlying asset, not the cost of the derivative. + • Example: In futures, a trader may only put up a fraction of the notional value (margin), but the exposure to price movements is based on the full notional value. + +**Options Markets** + • Position Notional is the value of the underlying asset that the option controls, not the premium paid. + • Example: A call option for 1 ETH at $2,000 has a notional value of $2,000, regardless of the premium paid. + +--- + +**Key Considerations for Drift Protocol** + +1. **Spot vs. Perp Positions**: + • For **spot positions**, the Position Notional is directly derived from the asset price and quantity. + • For **perpetual futures**, the Position Notional reflects the leveraged exposure to the underlying asset. +2. **Dynamic Pricing**: + • As prices fluctuate, Position Notional updates in real-time to reflect the current market value of the position. +3. **Calculation for Aggregated Notional**: + • In scenarios where a user holds multiple positions, the total Position Notional is the sum of the notional values for all positions. 
+ +--- +### **Example in Code** + +**Spot Asset Example** + +```python +def calculate_position_notional(price: float, quantity: float) -> float: + return price * quantity + +# Example: 3 ETH at $2,000 each +price = 2000 +quantity = 3 +notional = calculate_position_notional(price, quantity) +print(f"Position Notional: ${notional}") # Output: $6000 +``` + +**Perpetual Futures Example** +```python +def calculate_perp_position_notional(price: float, contracts: int, contract_size: float = 1) -> float: + return price * contracts * contract_size + +# Example: 10 BTC contracts at $25,000 each, contract size = 1 BTC +price = 25000 +contracts = 10 +notional = calculate_perp_position_notional(price, contracts) +print(f"Position Notional: ${notional}") # Output: $250,000 +``` + +--- +**Potential Questions and Edge Cases** + +1. **What happens with zero quantity?** + • Position Notional would be zero. This is valid but might require handling to avoid division by zero in leverage calculations. +2. **What if the price changes rapidly?** + • Real-time updates are necessary to ensure the notional value reflects the most recent market data. +3. **Multiple positions in the same asset?** + • Sum the notional values for all positions to compute the aggregate exposure. + +___ +Footer/References + +___ +Tags + +___ \ No newline at end of file diff --git a/documentation/20241223-164141 Collateral.md b/documentation/20241223-164141 Collateral.md new file mode 100644 index 0000000..0d2b10a --- /dev/null +++ b/documentation/20241223-164141 Collateral.md @@ -0,0 +1,178 @@ +--- +aliases: + - Collateral +--- +___ +# Body/Content + +Collateral refers to the assets or funds a trader deposits with a trading platform to secure their positions and cover potential losses. It acts as a safeguard for the platform, ensuring that even in adverse market conditions, there are sufficient funds to settle losses without affecting the overall system integrity. 
+ +In financial and trading contexts, collateral is the foundation of margin-based trading and plays a critical role in enabling leverage, determining risk, and ensuring solvency. + +--- +## **Key Characteristics of Collateral** + +1. **Security Deposit**: + • Collateral is essentially a “good faith” deposit that guarantees a trader’s ability to meet financial obligations, such as losses incurred during trading. +2. **Liquid and Accessible**: + • Collateral is usually held in highly liquid assets (e.g., stablecoins, fiat, or highly traded cryptocurrencies) to ensure it can be quickly converted to cover losses. +3. **Dynamic in Nature**: + • The required collateral changes dynamically based on market conditions, position size, and platform risk parameters. +4. **Collateralization**: + • The degree to which a position is collateralized (e.g., partially or fully) determines the risk of liquidation. Undercollateralized positions are riskier for both the trader and the platform. + +--- +## **Types of Collateral in Trading** + +1. **Cash Collateral**: + • Includes fiat currencies or stablecoins (e.g., USDT, USDC) deposited into the trading account. +2. **Crypto Collateral**: + • Includes cryptocurrencies like BTC, ETH, or other eligible tokens that can be used to back positions. +3. **Asset Collateral**: + • Includes other assets like stocks, bonds, or tokenized real-world assets, depending on the platform. +4. **Cross-Margin Collateral**: + • A combined pool of collateral that supports multiple positions across different markets. Losses in one market can be offset by gains in another. +5. **Isolated Collateral**: + • Collateral allocated to a specific position, independent of other positions. Losses are limited to the isolated collateral. + +--- +## **Role of Collateral in Trading** + +1. **Margin Trading**: + • Collateral is the basis for determining how much leverage a trader can use. Platforms calculate the collateral-to-position ratio to enforce leverage limits. +1. 
**Risk Management**: + • Collateral ensures that the platform can liquidate positions or cover losses without incurring systemic risk. +1. **Margin Requirements**: + • **Initial Margin**: The minimum collateral required to open a position. + • **Maintenance Margin**: The minimum collateral required to keep the position open. If the collateral falls below this level, the position is at risk of liquidation. +1. **Liquidation**: + • If the collateral value drops below the required threshold (due to losses or market fluctuations), the platform may liquidate part or all of the position to recover losses. + +--- +## **Collateral in Drift Protocol** +Drift Protocol, like other DeFi platforms, uses collateral as the backbone of its margin trading and risk management systems. + +Users deposit assets as collateral to: + +1. **Open leveraged positions**. +2. **Secure spot and perpetual trades**. +3. **Avoid liquidation during volatile market conditions**. + +Drift also incorporates concepts like **maintenance asset weight** and **effective leverage** to dynamically assess collateral adequacy for each user. + +--- +### **How Collateral is Calculated** + +1. **Spot Markets**: + • Collateral is directly equal to the user’s deposited funds (e.g., USDC or other supported stablecoins). +2. **Derivatives Markets**: + • Collateral is the user’s deposited funds minus any unrealized losses (or plus unrealized gains). +3. **Cross-Collateralization**: + • Platforms like Drift allow multiple assets as collateral. The total collateral is the sum of eligible assets, weighted by their risk parameters (e.g., volatility, liquidity). + +--- +### **Example Calculations** + + **Spot Collateral Example** + • A trader deposits **$5,000 USDC** as collateral. + • The trader uses this collateral to purchase ETH worth **$20,000** with 4x leverage. + +Here: + • **Collateral**: $5,000 USDC. + • **Leverage**: Position Notional / Collateral = 20,000 / 5,000 = 4x. 
+ +--- +### **Perpetual Futures Collateral Example** + +• A trader deposits **2 BTC** as collateral, with BTC priced at **$25,000**. +• The trader opens a long position worth **$250,000** using 5x leverage. + +Here: + • **Collateral Value**:  2 x 25,000 = 50,000 USD. + • **Position Notional**: $250,000. + • **Effective Leverage**:  250,000 / 50,000 = 5x . + +--- + ## **Importance of Collateral Ratios** + +1. **Collateral-to-Notional Ratio**: + - Measures how much collateral backs the position. A higher ratio indicates lower risk. + +`Collateral-to-Notional Ratio = Collateral/Position Notional` + +2. **Liquidation Threshold**: + - If the collateral falls below a specified percentage of the notional value, the position is liquidated. + +--- +## **Engineering Considerations for Drift Protocol** + +1. **Real-Time Updates**: + - Collateral balances must update in real-time to account for: + - Price fluctuations. + - New deposits or withdrawals. + - Unrealized profit and loss (PnL). +2. **Risk Assessment**: + - Collateral adequacy should be assessed dynamically to ensure sufficient buffer against liquidation risks. +3. **Cross-Collateralization**: + - Allow users to use multiple assets as collateral while applying risk-weighted multipliers based on asset volatility and liquidity. +4. **Edge Cases**: + - **Zero Collateral**: Handle accounts with no collateral to prevent unintended behaviors (e.g., division by zero). + - **Extreme Volatility**: Implement safeguards to prevent rapid liquidation due to flash crashes or oracle errors. + +--- +## **Example Code for Collateral Management** + + **Collateral Calculation for Spot Markets** + +```python +def calculate_spot_collateral(deposits: float, withdrawals: float) -> float: + """ + Calculate the remaining collateral after deposits and withdrawals. 
+
+    """
+    return max(0, deposits - withdrawals)
+
+# Example usage
+deposits = 5000  # USD
+withdrawals = 1000  # USD
+collateral = calculate_spot_collateral(deposits, withdrawals)
+print(f"Collateral: ${collateral}")  # Output: $4000
+```
+
+**Collateral for Cross-Margin Accounts**
+
+```python
+def calculate_cross_margin_collateral(asset_balances: dict, risk_weights: dict) -> float:
+    """
+    Calculate total collateral for cross-margin accounts with multiple assets.
+    asset_balances: {asset: balance_in_usd}
+    risk_weights: {asset: risk_multiplier}
+    """
+    total_collateral = 0
+    for asset, balance in asset_balances.items():
+        weight = risk_weights.get(asset, 1)  # Default risk weight is 1
+        total_collateral += balance * weight
+    return total_collateral
+
+# Example usage
+asset_balances = {"BTC": 10000, "ETH": 5000, "USDC": 2000}
+risk_weights = {"BTC": 0.9, "ETH": 0.8, "USDC": 1.0}
+collateral = calculate_cross_margin_collateral(asset_balances, risk_weights)
+print(f"Total Collateral: ${collateral}")  # Output: $15000.0 (10,000×0.9 + 5,000×0.8 + 2,000×1.0)
+```
+
+---
+**Key Takeaways**
+
+1. **Collateral is the trader’s primary defense against liquidation** and determines their ability to take on leveraged positions.
+
+2. Platforms dynamically calculate collateral to adapt to changing market conditions and user activity.
+
+3. Managing collateral effectively is critical to maintaining solvency and optimizing leverage.
+ +___ +Footer/References + +___ +Tags + +___ \ No newline at end of file diff --git a/documentation/20241223-170104 Excess Leverage Coverage.md b/documentation/20241223-170104 Excess Leverage Coverage.md new file mode 100644 index 0000000..2a89bf2 --- /dev/null +++ b/documentation/20241223-170104 Excess Leverage Coverage.md @@ -0,0 +1,180 @@ +--- +aliases: + - Excess Leverage Coverage (Perp Market Insurance) + - Perp Market Insurance + - Excess Leverage Coverage +--- +___ +# Body/Content + +**Excess Leverage Coverage** evaluates whether a perpetual futures market has sufficient insurance fund reserves to cover the risk posed by highly leveraged traders. Specifically, it ensures that users whose leverage exceeds a critical threshold (e.g., **2x leverage**) are adequately backed by the insurance fund to prevent cascading liquidations or losses for the protocol. + +--- +## **Key Concepts** + +1. **Perpetual Futures (Perps)**: + - A derivative product allowing traders to take leveraged positions on an asset without expiry. + - Traders can amplify gains and losses using leverage. +2. **Leverage**: + - Defined as: + - Leverage(x) = [[20241223-162925 Position Notional|Position Notional]]/[[20241223-164141 Collateral|Collateral]] + - Higher leverage means greater exposure relative to collateral, increasing both potential returns and risks. +3. **Insurance Fund**: + - A reserve maintained by the protocol to absorb losses from liquidations that fail to cover a trader’s debt. + - Acts as a safety net to protect the system from becoming under-collateralized. +4. **Excess Leverage**: + - Refers to users whose leverage exceeds a pre-defined threshold (e.g., **2x leverage**). These users pose a higher risk of liquidation, especially during volatile market conditions. +5. 
**Excess Notional**: + - The portion of a trader’s position that exceeds the leverage threshold, calculated as: + - Excess Notional = Position Notional - (Collateral x Leverage Threshold) + +--- +## **Purpose of Excess Leverage Coverage** + +Excess Leverage Coverage ensures that: + +1. The **insurance fund** has enough reserves to cover the potential shortfall from liquidating high-risk positions. +2. The protocol remains solvent, even during sharp market movements or unexpected events. + +Without adequate coverage, a wave of liquidations could deplete the insurance fund and result in losses being passed on to other traders or the protocol itself. + +___ +## **How Excess Leverage Coverage Works** +1. **Identify High-Risk Users**: + - Filter users whose leverage exceeds the defined threshold (e.g., 2x leverage). + - These users are flagged as having “excess leverage.” +2. **Calculate Excess Notional**: + - For each flagged user, determine the excess notional portion of their position: + - Excess Notional = Position Notional - (Collateral x Leverage Threshold) +3. **Compare Against Insurance Fund**: + - Sum up the **excess notional** for all flagged users. + - Check if the **insurance fund** has enough reserves to cover this total. +4. **Pass/Fail Criteria**: + - **Pass**: If the insurance fund fully covers the total excess notional. + - **Fail**: If the total excess notional exceeds the insurance fund reserves. + +--- +## **Example Calculation** +**User Details** +- User’s position notional: **$100,000** +- User’s collateral: **$20,000** +- Leverage threshold: **2x** +- Insurance fund reserves: **$50,000** + +**Step 1: Calculate User’s Leverage** + +`Leverage = Position Notional / Collateral = 100,000/20,000 = 5x` + +The user’s leverage exceeds the threshold of 2x, so they are flagged. 
+ +**Step 2: Calculate Excess Notional** + +`Excess Notional = Position Notional - (Collateral x Leverage Threshold)` + +`Excess Notional = 100,000 - (20,000 x 2) = (100,000 - 40,000) = 60,000` + +**Step 3: Check Against Insurance Fund** + + - Total Excess Notional for all users: **$60,000** + - Insurance Fund Reserves: **$50,000** + +Result: The insurance fund cannot fully cover the excess notional, so this criterion **fails**. + +--- +## **Importance in Drift Protocol** + +In Drift Protocol, **Excess Leverage Coverage** ensures: + +1. **Systemic Stability**: + - By maintaining sufficient reserves in the insurance fund, the protocol minimizes the risk of cascading liquidations. +2. **Risk Management**: + - Helps identify high-risk users early and ensure their impact on the system is manageable. +3. **User Protection**: + - Provides confidence to traders that the platform is robust, even during extreme market events. + +--- +## **Engineering Considerations** + +1. **Data Sources**: + +• Retrieve user position notional, collateral, and leverage data in real-time. + +• Access the current balance of the insurance fund. + +2. **Performance**: + +• Calculating excess leverage for all users can be resource-intensive. Optimize by focusing only on high-leverage accounts. + +3. **Edge Cases**: + +• **Zero Collateral**: Handle accounts with zero collateral to avoid division by zero. + +• **Rapid Liquidations**: Account for changes in insurance fund reserves during high volatility. + +4. **Visualization**: + +• Add a column in the dashboard to indicate whether excess notional is covered. + +• Include a tooltip explaining how excess leverage is calculated and the role of the insurance fund. + +--- +## **Example Code** + +### **Filter High-Leverage Users** +```python +def get_excess_notional(users, leverage_threshold): + """ + Calculate excess notional for users exceeding the leverage threshold. 
+
+    """
+    excess_notional = []
+    for user in users:
+        notional = user['position_notional']
+        collateral = user['collateral']
+        leverage = notional / collateral if collateral > 0 else float('inf')
+
+        if leverage > leverage_threshold:
+            excess = notional - (collateral * leverage_threshold)
+            excess_notional.append(max(0, excess))
+    return sum(excess_notional)
+```
+### **Check Against Insurance Fund**
+```python
+def check_insurance_coverage(users, leverage_threshold, insurance_fund_reserves):
+    """
+    Determine if the insurance fund covers excess notional.
+    """
+    total_excess_notional = get_excess_notional(users, leverage_threshold)
+    return total_excess_notional <= insurance_fund_reserves
+```
+### **Example Usage**
+```python
+users = [
+    {"position_notional": 100000, "collateral": 20000},  # 5x leverage
+    {"position_notional": 50000, "collateral": 25000},  # 2x leverage
+]
+
+insurance_fund_reserves = 50000
+leverage_threshold = 2  # 2x
+
+result = check_insurance_coverage(users, leverage_threshold, insurance_fund_reserves)
+print("Insurance Coverage Pass" if result else "Insurance Coverage Fail")
+# Output: Insurance Coverage Fail (excess notional of $60,000 exceeds $50,000 reserves)
+```
+
+---
+## **Key Takeaways**
+
+1. **Excess Leverage Coverage is critical for platform stability**:
+    - Ensures that highly leveraged traders don’t put the entire protocol at risk.
+2. **Insurance funds act as the last line of defense**:
+    - A well-funded insurance pool prevents cascading failures during liquidation events.
+3. **Proactive monitoring of high-leverage accounts is essential**:
+    - Filtering for excess leverage allows early intervention and risk mitigation.
+ +--- +Footer/References + +___ +Tags + +___ \ No newline at end of file diff --git a/documentation/20241223-172140 Initial Asset Weight.md b/documentation/20241223-172140 Initial Asset Weight.md new file mode 100644 index 0000000..05a11fe --- /dev/null +++ b/documentation/20241223-172140 Initial Asset Weight.md @@ -0,0 +1,171 @@ +--- +aliases: + - Initial Asset Weight +--- +___ +# Body/Content + +**Initial Asset Weight (IAW)** is a parameter in trading and risk management systems that determines the **required collateral ratio for opening a position** in a specific asset. It defines the fraction of a position’s value that must be provided as collateral upfront. This weight ensures that traders allocate enough collateral to cover potential losses and that the platform maintains stability. + +--- +## **Key Characteristics of Initial Asset Weight** + +1. **Position Opening Requirement**: + - IAW specifies the minimum collateral required to open a position in a particular asset. + - For example, an IAW of **0.1 (10%)** means that a trader must provide at least 10% of the position’s value as collateral. +2. **Asset-Specific**: + - Different assets have different IAWs based on their **volatility**, **liquidity**, and **risk profile**. Riskier or less liquid assets typically have higher IAWs. +3. **Margin Calculation**: + - IAW is directly tied to the initial margin required to open a position. It ensures that traders maintain sufficient collateral to cover initial risks. +4. **Dynamic or Fixed**: + - IAW values can be **fixed** (defined in the protocol’s market specifications) or **dynamic** (adjusted based on market conditions). + +--- +## **Formula** + +The required **Initial Collateral** to open a position is calculated as: + +Initial Collateral = (Position Notional x IAW) + +Where: +- **Position Notional**: The total value of the position (price × quantity). +- **IAW**: The Initial Asset Weight for the asset being traded. 
+ +--- +## **Role of Initial Asset Weight** + +1. **Risk Management**: + - Ensures that traders have enough “skin in the game” by requiring sufficient collateral to mitigate potential losses. + - Prevents under-collateralized positions, reducing the risk of liquidation and systemic failure. +2. **Margin Trading**: + - Establishes the baseline collateral needed to leverage a position. Higher IAW values reduce the leverage a trader can achieve. +3. **Platform Stability**: + - Protects the platform from insolvency by requiring traders to maintain adequate collateral levels relative to their position sizes. +4. **Trader Safety**: + - Helps prevent over-leveraging, ensuring that traders are less likely to face sudden liquidations due to small market fluctuations. + +--- +## **Example Calculation** + +### **Spot Market Example** + - Asset: **ETH** + - IAW: **0.2 (20%)** + - Position Notional: **$10,000** + +To open this position, the trader must provide: + +`Initial Collateral = 10,000 x 0.2 = 2,000` + +--- +### **Perpetual Futures Example** + - Asset: **BTC** + - IAW: **0.1 (10%)** + - Position Notional: **$50,000** + + To open this position, the trader must provide: + +`Initial Collateral = 50,000 x 0.1 = 5,000` + +--- +## **How IAW Differs From Other Weights** + +1. **Initial Asset Weight (IAW)**: + - Defines the minimum collateral required to open a position. + - Focuses on **initial risk**. +2. **Maintenance Asset Weight (MAW)**: + - Specifies the collateral required to maintain a position without being liquidated. + - Often lower than IAW to provide some buffer against price fluctuations. +3. **Risk Weight**: + - Represents a broader measure of an asset’s risk profile, used in calculating portfolio risk or setting margin multipliers. + +--- +## **Use of Initial Asset Weight in Drift Protocol** + +In Drift Protocol, **IAW** plays a critical role in risk management by: + +1. 
**Determining Collateral Requirements**: + - Enforces a minimum level of collateral for spot and perpetual positions. +2. **Balancing Leverage and Risk**: + - Limits the leverage a trader can achieve on riskier or more volatile assets. +3. **Configuring Asset Parameters**: + - The protocol assigns IAW values in its [Market Specifications](https://docs.drift.trade/trading/market-specs), ensuring asset-specific risk considerations. + +--- +## **Engineering Considerations** + +1. **Asset-Specific Parameters**: + - Ensure the IAW for each asset is defined in the protocol’s configuration. For example: + - High-volatility assets (e.g., BTC) might have an IAW of **10-20%**. + - Stablecoins (e.g., USDC) might have an IAW of **5%** or lower. +2. **Dynamic Adjustments**: + - Implement mechanisms to adjust IAW dynamically based on: + - Changes in market volatility. + - Liquidity conditions. +3. **Edge Cases**: + - **Zero Collateral**: Prevent users from opening positions with insufficient collateral. + - **Rapid Price Movements**: Adjust IAW or enforce additional safeguards to prevent systemic risk during market crashes. +4. **Visualization**: + - Display IAW prominently in the Risk Dashboard for each asset, ensuring traders understand the collateral requirements. + +--- +## **Example Code for Initial Asset Weight** + +### **Calculate Initial Collateral** +```python +def calculate_initial_collateral(notional: float, iaw: float) -> float: + """ + Calculate the required initial collateral based on position notional and IAW. 
+ """ + return notional * iaw + +# Example usage +position_notional = 10000 # USD +iaw = 0.2 # 20% +initial_collateral = calculate_initial_collateral(position_notional, iaw) +print(f"Initial Collateral Required: ${initial_collateral}") # Output: $2000 +``` + +### **Validate Position Opening** +```python +def validate_position_opening(collateral: float, notional: float, iaw: float) -> bool: + """ + Validate whether the user has enough collateral to open a position. + """ + required_collateral = calculate_initial_collateral(notional, iaw) + return collateral >= required_collateral + +# Example usage +user_collateral = 1500 # USD +position_notional = 10000 # USD +iaw = 0.2 # 20% + +can_open_position = validate_position_opening(user_collateral, position_notional, iaw) +print("Position Opening Valid" if can_open_position else "Insufficient Collateral") +# Output: Insufficient Collateral +``` + +--- +## **Potential Questions and Edge Cases** + +1. **How does IAW affect leverage?** + - The lower the IAW, the higher the leverage a trader can achieve: + - IAW of 0.1 allows up to 10x leverage. + - IAW of 0.2 limits leverage to 5x. +2. **What happens if collateral drops below IAW?** + - If collateral falls below the initial requirement after opening a position, the **Maintenance Asset Weight (MAW)** determines whether the position remains open or gets liquidated. +3. **Can IAW vary for the same asset?** + - Yes, it can vary based on trading conditions (e.g., spot vs. perpetual) or be adjusted dynamically by the protocol. + +--- +**Key Takeaways** + +1. **IAW ensures traders commit adequate collateral** before opening positions, safeguarding both the protocol and its users. +2. It is **asset-specific**, reflecting the risk and volatility of each asset. +3. Properly managing IAW settings is critical for balancing leverage, risk, and platform stability. 
+___ +Footer/References + +___ +Tags + +___ \ No newline at end of file diff --git a/documentation/20241223-173355 Target Scale IAW.md b/documentation/20241223-173355 Target Scale IAW.md new file mode 100644 index 0000000..9547e2f --- /dev/null +++ b/documentation/20241223-173355 Target Scale IAW.md @@ -0,0 +1,147 @@ +--- +aliases: + - Target Scale IAW + - Target Scale Initial Asset Weight +--- +___ +# Body/Content +**Target Scale Initial Asset Weight (Target Scale IAW)** is a computed parameter used in risk management frameworks, like those in Drift Protocol, to dynamically scale collateral or margin requirements based on predefined safety criteria. It determines the optimal “scale” or multiplier of deposits notional that can be safely allocated to an asset or position, reflecting the asset’s ability to maintain stability and pass specific risk checks. + +In simpler terms, **Target Scale IAW** represents the adjusted collateral scaling factor, ensuring that positions are adequately backed without exposing the protocol or users to unnecessary risk. + +--- +## **Role of Target Scale IAW** + +1. **Dynamic Risk Management**: + - Unlike a fixed IAW, **Target Scale IAW** adjusts based on an asset’s performance against safety criteria. + - This ensures that the protocol’s requirements are responsive to changing risk conditions. +2. **Enhanced Collateral Efficiency**: + - Traders can maximize their leverage or position sizes when assets are deemed safer (i.e., when all safety criteria pass). +3. **Preventative Safeguard**: + - Restricts scaling for assets or positions that fail one or more safety checks, mitigating systemic risks in volatile or illiquid markets. +4. **Transparent Decision-Making**: + - The **Target Scale IAW** is computed from explicit rules, making it easier for users to understand why they can or cannot maximize their position sizes. 
+
+---
+## **Formula**
+
+The **Target Scale IAW** is a conditional value:
+
+Target Scale IAW = 1.2 x Total Deposits Notional, if all safety criteria pass
+Target Scale IAW = N/A (or a fallback value), otherwise
+
+Where:
+ - **Total Deposits Notional**: The total collateral value deposited by the user.
+ - **Safety Criteria**: Defined rules for determining an asset’s stability and risk profile (e.g., liquidity, leverage checks).
+
+---
+## **Safety Criteria for Target Scale IAW**
+In Drift Protocol’s example, four specific criteria determine whether **Target Scale IAW** is set:
+1. **On-Chain Liquidity Check**:
+    - Evaluates whether a simulated swap incurs acceptable price impact (low slippage).
+2. **Effective Leverage (Spot Positions)**:
+    - Ensures that spot positions do not exceed safe leverage thresholds.
+3. **Effective Leverage (Perp Positions)**:
+    - Ensures that perpetual futures positions are within a specified leverage range.
+4. **Excess Leverage Coverage (Perp Market Insurance)**:
+    - Verifies that users with excessive leverage are sufficiently covered by the insurance fund.
+
+If all these criteria pass, the **Target Scale IAW** is maximized; otherwise, it defaults to a fallback value (e.g., N/A or zero).
+
+---
+## **Practical Use of Target Scale IAW**
+**Scenario 1: All Criteria Pass**
+ - A trader deposits $50,000.
+ - All safety checks pass.
+ - Target Scale IAW is calculated as:
+ - 1.2 x 50,000 = 60,000
+ - This allows the trader to scale their positions up to a notional value of $60,000.
+
+**Scenario 2: Some Criteria Fail**
+ - A trader deposits $50,000.
+ - One or more criteria fail (e.g., [[20241223-170104 Excess Leverage Coverage|Excess Leverage Coverage]] is insufficient).
+ - Target Scale IAW is set to **N/A** or another fallback value.
+
+---
+## **Key Benefits of Target Scale IAW**
+
+1. **Encourages Safer Trading**:
+    - Traders are incentivized to maintain positions and strategies that pass safety checks, reducing systemic risks.
+2. 
**Dynamic Scaling**:
+    - Adjusts in real-time based on asset performance and market conditions, allowing flexibility without compromising stability.
+3. **Protocol Stability**:
+    - Protects the platform by limiting exposure to risky positions, ensuring adequate collateralization.
+4. **Transparency**:
+    - Each component of the calculation is explicitly tied to measurable criteria, fostering trust and understanding among users.
+
+---
+## **Engineering Considerations**
+
+1. **Real-Time Updates**:
+    - The **Target Scale IAW** must recalculate dynamically as market conditions, positions, or collateral levels change.
+2. **Fallback Handling**:
+    - Define a clear fallback behavior when criteria fail:
+    - Use “N/A” or “0” to indicate an invalid Target Scale IAW.
+    - Log the reasons for failure for transparency.
+3. **Error Handling**:
+    - Account for missing or delayed data (e.g., from oracles or external APIs) to avoid disruptions in Target Scale IAW calculations.
+4. **Visualization**:
+    - Display the Target Scale IAW and its breakdown (criteria results, calculations) prominently in the Risk Dashboard.
+    - Provide tooltips or hover-over details explaining the formula and results.
+5. **Testing**:
+    - Validate against edge cases:
+    - All criteria pass.
+    - Partial failure.
+    - Missing or malformed data inputs.
+
+---
+## **Example Implementation**
+
+### **Target Scale IAW Calculation**
+```python
+def calculate_target_scale_iaw(deposits_notional: float, criteria_results: dict) -> float:
+    """
+    Calculate Target Scale IAW based on total deposits notional and criteria results.
+
+    Args:
+        deposits_notional (float): Total collateral deposited.
+        criteria_results (dict): Pass/Fail results for each safety criterion.
+
+    Returns:
+        float: Target Scale IAW, or 0 if criteria fail. 
+ """ + if all(criteria_results.values()): # All criteria pass + return 1.2 * deposits_notional + else: + return 0 # Fallback value if any criteria fail + +# Example usage +deposits_notional = 50000 # USD +criteria_results = { + "on_chain_liquidity": True, + "spot_leverage": True, + "perp_leverage": True, + "excess_coverage": False, # This fails +} + +target_scale_iaw = calculate_target_scale_iaw(deposits_notional, criteria_results) +print(f"Target Scale IAW: {target_scale_iaw if target_scale_iaw > 0 else 'N/A'}") +# Output: Target Scale IAW: N/A +``` + +--- +## **Key Takeaways** + +1. **Target Scale IAW** is a dynamic parameter that adjusts based on safety criteria, promoting responsible trading and platform stability. + +2. It allows traders to safely scale their positions while ensuring adequate collateralization. + +3. A robust implementation requires transparent criteria evaluation, real-time recalculations, and fallback mechanisms for failure cases. + +___ +Footer/References + +___ +Tags + +___ \ No newline at end of file diff --git a/documentation/20241223-174915 Total Deposits Notional.md b/documentation/20241223-174915 Total Deposits Notional.md new file mode 100644 index 0000000..e45468d --- /dev/null +++ b/documentation/20241223-174915 Total Deposits Notional.md @@ -0,0 +1,137 @@ +--- +aliases: + - Total Deposits Notional + - Deposits Notional +--- +___ +# Body/Content +**Total Deposits Notional** represents the total dollar-equivalent value of all collateral deposited by a trader on a trading platform. It includes the combined value of all assets held by the trader, converted to a common denomination (usually USD or another stable value reference), and serves as the foundation for calculating margin, leverage, and risk metrics. + +--- +## **Key Characteristics of Total Deposits Notional** + +1. 
**Aggregate Collateral Value**:
+    - It sums up the dollar-equivalent value of all deposited assets, including cryptocurrencies, stablecoins, or other eligible assets.
+2. **Dynamic**:
+    - The value is dynamic and fluctuates with market prices for volatile assets. For instance, if a trader deposits BTC as collateral, the total deposits notional changes as the price of BTC changes.
+3. **Common Denomination**:
+    - All deposited assets are converted to a common unit (e.g., USD) using their current market prices, enabling consistent calculations.
+4. **Risk Management Input**:
+    - Used in risk calculations such as margin requirements, liquidation thresholds, and leverage limits.
+5. **Scope of Usage**:
+    - In trading platforms like Drift Protocol, total deposits notional underpins calculations for metrics such as **Target Scale IAW**, **effective leverage**, and **margin utilization**.
+
+---
+## **Formula for Total Deposits Notional**
+$$\text{Total Deposits Notional} = \sum_{i=1}^{n} \text{Asset Quantity}_i \times \text{Price}_i$$
+![[Pasted image 20241223175443.png]]
+Where:
+ - $\text{Asset Quantity}_i$: The quantity of the $i^{\text{th}}$ deposited asset.
+ - $\text{Price}_i$: The current market price of the $i^{\text{th}}$ asset.
+ - $n$: Total number of distinct assets deposited.
+
+---
+## **Examples of Total Deposits Notional**
+### **Single Asset Example**
+ - A trader deposits **2 BTC** as collateral, and the current BTC price is **$30,000**.
+
+Total Deposits Notional = 2 x 30,000 = 60,000 USD
+
+### **Multiple Asset Example**
+ - A trader deposits the following assets:
+ - **2 BTC** at a price of **$30,000**
+ - **5 ETH** at a price of **$2,000**
+ - **10,000 USDC** (stablecoin, assumed to be pegged to $1).
+
+Total Deposits Notional = (2 x 30,000) + (5 x 2,000) + (10,000 x 1)
+Total Deposits Notional = (60,000 + 10,000 + 10,000) = 80,000 USD
+
+---
+## **How Total Deposits Notional is Used**
+1. 
**Margin Requirements**:
+    - Determines the [[20241223-164141 Collateral|Collateral]] available for opening and maintaining positions.
+2. **Leverage Calculations**:
+    - Serves as the denominator in effective leverage calculations:
+    - [[20241223-161359 Effective Leverage|Effective Leverage]] = [[20241223-162925 Position Notional|Position Notional]] / [[20241223-174915 Total Deposits Notional|Total Deposits Notional]]
+3. **Risk Metrics**:
+    - Used to compute metrics like **[[20241223-173355 Target Scale IAW|Target Scale IAW]]**:
+    - [[20241223-173355 Target Scale IAW|Target Scale IAW]] = 1.2 x [[20241223-174915 Total Deposits Notional|Total Deposits Notional]] (if all criteria pass).
+4. **Liquidation Thresholds**:
+    - Helps set thresholds where positions are liquidated if the [[20241223-164141 Collateral|Collateral]] value drops below maintenance requirements.
+
+---
+## **Factors Impacting Total Deposits Notional**
+1. **Market Volatility**:
+    - The notional value of volatile assets (e.g., BTC, ETH) changes with price fluctuations, directly affecting the **total deposits notional**.
+2. **Deposits and Withdrawals**:
+    - Adding or removing assets from the account impacts the total deposits notional.
+3. **Asset Eligibility**:
+    - Only eligible assets (as defined by the platform) are included in the calculation. For instance, assets with low liquidity or high volatility may not qualify.
+4. **Exchange Rates**:
+    - Conversion between assets is dependent on the current exchange rate or price provided by the platform’s price oracle.
+
+---
+## **Engineering Considerations**
+1. **Real-Time Updates**:
+    - Total deposits notional should be recalculated in real-time or near-real-time to account for market price changes, deposits, and withdrawals.
+2. **Price Oracle Integration**:
+    - Ensure accurate and reliable price feeds for asset valuation. Handle scenarios like delayed or incorrect oracle data gracefully.
+3. 
**Fallback Handling**: + - Provide a default behavior if an asset’s price is unavailable or if an oracle error occurs (e.g., exclude the asset from calculations or use the last known price). +4. **Edge Cases**: + - **Zero Deposits**: Handle accounts with no deposits, ensuring calculations default appropriately (e.g., total deposits notional is zero). + - **Highly Volatile Assets**: Apply safeguards to prevent drastic swings in total deposits notional due to flash crashes or temporary oracle anomalies. + +--- +## **Example Implementation** +### **Calculate Total Deposits Notional** +```python +def calculate_total_deposits_notional(assets: dict, price_oracle: dict) -> float: + """ + Calculate the total deposits notional value in USD. + + Args: + assets (dict): A dictionary of asset quantities. Example: {"BTC": 2, "ETH": 5, "USDC": 10000}. + price_oracle (dict): A dictionary of asset prices. Example: {"BTC": 30000, "ETH": 2000, "USDC": 1}. + + Returns: + float: The total deposits notional in USD. + """ + total_notional = 0 + for asset, quantity in assets.items(): + price = price_oracle.get(asset, 0) # Default price is 0 if not found + total_notional += quantity * price + return total_notional + +# Example usage +assets = {"BTC": 2, "ETH": 5, "USDC": 10000} +price_oracle = {"BTC": 30000, "ETH": 2000, "USDC": 1} +total_notional = calculate_total_deposits_notional(assets, price_oracle) +print(f"Total Deposits Notional: ${total_notional}") +# Output: Total Deposits Notional: $80,000 +``` + +--- +## **Potential Questions and Edge Cases** +1. **What happens if an asset’s price is unavailable?** + - Exclude the asset from the calculation or use the last known price. Alternatively, mark the calculation as incomplete and notify the user. +2. **How does volatility affect total deposits notional?** + - Assets like BTC and ETH introduce significant variability. Stablecoins can be used to stabilize the total deposits notional. +3. 
**What about multiple collateral types?** + - Ensure all eligible assets are included, applying conversion rates for a consistent valuation. +4. **Can total deposits notional be negative?** + - No, as only deposited assets are counted. Withdrawals or fees reduce the notional but cannot result in a negative total. + +--- +## **Key Takeaways** +1. **Total Deposits Notional** reflects the combined dollar-equivalent value of all collateral deposited by a user. +2. It is a dynamic, real-time metric that changes with price fluctuations, deposits, and withdrawals. +3. It underpins critical risk and leverage calculations, ensuring the protocol maintains stability while maximizing collateral efficiency. + +___ +Footer/References + +___ +Tags + +___ \ No newline at end of file diff --git a/documentation/Drift Risk Dashboard Trial Assignment (refactored).md b/documentation/Drift Risk Dashboard Trial Assignment (refactored).md new file mode 100644 index 0000000..9de6e22 --- /dev/null +++ b/documentation/Drift Risk Dashboard Trial Assignment (refactored).md @@ -0,0 +1,112 @@ + +# **Risk Dashboard Trial Assignment** + +## **Objective** + +Extend Drift Protocol's **Risk Dashboard** with a new column named **Target Scale IAW** (Initial Asset Weight). This column will calculate and display a value based on whether a **spot asset meets safety criteria**. If **all criteria pass**, the **Target Scale IAW** should be set to: + +> **1.2x total deposits notional** + +This assignment serves as an introduction to Drift Protocol's tech stack and its risk management tools in a **non-production environment**. The **Risk Dashboard** is primarily a back-end tool and involves interpreting, validating, and displaying risk-related data. + +--- + +## **Criteria for Passing** + +You will implement individual **Pass/Fail** columns for each of the following criteria to determine if the **Target Scale IAW** can be set. These columns should be clear, concise, and easily understandable. 
The criteria are: + +### **1. On-Chain Liquidity Check** +- **Condition**: Simulate a swap of one of the largest user account positions using Jupiter, measuring the **price impact %**. +- **Pass Rule**: + - Pass if: price impact < (1 - [[20241223-160016 Maintenance Asset Weight|Maintenance Asset Weight]]). + +--- + +### **2. Effective Leverage (Spot Positions)** +- **Pass Rule**: + - Pass if: [[20241223-160343 Aggregate Effective Leverage|Aggregate Effective Leverage]] < (0.5 * [[20241223-160016 Maintenance Asset Weight|Maintenance Asset Weight]]). + +--- + +### **3. Effective Leverage (Perp Positions)** +- **Pass Rule**: + - Pass if: 1x <= [[20241223-161359 Effective Leverage|Effective Leverage]] <= 2x. + +--- + +### **4. [[20241223-170104 Excess Leverage Coverage|Excess Leverage Coverage (Perp Market Insurance)]]** +- **Condition**: Filter for users whose leverage exceeds 2x. +- **Pass Rule**: + - Pass if: filtering for users with leverage > 2 have excess notional fully covered by the perp market's insurance fund. + +--- + +## **Implementation Steps** + +### **1. Add the Target Scale IAW Column** +- Create a new column in the **Risk Dashboard** called **Target Scale IAW**. +- Set the value to **1.2x total deposits notional** if all four safety criteria columns pass. +- If any of the criteria fail, display **N/A** or an appropriate fallback value, such as **"Criteria not met"**. + +--- + +### **2. Add Individual Pass/Fail Columns** +- Implement four separate columns, one for each criterion listed above. +- Each column should display either **"Pass"** or **"Fail"** based on whether the corresponding rule is satisfied. +- Example column names: + - **On-Chain Liquidity Pass** + - **Spot Effective Leverage Pass** + - **Perp Effective Leverage Pass** + - **Excess Leverage Coverage Pass** + +--- + +### **3. Expandable Tooltips for Detailed Explanations** +- Add a tooltip for each criterion that explains: + - The condition being tested. 
+ - The mathematical formula or logic used. + - Data sources and assumptions. + - Examples to clarify edge cases or common failure scenarios. +- Example: + - **On-Chain Liquidity Check Tooltip**: + - "This column evaluates whether simulating the swap of a large user position results in a price impact that exceeds the acceptable threshold. The acceptable threshold is derived from (1 - maint asset weight). Data is sourced from Jupiter." + +--- + +## **Explicit Engineering Considerations** + +### **Data Source Integration** +- Ensure the integration with on-chain protocols like **Jupiter** for simulating swaps is robust and handles errors (e.g., API failures or unexpected data). +- Confirm that the **maint asset weight** is accurately retrieved from the appropriate Drift Protocol market specs. + +### **Edge Cases** +- **Criteria failure**: Handle scenarios where one or more criteria fail. Ensure the **Target Scale IAW** column outputs a clear and user-friendly fallback value. +- **Data unavailability**: Implement fail-safes to handle missing or incomplete data, with error messaging/logging. + +### **Testing Requirements** +- Write unit tests for each criterion to validate correct Pass/Fail outcomes based on sample data. +- Include test cases for: + - Boundary values (e.g., effective leverage exactly at 1x or 2x). + - Missing or malformed data inputs. + +### **Performance** +- Ensure calculations and queries are efficient, as the Risk Dashboard may involve real-time updates for multiple assets and user accounts. 
+ +--- + +## **Example Outputs** + +### **Before** +| User | Target Scale IAW | Comments | +|------|-------------------|----------| +| Bob | N/A | Missing criteria details | + +### **After** +| User | Target Scale IAW | On-Chain Liquidity Pass | Spot Leverage Pass | Perp Leverage Pass | Excess Coverage Pass | +| ----- | ---------------- | ----------------------- | ------------------ | ------------------ | -------------------- | +| Bob | 1.2x | Pass | Pass | Pass | Pass | +| Alice | N/A | Fail | Pass | Pass | Fail | + +--- + +This structure ensures clarity, supports debugging, and aligns with best practices for robust engineering deliverables. Let me know if further refinements or additional examples are needed. diff --git a/documentation/Drift_v2_Risk_Dashboard_Analysis.md b/documentation/Drift_v2_Risk_Dashboard_Analysis.md new file mode 100644 index 0000000..de7aedb --- /dev/null +++ b/documentation/Drift_v2_Risk_Dashboard_Analysis.md @@ -0,0 +1,168 @@ + +# Drift v2 Risk Dashboard Codebase Analysis + +## Directory Tree + +``` +📄 .dockerignore +📄 .env.example +📂 .github + 📂 workflows + 📄 master.yaml +📄 .gitignore +📄 .pre-commit-config.yaml +📂 .streamlit + 📄 config.toml +📄 Dockerfile-backend +📄 Dockerfile-frontend +📄 README.md +📂 backend + 📂 api + 📄 __init__.py + 📄 asset_liability.py + 📄 health.py + 📄 liquidation.py + 📄 metadata.py + 📄 price_shock.py + 📄 snapshot.py + 📄 ucache.py + 📄 app.py + 📂 middleware + 📄 cache_middleware.py + 📄 readiness.py + 📂 scripts + 📄 generate_ucache.py + 📄 state.py + 📂 utils + 📄 matrix.py + 📄 repeat_every.py + 📄 user_metrics.py + 📄 vat.py + 📄 waiting_for.py +📄 gunicorn_config.py +📂 images + 📄 drift.svg + 📄 driftlogo.png +📄 requirements.txt +📂 src + 📂 lib + 📄 api.py + 📄 page.py + 📄 user_metrics.py + 📄 main.py + 📂 page + 📄 asset_liability.py + 📄 backend.py + 📄 health.py + 📄 health_cached.py + 📄 liquidation_curves.py + 📄 orderbook.py + 📄 price_shock.py + 📄 welcome.py + 📄 style.css + 📄 utils.py + +``` + +--- + + +## Consolidated Analysis of 
the Application + +### Overview of the Application +- **Purpose**: + The application monitors and visualizes financial metrics such as leverage, asset-liability matrices, and risk scenarios for a decentralized finance (DeFi) ecosystem. +- **Technologies**: + - **Back-End**: FastAPI, Driftpy, Python + - **Front-End**: Streamlit, Plotly, Pandas + - **Database/Storage**: Caching via pickled files + - **Others**: Docker, Gunicorn, `.env` configuration + +### Back-End Analysis +- **Architecture**: + - Built with FastAPI, the back-end provides modular APIs organized into directories like `asset_liability`, `price_shock`, and `health`. + - Centralized state management via `BackendState` class, integrating Solana blockchain data with `Driftpy`. +- **Data Flow**: + - Data is fetched from the Solana blockchain using `AsyncClient` and cached as pickled files for performance. + - The `load_pickle_snapshot` function loads the latest cached data into a shared `Vat` object. + - API endpoints expose this data for front-end consumption. +- **Integrations**: + - **Driftpy**: Handles interactions with Solana blockchain and the Drift protocol. + - **Solana**: Provides oracle data, user accounts, and market metrics. +- **Key Services/Modules**: + - `asset_liability`: Computes asset-liability matrices. + - `price_shock`: Analyzes leverage and bankruptcy risk under oracle price distortions. + - `health`: Provides account and system health metrics. + +### Front-End Analysis +- **Framework**: Streamlit +- **Structure**: + - **Main Entry**: `main.py` sets up the application with configuration, styling, and navigation. + - **Pages**: Modular design with individual pages (`asset_liability.py`, `price_shock.py`) for specific metrics. + - **Styling**: Custom CSS for enhanced UI, applied via `style.css`. +- **UI/UX Features**: + - Interactive data visualizations powered by Plotly. + - Sidebar navigation for seamless exploration of metrics. 
+ - Clear, concise presentation of complex data using Pandas for preprocessing. + +### Component-Level Analysis +#### Back-End +- **`BackendState`**: + - Manages connections to Solana and Driftpy clients. + - Caches data for efficient access and uses the `Vat` object to process metrics. +- **APIs**: + - `/matrix`: Fetches asset-liability matrix data. + - `/price_shock`: Provides leverage and bankruptcy metrics under varying conditions. + +#### Front-End +- **`asset_liability.py`**: + - Fetches matrix data and presents it in a summarized form. + - Highlights financial risks using Pandas transformations. +- **`price_shock.py`**: + - Models the effect of price changes on user leverage and bankruptcies. + - Visualizes results dynamically with Plotly. + +### Application Logic +- **Core Processes**: + - Data is fetched, cached, and preprocessed in the back-end. + - The front-end interacts with APIs to retrieve and visualize metrics. +- **Inter-Component Communication**: + - Front-end fetches data dynamically using modular API calls (`lib.api.api2`). + - Back-end orchestrates data processing via `BackendState` and utility modules. + +### Development Practices +- **Strengths**: + - Modular design: Clear separation of concerns across back-end and front-end. + - Code reusability: Utility functions like `generate_summary_data` and `load_newest_files` are well-structured. + - Environment flexibility: `.env` variables ensure easy deployment in different environments. +- **Potential Improvements**: + - Enhance documentation for API endpoints and utility functions. + - Add error handling for API calls to improve user experience during network issues. + +### Key Features +- **Back-End**: + - Efficient caching mechanism for state persistence and fast restarts. + - Real-time interaction with Solana blockchain data. +- **Front-End**: + - Interactive visualizations for exploring financial risks. + - Modular and extensible design to accommodate new metrics. 
+ +### Deployment and Configuration +- **Deployment Instructions**: + - Back-End: Run Gunicorn with `gunicorn backend.app:app -c gunicorn_config.py`. + - Front-End: Launch with `streamlit run src/main.py`. +- **Environment Configurations**: + - Uses `.env` file for critical variables like `BACKEND_URL` and `RPC_URL`. + - Dockerized setup with separate Dockerfiles for front-end and back-end. +- **CI/CD**: + - GitHub workflows might automate deployment (files in `.github` directory). + +### Summary of Findings +- **Strengths**: + - The application is modular, scalable, and designed for analyzing complex financial metrics. + - Interactive front-end enhances usability and understanding of data. +- **Areas for Improvement**: + - Improve inline documentation and logging for better maintainability. + - Implement robust error handling for API integrations. + - Consider database integration for historical data analysis instead of relying solely on pickled files. + diff --git a/documentation/debug_findings.md b/documentation/debug_findings.md new file mode 100644 index 0000000..e2de7b1 --- /dev/null +++ b/documentation/debug_findings.md @@ -0,0 +1,127 @@ +# VAT Error Debug Summary + +## Current Status + +We've been investigating a `vat_error` related to market data access in the v2-risk-dashboard2 project. The error persists despite several implemented changes and successful initialization logs. + +### Implemented Changes +- Updated `backend/utils/matrix.py` to use `spot_markets` instead of `spot_map` +- Modified `backend/state.py` to use consistent market attributes +- Created a new debug endpoint `/vat-state` to inspect the VAT object state +- Added extensive logging throughout the initialization process + +### Key Findings + +1. 
**Successful Initialization** + - Logs show VAT object correctly initialized with `spot_markets` and `perp_markets` attributes + - MarketMap objects have proper structure with expected methods + - Pickle files load successfully with all required data + +2. **Persistent Error** + - Still receiving `'Vat' object has no attribute 'spot_map'` error + - Error occurs despite logs confirming attributes exist after initialization + - VAT object shows correct attributes in `dir()` output + +3. **State Transitions** + - Server reloads maintain proper initialization + - Pickle loading process completes successfully + - Market data structures appear intact after unpickling + +## Recommendations + +### 1. Class Structure Investigation +```python +import inspect + +def inspect_vat(): + # View method resolution order + mro = inspect.getmro(Vat) + # Check for metaclasses + metaclass = type(Vat) +``` + +### 2. Pickle Format Verification +```python +import pickle + +def verify_pickle(): + with open('pickles/vat-2024-12-21-15-19-45/spot_308973682.pkl', 'rb') as f: + data = pickle.load(f) + print(type(data), dir(data)) +``` + +### 3. Dynamic Attribute Monitoring +```python +def __getattr__(self, name): + logger.error(f"Attempted to access non-existent attribute: {name}") + logger.error(f"Available attributes: {dir(self)}") + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") +``` + +### 4. State Transition Logging +```python +@property +def spot_markets(self): + logger.debug("Accessing spot_markets") + return self._spot_markets + +@spot_markets.setter +def spot_markets(self, value): + logger.debug(f"Setting spot_markets to {type(value)}") + self._spot_markets = value +``` + +### 5. Thread Safety Implementation +- Add locks around critical sections +- Include thread IDs in logs +- Monitor concurrent access patterns + +### 6. Compatibility Layer +```python +@property +def spot_map(self): + return self.spot_markets +``` + +### 7. 
Deep State Inspection +```python +def get_full_state_tree(obj, max_depth=3, current_depth=0): + if current_depth >= max_depth: + return str(type(obj)) + if isinstance(obj, (str, int, float, bool)): + return obj + return { + attr: get_full_state_tree(getattr(obj, attr), max_depth, current_depth + 1) + for attr in dir(obj) + if not attr.startswith('_') + } +``` + +### 8. Version Compatibility +- Check pickle protocol versions +- Verify dependency versions match pickle creation environment +- Implement version checks in unpickling process + +## Next Steps + +1. Implement the compatibility layer as an immediate workaround +2. Add comprehensive attribute access logging +3. Verify pickle data structure matches current code expectations +4. Monitor state transitions during server reloads +5. Consider implementing thread safety measures + +## Log Analysis + +The logs show successful initialization and unpickling: + +``` +2024-12-22 23:10:44,626 - backend.state - INFO - VAT spot markets available: True +2024-12-22 23:10:44,626 - backend.state - INFO - VAT perp markets available: True +2024-12-22 23:10:44,626 - backend.state - INFO - Spot markets type: +``` + +This suggests the issue might be related to: +- Attribute access timing +- Thread safety concerns +- Pickle version compatibility +- Class inheritance complexity \ No newline at end of file diff --git a/documentation/debug_progress.md b/documentation/debug_progress.md new file mode 100644 index 0000000..47d3172 --- /dev/null +++ b/documentation/debug_progress.md @@ -0,0 +1,61 @@ +# Debug Progress Summary + +## Overview +We are debugging issues with the backend state initialization and API endpoints in the v2-risk-dashboard2 project. The main focus has been on resolving a `vat_error` and ensuring proper market data access. + +## What's Been Done + +### 1. 
Backend State Initialization +- Added detailed logging to `backend/state.py` to track initialization process +- Successfully confirmed that backend state initializes with the RPC URL +- Verified successful loading of pickle snapshots from `pickles/vat-2024-12-21-15-19-45` + +### 2. API Endpoints +- Added a `/health` endpoint to check backend state readiness +- Added a `/debug` endpoint to inspect state details +- Modified the `target_scale_iaw` endpoint with retry mechanism +- Added logging throughout the API layer + +### 3. Frontend Pages Analysis +- Examined `orderbook.py`, `price_shock.py`, and `asset_liability.py` +- Found that frontend uses driftpy's market configurations +- Identified that frontend accesses market data through prefixed columns + +### 4. Debugging Tools +- Implemented enhanced logging across the application +- Added retry mechanisms for state initialization +- Created debug endpoints for state inspection + +## Current Status + +### Working Components +- Backend state initialization completes successfully +- Pickle snapshot loading works (takes ~8.1s) +- Health check endpoint returns successful responses +- Frontend pages are properly structured + +### Current Issues +1. **Main Issue**: `vat_error` persists + - Error message: `"object of type 'MarketMap' has no len()"` + - Occurs when trying to access market data through VAT + +2. **Endpoint Behavior** + - `/api/risk-metrics/target_scale_iaw` returns "miss" responses + - Backend state reports as ready but market data access fails + +### Next Steps +1. Modify market data access to use backend state's `spot_map` and `perp_map` directly +2. Implement proper error handling for MarketMap access +3. Add more detailed logging for market data initialization +4. 
Verify market data availability after pickle loading + +## Technical Details +- Using pickle snapshot: `vat-2024-12-21-15-19-45` +- RPC URL: `https://rpc.ironforge.network/mainnet` +- Loaded pickle files: + - perp_308973683.pkl + - spot_308973682.pkl + - userstats_308973684.pkl + - spotoracles_308973682.pkl + - usermap_308973682.pkl + - perporacles_308973682.pkl \ No newline at end of file diff --git a/documentation/vat_error_analysis.md b/documentation/vat_error_analysis.md new file mode 100644 index 0000000..5ca1c82 --- /dev/null +++ b/documentation/vat_error_analysis.md @@ -0,0 +1,125 @@ +# VAT Error Analysis and Resolution Plan + +## Current Issues + +### 1. MarketMap Length Error +- **Error**: `"object of type 'MarketMap' has no len()"` +- **Context**: Occurs during market data access through VAT +- **Location**: Triggered when attempting to use `len(mainnet_spot_market_configs)` +- **Impact**: Affects market data access and risk calculations + +### 2. API Endpoint Issues +- `/api/risk-metrics/target_scale_iaw` returns "miss" responses +- Backend state reports ready but market data access fails +- Potential disconnect between state readiness and data availability + +## Root Cause Analysis + +### Market Data Access +1. **Configuration Loading** + - Current approach relies on `mainnet_spot_market_configs` + - MarketMap object doesn't support direct length operations + - Pickle loading works but data access patterns may be incorrect + +2. **State Management** + - Backend state initializes successfully + - Pickle snapshots load (takes ~8.1s) + - Potential race condition between state readiness and data availability + +## Proposed Solutions + +### 1. Immediate Fix for MarketMap Length Error +```python +# Replace: +NUMBER_OF_SPOT = len(mainnet_spot_market_configs) + +# With: +NUMBER_OF_SPOT = len(vat.spot_map.markets) # or +NUMBER_OF_SPOT = len(list(vat.spot_map.markets.keys())) +``` + +### 2. Enhanced Validation +1. 
**New Debug Endpoint** + - Report number of markets in spot_map and perp_map + - List available market keys + - Verify pickle data loading status + - Show current market configurations state + +2. **Data Access Strategy** + - Direct use of `spot_map` and `perp_map` from backend state + - Implementation of error handling for market data access + - Retry mechanism with exponential backoff + +### 3. Improved Logging +- Add structured logging for: + - Market map initialization process + - Pickle loading steps + - Market data access attempts + - Backend state transitions + +### 4. State Management Enhancements +- Implement state machine for backend: + - Track initialization progress + - Validate market data availability + - Provide component status indicators + - Handle state transitions gracefully + +## Implementation Priority + +1. **High Priority** + - Fix MarketMap length error + - Implement basic error handling + - Add critical logging points + +2. **Medium Priority** + - Create debug endpoint + - Enhance state management + - Implement retry mechanisms + +3. **Lower Priority** + - Add comprehensive logging + - Implement state machine + - Add detailed validation checks + +## Technical Context + +### Current Environment +- Pickle snapshot: `vat-2024-12-21-15-19-45` +- RPC URL: `https://rpc.ironforge.network/mainnet` +- Loaded pickle files: + - perp_308973683.pkl + - spot_308973682.pkl + - userstats_308973684.pkl + - spotoracles_308973682.pkl + - usermap_308973682.pkl + - perporacles_308973682.pkl + +### Dependencies +- Backend relies on driftpy for market configurations +- Frontend uses prefixed columns for market data access +- Pickle snapshots for state persistence + +## Next Steps + +1. **Immediate Actions** + - Implement MarketMap length fix + - Add basic error handling + - Deploy critical logging + +2. **Validation** + - Test market data access + - Verify state initialization + - Validate pickle loading + +3. 
**Monitoring** + - Track error rates + - Monitor state transitions + - Validate data consistency + +## Success Criteria + +1. No MarketMap length errors +2. Successful market data access +3. Proper error handling and recovery +4. Clear state visibility +5. Consistent API responses \ No newline at end of file diff --git a/documentation/vat_market_map_issue.md b/documentation/vat_market_map_issue.md new file mode 100644 index 0000000..1c2adc3 --- /dev/null +++ b/documentation/vat_market_map_issue.md @@ -0,0 +1,79 @@ +# VAT MarketMap Length Issue Analysis + +## Current Status + +We're currently dealing with a persistent error in the risk metrics service where attempting to get the length of a `MarketMap` object fails with the error: `"object of type 'MarketMap' has no len()"`. + +## Changes Made + +1. **Backend State Updates** + - Modified `backend/state.py` to check market map readiness without using `len()` + - Implemented iterative checks to verify market data availability + - Added more robust error handling for market map access + +2. **Debug Endpoint Improvements** + - Updated `backend/api/debug.py` to safely access market data + - Added detailed component status reporting + - Implemented safe iteration over market maps instead of using `len()` + +3. **Risk Metrics Updates** + - Modified `backend/api/risk_metrics.py` to handle market data access safely + - Added proper error handling for market data retrieval + - Improved market index validation + +4. **Matrix Calculation Changes** + - Updated `backend/utils/matrix.py` to use safe iteration methods + - Implemented market data collection without relying on `len()` + - Added better error handling for market data access + +5. **User Metrics Improvements** + - Modified `backend/utils/user_metrics.py` to handle market data safely + - Updated leverage calculations to work with market maps properly + - Added better error handling for user data access + +## Current Issues + +1. 
**Primary Issue** + - The `MarketMap` length error persists despite our changes + - The error appears in the debug endpoint response + - This suggests there might be another location where `len()` is being called on the `MarketMap` object + +2. **Secondary Effects** + - All markets show `insurance_fund_balance: 0` and `mint: null` + - The VAT state endpoint works but may not have complete data + - The target_scale_iaw endpoint returns "miss" responses + +## Next Steps + +1. **Investigation** + - Need to locate any remaining direct usage of `len()` on `MarketMap` objects + - Review the VAT object initialization process + - Check for any circular dependencies in market data access + +2. **Potential Solutions** + - Implement a custom `__len__` method for `MarketMap` if possible + - Create wrapper methods for safely accessing market data + - Add more comprehensive logging to track market data flow + +3. **Validation** + - Need to verify market data is being loaded correctly + - Ensure pickle files contain the expected data + - Validate market configurations match expectations + +## Technical Details + +- Using pickle snapshot: `vat-2024-12-21-15-19-45` +- RPC URL: `https://rpc.ironforge.network/mainnet` +- Loaded pickle files: + - perp_308973683.pkl + - spot_308973682.pkl + - userstats_308973684.pkl + - spotoracles_308973682.pkl + - usermap_308973682.pkl + - perporacles_308973682.pkl + +## Dependencies + +- Backend relies on driftpy for market configurations +- Frontend uses prefixed columns for market data access +- Pickle snapshots for state persistence \ No newline at end of file From 415af8746266acecf7621997795948a7a10a4e7f Mon Sep 17 00:00:00 2001 From: goldhaxx <1616671+goldhaxx@users.noreply.github.com> Date: Mon, 10 Mar 2025 17:06:30 -0700 Subject: [PATCH 10/10] Downgrade httpcore and httpx dependencies to resolve compatibility issues - Reverted httpcore from >=1.0.0 to 0.16.3 - Reverted httpx from >=0.25.2 to 0.23.1 - Ensures stable dependency versions for the 
project --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index c7533f3..5f947a6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -48,8 +48,8 @@ GitPython==3.1.43 grpcio==1.64.1 gunicorn==23.0.0 h11==0.14.0 -httpcore>=1.0.0 -httpx>=0.25.2 +httpcore==0.16.3 +httpx==0.23.1 humanize==4.10.0 idna==3.4 iniconfig==1.1.1