1+ """Tests for basic SMASH operations."""
2+
3+
4+ import re
5+ import logging
6+ import requests
7+ from http import HTTPStatus
8+ import pytest
9+ from cardano_clusterlib import clusterlib
10+
11+ from cardano_node_tests .utils import logfiles
12+ from cardano_node_tests .utils import configuration
13+ from cardano_node_tests .utils import dbsync_utils
14+ from cardano_node_tests .utils import dbsync_queries
15+ from cardano_node_tests .utils import smash_utils
16+ from cardano_node_tests .utils import helpers
17+
18+ LOGGER = logging .getLogger (__name__ )
19+
20+
@pytest.fixture(autouse=True)
def check_smash_availability() -> None:
    """Skip every test in this module when the SMASH service is not available."""
    if configuration.HAS_SMASH:
        return
    pytest.skip("Skipping test because SMASH service is not available.")
26+
27+
def check_request_error(
    err: requests.exceptions.RequestException,
    expected_status: HTTPStatus,
    expected_code: str | None,
    expected_description: str,
) -> None:
    """Assert that a failed SMASH request matches the expected HTTP error.

    Handles both JSON error payloads (objects with "code" and "description"
    keys) and plain-text error responses.

    Args:
        err: The raised request exception; its `response` attribute is inspected.
        expected_status: Expected HTTP status code of the response.
        expected_code: Expected error code from a JSON payload, or `None` when
            the code should not be checked (e.g. plain-text responses).
        expected_description: Expected error description (or the whole text body).
    """
    response = err.response
    assert response.status_code == expected_status, (
        f"Unexpected status code: {response.status_code} != {expected_status}"
    )

    # Keep only the call that can raise inside the `try` block
    try:
        error_data = response.json()
    except ValueError:
        # If not JSON, treat the entire response as text
        actual_code = None
        actual_description = response.text.strip()
    else:
        actual_code = error_data.get("code")
        actual_description = error_data.get("description")

    if expected_code:
        assert actual_code == expected_code, (
            f"Unexpected error code: {actual_code!r} != {expected_code!r}"
        )

    assert actual_description == expected_description, (
        f"Unexpected error description: {actual_description!r} != {expected_description!r}"
    )
51+
52+
class TestBasicSmash:
    """Basic tests for SMASH service."""

    @pytest.fixture()
    def locked_pool(
        self,
        cluster_lock_pool: tuple[clusterlib.ClusterLib, str],
    ) -> dbsync_queries.PoolDataDBRow:
        """Get db-sync data of the pool locked by the `cluster_lock_pool` fixture."""
        cluster_obj, pool_name = cluster_lock_pool
        pools_ids = cluster_obj.g_query.get_stake_pools()
        # "node-pool3" -> "3"; match e.g. "pool3" in the metadata URL but not "pool30"
        locked_pool_number = pool_name.replace("node-pool", "")
        pattern = re.compile(r"pool" + re.escape(locked_pool_number) + r"(\D|$)")
        pools = [next(dbsync_queries.query_pool_data(p)) for p in pools_ids]
        locked_pool_data = next((p for p in pools if pattern.search(p.metadata_url)), None)
        # Fail fast with a clear message instead of an `AttributeError` later in a test
        assert locked_pool_data is not None, f"No db-sync data found for pool '{pool_name}'"
        return locked_pool_data

    @pytest.fixture(scope="session")
    def smash(
        self,
    ) -> None | smash_utils.SmashClient:
        """Create SMASH client."""
        return smash_utils.get_client()

    def test_fetch_pool_metadata(
        self,
        locked_pool: dbsync_queries.PoolDataDBRow,
        smash: smash_utils.SmashClient,
    ):
        """Check that pool metadata served by SMASH matches the db-sync record."""
        pool_id = locked_pool.view

        # Offchain metadata is inserted into database few minutes after start of a cluster
        def _query_func():
            pool_metadata = next(iter(dbsync_queries.query_off_chain_pool_data(pool_id)), None)
            assert pool_metadata is not None, dbsync_utils.NO_RESPONSE_STR
            return pool_metadata

        metadata_dbsync = dbsync_utils.retry_query(query_func=_query_func, timeout=360)

        expected_metadata = smash_utils.PoolMetadata(
            name=metadata_dbsync.json["name"],
            description=metadata_dbsync.json["description"],
            ticker=metadata_dbsync.ticker_name,
            homepage=metadata_dbsync.json["homepage"],
        )
        actual_metadata = smash.get_pool_metadata(pool_id, metadata_dbsync.hash.hex())
        assert expected_metadata == actual_metadata

    def test_delist_pool(
        self,
        locked_pool: dbsync_queries.PoolDataDBRow,
        smash: smash_utils.SmashClient,
        request: pytest.FixtureRequest,
        worker_id: str,
    ):
        """Delist a pool and check that its metadata can no longer be fetched."""
        pool_id = locked_pool.view

        # Ensure pool is re-enlisted after test completion
        def pool_cleanup():
            smash.enlist_pool(pool_id)

        request.addfinalizer(pool_cleanup)

        # Delist the pool
        pool_data = dbsync_utils.get_pool_data(pool_id)
        expected_delisted_pool = smash_utils.PoolData(pool_id=pool_data.hash)
        actual_delisted_pool = smash.delist_pool(pool_id)
        assert expected_delisted_pool == actual_delisted_pool

        # Fetching metadata for a delisted pool must return an error.
        # `pytest.raises` (unlike bare try/except) fails the test when no error is raised.
        with pytest.raises(requests.exceptions.RequestException) as excinfo:
            smash.get_pool_metadata(pool_id, pool_data.metadata_hash)
        check_request_error(
            excinfo.value, HTTPStatus.FORBIDDEN, None, f"Pool {pool_data.hash} is delisted"
        )

        # Ignore expected errors in logs that would fail test in teardown phase
        err_msg = "Delisted pool already exists!"
        logfiles.add_ignore_rule(
            files_glob="smash.stdout",
            regex=err_msg,
            ignore_file_id=worker_id,
        )
        # Re-delisting an already delisted pool must return an error
        with pytest.raises(requests.exceptions.RequestException) as excinfo:
            smash.delist_pool(pool_id)
        check_request_error(excinfo.value, HTTPStatus.BAD_REQUEST, "DbInsertError", err_msg)

    def test_enlist_pool(
        self,
        locked_pool: dbsync_queries.PoolDataDBRow,
        smash: smash_utils.SmashClient,
    ):
        """Delist a pool and then enlist it again."""
        pool_id = locked_pool.view

        # Enlisting an already enlisted pool must return an error
        with pytest.raises(requests.exceptions.RequestException) as excinfo:
            smash.enlist_pool(pool_id)
        check_request_error(
            excinfo.value,
            HTTPStatus.NOT_FOUND,
            "RecordDoesNotExist",
            "The requested record does not exist.",
        )

        # Delist the pool
        smash.delist_pool(pool_id)
        pool_data = dbsync_utils.get_pool_data(pool_id)
        with pytest.raises(requests.exceptions.RequestException) as excinfo:
            smash.get_pool_metadata(pool_id, pool_data.metadata_hash)
        check_request_error(
            excinfo.value, HTTPStatus.FORBIDDEN, None, f"Pool {pool_data.hash} is delisted"
        )

        # Enlist pool
        actual_res_enlist = smash.enlist_pool(pool_id)
        expected_res_enlist = smash_utils.PoolData(pool_id=pool_data.hash)
        assert expected_res_enlist == actual_res_enlist

    def test_reserve_ticker(
        self,
        locked_pool: dbsync_queries.PoolDataDBRow,
        smash: smash_utils.SmashClient,
        request: pytest.FixtureRequest,
    ):
        """Reserve a ticker for a pool and check that it cannot be reserved twice."""
        pool_id = locked_pool.view

        # Remove ticker from database after test completion
        request.addfinalizer(dbsync_queries.delete_reserved_pool_tickers)

        # Reserve ticker
        ticker = helpers.get_rand_str(length=3)
        actual_response = smash.reserve_ticker(ticker_name=ticker, pool_hash=pool_id)
        expected_response = smash_utils.PoolTicker(name=ticker)
        assert expected_response == actual_response

        # Reserving an already taken ticker must return an error
        with pytest.raises(requests.exceptions.RequestException) as excinfo:
            smash.reserve_ticker(ticker_name=ticker, pool_hash=pool_id)
        check_request_error(
            excinfo.value,
            HTTPStatus.BAD_REQUEST,
            "TickerAlreadyReserved",
            f'Ticker name "{ticker}" is already reserved',
        )