1+ """Tests for basic SMASH operations."""
2+
3+ import logging
4+ import re
5+ from http import HTTPStatus
6+
7+ import pytest
8+ import requests
9+ from cardano_clusterlib import clusterlib
10+
11+ from cardano_node_tests .utils import configuration
12+ from cardano_node_tests .utils import dbsync_queries
13+ from cardano_node_tests .utils import dbsync_utils
14+ from cardano_node_tests .utils import helpers
15+ from cardano_node_tests .utils import logfiles
16+ from cardano_node_tests .utils import smash_utils
17+
18+ LOGGER = logging .getLogger (__name__ )
19+
20+
@pytest.fixture(autouse=True)
def check_smash_availability() -> None:
    """Skip the current test when the SMASH service is not configured."""
    if configuration.HAS_SMASH:
        return
    pytest.skip("Skipping test because SMASH service is not available.")
26+
27+
def check_request_error(
    err: requests.exceptions.RequestException,
    expected_status: HTTPStatus,
    expected_code: str | None,
    expected_description: str,
) -> None:
    """Assert expected HTTP errors in requests, handling both JSON and text responses.

    Args:
        err: The raised request exception; must carry an HTTP response.
        expected_status: Expected HTTP status code of the response.
        expected_code: Expected ``code`` field of a JSON error body, or `None` to
            skip the code check (e.g. for plain-text responses).
        expected_description: Expected ``description`` field of a JSON error body,
            or the whole stripped text body for non-JSON responses.
    """
    response = err.response
    # Connection-level errors (timeouts, refused connections, ...) carry no
    # response; fail with a clear message instead of an AttributeError below.
    assert response is not None, f"No HTTP response attached to error: {err}"
    assert (
        response.status_code == expected_status
    ), f"Unexpected HTTP status: {response.status_code} != {expected_status}"

    try:
        error_data = response.json()
        actual_code = error_data.get("code")
        actual_description = error_data.get("description")
    except ValueError:
        # If not JSON, treat the entire response as text
        actual_code = None
        actual_description = response.text.strip()

    if expected_code:
        assert actual_code == expected_code, f"{actual_code} != {expected_code}"

    assert (
        actual_description == expected_description
    ), f"{actual_description!r} != {expected_description!r}"
51+
52+
class TestBasicSmash:
    """Basic tests for SMASH service."""

    @pytest.fixture()
    def locked_pool(
        self,
        cluster_lock_pool: tuple[clusterlib.ClusterLib, str],
    ) -> dbsync_queries.PoolDataDBRow:
        """Get db-sync data of the pool locked by the `cluster_lock_pool` fixture."""
        cluster_obj, pool_name = cluster_lock_pool
        pools_ids = cluster_obj.g_query.get_stake_pools()
        # E.g. "node-pool2" -> "2"; match "pool2" in the metadata URL but not "pool20"
        locked_pool_number = pool_name.replace("node-pool", "")
        pattern = re.compile(r"pool" + re.escape(locked_pool_number) + r"(\D|$)")
        pools = [next(dbsync_queries.query_pool_data(p)) for p in pools_ids]
        # `metadata_url` can be None for pools without registered metadata
        locked_pool_data = next(
            (p for p in pools if p.metadata_url and pattern.search(p.metadata_url)), None
        )
        assert locked_pool_data is not None, f"No db-sync pool data found for '{pool_name}'"
        return locked_pool_data

    @pytest.fixture(scope="session")
    def smash(
        self,
    ) -> None | smash_utils.SmashClient:
        """Create SMASH client."""
        return smash_utils.get_client()

    def test_fetch_pool_metadata(
        self, locked_pool: dbsync_queries.PoolDataDBRow, smash: smash_utils.SmashClient
    ):
        """Check that pool metadata served by SMASH matches the db-sync record."""
        pool_id = locked_pool.view

        # Offchain metadata is inserted into database few minutes after start of a cluster
        def _query_func():
            pool_metadata = next(iter(dbsync_queries.query_off_chain_pool_data(pool_id)), None)
            assert pool_metadata is not None, dbsync_utils.NO_RESPONSE_STR
            return pool_metadata

        metadata_dbsync = dbsync_utils.retry_query(query_func=_query_func, timeout=360)

        expected_metadata = smash_utils.PoolMetadata(
            name=metadata_dbsync.json["name"],
            description=metadata_dbsync.json["description"],
            ticker=metadata_dbsync.ticker_name,
            homepage=metadata_dbsync.json["homepage"],
        )
        actual_metadata = smash.get_pool_metadata(pool_id, metadata_dbsync.hash.hex())
        assert expected_metadata == actual_metadata

    def test_delist_pool(
        self,
        locked_pool: dbsync_queries.PoolDataDBRow,
        smash: smash_utils.SmashClient,
        request: pytest.FixtureRequest,
        worker_id: str,
    ):
        """Check delisting a pool and the errors around delisted pools."""
        pool_id = locked_pool.view

        # Define and register function that ensures pool is re-enlisted after test completion
        def pool_cleanup():
            smash.enlist_pool(pool_id)

        request.addfinalizer(pool_cleanup)

        pool_data = dbsync_utils.get_pool_data(pool_id)
        # `get_pool_data` may return None; fail early with a clear message
        # (consistent with `test_enlist_pool`)
        assert pool_data is not None, f"Pool data not found for pool_id: {pool_id}"

        # Delist the pool
        expected_delisted_pool = smash_utils.PoolData(pool_id=pool_data.hash)
        actual_delisted_pool = smash.delist_pool(pool_id)
        assert expected_delisted_pool == actual_delisted_pool

        # Fetching metadata for a delisted pool must return an error; `pytest.raises`
        # (unlike a bare try/except) also fails when no exception is raised at all
        with pytest.raises(requests.exceptions.RequestException) as excinfo:
            smash.get_pool_metadata(pool_id, pool_data.metadata_hash)
        check_request_error(
            excinfo.value, HTTPStatus.FORBIDDEN, None, f"Pool {pool_data.hash} is delisted"
        )

        # Ignore expected errors in logs that would fail test in teardown phase
        err_msg = "Delisted pool already exists!"
        expected_err_regexes = [err_msg]
        logfiles.add_ignore_rule(
            files_glob="smash.stdout",
            regex="|".join(expected_err_regexes),
            ignore_file_id=worker_id,
        )
        # Ensure re-delisting an already delisted pool returns an error
        with pytest.raises(requests.exceptions.RequestException) as excinfo:
            smash.delist_pool(pool_id)
        check_request_error(excinfo.value, HTTPStatus.BAD_REQUEST, "DbInsertError", err_msg)

    def test_enlist_pool(
        self,
        locked_pool: dbsync_queries.PoolDataDBRow,
        smash: smash_utils.SmashClient,
    ):
        """Check enlisting a previously delisted pool."""
        pool_id = locked_pool.view
        pool_data = dbsync_utils.get_pool_data(pool_id)
        if pool_data is None:
            raise ValueError(f"Pool data not found for pool_id: {pool_id}")

        # Ensure enlisting an already enlisted pool returns an error;
        # `pytest.raises` also fails when no exception is raised at all
        with pytest.raises(requests.exceptions.RequestException) as excinfo:
            smash.enlist_pool(pool_id)
        check_request_error(
            excinfo.value,
            HTTPStatus.NOT_FOUND,
            "RecordDoesNotExist",
            "The requested record does not exist.",
        )

        # Delist the pool
        smash.delist_pool(pool_id)
        with pytest.raises(requests.exceptions.RequestException) as excinfo:
            smash.get_pool_metadata(pool_id, pool_data.metadata_hash)
        check_request_error(
            excinfo.value, HTTPStatus.FORBIDDEN, None, f"Pool {pool_data.hash} is delisted"
        )

        # Enlist the pool
        actual_res_enlist = smash.enlist_pool(pool_id)
        expected_res_enlist = smash_utils.PoolData(pool_id=pool_data.hash)
        assert expected_res_enlist == actual_res_enlist

    def test_reserve_ticker(
        self,
        locked_pool: dbsync_queries.PoolDataDBRow,
        smash: smash_utils.SmashClient,
        request: pytest.FixtureRequest,
    ):
        """Check reserving a ticker and re-reserving an already taken one."""
        pool_id = locked_pool.view

        # Register cleanup function that removes ticker from database after test completion
        request.addfinalizer(dbsync_queries.delete_reserved_pool_tickers)

        # Reserve ticker
        ticker = helpers.get_rand_str(length=3)
        actual_response = smash.reserve_ticker(ticker_name=ticker, pool_hash=pool_id)
        expected_response = smash_utils.PoolTicker(name=ticker)
        assert expected_response == actual_response

        # Reserve already taken ticker; `pytest.raises` also fails when no
        # exception is raised at all
        with pytest.raises(requests.exceptions.RequestException) as excinfo:
            smash.reserve_ticker(ticker_name=ticker, pool_hash=pool_id)
        check_request_error(
            excinfo.value,
            HTTPStatus.BAD_REQUEST,
            "TickerAlreadyReserved",
            f'Ticker name "{ticker}" is already reserved',
        )