Skip to content

Commit e175f3e

Browse files
Add SMASH service support for testing
1 parent 8b0442e commit e175f3e

File tree

5 files changed

+353
-0
lines changed

5 files changed

+353
-0
lines changed
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
#!/usr/bin/env bash

# Start the cardano-smash-server for one local cluster instance.
# Expects CARDANO_NODE_SOCKET_PATH and DBSYNC_REPO to be set by the caller
# (the cluster's supervisor config). Deliberately no `set -e`: only unset
# variables and pipeline failures are treated as fatal here.
set -uo pipefail

# Derive the cluster state directory from the node socket location.
SOCKET_PATH="$(readlink -m "$CARDANO_NODE_SOCKET_PATH")"
STATE_CLUSTER="${SOCKET_PATH%/*}"
STATE_CLUSTER_NAME="${STATE_CLUSTER##*/}"
# NOTE(review): `%%INSTANCE_NUM%%` looks like a template placeholder that is
# substituted with the cluster instance number when this script is generated
# — confirm against the script that installs it.
INSTANCE_NUM="%%INSTANCE_NUM%%"

# Postgres credentials file used by the SMASH server's db-sync database.
export PGPASSFILE="$STATE_CLUSTER/pgpass"
# Basic-auth admin credentials; overridable via the environment.
export SMASH_ADMIN="${SMASH_ADMIN:-admin}"
export SMASH_PASSWORD="${SMASH_PASSWORD:-password}"
export SMASH_ADMINS_FILE="$STATE_CLUSTER/admins.txt"
# Each cluster instance gets its own port: base + instance number.
export SMASH_BASE_PORT="${SMASH_BASE_PORT:-31000}"
export SMASH_PORT="$((SMASH_BASE_PORT + ${INSTANCE_NUM:-0}))"

# Admins file format consumed by cardano-smash-server: "user, password".
echo "${SMASH_ADMIN}, ${SMASH_PASSWORD}" > "$SMASH_ADMINS_FILE"

# Replace this shell with the SMASH server process (supervisord-friendly).
exec "$DBSYNC_REPO/smash-server/bin/cardano-smash-server" --config "./$STATE_CLUSTER_NAME/dbsync-config.yaml" --port "$SMASH_PORT" --admins "$SMASH_ADMINS_FILE"

cardano_node_tests/cluster_scripts/conway_fast/start-cluster

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
# controlling environment variables:
44
# DBSYNC_REPO - will start and configure db-sync if the value is path to db-sync repository
5+
# SMASH - if set, will start and configure smash
56
# ENABLE_LEGACY - if set, local cluster will use legacy networking
67
# MIXED_P2P - if set, local cluster will use P2P for some nodes and legacy topology for others
78
# UTXO_BACKEND - 'mem' or 'disk', default is 'mem' (or legacy) if unset
@@ -206,6 +207,23 @@ startsecs=5
206207
EoF
207208
fi
208209

210+
# enable smash service
211+
if [ -n "${DBSYNC_REPO:-""}" ] && [ -n "${SMASH:-""}" ]; then
212+
[ -e "${DBSYNC_REPO}/smash-server/bin/cardano-smash-server" ] || \
213+
{ echo "The \`${DBSYNC_REPO}/smash-server/bin/cardano-smash-server\` not found, line $LINENO" >&2; exit 1; } # assert
214+
215+
cat >> "${STATE_CLUSTER}/supervisor.conf" <<EoF
216+
217+
[program:smash]
218+
command=${SCRIPT_DIR}/run-cardano-smash
219+
stderr_logfile=./${STATE_CLUSTER_NAME}/smash.stderr
220+
stdout_logfile=./${STATE_CLUSTER_NAME}/smash.stdout
221+
autostart=false
222+
autorestart=false
223+
startsecs=5
224+
EoF
225+
fi
226+
209227
# enable cardano-submit-api service
210228
if [ "$ENABLE_SUBMIT_API" -eq 1 ]; then
211229
cat >> "${STATE_CLUSTER}/supervisor.conf" <<EoF
@@ -625,6 +643,12 @@ if [ -n "${DBSYNC_REPO:-""}" ]; then
625643
supervisorctl -s "unix:///${SUPERVISORD_SOCKET_PATH}" start dbsync
626644
fi
627645

646+
# start smash
647+
if [ -n "${DBSYNC_REPO:-""}" ] && [ -n "${SMASH:-""}" ]; then
648+
echo "Starting smash"
649+
supervisorctl -s "unix:///${SUPERVISORD_SOCKET_PATH}" start smash
650+
fi
651+
628652
echo "Sleeping for initial Tx submission delay of $TX_SUBMISSION_DELAY seconds"
629653
sleep "$TX_SUBMISSION_DELAY"
630654

Lines changed: 136 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,136 @@
1+
"""Tests for basic SMASH operations."""
2+
3+
4+
import logging
5+
from http import HTTPStatus
6+
import pytest
7+
from cardano_clusterlib import clusterlib
8+
9+
from cardano_node_tests.utils import logfiles
10+
from cardano_node_tests.utils import configuration
11+
from cardano_node_tests.utils import dbsync_utils
12+
from cardano_node_tests.utils import dbsync_queries
13+
from cardano_node_tests.utils import smash_utils
14+
15+
LOGGER = logging.getLogger(__name__)
16+
17+
18+
@pytest.fixture(autouse=True)
def check_smash_availability():
    """Skip every test in this module when the SMASH service is unavailable."""
    if configuration.HAS_SMASH:
        return
    pytest.skip("Skipping test because SMASH service is not available.")
23+
24+
25+
class TestBasicSmash:
    """Basic tests for SMASH service."""

    @pytest.fixture()
    def locked_pool(
        self,
        cluster_lock_pool: clusterlib.ClusterLib,
    ) -> dbsync_queries.PoolDataDBRow:
        """Get db-sync pool data for the pool locked by `cluster_lock_pool`."""
        cluster_obj, pool_name = cluster_lock_pool
        pools_ids = cluster_obj.g_query.get_stake_pools()
        locked_pool_number = pool_name[-1]
        pools = [next(dbsync_queries.query_pool_data(p)) for p in pools_ids]
        locked_pool_data = next(
            (item for item in pools if f"pool{locked_pool_number}" in item.metadata_url),
            None,
        )
        # Fail early with a clear message instead of letting tests crash on
        # `None.view` later (the annotation promises a `PoolDataDBRow`).
        assert locked_pool_data is not None, f"No db-sync pool data found for `{pool_name}`"
        return locked_pool_data

    @pytest.fixture(scope="session")
    def smash(
        self,
    ) -> smash_utils.SmashClient:
        """Create SMASH client.

        The autouse `check_smash_availability` fixture guarantees SMASH is
        available here, so `get_client` doesn't return `None`.
        """
        smash = smash_utils.get_client()
        return smash

    def test_fetch_pool_metadata(
        self,
        locked_pool: dbsync_queries.PoolDataDBRow,
        smash: smash_utils.SmashClient,
    ):
        """Check that pool metadata fetched from SMASH matches db-sync."""
        pool_id = locked_pool.view

        # Offchain metadata is inserted into database few minutes after start of a cluster
        def _query_func():
            pool_metadata = next(iter(dbsync_queries.query_off_chain_pool_data(pool_id)), None)
            # `is not None` instead of `!= None` - identity check for None
            assert pool_metadata is not None, dbsync_utils.NO_RESPONSE_STR
            return pool_metadata

        metadata_dbsync = dbsync_utils.retry_query(query_func=_query_func, timeout=360)

        expected_metadata = smash_utils.PoolMetadata(
            name=metadata_dbsync.json["name"],
            description=metadata_dbsync.json["description"],
            ticker=metadata_dbsync.ticker_name,
            homepage=metadata_dbsync.json["homepage"],
        )
        actual_metadata = smash.get_pool_metadata(pool_id, metadata_dbsync.hash.hex())
        assert expected_metadata == actual_metadata

    def test_delist_pool(
        self,
        locked_pool: dbsync_queries.PoolDataDBRow,
        smash: smash_utils.SmashClient,
        request: pytest.FixtureRequest,
        worker_id: str,
    ):
        """Check delisting a pool and the errors on repeated delisting."""
        pool_id = locked_pool.view

        # Define and register function that ensures pool is re-enlisted after test
        def pool_cleanup():
            smash.enlist_pool(pool_id)

        request.addfinalizer(pool_cleanup)

        # Delist the pool
        pool_data = dbsync_utils.get_pool_data(pool_id)
        expected_delisted_pool = smash_utils.PoolData(pool_id=pool_data.hash)
        actual_delisted_pool = smash.delist_pool(pool_id)
        assert expected_delisted_pool == actual_delisted_pool

        # Check if fetching metadata for a delisted pool returns an error
        res_metadata = smash.get_pool_metadata(pool_id, pool_data.metadata_hash)
        assert isinstance(res_metadata, smash_utils.SmashError)
        assert HTTPStatus.FORBIDDEN == res_metadata.status_code
        assert f"Pool {pool_data.hash} is delisted" == res_metadata.message

        # Ignore expected errors in logs that would fail test in teardown phase
        err_msg = "Delisted pool already exists!"
        expected_err_regexes = [err_msg]
        logfiles.add_ignore_rule(
            files_glob="smash.stdout",
            regex="|".join(expected_err_regexes),
            ignore_file_id=worker_id,
        )
        # Ensure re-delisting an already delisted pool returns an error
        res_delist = smash.delist_pool(pool_id)
        assert isinstance(res_delist, smash_utils.SmashError)
        assert HTTPStatus.BAD_REQUEST == res_delist.status_code
        assert "DbInsertError" == res_delist.message.get("code")
        assert err_msg == res_delist.message.get("description")

    def test_enlist_pool(
        self,
        locked_pool: dbsync_queries.PoolDataDBRow,
        smash: smash_utils.SmashClient,
    ):
        """Check enlisting a delisted pool and the error on enlisting an enlisted one."""
        pool_id = locked_pool.view

        # Ensure enlisting an already enlisted pool returns an error
        res_enlist = smash.enlist_pool(pool_id)
        assert isinstance(res_enlist, smash_utils.SmashError)
        assert HTTPStatus.NOT_FOUND == res_enlist.status_code
        assert "RecordDoesNotExist" == res_enlist.message.get("code")
        assert "The requested record does not exist." == res_enlist.message.get("description")

        # Delist the pool
        smash.delist_pool(pool_id)
        pool_data = dbsync_utils.get_pool_data(pool_id)
        res_metadata = smash.get_pool_metadata(pool_id, pool_data.metadata_hash)
        assert f"Pool {pool_data.hash} is delisted" == res_metadata.message

        # Enlist pool
        actual_res_enlist = smash.enlist_pool(pool_id)
        expected_res_enlist = smash_utils.PoolData(pool_id=pool_data.hash)
        assert expected_res_enlist == actual_res_enlist

cardano_node_tests/utils/configuration.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,10 @@ def _check_cardano_node_socket_path() -> None:
3535
# See `cat /proc/sys/net/ipv4/ip_local_port_range`.
3636
PORTS_BASE = int(os.environ.get("PORTS_BASE") or 23000)
3737

38+
SMASH_BASE_PORT = int(os.environ.get("SMASH_BASE_PORT", "31000"))
39+
SMASH_ADMIN = os.environ.get("SMASH_ADMIN", "admin")
40+
SMASH_PASSWORD = os.environ.get("SMASH_PASSWORD", "password")
41+
3842
# Used also in startup scripts as `if [ -n "$VAR" ]...`
3943
ENABLE_LEGACY = (os.environ.get("ENABLE_LEGACY") or "") != ""
4044
# Used also in startup scripts as `if [ -n "$VAR" ]...`
@@ -46,6 +50,9 @@ def _check_cardano_node_socket_path() -> None:
4650
# Number of new blocks before the Tx is considered confirmed. Use default value if set to 0.
4751
CONFIRM_BLOCKS_NUM = int(os.environ.get("CONFIRM_BLOCKS_NUM") or 0)
4852

53+
# Used also in startup scripts as `if [ -n "$VAR" ]...`
54+
PV10 = (os.environ.get("PV10") or "") == ""
55+
4956
# Used also in startup scripts
5057
UTXO_BACKEND = os.environ.get("UTXO_BACKEND") or ""
5158
if UTXO_BACKEND not in ("", "mem", "disk"):
@@ -100,6 +107,14 @@ def _check_cardano_node_socket_path() -> None:
100107
else:
101108
DBSYNC_BIN = pl.Path("/nonexistent")
102109

110+
HAS_SMASH = HAS_DBSYNC and bool(os.environ.get("SMASH"))
111+
if HAS_SMASH:
112+
SMASH_BIN = (
113+
pl.Path(os.environ["DBSYNC_REPO"]).expanduser() / "smash-server" / "bin" / "cardano-smash-server"
114+
).resolve()
115+
else:
116+
SMASH_BIN = pl.Path("/nonexistent")
117+
103118
DONT_OVERWRITE_OUTFILES = bool(os.environ.get("DONT_OVERWRITE_OUTFILES"))
104119

105120
# Cluster instances are kept running after tests finish
Lines changed: 159 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,159 @@
1+
import logging
2+
import typing as tp
3+
from dataclasses import dataclass
4+
from functools import lru_cache
5+
6+
import requests
7+
from http import HTTPStatus
8+
from requests.auth import HTTPBasicAuth
9+
10+
from cardano_node_tests.utils import cluster_nodes
11+
from cardano_node_tests.utils import configuration
12+
13+
LOGGER = logging.getLogger(__name__)
14+
15+
16+
@dataclass(frozen=True, order=True)
class PoolMetadata:
    """Off-chain stake pool metadata as returned by the SMASH metadata endpoint."""

    name: str  # pool name
    description: str  # pool description
    ticker: str  # pool ticker
    homepage: str  # pool homepage URL
22+
23+
24+
@dataclass(frozen=True, order=True)
class PoolData:
    """Stake pool identification as returned by SMASH delist/enlist responses."""

    pool_id: str  # value of the `poolId` field of the API response
27+
28+
29+
@dataclass
class SmashError:
    """Standardized error response for API failures."""

    # HTTP status of the failed request
    status_code: HTTPStatus
    # Response body: raw text, or parsed JSON for endpoints that return JSON errors
    message: tp.Any

    def to_dict(self) -> tp.Dict[str, tp.Any]:
        """Return the error as a plain dictionary."""
        return {"status_code": self.status_code, "message": self.message}
37+
38+
39+
class SmashClient:
    """Utility class for interacting with SMASH via REST API."""

    # Timeout (seconds) for every HTTP request, so a stuck SMASH service
    # cannot hang the test run indefinitely.
    REQUEST_TIMEOUT: tp.ClassVar[float] = 10.0

    def __init__(self, instance_num: int) -> None:
        """Initialize a client for the given cluster instance.

        Args:
            instance_num: Number of the cluster instance whose SMASH service to target.
        """
        self.instance_num = instance_num
        # Each cluster instance runs SMASH on its own port: base + instance number
        self.port = configuration.SMASH_BASE_PORT + instance_num
        self.base_url = f"http://localhost:{self.port}"
        self.auth = self._get_auth()

    def _get_auth(self) -> tp.Optional[HTTPBasicAuth]:
        """Get Basic Auth credentials if configured, `None` otherwise."""
        admin = getattr(configuration, "SMASH_ADMIN", None)
        password = getattr(configuration, "SMASH_PASSWORD", None)
        return HTTPBasicAuth(admin, password) if admin and password else None

    @staticmethod
    def _mk_error(
        err: requests.exceptions.RequestException, json_message: bool = False
    ) -> SmashError:
        """Build a `SmashError` from a requests exception.

        Connection-level failures carry no HTTP response (`err.response is None`);
        dereferencing it unconditionally would raise `AttributeError`.

        Args:
            err: The exception raised by `requests`.
            json_message: Parse the response body as JSON (falling back to raw
                text when the body is not valid JSON).
        """
        response = err.response
        if response is None:
            return SmashError(status_code=HTTPStatus.SERVICE_UNAVAILABLE, message=str(err))
        message: tp.Any = response.text
        if json_message:
            try:
                message = response.json()
            except ValueError:
                # Body is not valid JSON - keep the raw text fallback
                pass
        return SmashError(status_code=HTTPStatus(response.status_code), message=message)

    def get_pool_metadata(
        self, pool_id: str, pool_meta_hash: str
    ) -> PoolMetadata | SmashError | None:
        """Fetch stake pool metadata from SMASH.

        Returns:
            `PoolMetadata` on success, `SmashError` on HTTP failure, or `None`
            when the response is missing required metadata fields.
        """
        url = f"{self.base_url}/api/v1/metadata/{pool_id}/{pool_meta_hash}"
        try:
            response = requests.get(url, auth=self.auth, timeout=self.REQUEST_TIMEOUT)
            response.raise_for_status()
        except requests.exceptions.RequestException as err:
            return self._mk_error(err)

        data = response.json()
        # Ensure required fields exist before creating the dataclass
        if not all(key in data for key in ("name", "description", "ticker", "homepage")):
            return None
        return PoolMetadata(
            name=data["name"],
            description=data["description"],
            ticker=data["ticker"],
            homepage=data["homepage"],
        )

    def delist_pool(self, pool_id: str) -> PoolData | SmashError:
        """Delist a stake pool, returning `PoolData` on success or `SmashError` on failure."""
        url = f"{self.base_url}/api/v1/delist"
        try:
            response = requests.patch(
                url, json={"poolId": pool_id}, auth=self.auth, timeout=self.REQUEST_TIMEOUT
            )
            response.raise_for_status()
        except requests.exceptions.RequestException as err:
            # Delist errors come back as JSON ({"code": ..., "description": ...})
            return self._mk_error(err, json_message=True)
        return PoolData(pool_id=response.json()["poolId"])

    def enlist_pool(self, pool_id: str) -> PoolData | SmashError:
        """Enlist a stake pool, returning `PoolData` on success or `SmashError` on failure."""
        url = f"{self.base_url}/api/v1/enlist"
        try:
            response = requests.patch(
                url, json={"poolId": pool_id}, auth=self.auth, timeout=self.REQUEST_TIMEOUT
            )
            response.raise_for_status()
        except requests.exceptions.RequestException as err:
            return self._mk_error(err, json_message=True)
        return PoolData(pool_id=response.json()["poolId"])

    def reserve_ticker(self, ticker_name: str, pool_hash: str) -> bool:
        """Reserve a ticker for a stake pool. Return `True` on success."""
        url = f"{self.base_url}/api/v1/tickers/{ticker_name}"
        try:
            response = requests.post(
                url, json={"poolHash": pool_hash}, auth=self.auth, timeout=self.REQUEST_TIMEOUT
            )
            response.raise_for_status()
            return True
        except requests.exceptions.RequestException as err:
            # Lazy %-style args: the message is only formatted if the record is emitted
            LOGGER.warning("Failed to reserve ticker %s: %s", ticker_name, err)
            return False

    def get_pool_errors(
        self, pool_id: str, from_date: tp.Optional[str] = None
    ) -> tp.Optional[list]:
        """Fetch errors for a specific stake pool, or `None` on failure."""
        url = f"{self.base_url}/api/v1/errors/{pool_id}"
        params = {"fromDate": from_date} if from_date else None
        try:
            response = requests.get(
                url, params=params, auth=self.auth, timeout=self.REQUEST_TIMEOUT
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as err:
            LOGGER.warning("Failed to fetch errors for pool %s: %s", pool_id, err)
            return None

    def get_retired_pools(self) -> tp.Optional[list]:
        """Fetch list of retired pools, or `None` on failure."""
        url = f"{self.base_url}/api/v1/retired"
        try:
            response = requests.get(url, auth=self.auth, timeout=self.REQUEST_TIMEOUT)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as err:
            LOGGER.warning("Failed to fetch retired pools: %s", err)
            return None
140+
141+
142+
class SmashManager:
    """Keeps one `SmashClient` per cluster instance."""

    instances: tp.ClassVar[dict[int, SmashClient]] = {}

    @classmethod
    def get_smash_instance(cls) -> SmashClient:
        """Return a singleton instance of `SmashClient` for the given cluster instance."""
        instance_num = cluster_nodes.get_instance_num()
        client = cls.instances.get(instance_num)
        if client is None:
            # Lazily create and cache the client for this instance
            client = SmashClient(instance_num)
            cls.instances[instance_num] = client
        return client
153+
154+
155+
def get_client() -> SmashClient | None:
    """Global access to the SMASH client singleton.

    Returns `None` when the SMASH service is not available.
    """
    return SmashManager.get_smash_instance() if configuration.HAS_SMASH else None

0 commit comments

Comments
 (0)