1
1
import asyncio
2
- import time
3
2
from unittest .mock import AsyncMock , MagicMock , ANY
4
3
5
4
import pytest
6
- from scalecodec import ss58_encode
7
5
from websockets .exceptions import InvalidURI
8
6
9
7
from async_substrate_interface .async_substrate import AsyncSubstrateInterface
10
8
from async_substrate_interface .types import ScaleObj
11
- from tests .helpers .settings import ARCHIVE_ENTRYPOINT , LATENT_LITE_ENTRYPOINT
12
9
13
10
14
11
@pytest .mark .asyncio
@@ -102,7 +99,7 @@ async def test_runtime_call(monkeypatch):
102
99
@pytest .mark .asyncio
103
100
async def test_websocket_shutdown_timer ():
104
101
# using default ws shutdown timer of 5.0 seconds
105
- async with AsyncSubstrateInterface (LATENT_LITE_ENTRYPOINT ) as substrate :
102
+ async with AsyncSubstrateInterface ("wss://lite.sub.latent.to:443" ) as substrate :
106
103
await substrate .get_chain_head ()
107
104
await asyncio .sleep (6 )
108
105
assert (
@@ -116,112 +113,3 @@ async def test_websocket_shutdown_timer():
116
113
await substrate .get_chain_head ()
117
114
await asyncio .sleep (6 ) # same sleep time as before
118
115
assert substrate .ws ._initialized is True # connection should still be open
119
-
120
-
121
@pytest.mark.asyncio
async def test_legacy_decoding():
    """Exercise the legacy (pre-metadata-v15) decoding path.

    Targets a block mined before metadata v15 landed on-chain, so the
    interface must fall back to legacy decoding for events, query_map,
    and plain storage queries.
    """
    # roughly 4000 blocks before metadata v15 was added
    legacy_block_number = 3_010_611

    async with AsyncSubstrateInterface(ARCHIVE_ENTRYPOINT) as substrate:
        legacy_hash = await substrate.get_block_hash(legacy_block_number)

        # Events at that block must still decode into a plain list.
        block_events = await substrate.get_events(legacy_hash)
        assert isinstance(block_events, list)

        # Map queries must decode: int keys, ScaleObj values.
        netuid_map = await substrate.query_map(
            module="SubtensorModule",
            storage_function="NetworksAdded",
            block_hash=legacy_hash,
        )
        async for netuid, added in netuid_map:
            assert isinstance(netuid, int)
            assert isinstance(added, ScaleObj)

        # Single storage query: the on-chain timestamp at that block is fixed,
        # so the decoded value can be pinned exactly.
        now_query = await substrate.query(
            "Timestamp",
            "Now",
            block_hash=legacy_hash,
        )
        assert now_query.value == 1716358476004
146
-
147
-
148
@pytest.mark.asyncio
async def test_ss58_conversion():
    """Check query_map key decoding with decode_ss58 toggled off then on.

    With decoding disabled, keys come back as raw tuples of 32-byte account
    IDs; after flipping ``decode_ss58`` on the live instance, the same query
    must yield SS58 address strings instead.
    """
    async with AsyncSubstrateInterface(
        LATENT_LITE_ENTRYPOINT, ss58_format=42, decode_ss58=False
    ) as substrate:
        block_hash = await substrate.get_chain_finalised_head()
        qm = await substrate.query_map(
            "SubtensorModule",
            "OwnedHotkeys",
            block_hash=block_hash,
        )
        # only do the first page, bc otherwise this will be massive
        raw_account = None
        for storage_key, hotkeys in qm.records:
            assert isinstance(storage_key, tuple)
            assert isinstance(hotkeys, ScaleObj)
            assert isinstance(hotkeys.value, list)
            assert len(storage_key) == 1
            for entry in hotkeys.value:
                # each entry wraps a raw 32-byte public key
                assert len(entry[0]) == 32
                raw_account = entry[0]

        # NOTE(review): assumes the first page contained at least one hotkey
        # entry, as the original test did — verify against live chain data.
        encoded = ss58_encode(bytes(raw_account), substrate.ss58_format)
        assert isinstance(encoded, str)

        substrate.decode_ss58 = True  # change to decoding True

        qm = await substrate.query_map(
            "SubtensorModule",
            "OwnedHotkeys",
            block_hash=block_hash,
        )
        for storage_key, hotkeys in qm.records:
            assert isinstance(storage_key, str)
            assert isinstance(hotkeys, ScaleObj)
            assert isinstance(hotkeys.value, list)
            if hotkeys.value:
                for decoded in hotkeys.value:
                    assert isinstance(decoded, str)
186
-
187
-
188
@pytest.mark.asyncio
async def test_fully_exhaust_query_map():
    """Compare lazy (paged) vs fully-exhausted query_map on the same block.

    Verifies that:
      * a default ``query_map`` call prefetches at most one page (<= 100
        records),
      * async-iterating the lazy result pulls in the remaining records,
      * ``fully_exhaust=True`` fetches everything eagerly, is not slower
        than the lazy walk, and yields the same total record count.
    """
    async with AsyncSubstrateInterface(LATENT_LITE_ENTRYPOINT) as substrate:
        block_hash = await substrate.get_chain_finalised_head()

        # Use time.monotonic() for interval measurement: unlike time.time(),
        # it cannot jump with wall-clock adjustments, which would corrupt the
        # timing comparison asserted below.
        # (Fixed: original also misspelled this local as "exhauster".)
        non_fully_exhausted_start = time.monotonic()
        non_fully_exhausted_qm = await substrate.query_map(
            "SubtensorModule",
            "CRV3WeightCommits",
            block_hash=block_hash,
        )
        initial_records_count = len(non_fully_exhausted_qm.records)
        assert initial_records_count <= 100  # default page size

        exhausted_records_count = 0
        async for _ in non_fully_exhausted_qm:
            exhausted_records_count += 1
        non_fully_exhausted_time = time.monotonic() - non_fully_exhausted_start

        # Iterating should have grown (or at least kept) the record cache.
        assert len(non_fully_exhausted_qm.records) >= initial_records_count

        fully_exhausted_start = time.monotonic()
        fully_exhausted_qm = await substrate.query_map(
            "SubtensorModule",
            "CRV3WeightCommits",
            block_hash=block_hash,
            fully_exhaust=True,
        )
        fully_exhausted_time = time.monotonic() - fully_exhausted_start

        initial_records_count_fully_exhaust = len(fully_exhausted_qm.records)
        # NOTE(review): timing comparisons against a live endpoint are
        # inherently load-dependent; the failure message below explains the
        # known benign cause (fewer records than one page).
        assert fully_exhausted_time <= non_fully_exhausted_time, (
            f"Fully exhausted took longer than non-fully exhausted with "
            f"{len(non_fully_exhausted_qm.records)} records in non-fully exhausted "
            f"in {non_fully_exhausted_time} seconds, and {initial_records_count_fully_exhaust} in fully exhausted"
            f" in {fully_exhausted_time} seconds. This could be caused by the fact that on this specific block, "
            f"there are fewer records than take up a single page. This difference should still be small."
        )

        fully_exhausted_records_count = 0
        async for _ in fully_exhausted_qm:
            fully_exhausted_records_count += 1
        # Both variants must ultimately expose the same number of records.
        assert fully_exhausted_records_count == initial_records_count_fully_exhaust
        assert initial_records_count_fully_exhaust == exhausted_records_count
0 commit comments