
Commit dc04fda

Merge branch 'main' into farmer_solver_networking
2 parents ea2ee80 + 61034b5

19 files changed: +658 −600 lines

chia/_tests/cmds/cmd_test_utils.py

Lines changed: 1 addition & 42 deletions
@@ -22,7 +22,6 @@
 from chia.full_node.full_node_rpc_client import FullNodeRpcClient
 from chia.rpc.rpc_client import RpcClient
 from chia.simulator.simulator_full_node_rpc_client import SimulatorFullNodeRpcClient
-from chia.types.coin_record import CoinRecord
 from chia.types.signing_mode import SigningMode
 from chia.util.bech32m import encode_puzzle_hash
 from chia.util.config import load_config
@@ -31,7 +30,7 @@
 from chia.wallet.nft_wallet.nft_wallet import NFTWallet
 from chia.wallet.transaction_record import TransactionRecord
 from chia.wallet.util.transaction_type import TransactionType
-from chia.wallet.util.tx_config import CoinSelectionConfig, TXConfig
+from chia.wallet.util.tx_config import TXConfig
 from chia.wallet.util.wallet_types import WalletType
 from chia.wallet.wallet_request_types import (
     GetSyncStatusResponse,
@@ -232,46 +231,6 @@ async def nft_calculate_royalties(
             )
         )
 
-    async def get_spendable_coins(
-        self,
-        wallet_id: int,
-        coin_selection_config: CoinSelectionConfig,
-    ) -> tuple[list[CoinRecord], list[CoinRecord], list[Coin]]:
-        """
-        We return a tuple containing: (confirmed records, unconfirmed removals, unconfirmed additions)
-        """
-        self.add_to_log(
-            "get_spendable_coins",
-            (wallet_id, coin_selection_config),
-        )
-        confirmed_records = [
-            CoinRecord(
-                Coin(bytes32([1] * 32), bytes32([2] * 32), uint64(1234560000)),
-                uint32(123456),
-                uint32(0),
-                False,
-                uint64(0),
-            ),
-            CoinRecord(
-                Coin(bytes32([3] * 32), bytes32([4] * 32), uint64(1234560000)),
-                uint32(123456),
-                uint32(0),
-                False,
-                uint64(0),
-            ),
-        ]
-        unconfirmed_removals = [
-            CoinRecord(
-                Coin(bytes32([5] * 32), bytes32([6] * 32), uint64(1234570000)),
-                uint32(123457),
-                uint32(0),
-                True,
-                uint64(0),
-            )
-        ]
-        unconfirmed_additions = [Coin(bytes32([7] * 32), bytes32([8] * 32), uint64(1234580000))]
-        return confirmed_records, unconfirmed_removals, unconfirmed_additions
-
     async def send_transaction_multi(
         self,
         wallet_id: int,

chia/_tests/core/full_node/test_full_node.py

Lines changed: 59 additions & 0 deletions
@@ -3294,3 +3294,62 @@ def compare_unfinished_blocks(block1: UnfinishedBlock, block2: UnfinishedBlock)
     # Final assertion to check the entire block
     assert block1 == block2, "The entire block objects are not identical"
     return True
+
+
+@pytest.mark.anyio
+@pytest.mark.parametrize(
+    "condition, error",
+    [
+        (ConditionOpcode.ASSERT_HEIGHT_RELATIVE, "ASSERT_HEIGHT_RELATIVE_FAILED"),
+        (ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, "ASSERT_HEIGHT_ABSOLUTE_FAILED"),
+    ],
+)
+async def test_pending_tx_cache_retry_on_new_peak(
+    condition: ConditionOpcode, error: str, blockchain_constants: ConsensusConstants, caplog: pytest.LogCaptureFixture
+) -> None:
+    """
+    Covers PendingTXCache items that are placed there due to unmet relative or
+    absolute height conditions, to make sure those items get retried at peak
+    post processing when those conditions are met.
+    """
+    async with setup_simulators_and_wallets(1, 0, blockchain_constants) as new:
+        full_node_api = new.simulators[0].peer_api
+        bt = new.bt
+        wallet = WalletTool(test_constants)
+        ph = wallet.get_new_puzzlehash()
+        blocks = bt.get_consecutive_blocks(
+            3, guarantee_transaction_block=True, farmer_reward_puzzle_hash=ph, pool_reward_puzzle_hash=ph
+        )
+        for block in blocks:
+            await full_node_api.full_node.add_block(block)
+        peak = full_node_api.full_node.blockchain.get_peak()
+        assert peak is not None
+        current_height = peak.height
+        # Create a transaction with a height condition that makes it pending
+        coin = blocks[-1].get_included_reward_coins()[0]
+        if condition == ConditionOpcode.ASSERT_HEIGHT_RELATIVE:
+            condition_height = 1
+        else:
+            condition_height = current_height + 1
+        condition_dic = {condition: [ConditionWithArgs(condition, [int_to_bytes(condition_height)])]}
+        sb = wallet.generate_signed_transaction(uint64(42), ph, coin, condition_dic)
+        sb_name = sb.name()
+        # Send the transaction
+        res = await full_node_api.send_transaction(SendTransaction(sb))
+        assert res is not None
+        assert ProtocolMessageTypes(res.type) == ProtocolMessageTypes.transaction_ack
+        transaction_ack = TransactionAck.from_bytes(res.data)
+        assert transaction_ack.status == MempoolInclusionStatus.PENDING.value
+        assert transaction_ack.error == error
+        # Make sure it ends up in the pending cache, not the mempool
+        assert full_node_api.full_node.mempool_manager.get_mempool_item(sb_name, include_pending=False) is None
+        assert full_node_api.full_node.mempool_manager.get_mempool_item(sb_name, include_pending=True) is not None
+        # Advance peak to meet the asserted height condition
+        with caplog.at_level(logging.DEBUG):
+            blocks = bt.get_consecutive_blocks(2, block_list_input=blocks, guarantee_transaction_block=True)
+            for block in blocks:
+                await full_node_api.full_node.add_block(block)
+            # This should trigger peak post processing with the added transaction
+            assert f"Added transaction to mempool: {sb_name}\n" in caplog.text
+        # Make sure the transaction was retried and got added to the mempool
+        assert full_node_api.full_node.mempool_manager.get_mempool_item(sb_name, include_pending=False) is not None
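
Note: the new test above exercises the PendingTXCache retry path described in its docstring. The toy model below is not chia code; every class and field name in it is made up. It only sketches the behaviour being tested: a spend whose height assertion is not yet satisfied is parked as pending and only promoted to the mempool once a later peak meets the condition.

# Toy model of the behaviour under test; none of these classes exist in chia.
from dataclasses import dataclass, field


@dataclass
class PendingSpend:
    name: str
    required_height: int  # peak height at which the assert condition is met


@dataclass
class ToyMempool:
    peak_height: int
    mempool: dict[str, PendingSpend] = field(default_factory=dict)
    pending: dict[str, PendingSpend] = field(default_factory=dict)

    def add_spend(self, spend: PendingSpend) -> str:
        # Roughly mirrors the SUCCESS vs. PENDING distinction in the test
        if spend.required_height > self.peak_height:
            self.pending[spend.name] = spend
            return "PENDING"
        self.mempool[spend.name] = spend
        return "SUCCESS"

    def new_peak(self, height: int) -> list[str]:
        # "Peak post processing": retry pending items whose condition is now met
        self.peak_height = height
        retried = [name for name, spend in self.pending.items() if spend.required_height <= height]
        for name in retried:
            self.mempool[name] = self.pending.pop(name)
        return retried


mp = ToyMempool(peak_height=3)
assert mp.add_spend(PendingSpend("sb", required_height=4)) == "PENDING"
assert mp.new_peak(4) == ["sb"]  # retried once the peak advances
assert "sb" in mp.mempool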

chia/_tests/core/mempool/test_mempool_manager.py

Lines changed: 4 additions & 6 deletions
@@ -41,7 +41,6 @@
     QUOTE_BYTES,
     QUOTE_EXECUTION_COST,
     MempoolManager,
-    NewPeakItem,
     TimelockConditions,
     can_replace,
     check_removals,
@@ -3288,7 +3287,7 @@ async def test_new_peak_txs_added(condition_and_error: tuple[ConditionOpcode, Er
     assert mempool_manager.peak is not None
     condition_height = mempool_manager.peak.height + 1
     condition, expected_error = condition_and_error
-    sb, sb_name, result = await generate_and_add_spendbundle(mempool_manager, [[condition, condition_height]])
+    _, sb_name, result = await generate_and_add_spendbundle(mempool_manager, [[condition, condition_height]])
     _, status, error = result
     assert status == MempoolInclusionStatus.PENDING
     assert error == expected_error
@@ -3299,14 +3298,13 @@ async def test_new_peak_txs_added(condition_and_error: tuple[ConditionOpcode, Er
             create_test_block_record(height=uint32(condition_height)), spent_coins
         )
         # We're not there yet (needs to be higher, not equal)
+        assert new_peak_info.spend_bundle_ids == []
        assert mempool_manager.get_mempool_item(sb_name, include_pending=False) is None
-        assert new_peak_info.items == []
     else:
         spent_coins = None
         new_peak_info = await mempool_manager.new_peak(
             create_test_block_record(height=uint32(condition_height + 1)), spent_coins
         )
         # The item gets retried successfully now
-        mi = mempool_manager.get_mempool_item(sb_name, include_pending=False)
-        assert mi is not None
-        assert new_peak_info.items == [NewPeakItem(sb_name, sb, mi.conds)]
+        assert new_peak_info.spend_bundle_ids == [sb_name]
+        assert mempool_manager.get_mempool_item(sb_name, include_pending=False) is not None
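
Note: the updated assertions read new_peak_info.spend_bundle_ids and then look the item up via get_mempool_item, instead of comparing against NewPeakItem tuples. A self-contained sketch of that lookup pattern follows; only the spend_bundle_ids field and the get_mempool_item-style lookup are taken from the diff, every other name is a stand-in.

# Stand-in types; everything not shown in the diff above is illustrative.
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class ToyMempoolItem:
    name: bytes


@dataclass
class ToyNewPeakInfo:
    # Previously the result carried full items (id, spend bundle, conditions);
    # now only the spend bundle ids are reported.
    spend_bundle_ids: list[bytes] = field(default_factory=list)


class ToyMempoolManager:
    def __init__(self) -> None:
        self._items: dict[bytes, ToyMempoolItem] = {}

    def add(self, item: ToyMempoolItem) -> None:
        self._items[item.name] = item

    def get_mempool_item(self, name: bytes) -> Optional[ToyMempoolItem]:
        return self._items.get(name)


manager = ToyMempoolManager()
manager.add(ToyMempoolItem(b"sb"))
info = ToyNewPeakInfo(spend_bundle_ids=[b"sb"])
# Full items are fetched by id instead of being carried in the new-peak result:
assert all(manager.get_mempool_item(name) is not None for name in info.spend_bundle_ids)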

chia/_tests/core/server/test_rate_limits.py

Lines changed: 18 additions & 66 deletions
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 from dataclasses import dataclass
-from typing import Any, cast
+from typing import Union
 
 import pytest
 from chia_rs.sized_ints import uint32
@@ -13,8 +13,7 @@
 from chia.protocols.outbound_message import make_msg
 from chia.protocols.protocol_message_types import ProtocolMessageTypes
 from chia.protocols.shared_protocol import Capability
-from chia.server.rate_limit_numbers import RLSettings, compose_rate_limits, get_rate_limits_to_use
-from chia.server.rate_limit_numbers import rate_limits as rl_numbers
+from chia.server.rate_limit_numbers import RLSettings, Unlimited, get_rate_limits_to_use
 from chia.server.rate_limits import RateLimiter
 from chia.server.server import ChiaServer
 from chia.server.ws_connection import WSChiaConnection
@@ -67,39 +66,22 @@ async def test_limits_v2(incoming: bool, tx_msg: bool, limit_size: bool, monkeyp
     message_data = b"\0" * 1024
     msg_type = ProtocolMessageTypes.new_transaction
 
-    limits: dict[str, Any] = {}
+    limits: dict[ProtocolMessageTypes, Union[RLSettings, Unlimited]]
 
     if limit_size:
-        limits.update(
-            {
-                # this is the rate limit across all (non-tx) messages
-                "non_tx_freq": count * 2,
-                # this is the byte size limit across all (non-tx) messages
-                "non_tx_max_total_size": count * len(message_data),
-            }
-        )
+        agg_limit = RLSettings(False, count * 2, len(message_data), count * len(message_data))
     else:
-        limits.update(
-            {
-                # this is the rate limit across all (non-tx) messages
-                "non_tx_freq": count,
-                # this is the byte size limit across all (non-tx) messages
-                "non_tx_max_total_size": count * 2 * len(message_data),
-            }
-        )
+        agg_limit = RLSettings(False, count, len(message_data), count * 2 * len(message_data))
 
     if limit_size:
-        rate_limit = {msg_type: RLSettings(count * 2, 1024, count * len(message_data))}
+        limits = {msg_type: RLSettings(not tx_msg, count * 2, len(message_data), count * len(message_data))}
     else:
-        rate_limit = {msg_type: RLSettings(count, 1024, count * 2 * len(message_data))}
+        limits = {msg_type: RLSettings(not tx_msg, count, len(message_data), count * 2 * len(message_data))}
 
-    if tx_msg:
-        limits.update({"rate_limits_tx": rate_limit, "rate_limits_other": {}})
-    else:
-        limits.update({"rate_limits_other": rate_limit, "rate_limits_tx": {}})
-
-    def mock_get_limits(our_capabilities: list[Capability], peer_capabilities: list[Capability]) -> dict[str, Any]:
-        return limits
+    def mock_get_limits(
+        our_capabilities: list[Capability], peer_capabilities: list[Capability]
+    ) -> tuple[dict[ProtocolMessageTypes, Union[RLSettings, Unlimited]], RLSettings]:
+        return limits, agg_limit
 
     import chia.server.rate_limits
 
@@ -327,18 +309,18 @@ async def test_too_many_outgoing_messages() -> None:
     # Too many messages
     r = RateLimiter(incoming=False, get_time=lambda: 0)
     new_peers_message = make_msg(ProtocolMessageTypes.respond_peers, bytes([1]))
-    non_tx_freq = get_rate_limits_to_use(rl_v2, rl_v2)["non_tx_freq"]
+    _, agg_limit = get_rate_limits_to_use(rl_v2, rl_v2)
 
     passed = 0
     blocked = 0
-    for i in range(non_tx_freq):
+    for i in range(agg_limit.frequency):
         if r.process_msg_and_check(new_peers_message, rl_v2, rl_v2) is None:
             passed += 1
         else:
             blocked += 1
 
     assert passed == 10
-    assert blocked == non_tx_freq - passed
+    assert blocked == agg_limit.frequency - passed
 
     # ensure that *another* message type is not blocked because of this
 
@@ -351,18 +333,18 @@ async def test_too_many_incoming_messages() -> None:
     # Too many messages
     r = RateLimiter(incoming=True, get_time=lambda: 0)
     new_peers_message = make_msg(ProtocolMessageTypes.respond_peers, bytes([1]))
-    non_tx_freq = get_rate_limits_to_use(rl_v2, rl_v2)["non_tx_freq"]
+    _, agg_limit = get_rate_limits_to_use(rl_v2, rl_v2)
 
     passed = 0
     blocked = 0
-    for i in range(non_tx_freq):
+    for i in range(agg_limit.frequency):
         if r.process_msg_and_check(new_peers_message, rl_v2, rl_v2) is None:
             passed += 1
         else:
             blocked += 1
 
     assert passed == 10
-    assert blocked == non_tx_freq - passed
+    assert blocked == agg_limit.frequency - passed
 
     # ensure that other message types *are* blocked because of this
 
@@ -436,43 +418,13 @@ async def test_different_versions(
     # The following code checks whether all of the runs resulted in the same number of items in "rate_limits_tx",
     # which would mean the same rate limits are always used. This should not happen, since two nodes with V2
    # will use V2.
-    total_tx_msg_count = len(
-        get_rate_limits_to_use(a_con.local_capabilities, a_con.peer_capabilities)["rate_limits_tx"]
-    )
+    total_tx_msg_count = len(get_rate_limits_to_use(a_con.local_capabilities, a_con.peer_capabilities))
 
     test_different_versions_results.append(total_tx_msg_count)
     if len(test_different_versions_results) >= 4:
         assert len(set(test_different_versions_results)) >= 2
 
 
-@pytest.mark.anyio
-async def test_compose() -> None:
-    rl_1 = rl_numbers[1]
-    rl_2 = rl_numbers[2]
-    rl_1_rate_limits_other = cast(dict[ProtocolMessageTypes, RLSettings], rl_1["rate_limits_other"])
-    rl_2_rate_limits_other = cast(dict[ProtocolMessageTypes, RLSettings], rl_2["rate_limits_other"])
-    rl_1_rate_limits_tx = cast(dict[ProtocolMessageTypes, RLSettings], rl_1["rate_limits_tx"])
-    rl_2_rate_limits_tx = cast(dict[ProtocolMessageTypes, RLSettings], rl_2["rate_limits_tx"])
-    assert ProtocolMessageTypes.respond_children in rl_1_rate_limits_other
-    assert ProtocolMessageTypes.respond_children not in rl_1_rate_limits_tx
-    assert ProtocolMessageTypes.respond_children not in rl_2_rate_limits_other
-    assert ProtocolMessageTypes.respond_children in rl_2_rate_limits_tx
-
-    assert ProtocolMessageTypes.request_block in rl_1_rate_limits_other
-    assert ProtocolMessageTypes.request_block not in rl_1_rate_limits_tx
-    assert ProtocolMessageTypes.request_block not in rl_2_rate_limits_other
-    assert ProtocolMessageTypes.request_block not in rl_2_rate_limits_tx
-
-    comps = compose_rate_limits(rl_1, rl_2)
-    # v2 limits are used if present
-    assert ProtocolMessageTypes.respond_children not in comps["rate_limits_other"]
-    assert ProtocolMessageTypes.respond_children in comps["rate_limits_tx"]
-
-    # Otherwise, fall back to v1
-    assert ProtocolMessageTypes.request_block in rl_1_rate_limits_other
-    assert ProtocolMessageTypes.request_block not in rl_1_rate_limits_tx
-
-
 @pytest.mark.anyio
 @pytest.mark.parametrize(
     "msg_type, size",

chia/_tests/util/test_network_protocol_test.py

Lines changed: 18 additions & 0 deletions
@@ -4,6 +4,8 @@
 import inspect
 from typing import Any, cast
 
+import pytest
+
 from chia.protocols import (
     farmer_protocol,
     full_node_protocol,
@@ -264,3 +266,19 @@ def test_missing_messages() -> None:
     assert types_in_module(shared_protocol) == shared_msgs, (
         f"message types were added or removed from shared_protocol. {STANDARD_ADVICE}"
     )
+
+
+@pytest.mark.parametrize("version", [1, 2])
+def test_rate_limits_complete(version: int) -> None:
+    from chia.protocols.protocol_message_types import ProtocolMessageTypes
+    from chia.server.rate_limit_numbers import rate_limits
+
+    if version == 1:
+        composed = rate_limits[1]
+    elif version == 2:
+        composed = {
+            **rate_limits[1],
+            **rate_limits[2],
+        }
+
+    assert set(composed.keys()) == set(ProtocolMessageTypes)
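
Note: the new test builds the version-2 table by overlaying it on version 1 with dict unpacking and asserts that every ProtocolMessageTypes member has an entry. The same completeness check in miniature, with a made-up enum and placeholder limit values:

# Self-contained illustration; MsgType and the limit strings are invented for the example.
from enum import Enum


class MsgType(Enum):
    new_transaction = 1
    respond_peers = 2
    request_block = 3


rate_limits = {
    1: {MsgType.new_transaction: "v1-limit", MsgType.respond_peers: "v1-limit", MsgType.request_block: "v1-limit"},
    2: {MsgType.new_transaction: "v2-limit"},  # v2 only overrides some entries
}

composed = {**rate_limits[1], **rate_limits[2]}  # later entries win on key collisions
assert set(composed.keys()) == set(MsgType)
assert composed[MsgType.new_transaction] == "v2-limit"  # v2 wins where present
assert composed[MsgType.request_block] == "v1-limit"    # otherwise fall back to v1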
