diff --git a/chia/_tests/blockchain/blockchain_test_utils.py b/chia/_tests/blockchain/blockchain_test_utils.py index 3de9ff90553d..4bc137e5c258 100644 --- a/chia/_tests/blockchain/blockchain_test_utils.py +++ b/chia/_tests/blockchain/blockchain_test_utils.py @@ -116,11 +116,10 @@ async def _validate_and_add_block( if err is not None: # Got an error raise AssertionError(err) - else: - # Here we will enforce checking of the exact error - if err != expected_error: - # Did not get the right error, or did not get an error - raise AssertionError(f"Expected {expected_error} but got {err}") + # Here we will enforce checking of the exact error + elif err != expected_error: + # Did not get the right error, or did not get an error + raise AssertionError(f"Expected {expected_error} but got {err}") if expected_result is not None and expected_result != result: raise AssertionError(f"Expected {expected_result} but got {result}") diff --git a/chia/_tests/core/data_layer/test_data_store.py b/chia/_tests/core/data_layer/test_data_store.py index 35777e8a7b1c..91a0a2d9430e 100644 --- a/chia/_tests/core/data_layer/test_data_store.py +++ b/chia/_tests/core/data_layer/test_data_store.py @@ -2385,11 +2385,10 @@ async def test_get_leaf_at_minimum_height( if isinstance(node, InternalNode): heights[node.left_hash] = heights[node.hash] + 1 heights[node.right_hash] = heights[node.hash] + 1 + elif min_leaf_height is not None: + min_leaf_height = min(min_leaf_height, heights[node.hash]) else: - if min_leaf_height is not None: - min_leaf_height = min(min_leaf_height, heights[node.hash]) - else: - min_leaf_height = heights[node.hash] + min_leaf_height = heights[node.hash] assert min_leaf_height is not None if pre > 0: diff --git a/chia/_tests/core/full_node/test_full_node.py b/chia/_tests/core/full_node/test_full_node.py index 268a270b9306..89022b8f8c13 100644 --- a/chia/_tests/core/full_node/test_full_node.py +++ b/chia/_tests/core/full_node/test_full_node.py @@ -2678,16 +2678,15 @@ async def test_long_reorg_nodes( blocks = default_10000_blocks[: 1600 - chain_length] reorg_blocks = test_long_reorg_blocks_light[: 1600 - chain_length] reorg_height = 2000 - else: - if fork_point == 1500: - blocks = default_10000_blocks[: 1900 - chain_length] - reorg_blocks = test_long_reorg_1500_blocks[: 1900 - chain_length] - reorg_height = 2300 - else: # pragma: no cover - pytest.skip("We rely on the light-blocks test for a 0 forkpoint") - blocks = default_10000_blocks[: 1100 - chain_length] - # reorg_blocks = test_long_reorg_blocks[: 1100 - chain_length] - reorg_height = 1600 + elif fork_point == 1500: + blocks = default_10000_blocks[: 1900 - chain_length] + reorg_blocks = test_long_reorg_1500_blocks[: 1900 - chain_length] + reorg_height = 2300 + else: # pragma: no cover + pytest.skip("We rely on the light-blocks test for a 0 forkpoint") + blocks = default_10000_blocks[: 1100 - chain_length] + # reorg_blocks = test_long_reorg_blocks[: 1100 - chain_length] + reorg_height = 1600 # this is a pre-requisite for a reorg to happen assert default_10000_blocks[reorg_height].weight > reorg_blocks[-1].weight @@ -3163,15 +3162,14 @@ async def declare_pos_unfinished_block( challenge_chain_sp = block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash() if block.reward_chain_block.reward_chain_sp_vdf is not None: reward_chain_sp = block.reward_chain_block.reward_chain_sp_vdf.output.get_hash() + elif len(block.finished_sub_slots) > 0: + reward_chain_sp = block.finished_sub_slots[-1].reward_chain.get_hash() else: - if len(block.finished_sub_slots) 
> 0: - reward_chain_sp = block.finished_sub_slots[-1].reward_chain.get_hash() - else: - curr = blockchain.block_record(block.prev_header_hash) - while not curr.first_in_sub_slot: - curr = blockchain.block_record(curr.prev_hash) - assert curr.finished_reward_slot_hashes is not None - reward_chain_sp = curr.finished_reward_slot_hashes[-1] + curr = blockchain.block_record(block.prev_header_hash) + while not curr.first_in_sub_slot: + curr = blockchain.block_record(curr.prev_hash) + assert curr.finished_reward_slot_hashes is not None + reward_chain_sp = curr.finished_reward_slot_hashes[-1] farmer_reward_address = block.foliage.foliage_block_data.farmer_reward_puzzle_hash pool_target = block.foliage.foliage_block_data.pool_target pool_target_signature = block.foliage.foliage_block_data.pool_signature diff --git a/chia/_tests/plotting/test_plot_manager.py b/chia/_tests/plotting/test_plot_manager.py index 3108da756943..15ea5c2e3886 100644 --- a/chia/_tests/plotting/test_plot_manager.py +++ b/chia/_tests/plotting/test_plot_manager.py @@ -117,10 +117,9 @@ def refresh_callback(self, event: PlotRefreshEvents, refresh_result: PlotRefresh if plot_info.prover.get_filename() == value.prover.get_filename(): values_found += 1 continue - else: - if value in expected_list: - values_found += 1 - continue + elif value in expected_list: + values_found += 1 + continue if values_found != len(expected_list): log.error(f"{name} invalid: values_found {values_found} expected {len(expected_list)}") return diff --git a/chia/_tests/wallet/test_singleton_lifecycle_fast.py b/chia/_tests/wallet/test_singleton_lifecycle_fast.py index bc92c86ff59b..4f9e0b708748 100644 --- a/chia/_tests/wallet/test_singleton_lifecycle_fast.py +++ b/chia/_tests/wallet/test_singleton_lifecycle_fast.py @@ -65,10 +65,9 @@ def satisfies_hint(obj: T, type_hint: type[T]) -> bool: object_hint_pairs.extend((v, args[1]) for v in obj.values()) else: raise NotImplementedError(f"Type {origin} is not yet supported") - else: - # Handle concrete types - if type(obj) is not type_hint: - return False + # Handle concrete types + elif type(obj) is not type_hint: + return False return True diff --git a/chia/cmds/keys_funcs.py b/chia/cmds/keys_funcs.py index 5b0eef7b7a3f..c4ad0053f7d5 100644 --- a/chia/cmds/keys_funcs.py +++ b/chia/cmds/keys_funcs.py @@ -743,11 +743,10 @@ def derive_child_key( if non_observer_derivation: assert current_sk is not None # semantics above guarantee this current_sk = _derive_path(current_sk, path_indices) + elif current_sk is not None: + current_sk = _derive_path_unhardened(current_sk, path_indices) else: - if current_sk is not None: - current_sk = _derive_path_unhardened(current_sk, path_indices) - else: - current_pk = _derive_pk_unhardened(current_pk, path_indices) + current_pk = _derive_pk_unhardened(current_pk, path_indices) derivation_root_sk = current_sk derivation_root_pk = current_pk @@ -768,13 +767,12 @@ def derive_child_key( assert derivation_root_sk is not None # semantics above guarantee this sk = _derive_path(derivation_root_sk, [i]) pk = sk.get_g1() + elif derivation_root_sk is not None: + sk = _derive_path_unhardened(derivation_root_sk, [i]) + pk = sk.get_g1() else: - if derivation_root_sk is not None: - sk = _derive_path_unhardened(derivation_root_sk, [i]) - pk = sk.get_g1() - else: - sk = None - pk = _derive_pk_unhardened(derivation_root_pk, [i]) + sk = None + pk = _derive_pk_unhardened(derivation_root_pk, [i]) hd_path: str = ( " (" + hd_path_root + str(i) + ("n" if non_observer_derivation else "") + ")" if show_hd_path 
else "" ) diff --git a/chia/cmds/wallet_funcs.py b/chia/cmds/wallet_funcs.py index 6f0051e02054..bae70e4518d8 100644 --- a/chia/cmds/wallet_funcs.py +++ b/chia/cmds/wallet_funcs.py @@ -1244,9 +1244,8 @@ async def mint_nft( raise ValueError("Disabling DID ownership is not supported for this NFT wallet, it does have a DID") else: did_id = None - else: - if not wallet_has_did: - did_id = "" + elif not wallet_has_did: + did_id = "" mint_response = await wallet_client.mint_nft( request=NFTMintNFTRequest( diff --git a/chia/consensus/block_body_validation.py b/chia/consensus/block_body_validation.py index f68adaf09398..c113184cf9c6 100644 --- a/chia/consensus/block_body_validation.py +++ b/chia/consensus/block_body_validation.py @@ -334,9 +334,8 @@ async def validate_block_body( if block.transactions_generator is not None: if std_hash(bytes(block.transactions_generator)) != block.transactions_info.generator_root: return Err.INVALID_TRANSACTIONS_GENERATOR_HASH - else: - if block.transactions_info.generator_root != bytes([0] * 32): - return Err.INVALID_TRANSACTIONS_GENERATOR_HASH + elif block.transactions_info.generator_root != bytes([0] * 32): + return Err.INVALID_TRANSACTIONS_GENERATOR_HASH # 8a. The generator_ref_list must be the hash of the serialized bytes of # the generator ref list for this block (or 'one' bytes [0x01] if no generator) diff --git a/chia/consensus/block_creation.py b/chia/consensus/block_creation.py index 7ad94421aea2..45b14f63feb9 100644 --- a/chia/consensus/block_creation.py +++ b/chia/consensus/block_creation.py @@ -354,17 +354,16 @@ def create_unfinished_block( else: if new_sub_slot: rc_sp_hash = finished_sub_slots[-1].reward_chain.get_hash() + elif is_genesis: + rc_sp_hash = constants.GENESIS_CHALLENGE else: - if is_genesis: - rc_sp_hash = constants.GENESIS_CHALLENGE - else: - assert prev_block is not None - assert blocks is not None - curr = prev_block - while not curr.first_in_sub_slot: - curr = blocks.block_record(curr.prev_hash) - assert curr.finished_reward_slot_hashes is not None - rc_sp_hash = curr.finished_reward_slot_hashes[-1] + assert prev_block is not None + assert blocks is not None + curr = prev_block + while not curr.first_in_sub_slot: + curr = blocks.block_record(curr.prev_hash) + assert curr.finished_reward_slot_hashes is not None + rc_sp_hash = curr.finished_reward_slot_hashes[-1] signage_point = SignagePoint(None, None, None, None) cc_sp_signature: Optional[G2Element] = get_plot_signature(cc_sp_hash, proof_of_space.plot_public_key) diff --git a/chia/consensus/block_header_validation.py b/chia/consensus/block_header_validation.py index 9fb5174a8d8b..3ec214a04736 100644 --- a/chia/consensus/block_header_validation.py +++ b/chia/consensus/block_header_validation.py @@ -138,13 +138,12 @@ def validate_unfinished_header_block( if not curr.finished_challenge_slot_hashes[-1] == challenge_hash: print(curr.finished_challenge_slot_hashes[-1], challenge_hash) return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH) - else: - # 2c. check sub-slot challenge hash for empty slot - if ( - not header_block.finished_sub_slots[finished_sub_slot_n - 1].challenge_chain.get_hash() - == challenge_hash - ): - return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH) + # 2c. check sub-slot challenge hash for empty slot + elif ( + not header_block.finished_sub_slots[finished_sub_slot_n - 1].challenge_chain.get_hash() + == challenge_hash + ): + return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH) if genesis_block: # 2d. 
Validate that genesis block has no ICC @@ -176,20 +175,19 @@ def validate_unfinished_header_block( icc_vdf_input = ClassgroupElement.get_default_element() else: icc_vdf_input = prev_b.infused_challenge_vdf_output - else: - # This is not the first sub slot after the last block, so we might not have an ICC - if ( - header_block.finished_sub_slots[finished_sub_slot_n - 1].reward_chain.deficit - < constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - ): - finished_ss = header_block.finished_sub_slots[finished_sub_slot_n - 1] - assert finished_ss.infused_challenge_chain is not None + # This is not the first sub slot after the last block, so we might not have an ICC + elif ( + header_block.finished_sub_slots[finished_sub_slot_n - 1].reward_chain.deficit + < constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK + ): + finished_ss = header_block.finished_sub_slots[finished_sub_slot_n - 1] + assert finished_ss.infused_challenge_chain is not None - # Only sets the icc iff the previous sub slots deficit is 4 or less - icc_challenge_hash = finished_ss.infused_challenge_chain.get_hash() - icc_iters_committed = prev_b.sub_slot_iters - icc_iters_proof = icc_iters_committed - icc_vdf_input = ClassgroupElement.get_default_element() + # Only sets the icc iff the previous sub slots deficit is 4 or less + icc_challenge_hash = finished_ss.infused_challenge_chain.get_hash() + icc_iters_committed = prev_b.sub_slot_iters + icc_iters_proof = icc_iters_committed + icc_vdf_input = ClassgroupElement.get_default_element() # 2e. Validate that there is not icc iff icc_challenge hash is None assert (sub_slot.infused_challenge_chain is None) == (icc_challenge_hash is None) @@ -241,10 +239,9 @@ def validate_unfinished_header_block( != sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash ): return None, ValidationError(Err.INVALID_ICC_HASH_CC) - else: - # 2h. Check infused challenge sub-slot hash not included for other deficits - if sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash is not None: - return None, ValidationError(Err.INVALID_ICC_HASH_CC) + # 2h. Check infused challenge sub-slot hash not included for other deficits + elif sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash is not None: + return None, ValidationError(Err.INVALID_ICC_HASH_CC) # 2i. Check infused challenge sub-slot hash in reward sub-slot if ( @@ -396,10 +393,9 @@ def validate_unfinished_header_block( f"{sub_slot.reward_chain.deficit}", ), ) - else: - # 2t. Otherwise, deficit stays the same at the slot ends, cannot reset until 0 - if sub_slot.reward_chain.deficit != prev_b.deficit: - return None, ValidationError(Err.INVALID_DEFICIT, "deficit is wrong at slot end") + # 2t. Otherwise, deficit stays the same at the slot ends, cannot reset until 0 + elif sub_slot.reward_chain.deficit != prev_b.deficit: + return None, ValidationError(Err.INVALID_DEFICIT, "deficit is wrong at slot end") # 3. 
Check sub-epoch summary # Note that the subepoch summary is the summary of the previous subepoch (not the one that just finished) @@ -635,16 +631,15 @@ def validate_unfinished_header_block( return None, ValidationError(Err.INVALID_RC_SP_VDF) if new_sub_slot: rc_sp_hash = header_block.finished_sub_slots[-1].reward_chain.get_hash() + elif genesis_block: + rc_sp_hash = constants.GENESIS_CHALLENGE else: - if genesis_block: - rc_sp_hash = constants.GENESIS_CHALLENGE - else: - assert prev_b is not None - curr = prev_b - while not curr.first_in_sub_slot: - curr = blocks.block_record(curr.prev_hash) - assert curr.finished_reward_slot_hashes is not None - rc_sp_hash = curr.finished_reward_slot_hashes[-1] + assert prev_b is not None + curr = prev_b + while not curr.first_in_sub_slot: + curr = blocks.block_record(curr.prev_hash) + assert curr.finished_reward_slot_hashes is not None + rc_sp_hash = curr.finished_reward_slot_hashes[-1] # 12. Check reward chain sp signature if not AugSchemeMPL.verify( @@ -761,25 +756,24 @@ def validate_unfinished_header_block( != constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH ): return None, ValidationError(Err.INVALID_PREFARM) + # 20b. If pospace has a pool pk, heck pool target signature. Should not check this for genesis block. + elif header_block.reward_chain_block.proof_of_space.pool_public_key is not None: + assert header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash is None + assert header_block.foliage.foliage_block_data.pool_signature is not None + if not AugSchemeMPL.verify( + header_block.reward_chain_block.proof_of_space.pool_public_key, + bytes(header_block.foliage.foliage_block_data.pool_target), + header_block.foliage.foliage_block_data.pool_signature, + ): + return None, ValidationError(Err.INVALID_POOL_SIGNATURE) else: - # 20b. If pospace has a pool pk, heck pool target signature. Should not check this for genesis block. - if header_block.reward_chain_block.proof_of_space.pool_public_key is not None: - assert header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash is None - assert header_block.foliage.foliage_block_data.pool_signature is not None - if not AugSchemeMPL.verify( - header_block.reward_chain_block.proof_of_space.pool_public_key, - bytes(header_block.foliage.foliage_block_data.pool_target), - header_block.foliage.foliage_block_data.pool_signature, - ): - return None, ValidationError(Err.INVALID_POOL_SIGNATURE) - else: - # 20c. Otherwise, the plot is associated with a contract puzzle hash, not a public key - assert header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash is not None - if ( - header_block.foliage.foliage_block_data.pool_target.puzzle_hash - != header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash - ): - return None, ValidationError(Err.INVALID_POOL_TARGET) + # 20c. Otherwise, the plot is associated with a contract puzzle hash, not a public key + assert header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash is not None + if ( + header_block.foliage.foliage_block_data.pool_target.puzzle_hash + != header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash + ): + return None, ValidationError(Err.INVALID_POOL_TARGET) # 21. Check extension data if applicable. None for mainnet. # 22. Check if foliage block is present @@ -928,18 +922,17 @@ def validate_finished_header_block( # 29. 
Check challenge chain infusion point VDF if new_sub_slot: cc_vdf_challenge = header_block.finished_sub_slots[-1].challenge_chain.get_hash() + # Not first block in slot + elif genesis_block: + # genesis block + cc_vdf_challenge = constants.GENESIS_CHALLENGE else: - # Not first block in slot - if genesis_block: - # genesis block - cc_vdf_challenge = constants.GENESIS_CHALLENGE - else: - assert prev_b is not None - # Not genesis block, go back to first block in slot - curr = prev_b - while curr.finished_challenge_slot_hashes is None: - curr = blocks.block_record(curr.prev_hash) - cc_vdf_challenge = curr.finished_challenge_slot_hashes[-1] + assert prev_b is not None + # Not genesis block, go back to first block in slot + curr = prev_b + while curr.finished_challenge_slot_hashes is None: + curr = blocks.block_record(curr.prev_hash) + cc_vdf_challenge = curr.finished_challenge_slot_hashes[-1] cc_target_vdf_info = VDFInfo( cc_vdf_challenge, @@ -1047,9 +1040,8 @@ def validate_finished_header_block( icc_target_vdf_info, ): return None, ValidationError(Err.INVALID_ICC_VDF, "invalid icc proof") - else: - if header_block.infused_challenge_chain_ip_proof is not None: - return None, ValidationError(Err.INVALID_ICC_VDF) + elif header_block.infused_challenge_chain_ip_proof is not None: + return None, ValidationError(Err.INVALID_ICC_VDF) # 32. Check reward block hash if header_block.foliage.reward_block_hash != header_block.reward_chain_block.get_hash(): diff --git a/chia/consensus/blockchain.py b/chia/consensus/blockchain.py index 2117f963f308..00d9df5d1a4c 100644 --- a/chia/consensus/blockchain.py +++ b/chia/consensus/blockchain.py @@ -708,9 +708,8 @@ async def validate_unfinished_block_header( if block.transactions_generator is not None: if std_hash(bytes(block.transactions_generator)) != block.transactions_info.generator_root: return None, Err.INVALID_TRANSACTIONS_GENERATOR_HASH - else: - if block.transactions_info.generator_root != bytes([0] * 32): - return None, Err.INVALID_TRANSACTIONS_GENERATOR_HASH + elif block.transactions_info.generator_root != bytes([0] * 32): + return None, Err.INVALID_TRANSACTIONS_GENERATOR_HASH if ( block.foliage_transaction_block is None diff --git a/chia/consensus/get_block_challenge.py b/chia/consensus/get_block_challenge.py index de4a6952f040..9064218d104f 100644 --- a/chia/consensus/get_block_challenge.py +++ b/chia/consensus/get_block_challenge.py @@ -72,34 +72,33 @@ def get_block_challenge( else: # No overflow, new slot with a new challenge challenge = header_block.finished_sub_slots[-1].challenge_chain.get_hash() + elif genesis_block: + challenge = constants.GENESIS_CHALLENGE else: - if genesis_block: - challenge = constants.GENESIS_CHALLENGE - else: - if overflow: - if skip_overflow_last_ss_validation: - # Overflow infusion without the new slot, so get the last challenge - challenges_to_look_for = 1 - else: - # Overflow infusion, so get the second to last challenge. 
skip_overflow_last_ss_validation is False, - # Which means no sub slots are omitted - challenges_to_look_for = 2 - else: + if overflow: + if skip_overflow_last_ss_validation: + # Overflow infusion without the new slot, so get the last challenge challenges_to_look_for = 1 - reversed_challenge_hashes: list[bytes32] = [] - curr: BlockRecord = blocks.block_record(header_block.prev_header_hash) - while len(reversed_challenge_hashes) < challenges_to_look_for: - if curr.first_in_sub_slot: - assert curr.finished_challenge_slot_hashes is not None - reversed_challenge_hashes += reversed(curr.finished_challenge_slot_hashes) - if len(reversed_challenge_hashes) >= challenges_to_look_for: - break - if curr.height == 0: - assert curr.finished_challenge_slot_hashes is not None - assert len(curr.finished_challenge_slot_hashes) > 0 + else: + # Overflow infusion, so get the second to last challenge. skip_overflow_last_ss_validation is False, + # Which means no sub slots are omitted + challenges_to_look_for = 2 + else: + challenges_to_look_for = 1 + reversed_challenge_hashes: list[bytes32] = [] + curr: BlockRecord = blocks.block_record(header_block.prev_header_hash) + while len(reversed_challenge_hashes) < challenges_to_look_for: + if curr.first_in_sub_slot: + assert curr.finished_challenge_slot_hashes is not None + reversed_challenge_hashes += reversed(curr.finished_challenge_slot_hashes) + if len(reversed_challenge_hashes) >= challenges_to_look_for: break - curr = blocks.block_record(curr.prev_hash) - challenge = reversed_challenge_hashes[challenges_to_look_for - 1] + if curr.height == 0: + assert curr.finished_challenge_slot_hashes is not None + assert len(curr.finished_challenge_slot_hashes) > 0 + break + curr = blocks.block_record(curr.prev_hash) + challenge = reversed_challenge_hashes[challenges_to_look_for - 1] return challenge diff --git a/chia/daemon/server.py b/chia/daemon/server.py index c03fdecf5ff9..0189650cd47b 100644 --- a/chia/daemon/server.py +++ b/chia/daemon/server.py @@ -1373,9 +1373,8 @@ async def register_service(self, websocket: WebSocketResponse, request: dict[str "service": service, "queue": self.extract_plot_queue(), } - else: - if self.ping_job is None: - self.ping_job = create_referenced_task(self.ping_task()) + elif self.ping_job is None: + self.ping_job = create_referenced_task(self.ping_task()) self.log.info(f"registered for service {service}") log.info(f"{response}") return response diff --git a/chia/data_layer/data_layer_wallet.py b/chia/data_layer/data_layer_wallet.py index 8876b81d1b11..771d2719458e 100644 --- a/chia/data_layer/data_layer_wallet.py +++ b/chia/data_layer/data_layer_wallet.py @@ -590,15 +590,14 @@ async def generate_signed_transaction( if coins is None or len(coins) == 0: if launcher_id is None: raise ValueError("Not enough info to know which DL coin to send") + elif len(coins) != 1: + raise ValueError("The wallet can only send one DL coin at a time") else: - if len(coins) != 1: - raise ValueError("The wallet can only send one DL coin at a time") + record = await self.wallet_state_manager.dl_store.get_singleton_record(next(iter(coins)).name()) + if record is None: + raise ValueError("The specified coin is not a tracked DL") else: - record = await self.wallet_state_manager.dl_store.get_singleton_record(next(iter(coins)).name()) - if record is None: - raise ValueError("The specified coin is not a tracked DL") - else: - launcher_id = record.launcher_id + launcher_id = record.launcher_id if len(amounts) != 1 or len(puzzle_hashes) != 1: raise ValueError("The wallet 
can only send one DL coin to one place at a time") diff --git a/chia/data_layer/data_store.py b/chia/data_layer/data_store.py index 320312c4eb10..67be682079dc 100644 --- a/chia/data_layer/data_store.py +++ b/chia/data_layer/data_store.py @@ -1497,16 +1497,15 @@ async def insert_batch( pending_root = await self.get_pending_root(store_id=store_id) if pending_root is None: latest_local_root: Optional[Root] = old_root - else: - if pending_root.status == Status.PENDING_BATCH: - # We have an unfinished batch, continue the current batch on top of it. - if pending_root.generation != old_root.generation + 1: - raise Exception("Internal error") - await self.change_root_status(pending_root, Status.COMMITTED) - await self.build_ancestor_table_for_latest_root(store_id=store_id) - latest_local_root = pending_root - else: + elif pending_root.status == Status.PENDING_BATCH: + # We have an unfinished batch, continue the current batch on top of it. + if pending_root.generation != old_root.generation + 1: raise Exception("Internal error") + await self.change_root_status(pending_root, Status.COMMITTED) + await self.build_ancestor_table_for_latest_root(store_id=store_id) + latest_local_root = pending_root + else: + raise Exception("Internal error") assert latest_local_root is not None @@ -1548,11 +1547,10 @@ async def insert_batch( if old_node is None: pending_autoinsert_hashes.append(terminal_node_hash) + elif key_hash_frequency[hash] == 1: + raise Exception(f"Key already present: {key.hex()}") else: - if key_hash_frequency[hash] == 1: - raise Exception(f"Key already present: {key.hex()}") - else: - pending_upsert_new_hashes[old_node.hash] = terminal_node_hash + pending_upsert_new_hashes[old_node.hash] = terminal_node_hash continue insert_result = await self.autoinsert( key, value, store_id, True, Status.COMMITTED, root=latest_local_root @@ -2228,13 +2226,12 @@ async def get_subscriptions(self) -> list[Subscription]: ) else: subscriptions.append(Subscription(store_id, [])) - else: - if url is not None and num_consecutive_failures is not None and ignore_till is not None: - new_servers_info = subscription.servers_info - new_servers_info.append(ServerInfo(url, num_consecutive_failures, ignore_till)) - new_subscription = replace(subscription, servers_info=new_servers_info) - subscriptions.remove(subscription) - subscriptions.append(new_subscription) + elif url is not None and num_consecutive_failures is not None and ignore_till is not None: + new_servers_info = subscription.servers_info + new_servers_info.append(ServerInfo(url, num_consecutive_failures, ignore_till)) + new_subscription = replace(subscription, servers_info=new_servers_info) + subscriptions.remove(subscription) + subscriptions.append(new_subscription) return subscriptions diff --git a/chia/full_node/full_node_api.py b/chia/full_node/full_node_api.py index dc435c436ced..4d0a5dd1b2c8 100644 --- a/chia/full_node/full_node_api.py +++ b/chia/full_node/full_node_api.py @@ -1393,14 +1393,11 @@ async def send_transaction( error_name = error.name if error is not None else None if status == MempoolInclusionStatus.SUCCESS: response = wallet_protocol.TransactionAck(spend_name, uint8(status.value), error_name) + # If it failed/pending, but it previously succeeded (in mempool), this is idempotence, return SUCCESS + elif self.full_node.mempool_manager.get_spendbundle(spend_name) is not None: + response = wallet_protocol.TransactionAck(spend_name, uint8(MempoolInclusionStatus.SUCCESS.value), None) else: - # If it failed/pending, but it previously succeeded (in 
mempool), this is idempotence, return SUCCESS - if self.full_node.mempool_manager.get_spendbundle(spend_name) is not None: - response = wallet_protocol.TransactionAck( - spend_name, uint8(MempoolInclusionStatus.SUCCESS.value), None - ) - else: - response = wallet_protocol.TransactionAck(spend_name, uint8(status.value), error_name) + response = wallet_protocol.TransactionAck(spend_name, uint8(status.value), error_name) return make_msg(ProtocolMessageTypes.transaction_ack, response) @metadata.request() diff --git a/chia/full_node/weight_proof.py b/chia/full_node/weight_proof.py index 71886ad759fe..98954878121f 100644 --- a/chia/full_node/weight_proof.py +++ b/chia/full_node/weight_proof.py @@ -411,17 +411,16 @@ async def __first_sub_slot_vdfs( while not curr_sub_rec.sub_epoch_summary_included: curr_sub_rec = blocks[curr_sub_rec.prev_hash] first_rc_end_of_slot_vdf = self.first_rc_end_of_slot_vdf(header_block, blocks, header_blocks) + elif header_block_sub_rec.overflow and header_block_sub_rec.first_in_sub_slot: + sub_slots_num = 2 + while sub_slots_num > 0 and curr_sub_rec.height > 0: + if curr_sub_rec.first_in_sub_slot: + assert curr_sub_rec.finished_challenge_slot_hashes is not None + sub_slots_num -= len(curr_sub_rec.finished_challenge_slot_hashes) + curr_sub_rec = blocks[curr_sub_rec.prev_hash] else: - if header_block_sub_rec.overflow and header_block_sub_rec.first_in_sub_slot: - sub_slots_num = 2 - while sub_slots_num > 0 and curr_sub_rec.height > 0: - if curr_sub_rec.first_in_sub_slot: - assert curr_sub_rec.finished_challenge_slot_hashes is not None - sub_slots_num -= len(curr_sub_rec.finished_challenge_slot_hashes) - curr_sub_rec = blocks[curr_sub_rec.prev_hash] - else: - while not curr_sub_rec.first_in_sub_slot and curr_sub_rec.height > 0: - curr_sub_rec = blocks[curr_sub_rec.prev_hash] + while not curr_sub_rec.first_in_sub_slot and curr_sub_rec.height > 0: + curr_sub_rec = blocks[curr_sub_rec.prev_hash] curr = header_blocks[curr_sub_rec.header_hash] sub_slots_data: list[SubSlotData] = [] diff --git a/chia/plotting/create_plots.py b/chia/plotting/create_plots.py index 08369c9c2c54..14b056c88046 100644 --- a/chia/plotting/create_plots.py +++ b/chia/plotting/create_plots.py @@ -83,10 +83,9 @@ async def resolve(self) -> PlotKeys: if self.pool_contract_address is not None: raise RuntimeError("Choose one of pool_contract_address and pool_public_key") pool_public_key = G1Element.from_bytes(bytes.fromhex(self.pool_public_key)) - else: - if self.pool_contract_address is None: - # If nothing is set, farms to the provided key (or the first key) - pool_public_key = await self.get_pool_public_key(keychain_proxy) + elif self.pool_contract_address is None: + # If nothing is set, farms to the provided key (or the first key) + pool_public_key = await self.get_pool_public_key(keychain_proxy) self.resolved_keys = PlotKeys(farmer_public_key, pool_public_key, self.pool_contract_address) finally: diff --git a/chia/seeder/crawl_store.py b/chia/seeder/crawl_store.py index dde1a7a18df8..d3e566e27158 100644 --- a/chia/seeder/crawl_store.py +++ b/chia/seeder/crawl_store.py @@ -195,11 +195,10 @@ async def get_peers_to_crawl(self, min_batch_size: int, max_batch_size: int) -> counter += 1 if reliability.ignore_till < now and reliability.ban_till < now: add = True - else: - if reliability.ban_till >= now: - self.banned_peers += 1 - elif reliability.ignore_till >= now: - self.ignored_peers += 1 + elif reliability.ban_till >= now: + self.banned_peers += 1 + elif reliability.ignore_till >= now: + self.ignored_peers 
+= 1 record = self.host_to_records[peer_id] if record.last_try_timestamp == 0 and record.connected_timestamp == 0: add = True diff --git a/chia/server/address_manager.py b/chia/server/address_manager.py index 4a3cb6f9f913..3ebf0250a7cf 100644 --- a/chia/server/address_manager.py +++ b/chia/server/address_manager.py @@ -371,9 +371,8 @@ def _set_new_matrix(self, row: int, col: int, value: int) -> None: if value == -1: if (row, col) in self.used_new_matrix_positions: self.used_new_matrix_positions.remove((row, col)) - else: - if (row, col) not in self.used_new_matrix_positions: - self.used_new_matrix_positions.add((row, col)) + elif (row, col) not in self.used_new_matrix_positions: + self.used_new_matrix_positions.add((row, col)) # Use only this method for modifying tried matrix. def _set_tried_matrix(self, row: int, col: int, value: int) -> None: @@ -381,9 +380,8 @@ def _set_tried_matrix(self, row: int, col: int, value: int) -> None: if value == -1: if (row, col) in self.used_tried_matrix_positions: self.used_tried_matrix_positions.remove((row, col)) - else: - if (row, col) not in self.used_tried_matrix_positions: - self.used_tried_matrix_positions.add((row, col)) + elif (row, col) not in self.used_tried_matrix_positions: + self.used_tried_matrix_positions.add((row, col)) def load_used_table_positions(self) -> None: self.used_new_matrix_positions = set() @@ -587,10 +585,9 @@ def add_to_new_table_(self, addr: TimestampedPeerInfo, source: Optional[PeerInfo info.ref_count += 1 if node_id is not None: self._set_new_matrix(new_bucket, new_bucket_pos, node_id) - else: - if info.ref_count == 0: - if node_id is not None: - self.delete_new_entry_(node_id) + elif info.ref_count == 0: + if node_id is not None: + self.delete_new_entry_(node_id) return is_unique def attempt_(self, addr: PeerInfo, count_failures: bool, timestamp: int) -> None: diff --git a/chia/server/node_discovery.py b/chia/server/node_discovery.py index b34c4bbb0d02..2d5593cca2f2 100644 --- a/chia/server/node_discovery.py +++ b/chia/server/node_discovery.py @@ -228,12 +228,11 @@ async def start_client_async(self, addr: PeerInfo, is_feeler: bool) -> None: if self.server.is_duplicate_or_self_connection(addr): # Mark it as a softer attempt, without counting the failures. 
await self.address_manager.attempt(addr, False) + elif client_connected is True: + await self.address_manager.mark_good(addr) + await self.address_manager.connect(addr) else: - if client_connected is True: - await self.address_manager.mark_good(addr) - await self.address_manager.connect(addr) - else: - await self.address_manager.attempt(addr, True) + await self.address_manager.attempt(addr, True) self.pending_outbound_connections.remove(addr.host) except Exception as e: if addr.host in self.pending_outbound_connections: diff --git a/chia/server/server.py b/chia/server/server.py index 7b64c7877afa..876e79406a36 100644 --- a/chia/server/server.py +++ b/chia/server/server.py @@ -260,9 +260,8 @@ async def garbage_collect_connections_task(self) -> None: if is_crawler is not None: if time.time() - connection.creation_time > 5: to_remove.append(connection) - else: - if time.time() - connection.last_message_time > 1800: - to_remove.append(connection) + elif time.time() - connection.last_message_time > 1800: + to_remove.append(connection) for connection in to_remove: self.log.debug(f"Garbage collecting connection {connection.peer_info.host} due to inactivity") if connection.closed: diff --git a/chia/simulator/block_tools.py b/chia/simulator/block_tools.py index e51143c08e16..79678a4924fe 100644 --- a/chia/simulator/block_tools.py +++ b/chia/simulator/block_tools.py @@ -898,11 +898,10 @@ def get_consecutive_blocks( # address, so continue until a proof of space tied to a pk is found continue pool_target = PoolTarget(proof_of_space.pool_contract_puzzle_hash, uint32(0)) + elif pool_reward_puzzle_hash is not None: + pool_target = PoolTarget(pool_reward_puzzle_hash, uint32(0)) else: - if pool_reward_puzzle_hash is not None: - pool_target = PoolTarget(pool_reward_puzzle_hash, uint32(0)) - else: - pool_target = PoolTarget(self.pool_ph, uint32(0)) + pool_target = PoolTarget(self.pool_ph, uint32(0)) new_gen = self.setup_new_gen( tx_block_heights, @@ -1193,11 +1192,10 @@ def get_consecutive_blocks( # address, so continue until a proof of space tied to a pk is found continue pool_target = PoolTarget(proof_of_space.pool_contract_puzzle_hash, uint32(0)) + elif pool_reward_puzzle_hash is not None: + pool_target = PoolTarget(pool_reward_puzzle_hash, uint32(0)) else: - if pool_reward_puzzle_hash is not None: - pool_target = PoolTarget(pool_reward_puzzle_hash, uint32(0)) - else: - pool_target = PoolTarget(self.pool_ph, uint32(0)) + pool_target = PoolTarget(self.pool_ph, uint32(0)) new_gen = self.setup_new_gen( tx_block_heights, diff --git a/chia/timelord/timelord.py b/chia/timelord/timelord.py index f9f490cb9f55..aa559a32f2c5 100644 --- a/chia/timelord/timelord.py +++ b/chia/timelord/timelord.py @@ -160,20 +160,19 @@ async def manage(self) -> AsyncIterator[None]: slow_bluebox = self.config.get("slow_bluebox", False) if not self.bluebox_mode: self.main_loop = create_referenced_task(self._manage_chains()) + elif os.name == "nt" or slow_bluebox: + # `vdf_client` doesn't build on windows, use `prove()` from chiavdf. + workers = self.config.get("slow_bluebox_process_count", 1) + self._executor_shutdown_tempfile = _create_shutdown_file() + self.bluebox_pool = ThreadPoolExecutor( + max_workers=workers, + thread_name_prefix="blue-box-", + ) + self.main_loop = create_referenced_task( + self._start_manage_discriminant_queue_sanitizer_slow(self.bluebox_pool, workers) + ) else: - if os.name == "nt" or slow_bluebox: - # `vdf_client` doesn't build on windows, use `prove()` from chiavdf. 
- workers = self.config.get("slow_bluebox_process_count", 1) - self._executor_shutdown_tempfile = _create_shutdown_file() - self.bluebox_pool = ThreadPoolExecutor( - max_workers=workers, - thread_name_prefix="blue-box-", - ) - self.main_loop = create_referenced_task( - self._start_manage_discriminant_queue_sanitizer_slow(self.bluebox_pool, workers) - ) - else: - self.main_loop = create_referenced_task(self._manage_discriminant_queue_sanitizer()) + self.main_loop = create_referenced_task(self._manage_discriminant_queue_sanitizer()) log.info(f"Started timelord, listening on port {self.get_vdf_server_port()}") try: yield diff --git a/chia/util/db_wrapper.py b/chia/util/db_wrapper.py index da19b0db961f..2591eb637865 100644 --- a/chia/util/db_wrapper.py +++ b/chia/util/db_wrapper.py @@ -124,15 +124,14 @@ def get_host_parameter_limit() -> int: limit_number = sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER host_parameter_limit = connection.getlimit(limit_number) - else: - # guessing based on defaults, seems you can't query + # guessing based on defaults, seems you can't query - # https://www.sqlite.org/changes.html#version_3_32_0 - # Increase the default upper bound on the number of parameters from 999 to 32766. - if sqlite3.sqlite_version_info >= (3, 32, 0): - host_parameter_limit = 32766 - else: - host_parameter_limit = 999 + # https://www.sqlite.org/changes.html#version_3_32_0 + # Increase the default upper bound on the number of parameters from 999 to 32766. + elif sqlite3.sqlite_version_info >= (3, 32, 0): + host_parameter_limit = 32766 + else: + host_parameter_limit = 999 return host_parameter_limit diff --git a/chia/wallet/puzzle_drivers.py b/chia/wallet/puzzle_drivers.py index 746c304f9efc..f99f60de7322 100644 --- a/chia/wallet/puzzle_drivers.py +++ b/chia/wallet/puzzle_drivers.py @@ -65,15 +65,14 @@ def check_type(self, types: list[str]) -> bool: return True else: return False - else: - if self.type() == types[0]: - types.pop(0) - if self.also(): - return self.also().check_type(types) # type: ignore - else: - return self.check_type(types) + elif self.type() == types[0]: + types.pop(0) + if self.also(): + return self.also().check_type(types) # type: ignore else: - return False + return self.check_type(types) + else: + return False @dataclass(frozen=True) diff --git a/chia/wallet/vc_wallet/vc_wallet.py b/chia/wallet/vc_wallet/vc_wallet.py index e56578484afe..bf37b2bd57a2 100644 --- a/chia/wallet/vc_wallet/vc_wallet.py +++ b/chia/wallet/vc_wallet/vc_wallet.py @@ -455,12 +455,10 @@ async def add_vc_authorization( crcat_spends.append(crcat_spend) if spend in offer._bundle.coin_spends: spends_to_fix[spend.coin.name()] = spend - else: - if spend in offer._bundle.coin_spends: # pragma: no cover - other_spends.append(spend) - else: - if spend in offer._bundle.coin_spends: + elif spend in offer._bundle.coin_spends: # pragma: no cover other_spends.append(spend) + elif spend in offer._bundle.coin_spends: + other_spends.append(spend) # Figure out what VC announcements are needed announcements_to_make: dict[bytes32, list[CreatePuzzleAnnouncement]] = {} diff --git a/chia/wallet/wallet_node.py b/chia/wallet/wallet_node.py index 88dee1ef39b1..99a87c50b57a 100644 --- a/chia/wallet/wallet_node.py +++ b/chia/wallet/wallet_node.py @@ -911,14 +911,13 @@ async def add_states_from_peer( ): # only one peer told us to rollback so only clear for that peer await self.perform_atomic_rollback(fork_height, cache=cache) - else: - if fork_height is not None: - # only one peer told us to rollback so only clear for that peer - 
cache.clear_after_height(fork_height) - self.log.info(f"clear_after_height {fork_height} for peer {peer}") - if not trusted: - # Rollback race_cache not in clear_after_height to avoid applying rollbacks from new peak processing - cache.rollback_race_cache(fork_height=fork_height) + elif fork_height is not None: + # only one peer told us to rollback so only clear for that peer + cache.clear_after_height(fork_height) + self.log.info(f"clear_after_height {fork_height} for peer {peer}") + if not trusted: + # Rollback race_cache not in clear_after_height to avoid applying rollbacks from new peak processing + cache.rollback_race_cache(fork_height=fork_height) all_tasks: list[asyncio.Task[None]] = [] target_concurrent_tasks: int = 30 @@ -989,18 +988,17 @@ async def validate_and_add(inner_states: list[CoinState], inner_idx_start: int) ) if not await self.wallet_state_manager.add_coin_states(batch.entries, peer, fork_height): return False + elif fork_height is not None: + cache.add_states_to_race_cache(batch.entries) else: - if fork_height is not None: - cache.add_states_to_race_cache(batch.entries) - else: - while len(all_tasks) >= target_concurrent_tasks: - all_tasks = [task for task in all_tasks if not task.done()] - await asyncio.sleep(0.1) - if self._shut_down: - self.log.info("Terminating receipt and validation due to shut down request") - await asyncio.gather(*all_tasks) - return False - all_tasks.append(create_referenced_task(validate_and_add(batch.entries, idx))) + while len(all_tasks) >= target_concurrent_tasks: + all_tasks = [task for task in all_tasks if not task.done()] + await asyncio.sleep(0.1) + if self._shut_down: + self.log.info("Terminating receipt and validation due to shut down request") + await asyncio.gather(*all_tasks) + return False + all_tasks.append(create_referenced_task(validate_and_add(batch.entries, idx))) idx += len(batch.entries) still_connected = self._server is not None and peer.peer_node_id in self.server.all_connections @@ -1158,9 +1156,8 @@ async def new_peak_wallet(self, new_peak: NewPeakWallet, peer: WSChiaConnection) await self.new_peak_from_trusted( new_peak_hb, latest_timestamp, peer, new_peak.fork_point_with_previous_peak ) - else: - if not await self.new_peak_from_untrusted(new_peak_hb, peer): - return + elif not await self.new_peak_from_untrusted(new_peak_hb, peer): + return # todo why do we call this if there was an exception / the sync is not finished async with self.wallet_state_manager.lock: @@ -1272,10 +1269,9 @@ async def sync_from_untrusted_close_to_peak(self, new_peak_hb: HeaderBlock, peer ) if success: self.synced_peers.add(peer.peer_node_id) - else: - if peak_hb is not None and new_peak_hb.weight <= peak_hb.weight: - # Don't process blocks at the same weight - return False + elif peak_hb is not None and new_peak_hb.weight <= peak_hb.weight: + # Don't process blocks at the same weight + return False # For every block, we need to apply the cache from race_cache for potential_height in range(backtrack_fork_height + 1, new_peak_hb.height + 1): @@ -1663,10 +1659,9 @@ async def validate_block_inclusion( if not prev_block_rc_hash == reversed_slots[-1].reward_chain.end_of_slot_vdf.challenge: self.log.error("Failed validation 7") return False - else: - if not prev_block_rc_hash == reward_chain_hash: - self.log.error("Failed validation 8") - return False + elif not prev_block_rc_hash == reward_chain_hash: + self.log.error("Failed validation 8") + return False blocks_to_cache.append((reward_chain_hash, en_block.height)) agg_sig: G2Element = 
AugSchemeMPL.aggregate([sig for (_, _, sig) in pk_m_sig]) diff --git a/chia/wallet/wallet_transaction_store.py b/chia/wallet/wallet_transaction_store.py index 9e1b764306e2..30c0e8c13749 100644 --- a/chia/wallet/wallet_transaction_store.py +++ b/chia/wallet/wallet_transaction_store.py @@ -262,10 +262,9 @@ async def get_not_sent(self, *, include_accepted_txs=False) -> list[TransactionR if time_submitted < current_time - (60 * 10): records.append(record) self.tx_submitted[record.name] = current_time, 1 - else: - if count < minimum_send_attempts: - records.append(record) - self.tx_submitted[record.name] = time_submitted, (count + 1) + elif count < minimum_send_attempts: + records.append(record) + self.tx_submitted[record.name] = time_submitted, (count + 1) else: records.append(record) self.tx_submitted[record.name] = current_time, 1 diff --git a/ruff.toml b/ruff.toml index fa1163ab8375..a62e25000cb1 100644 --- a/ruff.toml +++ b/ruff.toml @@ -61,7 +61,6 @@ ignore = [ # Should probably fix these "PLR6301", # no-self-use "PLR2004", # magic-value-comparison - "PLR5501", # collapsible-else-if # Pylint warning "PLW1641", # eq-without-hash
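
For context, every hunk above applies the same mechanical transformation: the ruff.toml change stops ignoring PLR5501 (collapsible-else-if), the Ruff rule that flags an else branch whose only statement is a nested if, and the fix collapses that pair into an elif. The sketch below is a made-up, minimal illustration of the pattern under that assumption; the function and variable names are invented for the example and do not appear anywhere in this diff.

def classify_before(value: int) -> str:
    # Flagged by PLR5501: the else branch does nothing but wrap another if/else.
    if value > 0:
        return "positive"
    else:
        if value == 0:
            return "zero"
        else:
            return "negative"


def classify_after(value: int) -> str:
    # The collapsed form used throughout the diff: "else: if ..." becomes "elif",
    # removing one indentation level without changing behavior.
    if value > 0:
        return "positive"
    elif value == 0:
        return "zero"
    else:
        return "negative"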