diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3f0f648e3..f93c7ee1f 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -4,7 +4,7 @@ on: pull_request: push: branches: - - main + - '**' env: PYTHON_VERSION: "3.12" diff --git a/resources/scenarios/commander.py b/resources/scenarios/commander.py index 5d8badf9e..be146492d 100644 --- a/resources/scenarios/commander.py +++ b/resources/scenarios/commander.py @@ -11,6 +11,7 @@ import sys import tempfile import threading +import types from time import sleep from kubernetes import client, config @@ -240,6 +241,10 @@ def setup(self): self.lns: dict[str, LNNode] = {} self.channels = WARNET["channels"] + self.binary_paths = types.SimpleNamespace() + self.binary_paths.bitcoin_cmd = None + self.binary_paths.bitcoind = None + for i, tank in enumerate(WARNET["tanks"]): self.log.info( f"Adding TestNode #{i} from pod {tank['tank']} with IP {tank['rpc_host']}" @@ -251,13 +256,12 @@ def setup(self): rpchost=tank["rpc_host"], timewait=60, timeout_factor=self.options.timeout_factor, - bitcoind=None, - bitcoin_cli=None, + binaries=self.get_binaries(), cwd=self.options.tmpdir, coverage_dir=self.options.coveragedir, ) node.tank = tank["tank"] - node.rpc = get_rpc_proxy( + node._rpc = get_rpc_proxy( f"http://{tank['rpc_user']}:{tank['rpc_password']}@{tank['rpc_host']}:{tank['rpc_port']}", i, timeout=60, @@ -317,7 +321,7 @@ def setup(self): except Exception as e: self.log.info(f"Failed to get signet network magic bytes from {node.tank}: {e}") - def parse_args(self): + def parse_args(self, _): # Only print "outer" args from parent class when using --help help_parser = argparse.ArgumentParser(usage="%(prog)s [options]") self.add_options(help_parser) @@ -450,6 +454,12 @@ def parse_args(self): action="store_true", help="use BIP324 v2 connections between all nodes by default", ) + parser.add_argument( + "--test_methods", + dest="test_methods", + nargs="*", + help="Run specified test methods sequentially instead of the full test. 
Use only for methods that do not depend on any context set up in run_test or other methods.", + ) self.add_options(parser) # Running TestShell in a Jupyter notebook causes an additional -f argument @@ -565,7 +575,7 @@ def connect_nodes(self, a, b, *, peer_advertises_v2=None, wait_for_connect: bool def generatetoaddress(self, generator, n, addr, sync_fun=None, **kwargs): if generator.chain == "regtest": - blocks = generator.generatetoaddress(n, addr, invalid_call=False, **kwargs) + blocks = generator.generatetoaddress(n, addr, called_by_framework=True, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks if generator.chain == "signet": diff --git a/resources/scenarios/ln_init.py b/resources/scenarios/ln_init.py index 42c6cbcc4..31445d442 100644 --- a/resources/scenarios/ln_init.py +++ b/resources/scenarios/ln_init.py @@ -511,7 +511,7 @@ def matching_graph(self, expected, ln): def main(): - LNInit().main() + LNInit("").main() if __name__ == "__main__": diff --git a/resources/scenarios/miner_std.py b/resources/scenarios/miner_std.py index 3fa06c7d3..d39f83913 100755 --- a/resources/scenarios/miner_std.py +++ b/resources/scenarios/miner_std.py @@ -75,7 +75,7 @@ def run_test(self): def main(): - MinerStd().main() + MinerStd("").main() if __name__ == "__main__": diff --git a/resources/scenarios/reconnaissance.py b/resources/scenarios/reconnaissance.py index 8c3f683cb..df14bbe2b 100755 --- a/resources/scenarios/reconnaissance.py +++ b/resources/scenarios/reconnaissance.py @@ -81,7 +81,7 @@ def run_test(self): def main(): - Reconnaissance().main() + Reconnaissance("").main() if __name__ == "__main__": diff --git a/resources/scenarios/signet_miner.py b/resources/scenarios/signet_miner.py index e4375515b..f3f68a7ed 100644 --- a/resources/scenarios/signet_miner.py +++ b/resources/scenarios/signet_miner.py @@ -563,7 +563,7 @@ def get_args(parser): return args def main(): - SignetMinerScenario().main() + SignetMinerScenario("").main() if __name__ == "__main__": main() diff --git a/resources/scenarios/test_framework/address.py b/resources/scenarios/test_framework/address.py index 5b2e3289a..2c754e35a 100644 --- a/resources/scenarios/test_framework/address.py +++ b/resources/scenarios/test_framework/address.py @@ -47,18 +47,20 @@ class AddressType(enum.Enum): b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' -def create_deterministic_address_bcrt1_p2tr_op_true(): +def create_deterministic_address_bcrt1_p2tr_op_true(explicit_internal_key=None): """ Generates a deterministic bech32m address (segwit v1 output) that can be spent with a witness stack of OP_TRUE and the control block with internal public key (script-path spending). - Returns a tuple with the generated address and the internal key. + Returns a tuple with the generated address and the TaprootInfo object. 
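+    Example (illustrative, not part of the upstream docstring): address, taproot_info = create_deterministic_address_bcrt1_p2tr_op_true() returns the fixed regtest address together with the TaprootInfo whose output_pubkey it encodes; passing explicit_internal_key yields a different address and skips the regtest address assertion.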
""" - internal_key = (1).to_bytes(32, 'big') - address = output_key_to_p2tr(taproot_construct(internal_key, [(None, CScript([OP_TRUE]))]).output_pubkey) - assert_equal(address, 'bcrt1p9yfmy5h72durp7zrhlw9lf7jpwjgvwdg0jr0lqmmjtgg83266lqsekaqka') - return (address, internal_key) + internal_key = explicit_internal_key or (1).to_bytes(32, 'big') + taproot_info = taproot_construct(internal_key, [("only-path", CScript([OP_TRUE]))]) + address = output_key_to_p2tr(taproot_info.output_pubkey) + if explicit_internal_key is None: + assert_equal(address, 'bcrt1p9yfmy5h72durp7zrhlw9lf7jpwjgvwdg0jr0lqmmjtgg83266lqsekaqka') + return (address, taproot_info) def byte_to_base58(b, version): @@ -153,6 +155,9 @@ def output_key_to_p2tr(key, main=False): assert len(key) == 32 return program_to_witness(1, key, main) +def p2a(main=False): + return program_to_witness(1, "4e73", main) + def check_key(key): if (type(key) is str): key = bytes.fromhex(key) # Assuming this is hex string diff --git a/resources/scenarios/test_framework/authproxy.py b/resources/scenarios/test_framework/authproxy.py index 03042877b..9b2fc0f7f 100644 --- a/resources/scenarios/test_framework/authproxy.py +++ b/resources/scenarios/test_framework/authproxy.py @@ -26,7 +26,7 @@ - HTTP connections persist for the life of the AuthServiceProxy object (if server supports HTTP/1.1) -- sends protocol 'version', per JSON-RPC 1.1 +- sends "jsonrpc":"2.0", per JSON-RPC 2.0 - sends proper, incrementing 'id' - sends Basic HTTP authentication headers - parses all JSON numbers that look like floats as Decimal @@ -75,6 +75,7 @@ def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connect self.__service_url = service_url self._service_name = service_name self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests + self.reuse_http_connections = True self.__url = urllib.parse.urlparse(service_url) user = None if self.__url.username is None else self.__url.username.encode('utf8') passwd = None if self.__url.password is None else self.__url.password.encode('utf8') @@ -92,6 +93,8 @@ def __getattr__(self, name): raise AttributeError if self._service_name is not None: name = "%s.%s" % (self._service_name, name) + if not self.reuse_http_connections: + self._set_conn() return AuthServiceProxy(self.__service_url, name, connection=self.__conn) def _request(self, method, path, postdata): @@ -102,47 +105,67 @@ def _request(self, method, path, postdata): 'User-Agent': USER_AGENT, 'Authorization': self.__auth_header, 'Content-type': 'application/json'} + if not self.reuse_http_connections: + self._set_conn() self.__conn.request(method, path, postdata, headers) return self._get_response() + def _json_dumps(self, obj): + return json.dumps(obj, default=serialization_fallback, ensure_ascii=self.ensure_ascii) + def get_request(self, *args, **argsn): AuthServiceProxy.__id_count += 1 - log.debug("-{}-> {} {}".format( + log.debug("-{}-> {} {} {}".format( AuthServiceProxy.__id_count, self._service_name, - json.dumps(args or argsn, default=serialization_fallback, ensure_ascii=self.ensure_ascii), + self._json_dumps(args), + self._json_dumps(argsn), )) + if args and argsn: params = dict(args=args, **argsn) else: params = args or argsn - return {'version': '1.1', + return {'jsonrpc': '2.0', 'method': self._service_name, 'params': params, 'id': AuthServiceProxy.__id_count} def __call__(self, *args, **argsn): - postdata = json.dumps(self.get_request(*args, **argsn), default=serialization_fallback, ensure_ascii=self.ensure_ascii) + postdata = 
self._json_dumps(self.get_request(*args, **argsn)) response, status = self._request('POST', self.__url.path, postdata.encode('utf-8')) - if response['error'] is not None: - raise JSONRPCException(response['error'], status) - elif 'result' not in response: - raise JSONRPCException({ - 'code': -343, 'message': 'missing JSON-RPC result'}, status) - elif status != HTTPStatus.OK: - raise JSONRPCException({ - 'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status) + # For backwards compatibility tests, accept JSON RPC 1.1 responses + if 'jsonrpc' not in response: + if response['error'] is not None: + raise JSONRPCException(response['error'], status) + elif 'result' not in response: + raise JSONRPCException({ + 'code': -343, 'message': 'missing JSON-RPC result'}, status) + elif status != HTTPStatus.OK: + raise JSONRPCException({ + 'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status) + else: + return response['result'] else: + assert response['jsonrpc'] == '2.0' + if status != HTTPStatus.OK: + raise JSONRPCException({ + 'code': -342, 'message': 'non-200 HTTP status code'}, status) + if 'error' in response: + raise JSONRPCException(response['error'], status) + elif 'result' not in response: + raise JSONRPCException({ + 'code': -343, 'message': 'missing JSON-RPC 2.0 result and error'}, status) return response['result'] def batch(self, rpc_call_list): - postdata = json.dumps(list(rpc_call_list), default=serialization_fallback, ensure_ascii=self.ensure_ascii) + postdata = self._json_dumps(list(rpc_call_list)) log.debug("--> " + postdata) response, status = self._request('POST', self.__url.path, postdata.encode('utf-8')) if status != HTTPStatus.OK: raise JSONRPCException({ - 'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status) + 'code': -342, 'message': 'non-200 HTTP status code'}, status) return response def _get_response(self): @@ -160,17 +183,31 @@ def _get_response(self): raise JSONRPCException({ 'code': -342, 'message': 'missing HTTP response from server'}) + # Check for no-content HTTP status code, which can be returned when an + # RPC client requests a JSON-RPC 2.0 "notification" with no response. + # Currently this is only possible if clients call the _request() method + # directly to send a raw request. 
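+        # (Per JSON-RPC 2.0, a request object without an "id" member is a "notification",
+        # e.g. {"jsonrpc": "2.0", "method": "ping"}; a server answering it with HTTP 204
+        # and an empty body is mapped to (None, 204) below.)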
+ if http_response.status == HTTPStatus.NO_CONTENT: + if len(http_response.read()) != 0: + raise JSONRPCException({'code': -342, 'message': 'Content received with NO CONTENT status code'}) + return None, http_response.status + content_type = http_response.getheader('Content-Type') if content_type != 'application/json': raise JSONRPCException( {'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)}, http_response.status) - responsedata = http_response.read().decode('utf8') + data = http_response.read() + try: + responsedata = data.decode('utf8') + except UnicodeDecodeError as e: + raise JSONRPCException({ + 'code': -342, 'message': f'Cannot decode response in utf8 format, content: {data}, exception: {e}'}) response = json.loads(responsedata, parse_float=decimal.Decimal) elapsed = time.time() - req_start_time if "error" in response and response["error"] is None: - log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=serialization_fallback, ensure_ascii=self.ensure_ascii))) + log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, self._json_dumps(response["result"]))) else: log.debug("<-- [%.6f] %s" % (elapsed, responsedata)) return response, http_response.status diff --git a/resources/scenarios/test_framework/bdb.py b/resources/scenarios/test_framework/bdb.py deleted file mode 100644 index 41886c09f..000000000 --- a/resources/scenarios/test_framework/bdb.py +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2020-2021 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -""" -Utilities for working directly with the wallet's BDB database file - -This is specific to the configuration of BDB used in this project: - - pagesize: 4096 bytes - - Outer database contains single subdatabase named 'main' - - btree - - btree leaf pages - -Each key-value pair is two entries in a btree leaf. The first is the key, the one that follows -is the value. And so on. Note that the entry data is itself not in the correct order. Instead -entry offsets are stored in the correct order and those offsets are needed to then retrieve -the data itself. - -Page format can be found in BDB source code dbinc/db_page.h -This only implements the deserialization of btree metadata pages and normal btree pages. Overflow -pages are not implemented but may be needed in the future if dealing with wallets with large -transactions. - -`db_dump -da wallet.dat` is useful to see the data in a wallet.dat BDB file -""" - -import struct - -# Important constants -PAGESIZE = 4096 -OUTER_META_PAGE = 0 -INNER_META_PAGE = 2 - -# Page type values -BTREE_INTERNAL = 3 -BTREE_LEAF = 5 -BTREE_META = 9 - -# Some magic numbers for sanity checking -BTREE_MAGIC = 0x053162 -DB_VERSION = 9 - -# Deserializes a leaf page into a dict. -# Btree internal pages have the same header, for those, return None. 
-# For the btree leaf pages, deserialize them and put all the data into a dict -def dump_leaf_page(data): - page_info = {} - page_header = data[0:26] - _, pgno, prev_pgno, next_pgno, entries, hf_offset, level, pg_type = struct.unpack('QIIIHHBB', page_header) - page_info['pgno'] = pgno - page_info['prev_pgno'] = prev_pgno - page_info['next_pgno'] = next_pgno - page_info['hf_offset'] = hf_offset - page_info['level'] = level - page_info['pg_type'] = pg_type - page_info['entry_offsets'] = struct.unpack('{}H'.format(entries), data[26:26 + entries * 2]) - page_info['entries'] = [] - - if pg_type == BTREE_INTERNAL: - # Skip internal pages. These are the internal nodes of the btree and don't contain anything relevant to us - return None - - assert pg_type == BTREE_LEAF, 'A non-btree leaf page has been encountered while dumping leaves' - - for i in range(0, entries): - offset = page_info['entry_offsets'][i] - entry = {'offset': offset} - page_data_header = data[offset:offset + 3] - e_len, pg_type = struct.unpack('HB', page_data_header) - entry['len'] = e_len - entry['pg_type'] = pg_type - entry['data'] = data[offset + 3:offset + 3 + e_len] - page_info['entries'].append(entry) - - return page_info - -# Deserializes a btree metadata page into a dict. -# Does a simple sanity check on the magic value, type, and version -def dump_meta_page(page): - # metadata page - # general metadata - metadata = {} - meta_page = page[0:72] - _, pgno, magic, version, pagesize, encrypt_alg, pg_type, metaflags, _, free, last_pgno, nparts, key_count, record_count, flags, uid = struct.unpack('QIIIIBBBBIIIIII20s', meta_page) - metadata['pgno'] = pgno - metadata['magic'] = magic - metadata['version'] = version - metadata['pagesize'] = pagesize - metadata['encrypt_alg'] = encrypt_alg - metadata['pg_type'] = pg_type - metadata['metaflags'] = metaflags - metadata['free'] = free - metadata['last_pgno'] = last_pgno - metadata['nparts'] = nparts - metadata['key_count'] = key_count - metadata['record_count'] = record_count - metadata['flags'] = flags - metadata['uid'] = uid.hex().encode() - - assert magic == BTREE_MAGIC, 'bdb magic does not match bdb btree magic' - assert pg_type == BTREE_META, 'Metadata page is not a btree metadata page' - assert version == DB_VERSION, 'Database too new' - - # btree metadata - btree_meta_page = page[72:512] - _, minkey, re_len, re_pad, root, _, crypto_magic, _, iv, chksum = struct.unpack('IIIII368sI12s16s20s', btree_meta_page) - metadata['minkey'] = minkey - metadata['re_len'] = re_len - metadata['re_pad'] = re_pad - metadata['root'] = root - metadata['crypto_magic'] = crypto_magic - metadata['iv'] = iv.hex().encode() - metadata['chksum'] = chksum.hex().encode() - - return metadata - -# Given the dict from dump_leaf_page, get the key-value pairs and put them into a dict -def extract_kv_pairs(page_data): - out = {} - last_key = None - for i, entry in enumerate(page_data['entries']): - # By virtue of these all being pairs, even number entries are keys, and odd are values - if i % 2 == 0: - out[entry['data']] = b'' - last_key = entry['data'] - else: - out[last_key] = entry['data'] - return out - -# Extract the key-value pairs of the BDB file given in filename -def dump_bdb_kv(filename): - # Read in the BDB file and start deserializing it - pages = [] - with open(filename, 'rb') as f: - data = f.read(PAGESIZE) - while len(data) > 0: - pages.append(data) - data = f.read(PAGESIZE) - - # Sanity check the meta pages - dump_meta_page(pages[OUTER_META_PAGE]) - dump_meta_page(pages[INNER_META_PAGE]) - - # 
Fetch the kv pairs from the leaf pages - kv = {} - for i in range(3, len(pages)): - info = dump_leaf_page(pages[i]) - if info is not None: - info_kv = extract_kv_pairs(info) - kv = {**kv, **info_kv} - return kv diff --git a/resources/scenarios/test_framework/bip340_test_vectors.csv b/resources/scenarios/test_framework/bip340_test_vectors.csv index e068322de..aa317a3b3 100644 --- a/resources/scenarios/test_framework/bip340_test_vectors.csv +++ b/resources/scenarios/test_framework/bip340_test_vectors.csv @@ -14,3 +14,7 @@ index,secret key,public key,aux_rand,message,signature,verification result,comme 12,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F69E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B,FALSE,sig[0:32] is equal to field size 13,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E177769FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141,FALSE,sig[32:64] is equal to curve order 14,,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E17776969E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B,FALSE,public key is not a valid X coordinate because it exceeds the field size +15,0340034003400340034003400340034003400340034003400340034003400340,778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117,0000000000000000000000000000000000000000000000000000000000000000,,71535DB165ECD9FBBC046E5FFAEA61186BB6AD436732FCCC25291A55895464CF6069CE26BF03466228F19A3A62DB8A649F2D560FAC652827D1AF0574E427AB63,TRUE,message of size 0 (added 2022-12) +16,0340034003400340034003400340034003400340034003400340034003400340,778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117,0000000000000000000000000000000000000000000000000000000000000000,11,08A20A0AFEF64124649232E0693C583AB1B9934AE63B4C3511F3AE1134C6A303EA3173BFEA6683BD101FA5AA5DBC1996FE7CACFC5A577D33EC14564CEC2BACBF,TRUE,message of size 1 (added 2022-12) +17,0340034003400340034003400340034003400340034003400340034003400340,778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117,0000000000000000000000000000000000000000000000000000000000000000,0102030405060708090A0B0C0D0E0F1011,5130F39A4059B43BC7CAC09A19ECE52B5D8699D1A71E3C52DA9AFDB6B50AC370C4A482B77BF960F8681540E25B6771ECE1E5A37FD80E5A51897C5566A97EA5A5,TRUE,message of size 17 (added 2022-12) +18,0340034003400340034003400340034003400340034003400340034003400340,778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117,0000000000000000000000000000000000000000000000000000000000000000,99999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999,403B12B0D8555A344175EA7EC746566303321E5DBFA8BE6F091635163ECA79A8585ED3E3170807E7C03B720FC54C7B23897FCBA0E9D0B4A06894CFD249F22367,TRUE,message of size 100 (added 2022-12) diff --git a/resources/scenarios/test_framework/blockfilter.py b/resources/scenarios/test_framework/blockfilter.py index a30e37ea5..a16aa3d34 100644 --- a/resources/scenarios/test_framework/blockfilter.py +++ b/resources/scenarios/test_framework/blockfilter.py @@ -4,7 +4,7 @@ # file 
COPYING or http://www.opensource.org/licenses/mit-license.php. """Helper routines relevant for compact block filters (BIP158). """ -from .siphash import siphash +from .crypto.siphash import siphash def bip158_basic_element_hash(script_pub_key, N, block_hash): @@ -29,7 +29,7 @@ def bip158_basic_element_hash(script_pub_key, N, block_hash): def bip158_relevant_scriptpubkeys(node, block_hash): - """ Determines the basic filter relvant scriptPubKeys as defined in BIP158: + """ Determines the basic filter relevant scriptPubKeys as defined in BIP158: 'A basic filter MUST contain exactly the following items for each transaction in a block: - The previous output script (the script being spent) for each input, except for diff --git a/resources/scenarios/test_framework/blocktools.py b/resources/scenarios/test_framework/blocktools.py index cfd923bab..eb1d3b054 100644 --- a/resources/scenarios/test_framework/blocktools.py +++ b/resources/scenarios/test_framework/blocktools.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2015-2022 The Bitcoin Core developers +# Copyright (c) 2015-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Utilities for manipulating blocks and transactions.""" @@ -27,13 +27,15 @@ hash256, ser_uint256, tx_from_hex, - uint256_from_str, + uint256_from_compact, + WITNESS_SCALE_FACTOR, + MAX_SEQUENCE_NONFINAL, ) from .script import ( CScript, CScriptNum, CScriptOp, - OP_1, + OP_0, OP_RETURN, OP_TRUE, ) @@ -45,9 +47,10 @@ ) from .util import assert_equal -WITNESS_SCALE_FACTOR = 4 MAX_BLOCK_SIGOPS = 20000 MAX_BLOCK_SIGOPS_WEIGHT = MAX_BLOCK_SIGOPS * WITNESS_SCALE_FACTOR +MAX_STANDARD_TX_SIGOPS = 4000 +MAX_STANDARD_TX_WEIGHT = 400000 # Genesis block time (regtest) TIME_GENESIS_BLOCK = 1296688602 @@ -64,6 +67,28 @@ VERSIONBITS_LAST_OLD_BLOCK_VERSION = 4 MIN_BLOCKS_TO_KEEP = 288 +REGTEST_RETARGET_PERIOD = 150 + +REGTEST_N_BITS = 0x207fffff # difficulty retargeting is disabled in REGTEST chainparams +REGTEST_TARGET = 0x7fffff0000000000000000000000000000000000000000000000000000000000 +assert_equal(uint256_from_compact(REGTEST_N_BITS), REGTEST_TARGET) + +DIFF_1_N_BITS = 0x1d00ffff +DIFF_1_TARGET = 0x00000000ffff0000000000000000000000000000000000000000000000000000 +assert_equal(uint256_from_compact(DIFF_1_N_BITS), DIFF_1_TARGET) + +DIFF_4_N_BITS = 0x1c3fffc0 +DIFF_4_TARGET = int(DIFF_1_TARGET / 4) +assert_equal(uint256_from_compact(DIFF_4_N_BITS), DIFF_4_TARGET) + +# From BIP325 +SIGNET_HEADER = b"\xec\xc7\xda\xa2" + +def nbits_str(nbits): + return f"{nbits:08x}" + +def target_str(target): + return f"{target:064x}" def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl=None, txlist=None): """Create a block (with regtest difficulty).""" @@ -73,25 +98,24 @@ block.nVersion = version or tmpl.get('version') or VERSIONBITS_LAST_OLD_BLOCK_VERSION block.nTime = ntime or tmpl.get('curtime') or int(time.time() + 600) block.hashPrevBlock = hashprev or int(tmpl['previousblockhash'], 0x10) - if tmpl and not tmpl.get('bits') is None: + if tmpl and tmpl.get('bits') is not None: block.nBits = struct.unpack('>I', bytes.fromhex(tmpl['bits']))[0] else: - block.nBits = 0x207fffff # difficulty retargeting is disabled in REGTEST chainparams + block.nBits = REGTEST_N_BITS if coinbase is None: coinbase = create_coinbase(height=tmpl['height']) block.vtx.append(coinbase) if 
txlist: for tx in txlist: - if not hasattr(tx, 'calc_sha256'): + if type(tx) is str: tx = tx_from_hex(tx) block.vtx.append(tx) block.hashMerkleRoot = block.calc_merkle_root() - block.calc_sha256() return block def get_witness_script(witness_root, witness_nonce): - witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root) + ser_uint256(witness_nonce))) - output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment) + witness_commitment = hash256(ser_uint256(witness_root) + ser_uint256(witness_nonce)) + output_data = WITNESS_COMMITMENT_HEADER + witness_commitment return CScript([OP_RETURN, output_data]) def add_witness_commitment(block, nonce=0): @@ -109,20 +133,18 @@ def add_witness_commitment(block, nonce=0): # witness commitment is the last OP_RETURN output in coinbase block.vtx[0].vout.append(CTxOut(0, get_witness_script(witness_root, witness_nonce))) - block.vtx[0].rehash() block.hashMerkleRoot = block.calc_merkle_root() - block.rehash() def script_BIP34_coinbase_height(height): if height <= 16: res = CScriptOp.encode_op_n(height) - # Append dummy to increase scriptSig size above 2 (see bad-cb-length consensus rule) - return CScript([res, OP_1]) + # Append dummy to increase scriptSig size to 2 (see bad-cb-length consensus rule) + return CScript([res, OP_0]) return CScript([CScriptNum(height)]) -def create_coinbase(height, pubkey=None, *, script_pubkey=None, extra_output_script=None, fees=0, nValue=50): +def create_coinbase(height, pubkey=None, *, script_pubkey=None, extra_output_script=None, fees=0, nValue=50, halving_period=REGTEST_RETARGET_PERIOD): """Create a coinbase transaction. If pubkey is passed in, the coinbase output will be a P2PK output; @@ -131,11 +153,12 @@ def create_coinbase(height, pubkey=None, *, script_pubkey=None, extra_output_scr If extra_output_script is given, make a 0-value output to that script. This is useful to pad block weight/sigops as needed. """ coinbase = CTransaction() - coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), script_BIP34_coinbase_height(height), SEQUENCE_FINAL)) + coinbase.nLockTime = height - 1 + coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), script_BIP34_coinbase_height(height), MAX_SEQUENCE_NONFINAL)) coinbaseoutput = CTxOut() coinbaseoutput.nValue = nValue * COIN if nValue == 50: - halvings = int(height / 150) # regtest + halvings = int(height / halving_period) coinbaseoutput.nValue >>= halvings coinbaseoutput.nValue += fees if pubkey is not None: @@ -150,20 +173,20 @@ def create_coinbase(height, pubkey=None, *, script_pubkey=None, extra_output_scr coinbaseoutput2.nValue = 0 coinbaseoutput2.scriptPubKey = extra_output_script coinbase.vout.append(coinbaseoutput2) - coinbase.calc_sha256() return coinbase -def create_tx_with_script(prevtx, n, script_sig=b"", *, amount, script_pub_key=CScript()): +def create_tx_with_script(prevtx, n, script_sig=b"", *, amount, output_script=None): """Return one-input, one-output transaction object spending the prevtx's n-th output with the given amount. Can optionally pass scriptPubKey and scriptSig, default is anyone-can-spend output. 
""" + if output_script is None: + output_script = CScript() tx = CTransaction() assert n < len(prevtx.vout) - tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, SEQUENCE_FINAL)) - tx.vout.append(CTxOut(amount, script_pub_key)) - tx.calc_sha256() + tx.vin.append(CTxIn(COutPoint(prevtx.txid_int, n), script_sig, SEQUENCE_FINAL)) + tx.vout.append(CTxOut(amount, output_script)) return tx def get_legacy_sigopcount_block(block, accurate=True): diff --git a/resources/scenarios/test_framework/compressor.py b/resources/scenarios/test_framework/compressor.py new file mode 100644 index 000000000..1c30d749d --- /dev/null +++ b/resources/scenarios/test_framework/compressor.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +# Copyright (c) 2025-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Routines for compressing transaction output amounts and scripts.""" +import unittest + +from .messages import COIN + + +def compress_amount(n): + if n == 0: + return 0 + e = 0 + while ((n % 10) == 0) and (e < 9): + n //= 10 + e += 1 + if e < 9: + d = n % 10 + assert (d >= 1 and d <= 9) + n //= 10 + return 1 + (n*9 + d - 1)*10 + e + else: + return 1 + (n - 1)*10 + 9 + + +def decompress_amount(x): + if x == 0: + return 0 + x -= 1 + e = x % 10 + x //= 10 + n = 0 + if e < 9: + d = (x % 9) + 1 + x //= 9 + n = x * 10 + d + else: + n = x + 1 + while e > 0: + n *= 10 + e -= 1 + return n + + +class TestFrameworkCompressor(unittest.TestCase): + def test_amount_compress_decompress(self): + def check_amount(amount, expected_compressed): + self.assertEqual(compress_amount(amount), expected_compressed) + self.assertEqual(decompress_amount(expected_compressed), amount) + + # test cases from compress_tests.cpp:compress_amounts + check_amount(0, 0x0) + check_amount(1, 0x1) + check_amount(1000000, 0x7) + check_amount(COIN, 0x9) + check_amount(50*COIN, 0x32) + check_amount(21000000*COIN, 0x1406f40) diff --git a/resources/scenarios/test_framework/crypto/bip324_cipher.py b/resources/scenarios/test_framework/crypto/bip324_cipher.py new file mode 100644 index 000000000..c9f0fa015 --- /dev/null +++ b/resources/scenarios/test_framework/crypto/bip324_cipher.py @@ -0,0 +1,203 @@ +#!/usr/bin/env python3 +# Copyright (c) 2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +"""Test-only implementation of ChaCha20 Poly1305 AEAD Construction in RFC 8439 and FSChaCha20Poly1305 for BIP 324 + +It is designed for ease of understanding, not performance. + +WARNING: This code is slow and trivially vulnerable to side channel attacks. Do not use for +anything but tests. 
+""" + +import unittest + +from .chacha20 import chacha20_block, REKEY_INTERVAL +from .poly1305 import Poly1305 + + +def pad16(x): + if len(x) % 16 == 0: + return b'' + return b'\x00' * (16 - (len(x) % 16)) + + +def aead_chacha20_poly1305_encrypt(key, nonce, aad, plaintext): + """Encrypt a plaintext using ChaCha20Poly1305.""" + if plaintext is None: + return None + ret = bytearray() + msg_len = len(plaintext) + for i in range((msg_len + 63) // 64): + now = min(64, msg_len - 64 * i) + keystream = chacha20_block(key, nonce, i + 1) + for j in range(now): + ret.append(plaintext[j + 64 * i] ^ keystream[j]) + poly1305 = Poly1305(chacha20_block(key, nonce, 0)[:32]) + mac_data = aad + pad16(aad) + mac_data += ret + pad16(ret) + mac_data += len(aad).to_bytes(8, 'little') + msg_len.to_bytes(8, 'little') + ret += poly1305.tag(mac_data) + return bytes(ret) + + +def aead_chacha20_poly1305_decrypt(key, nonce, aad, ciphertext): + """Decrypt a ChaCha20Poly1305 ciphertext.""" + if ciphertext is None or len(ciphertext) < 16: + return None + msg_len = len(ciphertext) - 16 + poly1305 = Poly1305(chacha20_block(key, nonce, 0)[:32]) + mac_data = aad + pad16(aad) + mac_data += ciphertext[:-16] + pad16(ciphertext[:-16]) + mac_data += len(aad).to_bytes(8, 'little') + msg_len.to_bytes(8, 'little') + if ciphertext[-16:] != poly1305.tag(mac_data): + return None + ret = bytearray() + for i in range((msg_len + 63) // 64): + now = min(64, msg_len - 64 * i) + keystream = chacha20_block(key, nonce, i + 1) + for j in range(now): + ret.append(ciphertext[j + 64 * i] ^ keystream[j]) + return bytes(ret) + + +class FSChaCha20Poly1305: + """Rekeying wrapper AEAD around ChaCha20Poly1305.""" + def __init__(self, initial_key): + self._key = initial_key + self._packet_counter = 0 + + def _crypt(self, aad, text, is_decrypt): + nonce = ((self._packet_counter % REKEY_INTERVAL).to_bytes(4, 'little') + + (self._packet_counter // REKEY_INTERVAL).to_bytes(8, 'little')) + if is_decrypt: + ret = aead_chacha20_poly1305_decrypt(self._key, nonce, aad, text) + else: + ret = aead_chacha20_poly1305_encrypt(self._key, nonce, aad, text) + if (self._packet_counter + 1) % REKEY_INTERVAL == 0: + rekey_nonce = b"\xFF\xFF\xFF\xFF" + nonce[4:] + self._key = aead_chacha20_poly1305_encrypt(self._key, rekey_nonce, b"", b"\x00" * 32)[:32] + self._packet_counter += 1 + return ret + + def decrypt(self, aad, ciphertext): + return self._crypt(aad, ciphertext, True) + + def encrypt(self, aad, plaintext): + return self._crypt(aad, plaintext, False) + + +# Test vectors from RFC8439 consisting of plaintext, aad, 32 byte key, 12 byte nonce and ciphertext +AEAD_TESTS = [ + # RFC 8439 Example from section 2.8.2 + ["4c616469657320616e642047656e746c656d656e206f662074686520636c6173" + "73206f66202739393a204966204920636f756c64206f6666657220796f75206f" + "6e6c79206f6e652074697020666f7220746865206675747572652c2073756e73" + "637265656e20776f756c642062652069742e", + "50515253c0c1c2c3c4c5c6c7", + "808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9f", + [7, 0x4746454443424140], + "d31a8d34648e60db7b86afbc53ef7ec2a4aded51296e08fea9e2b5a736ee62d6" + "3dbea45e8ca9671282fafb69da92728b1a71de0a9e060b2905d6a5b67ecd3b36" + "92ddbd7f2d778b8c9803aee328091b58fab324e4fad675945585808b4831d7bc" + "3ff4def08e4b7a9de576d26586cec64b61161ae10b594f09e26a7e902ecbd060" + "0691"], + # RFC 8439 Test vector A.5 + ["496e7465726e65742d4472616674732061726520647261667420646f63756d65" + "6e74732076616c696420666f722061206d6178696d756d206f6620736978206d" + 
"6f6e74687320616e64206d617920626520757064617465642c207265706c6163" + "65642c206f72206f62736f6c65746564206279206f7468657220646f63756d65" + "6e747320617420616e792074696d652e20497420697320696e617070726f7072" + "6961746520746f2075736520496e7465726e65742d4472616674732061732072" + "65666572656e6365206d6174657269616c206f7220746f206369746520746865" + "6d206f74686572207468616e206173202fe2809c776f726b20696e2070726f67" + "726573732e2fe2809d", + "f33388860000000000004e91", + "1c9240a5eb55d38af333888604f6b5f0473917c1402b80099dca5cbc207075c0", + [0, 0x0807060504030201], + "64a0861575861af460f062c79be643bd5e805cfd345cf389f108670ac76c8cb2" + "4c6cfc18755d43eea09ee94e382d26b0bdb7b73c321b0100d4f03b7f355894cf" + "332f830e710b97ce98c8a84abd0b948114ad176e008d33bd60f982b1ff37c855" + "9797a06ef4f0ef61c186324e2b3506383606907b6a7c02b0f9f6157b53c867e4" + "b9166c767b804d46a59b5216cde7a4e99040c5a40433225ee282a1b0a06c523e" + "af4534d7f83fa1155b0047718cbc546a0d072b04b3564eea1b422273f548271a" + "0bb2316053fa76991955ebd63159434ecebb4e466dae5a1073a6727627097a10" + "49e617d91d361094fa68f0ff77987130305beaba2eda04df997b714d6c6f2c29" + "a6ad5cb4022b02709beead9d67890cbb22392336fea1851f38"], + # Test vectors exercising aad and plaintext which are multiples of 16 bytes. + ["8d2d6a8befd9716fab35819eaac83b33269afb9f1a00fddf66095a6c0cd91951" + "a6b7ad3db580be0674c3f0b55f618e34", + "", + "72ddc73f07101282bbbcf853b9012a9f9695fc5d36b303a97fd0845d0314e0c3", + [0x3432b75f, 0xb3585537eb7f4024], + "f760b8224fb2a317b1b07875092606131232a5b86ae142df5df1c846a7f6341a" + "f2564483dd77f836be45e6230808ffe402a6f0a3e8be074b3d1f4ea8a7b09451"], + ["", + "36970d8a704c065de16250c18033de5a400520ac1b5842b24551e5823a3314f3" + "946285171e04a81ebfbe3566e312e74ab80e94c7dd2ff4e10de0098a58d0f503", + "77adda51d6730b9ad6c995658cbd49f581b2547e7c0c08fcc24ceec797461021", + [0x1f90da88, 0x75dafa3ef84471a4], + "aaae5bb81e8407c94b2ae86ae0c7efbe"], +] + +FSAEAD_TESTS = [ + ["d6a4cb04ef0f7c09c1866ed29dc24d820e75b0491032a51b4c3366f9ca35c19e" + "a3047ec6be9d45f9637b63e1cf9eb4c2523a5aab7b851ebeba87199db0e839cf" + "0d5c25e50168306377aedbe9089fd2463ded88b83211cf51b73b150608cc7a60" + "0d0f11b9a742948482e1b109d8faf15b450aa7322e892fa2208c6691e3fecf4c" + "711191b14d75a72147", + "786cb9b6ebf44288974cf0", + "5c9e1c3951a74fba66708bf9d2c217571684556b6a6a3573bff2847d38612654", + 500, + "9dcebbd3281ea3dd8e9a1ef7d55a97abd6743e56ebc0c190cb2c4e14160b385e" + "0bf508dddf754bd02c7c208447c131ce23e47a4a14dfaf5dd8bc601323950f75" + "4e05d46e9232f83fc5120fbbef6f5347a826ec79a93820718d4ec7a2b7cfaaa4" + "4b21e16d726448b62f803811aff4f6d827ed78e738ce8a507b81a8ae13131192" + "8039213de18a5120dc9b7370baca878f50ff254418de3da50c"], + ["8349b7a2690b63d01204800c288ff1138a1d473c832c90ea8b3fc102d0bb3adc" + "44261b247c7c3d6760bfbe979d061c305f46d94c0582ac3099f0bf249f8cb234", + "", + "3bd2093fcbcb0d034d8c569583c5425c1a53171ea299f8cc3bbf9ae3530adfce", + 60000, + "30a6757ff8439b975363f166a0fa0e36722ab35936abd704297948f45083f4d4" + "99433137ce931f7fca28a0acd3bc30f57b550acbc21cbd45bbef0739d9caf30c" + "14b94829deb27f0b1923a2af704ae5d6"], +] + + +class TestFrameworkAEAD(unittest.TestCase): + def test_aead(self): + """ChaCha20Poly1305 AEAD test vectors.""" + for test_vector in AEAD_TESTS: + hex_plain, hex_aad, hex_key, hex_nonce, hex_cipher = test_vector + plain = bytes.fromhex(hex_plain) + aad = bytes.fromhex(hex_aad) + key = bytes.fromhex(hex_key) + nonce = hex_nonce[0].to_bytes(4, 'little') + hex_nonce[1].to_bytes(8, 'little') + + ciphertext = aead_chacha20_poly1305_encrypt(key, nonce, aad, plain) + 
self.assertEqual(hex_cipher, ciphertext.hex()) + plaintext = aead_chacha20_poly1305_decrypt(key, nonce, aad, ciphertext) + self.assertEqual(plain, plaintext) + + def test_fschacha20poly1305aead(self): + "FSChaCha20Poly1305 AEAD test vectors." + for test_vector in FSAEAD_TESTS: + hex_plain, hex_aad, hex_key, msg_idx, hex_cipher = test_vector + plain = bytes.fromhex(hex_plain) + aad = bytes.fromhex(hex_aad) + key = bytes.fromhex(hex_key) + + enc_aead = FSChaCha20Poly1305(key) + dec_aead = FSChaCha20Poly1305(key) + + for _ in range(msg_idx): + enc_aead.encrypt(b"", None) + ciphertext = enc_aead.encrypt(aad, plain) + self.assertEqual(hex_cipher, ciphertext.hex()) + + for _ in range(msg_idx): + dec_aead.decrypt(b"", None) + plaintext = dec_aead.decrypt(aad, ciphertext) + self.assertEqual(plain, plaintext) diff --git a/resources/scenarios/test_framework/crypto/chacha20.py b/resources/scenarios/test_framework/crypto/chacha20.py new file mode 100644 index 000000000..19b6698df --- /dev/null +++ b/resources/scenarios/test_framework/crypto/chacha20.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python3 +# Copyright (c) 2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +"""Test-only implementation of ChaCha20 cipher and FSChaCha20 for BIP 324 + +It is designed for ease of understanding, not performance. + +WARNING: This code is slow and trivially vulnerable to side channel attacks. Do not use for +anything but tests. +""" + +import unittest + +CHACHA20_INDICES = ( + (0, 4, 8, 12), (1, 5, 9, 13), (2, 6, 10, 14), (3, 7, 11, 15), + (0, 5, 10, 15), (1, 6, 11, 12), (2, 7, 8, 13), (3, 4, 9, 14) +) + +CHACHA20_CONSTANTS = (0x61707865, 0x3320646e, 0x79622d32, 0x6b206574) +REKEY_INTERVAL = 224 # packets + + +def rotl32(v, bits): + """Rotate the 32-bit value v left by bits bits.""" + bits %= 32 # Make sure the term below does not throw an exception + return ((v << bits) & 0xffffffff) | (v >> (32 - bits)) + + +def chacha20_doubleround(s): + """Apply a ChaCha20 double round to 16-element state array s. + See https://cr.yp.to/chacha/chacha-20080128.pdf and https://tools.ietf.org/html/rfc8439 + """ + for a, b, c, d in CHACHA20_INDICES: + s[a] = (s[a] + s[b]) & 0xffffffff + s[d] = rotl32(s[d] ^ s[a], 16) + s[c] = (s[c] + s[d]) & 0xffffffff + s[b] = rotl32(s[b] ^ s[c], 12) + s[a] = (s[a] + s[b]) & 0xffffffff + s[d] = rotl32(s[d] ^ s[a], 8) + s[c] = (s[c] + s[d]) & 0xffffffff + s[b] = rotl32(s[b] ^ s[c], 7) + + +def chacha20_block(key, nonce, cnt): + """Compute the 64-byte output of the ChaCha20 block function. + Takes as input a 32-byte key, 12-byte nonce, and 32-bit integer counter. + """ + # Initial state. + init = [0] * 16 + init[:4] = CHACHA20_CONSTANTS[:4] + init[4:12] = [int.from_bytes(key[i:i+4], 'little') for i in range(0, 32, 4)] + init[12] = cnt + init[13:16] = [int.from_bytes(nonce[i:i+4], 'little') for i in range(0, 12, 4)] + # Perform 20 rounds. + state = list(init) + for _ in range(10): + chacha20_doubleround(state) + # Add initial values back into state. 
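+    # (Each of the 16 words is added modulo 2**32, matching the uint32 arithmetic
+    # of the ChaCha20 block function in RFC 8439 section 2.3.)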
+ for i in range(16): + state[i] = (state[i] + init[i]) & 0xffffffff + # Produce byte output + return b''.join(state[i].to_bytes(4, 'little') for i in range(16)) + +class FSChaCha20: + """Rekeying wrapper stream cipher around ChaCha20.""" + def __init__(self, initial_key, rekey_interval=REKEY_INTERVAL): + self._key = initial_key + self._rekey_interval = rekey_interval + self._block_counter = 0 + self._chunk_counter = 0 + self._keystream = b'' + + def _get_keystream_bytes(self, nbytes): + while len(self._keystream) < nbytes: + nonce = ((0).to_bytes(4, 'little') + (self._chunk_counter // self._rekey_interval).to_bytes(8, 'little')) + self._keystream += chacha20_block(self._key, nonce, self._block_counter) + self._block_counter += 1 + ret = self._keystream[:nbytes] + self._keystream = self._keystream[nbytes:] + return ret + + def crypt(self, chunk): + ks = self._get_keystream_bytes(len(chunk)) + ret = bytes([ks[i] ^ chunk[i] for i in range(len(chunk))]) + if ((self._chunk_counter + 1) % self._rekey_interval) == 0: + self._key = self._get_keystream_bytes(32) + self._block_counter = 0 + self._keystream = b'' + self._chunk_counter += 1 + return ret + + +# Test vectors from RFC7539/8439 consisting of 32 byte key, 12 byte nonce, block counter +# and 64 byte output after applying `chacha20_block` function +CHACHA20_TESTS = [ + ["000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", [0x09000000, 0x4a000000], 1, + "10f1e7e4d13b5915500fdd1fa32071c4c7d1f4c733c068030422aa9ac3d46c4e" + "d2826446079faa0914c2d705d98b02a2b5129cd1de164eb9cbd083e8a2503c4e"], + ["0000000000000000000000000000000000000000000000000000000000000000", [0, 0], 0, + "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7" + "da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586"], + ["0000000000000000000000000000000000000000000000000000000000000000", [0, 0], 1, + "9f07e7be5551387a98ba977c732d080dcb0f29a048e3656912c6533e32ee7aed" + "29b721769ce64e43d57133b074d839d531ed1f28510afb45ace10a1f4b794d6f"], + ["0000000000000000000000000000000000000000000000000000000000000001", [0, 0], 1, + "3aeb5224ecf849929b9d828db1ced4dd832025e8018b8160b82284f3c949aa5a" + "8eca00bbb4a73bdad192b5c42f73f2fd4e273644c8b36125a64addeb006c13a0"], + ["00ff000000000000000000000000000000000000000000000000000000000000", [0, 0], 2, + "72d54dfbf12ec44b362692df94137f328fea8da73990265ec1bbbea1ae9af0ca" + "13b25aa26cb4a648cb9b9d1be65b2c0924a66c54d545ec1b7374f4872e99f096"], + ["0000000000000000000000000000000000000000000000000000000000000000", [0, 0x200000000000000], 0, + "c2c64d378cd536374ae204b9ef933fcd1a8b2288b3dfa49672ab765b54ee27c7" + "8a970e0e955c14f3a88e741b97c286f75f8fc299e8148362fa198a39531bed6d"], + ["000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", [0, 0x4a000000], 1, + "224f51f3401bd9e12fde276fb8631ded8c131f823d2c06e27e4fcaec9ef3cf78" + "8a3b0aa372600a92b57974cded2b9334794cba40c63e34cdea212c4cf07d41b7"], + ["0000000000000000000000000000000000000000000000000000000000000001", [0, 0], 0, + "4540f05a9f1fb296d7736e7b208e3c96eb4fe1834688d2604f450952ed432d41" + "bbe2a0b6ea7566d2a5d1e7e20d42af2c53d792b1c43fea817e9ad275ae546963"], + ["0000000000000000000000000000000000000000000000000000000000000000", [0, 1], 0, + "ef3fdfd6c61578fbf5cf35bd3dd33b8009631634d21e42ac33960bd138e50d32" + "111e4caf237ee53ca8ad6426194a88545ddc497a0b466e7d6bbdb0041b2f586b"], + ["000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", [0, 0x0706050403020100], 0, + "f798a189f195e66982105ffb640bb7757f579da31602fc93ec01ac56f85ac3c1" + 
"34a4547b733b46413042c9440049176905d3be59ea1c53f15916155c2be8241a"], +] + +FSCHACHA20_TESTS = [ + ["000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", + "0000000000000000000000000000000000000000000000000000000000000000", 256, + "a93df4ef03011f3db95f60d996e1785df5de38fc39bfcb663a47bb5561928349"], + ["01", "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", 5, "ea"], + ["e93fdb5c762804b9a706816aca31e35b11d2aa3080108ef46a5b1f1508819c0a", + "8ec4c3ccdaea336bdeb245636970be01266509b33f3d2642504eaf412206207a", 4096, + "8bfaa4eacff308fdb4a94a5ff25bd9d0c1f84b77f81239f67ff39d6e1ac280c9"], +] + + +class TestFrameworkChacha(unittest.TestCase): + def test_chacha20(self): + """ChaCha20 test vectors.""" + for test_vector in CHACHA20_TESTS: + hex_key, nonce, counter, hex_output = test_vector + key = bytes.fromhex(hex_key) + nonce_bytes = nonce[0].to_bytes(4, 'little') + nonce[1].to_bytes(8, 'little') + keystream = chacha20_block(key, nonce_bytes, counter) + self.assertEqual(hex_output, keystream.hex()) + + def test_fschacha20(self): + """FSChaCha20 test vectors.""" + for test_vector in FSCHACHA20_TESTS: + hex_plaintext, hex_key, rekey_interval, hex_ciphertext_after_rotation = test_vector + plaintext = bytes.fromhex(hex_plaintext) + key = bytes.fromhex(hex_key) + fsc20 = FSChaCha20(key, rekey_interval) + for _ in range(rekey_interval): + fsc20.crypt(plaintext) + + ciphertext = fsc20.crypt(plaintext) + self.assertEqual(hex_ciphertext_after_rotation, ciphertext.hex()) diff --git a/resources/scenarios/test_framework/ellswift.py b/resources/scenarios/test_framework/crypto/ellswift.py similarity index 99% rename from resources/scenarios/test_framework/ellswift.py rename to resources/scenarios/test_framework/crypto/ellswift.py index 97b10118e..429b7b9f4 100644 --- a/resources/scenarios/test_framework/ellswift.py +++ b/resources/scenarios/test_framework/crypto/ellswift.py @@ -12,7 +12,7 @@ import random import unittest -from test_framework.secp256k1 import FE, G, GE +from test_framework.crypto.secp256k1 import FE, G, GE # Precomputed constant square root of -3 (mod p). MINUS_3_SQRT = FE(-3).sqrt() diff --git a/resources/scenarios/test_framework/ellswift_decode_test_vectors.csv b/resources/scenarios/test_framework/crypto/ellswift_decode_test_vectors.csv similarity index 100% rename from resources/scenarios/test_framework/ellswift_decode_test_vectors.csv rename to resources/scenarios/test_framework/crypto/ellswift_decode_test_vectors.csv diff --git a/resources/scenarios/test_framework/crypto/hkdf.py b/resources/scenarios/test_framework/crypto/hkdf.py new file mode 100644 index 000000000..7e8958733 --- /dev/null +++ b/resources/scenarios/test_framework/crypto/hkdf.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 +# Copyright (c) 2023 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +"""Test-only HKDF-SHA256 implementation + +It is designed for ease of understanding, not performance. + +WARNING: This code is slow and trivially vulnerable to side channel attacks. Do not use for +anything but tests. 
+""" + +import hashlib +import hmac + + +def hmac_sha256(key, data): + """Compute HMAC-SHA256 from specified byte arrays key and data.""" + return hmac.new(key, data, hashlib.sha256).digest() + + +def hkdf_sha256(length, ikm, salt, info): + """Derive a key using HKDF-SHA256.""" + if len(salt) == 0: + salt = bytes([0] * 32) + prk = hmac_sha256(salt, ikm) + t = b"" + okm = b"" + for i in range((length + 32 - 1) // 32): + t = hmac_sha256(prk, t + info + bytes([i + 1])) + okm += t + return okm[:length] diff --git a/resources/scenarios/test_framework/crypto/muhash.py b/resources/scenarios/test_framework/crypto/muhash.py new file mode 100644 index 000000000..09241f620 --- /dev/null +++ b/resources/scenarios/test_framework/crypto/muhash.py @@ -0,0 +1,55 @@ +# Copyright (c) 2020 Pieter Wuille +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Native Python MuHash3072 implementation.""" + +import hashlib +import unittest + +from .chacha20 import chacha20_block + +def data_to_num3072(data): + """Hash a 32-byte array data to a 3072-bit number using 6 Chacha20 operations.""" + bytes384 = b"" + for counter in range(6): + bytes384 += chacha20_block(data, bytes(12), counter) + return int.from_bytes(bytes384, 'little') + +class MuHash3072: + """Class representing the MuHash3072 computation of a set. + + See https://cseweb.ucsd.edu/~mihir/papers/inchash.pdf and https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2017-May/014337.html + """ + + MODULUS = 2**3072 - 1103717 + + def __init__(self): + """Initialize for an empty set.""" + self.numerator = 1 + self.denominator = 1 + + def insert(self, data): + """Insert a byte array data in the set.""" + data_hash = hashlib.sha256(data).digest() + self.numerator = (self.numerator * data_to_num3072(data_hash)) % self.MODULUS + + def remove(self, data): + """Remove a byte array from the set.""" + data_hash = hashlib.sha256(data).digest() + self.denominator = (self.denominator * data_to_num3072(data_hash)) % self.MODULUS + + def digest(self): + """Extract the final hash. Does not modify this object.""" + val = (self.numerator * pow(self.denominator, -1, self.MODULUS)) % self.MODULUS + bytes384 = val.to_bytes(384, 'little') + return hashlib.sha256(bytes384).digest() + +class TestFrameworkMuhash(unittest.TestCase): + def test_muhash(self): + muhash = MuHash3072() + muhash.insert(b'\x00' * 32) + muhash.insert((b'\x01' + b'\x00' * 31)) + muhash.remove((b'\x02' + b'\x00' * 31)) + finalized = muhash.digest() + # This mirrors the result in the C++ MuHash3072 unit test + self.assertEqual(finalized[::-1].hex(), "10d312b100cbd32ada024a6646e40d3482fcff103668d2625f10002a607d5863") diff --git a/resources/scenarios/test_framework/crypto/poly1305.py b/resources/scenarios/test_framework/crypto/poly1305.py new file mode 100644 index 000000000..967b90254 --- /dev/null +++ b/resources/scenarios/test_framework/crypto/poly1305.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python3 +# Copyright (c) 2022 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +"""Test-only implementation of Poly1305 authenticator + +It is designed for ease of understanding, not performance. + +WARNING: This code is slow and trivially vulnerable to side channel attacks. Do not use for +anything but tests. 
+""" + +import unittest + + +class Poly1305: + """Class representing a running poly1305 computation.""" + MODULUS = 2**130 - 5 + + def __init__(self, key): + self.r = int.from_bytes(key[:16], 'little') & 0xffffffc0ffffffc0ffffffc0fffffff + self.s = int.from_bytes(key[16:], 'little') + + def tag(self, data): + """Compute the poly1305 tag.""" + acc, length = 0, len(data) + for i in range((length + 15) // 16): + chunk = data[i * 16:min(length, (i + 1) * 16)] + val = int.from_bytes(chunk, 'little') + 256**len(chunk) + acc = (self.r * (acc + val)) % Poly1305.MODULUS + return ((acc + self.s) & 0xffffffffffffffffffffffffffffffff).to_bytes(16, 'little') + + +# Test vectors from RFC7539/8439 consisting of message to be authenticated, 32 byte key and computed 16 byte tag +POLY1305_TESTS = [ + # RFC 7539, section 2.5.2. + ["43727970746f6772617068696320466f72756d2052657365617263682047726f7570", + "85d6be7857556d337f4452fe42d506a80103808afb0db2fd4abff6af4149f51b", + "a8061dc1305136c6c22b8baf0c0127a9"], + # RFC 7539, section A.3. + ["00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "000000000000000000000000000", + "0000000000000000000000000000000000000000000000000000000000000000", + "00000000000000000000000000000000"], + ["416e79207375626d697373696f6e20746f20746865204945544620696e74656e6465642062792074686520436f6e747269627" + "5746f7220666f72207075626c69636174696f6e20617320616c6c206f722070617274206f6620616e204945544620496e7465" + "726e65742d4472616674206f722052464320616e6420616e792073746174656d656e74206d6164652077697468696e2074686" + "520636f6e74657874206f6620616e204945544620616374697669747920697320636f6e7369646572656420616e2022494554" + "4620436f6e747269627574696f6e222e20537563682073746174656d656e747320696e636c756465206f72616c20737461746" + "56d656e747320696e20494554462073657373696f6e732c2061732077656c6c206173207772697474656e20616e6420656c65" + "6374726f6e696320636f6d6d756e69636174696f6e73206d61646520617420616e792074696d65206f7220706c6163652c207" + "768696368206172652061646472657373656420746f", + "0000000000000000000000000000000036e5f6b5c5e06070f0efca96227a863e", + "36e5f6b5c5e06070f0efca96227a863e"], + ["416e79207375626d697373696f6e20746f20746865204945544620696e74656e6465642062792074686520436f6e747269627" + "5746f7220666f72207075626c69636174696f6e20617320616c6c206f722070617274206f6620616e204945544620496e7465" + "726e65742d4472616674206f722052464320616e6420616e792073746174656d656e74206d6164652077697468696e2074686" + "520636f6e74657874206f6620616e204945544620616374697669747920697320636f6e7369646572656420616e2022494554" + "4620436f6e747269627574696f6e222e20537563682073746174656d656e747320696e636c756465206f72616c20737461746" + "56d656e747320696e20494554462073657373696f6e732c2061732077656c6c206173207772697474656e20616e6420656c65" + "6374726f6e696320636f6d6d756e69636174696f6e73206d61646520617420616e792074696d65206f7220706c6163652c207" + "768696368206172652061646472657373656420746f", + "36e5f6b5c5e06070f0efca96227a863e00000000000000000000000000000000", + "f3477e7cd95417af89a6b8794c310cf0"], + ["2754776173206272696c6c69672c20616e642074686520736c6974687920746f7665730a446964206779726520616e6420676" + "96d626c6520696e2074686520776162653a0a416c6c206d696d737920776572652074686520626f726f676f7665732c0a416e" + "6420746865206d6f6d65207261746873206f757467726162652e", + "1c9240a5eb55d38af333888604f6b5f0473917c1402b80099dca5cbc207075c0", + "4541669a7eaaee61e708dc7cbcc5eb62"], + ["ffffffffffffffffffffffffffffffff", + 
"0200000000000000000000000000000000000000000000000000000000000000", + "03000000000000000000000000000000"], + ["02000000000000000000000000000000", + "02000000000000000000000000000000ffffffffffffffffffffffffffffffff", + "03000000000000000000000000000000"], + ["fffffffffffffffffffffffffffffffff0ffffffffffffffffffffffffffffff11000000000000000000000000000000", + "0100000000000000000000000000000000000000000000000000000000000000", + "05000000000000000000000000000000"], + ["fffffffffffffffffffffffffffffffffbfefefefefefefefefefefefefefefe01010101010101010101010101010101", + "0100000000000000000000000000000000000000000000000000000000000000", + "00000000000000000000000000000000"], + ["fdffffffffffffffffffffffffffffff", + "0200000000000000000000000000000000000000000000000000000000000000", + "faffffffffffffffffffffffffffffff"], + ["e33594d7505e43b900000000000000003394d7505e4379cd01000000000000000000000000000000000000000000000001000000000000000000000000000000", + "0100000000000000040000000000000000000000000000000000000000000000", + "14000000000000005500000000000000"], + ["e33594d7505e43b900000000000000003394d7505e4379cd010000000000000000000000000000000000000000000000", + "0100000000000000040000000000000000000000000000000000000000000000", + "13000000000000000000000000000000"], +] + + +class TestFrameworkPoly1305(unittest.TestCase): + def test_poly1305(self): + """Poly1305 test vectors.""" + for test_vector in POLY1305_TESTS: + hex_message, hex_key, hex_tag = test_vector + message = bytes.fromhex(hex_message) + key = bytes.fromhex(hex_key) + tag = bytes.fromhex(hex_tag) + comp_tag = Poly1305(key).tag(message) + self.assertEqual(tag, comp_tag) diff --git a/resources/scenarios/test_framework/ripemd160.py b/resources/scenarios/test_framework/crypto/ripemd160.py similarity index 100% rename from resources/scenarios/test_framework/ripemd160.py rename to resources/scenarios/test_framework/crypto/ripemd160.py diff --git a/resources/scenarios/test_framework/secp256k1.py b/resources/scenarios/test_framework/crypto/secp256k1.py similarity index 96% rename from resources/scenarios/test_framework/secp256k1.py rename to resources/scenarios/test_framework/crypto/secp256k1.py index 2e9e419da..9d85d557a 100644 --- a/resources/scenarios/test_framework/secp256k1.py +++ b/resources/scenarios/test_framework/crypto/secp256k1.py @@ -15,6 +15,9 @@ * G: the secp256k1 generator point """ +import unittest +from hashlib import sha256 +from test_framework.util import assert_not_equal class FE: """Objects of this class represent elements of the field GF(2**256 - 2**32 - 977). 
@@ -38,7 +41,7 @@ def __init__(self, a=0, b=1): num = (num * b._den) % FE.SIZE else: den = (den * b) % FE.SIZE - assert den != 0 + assert_not_equal(den, 0) if num == 0: den = 1 self._num = num @@ -344,3 +347,9 @@ def mul(self, a): # Precomputed table with multiples of G for fast multiplication FAST_G = FastGEMul(G) + +class TestFrameworkSecp256k1(unittest.TestCase): + def test_H(self): + H = sha256(G.to_bytes_uncompressed()).digest() + assert GE.lift_x(FE.from_bytes(H)) is not None + self.assertEqual(H.hex(), "50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0") diff --git a/resources/scenarios/test_framework/siphash.py b/resources/scenarios/test_framework/crypto/siphash.py similarity index 100% rename from resources/scenarios/test_framework/siphash.py rename to resources/scenarios/test_framework/crypto/siphash.py diff --git a/resources/scenarios/test_framework/xswiftec_inv_test_vectors.csv b/resources/scenarios/test_framework/crypto/xswiftec_inv_test_vectors.csv similarity index 100% rename from resources/scenarios/test_framework/xswiftec_inv_test_vectors.csv rename to resources/scenarios/test_framework/crypto/xswiftec_inv_test_vectors.csv diff --git a/resources/scenarios/test_framework/key.py b/resources/scenarios/test_framework/key.py index 6c1892539..558dcbf23 100644 --- a/resources/scenarios/test_framework/key.py +++ b/resources/scenarios/test_framework/key.py @@ -13,7 +13,8 @@ import random import unittest -from test_framework import secp256k1 +from test_framework.crypto import secp256k1 +from test_framework.util import assert_not_equal, random_bitflip # Point with no known discrete log. H_POINT = "50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0" @@ -241,10 +242,9 @@ def verify_schnorr(key, sig, msg): - key is a 32-byte xonly pubkey (computed using compute_xonly_pubkey). 
- sig is a 64-byte Schnorr signature - - msg is a 32-byte message + - msg is a variable-length message """ assert len(key) == 32 - assert len(msg) == 32 assert len(sig) == 64 P = secp256k1.GE.from_bytes_xonly(key) @@ -271,7 +271,6 @@ def sign_schnorr(key, msg, aux=None, flip_p=False, flip_r=False): aux = bytes(32) assert len(key) == 32 - assert len(msg) == 32 assert len(aux) == 32 sec = int.from_bytes(key, 'big') @@ -282,7 +281,7 @@ def sign_schnorr(key, msg, aux=None, flip_p=False, flip_r=False): sec = ORDER - sec t = (sec ^ int.from_bytes(TaggedHash("BIP0340/aux", aux), 'big')).to_bytes(32, 'big') kp = int.from_bytes(TaggedHash("BIP0340/nonce", t + P.to_bytes_xonly() + msg), 'big') % ORDER - assert kp != 0 + assert_not_equal(kp, 0) R = kp * secp256k1.G k = kp if R.y.is_even() != flip_r else ORDER - kp e = int.from_bytes(TaggedHash("BIP0340/challenge", R.to_bytes_xonly() + P.to_bytes_xonly() + msg), 'big') % ORDER @@ -292,11 +291,6 @@ def sign_schnorr(key, msg, aux=None, flip_p=False, flip_r=False): class TestFrameworkKey(unittest.TestCase): def test_ecdsa_and_schnorr(self): """Test the Python ECDSA and Schnorr implementations.""" - def random_bitflip(sig): - sig = list(sig) - sig[random.randrange(len(sig))] ^= (1 << (random.randrange(8))) - return bytes(sig) - byte_arrays = [generate_privkey() for _ in range(3)] + [v.to_bytes(32, 'big') for v in [0, ORDER - 1, ORDER, 2**256 - 1]] keys = {} for privkey_bytes in byte_arrays: # build array of key/pubkey pairs diff --git a/resources/scenarios/test_framework/mempool_util.py b/resources/scenarios/test_framework/mempool_util.py new file mode 100644 index 000000000..3c4609c0b --- /dev/null +++ b/resources/scenarios/test_framework/mempool_util.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Helpful routines for mempool testing.""" +import random + +from .blocktools import ( + COINBASE_MATURITY, +) +from .messages import ( + COutPoint, + CTransaction, + CTxIn, + CTxInWitness, + CTxOut, +) +from .script import ( + CScript, + OP_RETURN, +) +from .util import ( + assert_equal, + assert_greater_than, + create_lots_of_big_transactions, + gen_return_txouts, +) +from .wallet import ( + MiniWallet, +) + +# Default for -minrelaytxfee in sat/kvB +DEFAULT_MIN_RELAY_TX_FEE = 100 +# Default for -incrementalrelayfee in sat/kvB +DEFAULT_INCREMENTAL_RELAY_FEE = 100 + +TRUC_MAX_VSIZE = 10000 +TRUC_CHILD_MAX_VSIZE = 1000 + +def assert_mempool_contents(test_framework, node, expected=None, sync=True): + """Assert that all transactions in expected are in the mempool, + and no additional ones exist. 'expected' is an array of + CTransaction objects + """ + if sync: + test_framework.sync_mempools() + if not expected: + expected = [] + assert_equal(len(expected), len(set(expected))) + mempool = node.getrawmempool(verbose=False) + assert_equal(len(mempool), len(expected)) + for tx in expected: + assert tx.txid_hex in mempool + + +def fill_mempool(test_framework, node, *, tx_sync_fun=None): + """Fill mempool until eviction. + + Allows for simpler testing of scenarios with floating mempoolminfee > minrelay + Requires -maxmempool=5. + To avoid unintentional tx dependencies, the mempool filling txs are created with a + tagged ephemeral miniwallet instance. 
+ """ + test_framework.log.info("Fill the mempool until eviction is triggered and the mempoolminfee rises") + txouts = gen_return_txouts() + minrelayfee = node.getnetworkinfo()['relayfee'] + + tx_batch_size = 1 + num_of_batches = 75 + # Generate UTXOs to flood the mempool + # 1 to create a tx initially that will be evicted from the mempool later + # 75 transactions each with a fee rate higher than the previous one + ephemeral_miniwallet = MiniWallet(node, tag_name="fill_mempool_ephemeral_wallet") + test_framework.generate(ephemeral_miniwallet, 1 + num_of_batches * tx_batch_size) + + # Mine enough blocks so that the UTXOs are allowed to be spent + test_framework.generate(node, COINBASE_MATURITY - 1) + + # Get all UTXOs up front to ensure none of the transactions spend from each other, as that may + # change their effective feerate and thus the order in which they are selected for eviction. + confirmed_utxos = [ephemeral_miniwallet.get_utxo(confirmed_only=True) for _ in range(num_of_batches * tx_batch_size + 1)] + assert_equal(len(confirmed_utxos), num_of_batches * tx_batch_size + 1) + + test_framework.log.debug("Create a mempool tx that will be evicted") + tx_to_be_evicted_id = ephemeral_miniwallet.send_self_transfer( + from_node=node, utxo_to_spend=confirmed_utxos.pop(0), fee_rate=minrelayfee)["txid"] + + def send_batch(fee): + utxos = confirmed_utxos[:tx_batch_size] + create_lots_of_big_transactions(ephemeral_miniwallet, node, fee, tx_batch_size, txouts, utxos) + del confirmed_utxos[:tx_batch_size] + + # Increase the tx fee rate to give the subsequent transactions a higher priority in the mempool + # The tx has an approx. vsize of 65k, i.e. multiplying the previous fee rate (in sats/kvB) + # by 130 should result in a fee that corresponds to 2x of that fee rate + base_fee = minrelayfee * 130 + batch_fees = [(i + 1) * base_fee for i in range(num_of_batches)] + + test_framework.log.debug("Fill up the mempool with txs with higher fee rate") + for fee in batch_fees[:-3]: + send_batch(fee) + tx_sync_fun() if tx_sync_fun else test_framework.sync_mempools() # sync before any eviction + assert_equal(node.getmempoolinfo()["mempoolminfee"], minrelayfee) + for fee in batch_fees[-3:]: + send_batch(fee) + tx_sync_fun() if tx_sync_fun else test_framework.sync_mempools() # sync after all evictions + + test_framework.log.debug("The tx should be evicted by now") + # The number of transactions created should be greater than the ones present in the mempool + assert_greater_than(tx_batch_size * num_of_batches, len(node.getrawmempool())) + # Initial tx created should not be present in the mempool anymore as it had a lower fee rate + assert tx_to_be_evicted_id not in node.getrawmempool() + + test_framework.log.debug("Check that mempoolminfee is larger than minrelaytxfee") + assert_equal(node.getmempoolinfo()['minrelaytxfee'], minrelayfee) + assert_greater_than(node.getmempoolinfo()['mempoolminfee'], minrelayfee) + +def tx_in_orphanage(node, tx: CTransaction) -> bool: + """Returns true if the transaction is in the orphanage.""" + found = [o for o in node.getorphantxs(verbosity=1) if o["txid"] == tx.txid_hex and o["wtxid"] == tx.wtxid_hex] + return len(found) == 1 + +def create_large_orphan(): + """Create huge orphan transaction""" + tx = CTransaction() + # Nonexistent UTXO + tx.vin = [CTxIn(COutPoint(random.randrange(1 << 256), random.randrange(1, 100)))] + tx.wit.vtxinwit = [CTxInWitness()] + tx.wit.vtxinwit[0].scriptWitness.stack = [CScript(b'X' * 390000)] + tx.vout = [CTxOut(100, CScript([OP_RETURN, b'a' * 
20]))] + return tx diff --git a/resources/scenarios/test_framework/messages.py b/resources/scenarios/test_framework/messages.py index 8f3aea878..ebb306a82 100755 --- a/resources/scenarios/test_framework/messages.py +++ b/resources/scenarios/test_framework/messages.py @@ -25,15 +25,16 @@ import math import random import socket -import struct import time import unittest -from test_framework.siphash import siphash256 +from test_framework.crypto.siphash import siphash256 from test_framework.util import assert_equal MAX_LOCATOR_SZ = 101 MAX_BLOCK_WEIGHT = 4000000 +DEFAULT_BLOCK_RESERVED_WEIGHT = 8000 +MINIMUM_BLOCK_RESERVED_WEIGHT = 2000 MAX_BLOOM_FILTER_SIZE = 36000 MAX_BLOOM_HASH_FUNCS = 50 @@ -41,12 +42,14 @@ MAX_MONEY = 21000000 * COIN MAX_BIP125_RBF_SEQUENCE = 0xfffffffd # Sequence number that is rbf-opt-in (BIP 125) and csv-opt-out (BIP 68) +MAX_SEQUENCE_NONFINAL = 0xfffffffe # Sequence number that is csv-opt-out (BIP 68) SEQUENCE_FINAL = 0xffffffff # Sequence number that disables nLockTime if set for every input of a tx MAX_PROTOCOL_MESSAGE_LENGTH = 4000000 # Maximum length of incoming protocol messages MAX_HEADERS_RESULTS = 2000 # Number of headers sent in one getheaders result MAX_INV_SIZE = 50000 # Maximum number of entries in an 'inv' protocol message +NODE_NONE = 0 NODE_NETWORK = (1 << 0) NODE_BLOOM = (1 << 2) NODE_WITNESS = (1 << 3) @@ -70,11 +73,23 @@ DEFAULT_ANCESTOR_LIMIT = 25 # default max number of in-mempool ancestors DEFAULT_DESCENDANT_LIMIT = 25 # default max number of in-mempool descendants -# Default setting for -datacarriersize. 80 bytes of data, +1 for OP_RETURN, +2 for the pushdata opcodes. -MAX_OP_RETURN_RELAY = 83 + +# Default setting for -datacarriersize. +MAX_OP_RETURN_RELAY = 100_000 + DEFAULT_MEMPOOL_EXPIRY_HOURS = 336 # hours +TX_MIN_STANDARD_VERSION = 1 +TX_MAX_STANDARD_VERSION = 3 + +MAGIC_BYTES = { + "mainnet": b"\xf9\xbe\xb4\xd9", + "testnet4": b"\x1c\x16\x3f\x28", + "regtest": b"\xfa\xbf\xb5\xda", + "signet": b"\x0a\x03\xcf\x40", +} + def sha256(s): return hashlib.sha256(s).digest() @@ -90,27 +105,47 @@ def hash256(s): def ser_compact_size(l): r = b"" if l < 253: - r = struct.pack("B", l) + r = l.to_bytes(1, "little") elif l < 0x10000: - r = struct.pack(" 0 else 0x00)]) + r + if l <= 0x7f: + return r + l = (l >> 7) - 1 + + +def deser_varint(f): + n = 0 + while True: + dat = f.read(1)[0] + n = (n << 7) | (dat & 0x7f) + if (dat & 0x80) > 0: + n += 1 + else: + return n + + def deser_string(f): nit = deser_compact_size(f) return f.read(nit) @@ -198,6 +233,11 @@ def ser_string_vector(l): return r +def deser_block_spent_outputs(f): + nit = deser_compact_size(f) + return [deser_vector(f, CTxOut) for _ in range(nit)] + + def from_hex(obj, hex_string): """Deserialize from a hex string representation (e.g. 
from RPC) @@ -272,13 +312,13 @@ def deserialize(self, f, *, with_time=True): """Deserialize from addrv1 format (pre-BIP155)""" if with_time: # VERSION messages serialize CAddress objects without time - self.time = struct.unpack("H", f.read(2))[0] + self.port = int.from_bytes(f.read(2), "big") def serialize(self, *, with_time=True): """Serialize in addrv1 format (pre-BIP155)""" @@ -286,20 +326,20 @@ def serialize(self, *, with_time=True): r = b"" if with_time: # VERSION messages serialize CAddress objects without time - r += struct.pack("H", self.port) + r += self.port.to_bytes(2, "big") return r def deserialize_v2(self, f): """Deserialize from addrv2 format (BIP155)""" - self.time = struct.unpack("H", f.read(2))[0] + self.port = int.from_bytes(f.read(2), "big") def serialize_v2(self): """Serialize in addrv2 format (BIP155)""" assert self.net in self.ADDRV2_NET_NAME r = b"" - r += struct.pack("H", self.port) + raise Exception("Address type not supported") + r += self.port.to_bytes(2, "big") return r def __repr__(self): @@ -375,12 +415,12 @@ def __init__(self, t=0, h=0): self.hash = h def deserialize(self, f): - self.type = struct.unpack(" 21000000 * COIN: return False @@ -670,13 +702,13 @@ def get_vsize(self): return math.ceil(self.get_weight() / WITNESS_SCALE_FACTOR) def __repr__(self): - return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \ - % (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime) + return "CTransaction(version=%i vin=%s vout=%s wit=%s nLockTime=%i)" \ + % (self.version, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime) class CBlockHeader: - __slots__ = ("hash", "hashMerkleRoot", "hashPrevBlock", "nBits", "nNonce", - "nTime", "nVersion", "sha256") + __slots__ = ("hashMerkleRoot", "hashPrevBlock", "nBits", "nNonce", + "nTime", "nVersion") def __init__(self, header=None): if header is None: @@ -688,9 +720,6 @@ def __init__(self, header=None): self.nTime = header.nTime self.nBits = header.nBits self.nNonce = header.nNonce - self.sha256 = header.sha256 - self.hash = header.hash - self.calc_sha256() def set_null(self): self.nVersion = 4 @@ -699,45 +728,37 @@ def set_null(self): self.nTime = 0 self.nBits = 0 self.nNonce = 0 - self.sha256 = None - self.hash = None def deserialize(self, f): - self.nVersion = struct.unpack(" target: + if self.hash_int > target: return False for tx in self.vtx: if not tx.is_valid(): @@ -809,11 +828,9 @@ def is_valid(self): return True def solve(self): - self.rehash() target = uint256_from_compact(self.nBits) - while self.sha256 > target: + while self.hash_int > target: self.nNonce += 1 - self.rehash() # Calculate the block weight using witness and non-witness # serialization size (does NOT use sigops). @@ -874,12 +891,12 @@ def __init__(self): def deserialize(self, f): self.header.deserialize(f) - self.nonce = struct.unpack("> (32 - bits)) - -def chacha20_doubleround(s): - """Apply a ChaCha20 double round to 16-element state array s. 
- - See https://cr.yp.to/chacha/chacha-20080128.pdf and https://tools.ietf.org/html/rfc8439 - """ - QUARTER_ROUNDS = [(0, 4, 8, 12), - (1, 5, 9, 13), - (2, 6, 10, 14), - (3, 7, 11, 15), - (0, 5, 10, 15), - (1, 6, 11, 12), - (2, 7, 8, 13), - (3, 4, 9, 14)] - - for a, b, c, d in QUARTER_ROUNDS: - s[a] = (s[a] + s[b]) & 0xffffffff - s[d] = rot32(s[d] ^ s[a], 16) - s[c] = (s[c] + s[d]) & 0xffffffff - s[b] = rot32(s[b] ^ s[c], 12) - s[a] = (s[a] + s[b]) & 0xffffffff - s[d] = rot32(s[d] ^ s[a], 8) - s[c] = (s[c] + s[d]) & 0xffffffff - s[b] = rot32(s[b] ^ s[c], 7) - -def chacha20_32_to_384(key32): - """Specialized ChaCha20 implementation with 32-byte key, 0 IV, 384-byte output.""" - # See RFC 8439 section 2.3 for chacha20 parameters - CONSTANTS = [0x61707865, 0x3320646e, 0x79622d32, 0x6b206574] - - key_bytes = [0]*8 - for i in range(8): - key_bytes[i] = int.from_bytes(key32[(4 * i):(4 * (i+1))], 'little') - - INITIALIZATION_VECTOR = [0] * 4 - init = CONSTANTS + key_bytes + INITIALIZATION_VECTOR - out = bytearray() - for counter in range(6): - init[12] = counter - s = init.copy() - for _ in range(10): - chacha20_doubleround(s) - for i in range(16): - out.extend(((s[i] + init[i]) & 0xffffffff).to_bytes(4, 'little')) - return bytes(out) - -def data_to_num3072(data): - """Hash a 32-byte array data to a 3072-bit number using 6 Chacha20 operations.""" - bytes384 = chacha20_32_to_384(data) - return int.from_bytes(bytes384, 'little') - -class MuHash3072: - """Class representing the MuHash3072 computation of a set. - - See https://cseweb.ucsd.edu/~mihir/papers/inchash.pdf and https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2017-May/014337.html - """ - - MODULUS = 2**3072 - 1103717 - - def __init__(self): - """Initialize for an empty set.""" - self.numerator = 1 - self.denominator = 1 - - def insert(self, data): - """Insert a byte array data in the set.""" - data_hash = hashlib.sha256(data).digest() - self.numerator = (self.numerator * data_to_num3072(data_hash)) % self.MODULUS - - def remove(self, data): - """Remove a byte array from the set.""" - data_hash = hashlib.sha256(data).digest() - self.denominator = (self.denominator * data_to_num3072(data_hash)) % self.MODULUS - - def digest(self): - """Extract the final hash. Does not modify this object.""" - val = (self.numerator * pow(self.denominator, -1, self.MODULUS)) % self.MODULUS - bytes384 = val.to_bytes(384, 'little') - return hashlib.sha256(bytes384).digest() - -class TestFrameworkMuhash(unittest.TestCase): - def test_muhash(self): - muhash = MuHash3072() - muhash.insert(b'\x00' * 32) - muhash.insert((b'\x01' + b'\x00' * 31)) - muhash.remove((b'\x02' + b'\x00' * 31)) - finalized = muhash.digest() - # This mirrors the result in the C++ MuHash3072 unit test - self.assertEqual(finalized[::-1].hex(), "10d312b100cbd32ada024a6646e40d3482fcff103668d2625f10002a607d5863") - - def test_chacha20(self): - def chacha_check(key, result): - self.assertEqual(chacha20_32_to_384(key)[:64].hex(), result) - - # Test vectors from https://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04#section-7 - # Since the nonce is hardcoded to 0 in our function we only use those vectors. 
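[Reviewer note] The MuHash3072 block being deleted here is an incremental, order-independent set hash: insert() multiplies a running numerator by a 3072-bit hash of the element, remove() multiplies a denominator, and digest() folds the two together with a modular inverse, so a removal exactly cancels a prior insertion. A sketch of those set semantics, assuming the module remains importable from its upstream home test_framework/crypto/muhash.py (this sync looks like it drops the file rather than moving it):

    from test_framework.crypto.muhash import MuHash3072

    a = MuHash3072()
    a.insert(b"\x01" * 32)
    a.insert(b"\x02" * 32)
    a.remove(b"\x02" * 32)   # cancels the second insert
    b = MuHash3072()
    b.insert(b"\x01" * 32)
    assert a.digest() == b.digest()  # same set of elements, same digest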
- chacha_check([0]*32, "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586") - chacha_check([0]*31 + [1], "4540f05a9f1fb296d7736e7b208e3c96eb4fe1834688d2604f450952ed432d41bbe2a0b6ea7566d2a5d1e7e20d42af2c53d792b1c43fea817e9ad275ae546963") diff --git a/resources/scenarios/test_framework/netutil.py b/resources/scenarios/test_framework/netutil.py index 838f40fca..f79a8c361 100644 --- a/resources/scenarios/test_framework/netutil.py +++ b/resources/scenarios/test_framework/netutil.py @@ -4,7 +4,7 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Linux network utilities. -Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal +Roughly based on https://web.archive.org/web/20190424172231/http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal """ import sys @@ -13,6 +13,10 @@ import array import os +# Easily unreachable address. Attempts to connect to it will stay within the machine. +# Used to avoid non-loopback traffic or DNS queries. +UNREACHABLE_PROXY_ARG = '-proxy=127.0.0.1:1' + # STATE_ESTABLISHED = '01' # STATE_SYN_SENT = '02' # STATE_SYN_RECV = '03' @@ -37,9 +41,12 @@ def get_socket_inodes(pid): base = '/proc/%i/fd' % pid inodes = [] for item in os.listdir(base): - target = os.readlink(os.path.join(base, item)) - if target.startswith('socket:'): - inodes.append(int(target[8:-1])) + try: + target = os.readlink(os.path.join(base, item)) + if target.startswith('socket:'): + inodes.append(int(target[8:-1])) + except FileNotFoundError: + pass return inodes def _remove_empty(array): @@ -158,3 +165,19 @@ def test_ipv6_local(): except socket.error: have_ipv6 = False return have_ipv6 + +def test_unix_socket(): + '''Return True if UNIX sockets are available on this platform.''' + try: + socket.AF_UNIX + except AttributeError: + return False + else: + return True + +def format_addr_port(addr, port): + '''Return either "addr:port" or "[addr]:port" based on whether addr looks like an IPv6 address.''' + if ":" in addr: + return f"[{addr}]:{port}" + else: + return f"{addr}:{port}" diff --git a/resources/scenarios/test_framework/p2p.py b/resources/scenarios/test_framework/p2p.py index be4ed624f..c2e773656 100755 --- a/resources/scenarios/test_framework/p2p.py +++ b/resources/scenarios/test_framework/p2p.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # Copyright (c) 2010 ArtForz -- public domain half-a-node # Copyright (c) 2012 Jeff Garzik -# Copyright (c) 2010-2022 The Bitcoin Core developers +# Copyright (c) 2010-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test objects for interacting with a bitcoind node over the p2p protocol. 
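[Reviewer note] On the netutil.py hunk above: format_addr_port decides bracketing purely on whether the address contains a colon, and UNREACHABLE_PROXY_ARG gives tests a proxy target that never produces traffic off the machine. The helper's behavior follows straight from its definition:

    from test_framework.netutil import format_addr_port

    assert format_addr_port("127.0.0.1", 8333) == "127.0.0.1:8333"
    assert format_addr_port("::1", 8333) == "[::1]:8333"  # ':' present, so bracketed as IPv6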
@@ -24,6 +24,7 @@ from collections import defaultdict from io import BytesIO import logging +import platform import struct import sys import threading @@ -72,13 +73,20 @@ msg_wtxidrelay, NODE_NETWORK, NODE_WITNESS, + MAGIC_BYTES, sha256, ) from test_framework.util import ( + assert_not_equal, MAX_NODES, p2p_port, wait_until_helper_internal, ) +from test_framework.v2_p2p import ( + EncryptedP2PState, + MSGTYPE_TO_SHORTID, + SHORTID, +) logger = logging.getLogger("TestFramework.p2p") @@ -140,13 +148,6 @@ b"wtxidrelay": msg_wtxidrelay, } -MAGIC_BYTES = { - "mainnet": b"\xf9\xbe\xb4\xd9", # mainnet - "testnet3": b"\x0b\x11\x09\x07", # testnet3 - "regtest": b"\xfa\xbf\xb5\xda", # regtest - "signet": b"\x0a\x03\xcf\x40", # signet -} - class P2PConnection(asyncio.Protocol): """A low-level connection object to a node's P2P interface. @@ -165,11 +166,20 @@ def __init__(self): # The underlying transport of the connection. # Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe self._transport = None + # This lock is acquired before sending messages over the socket. There's an implied lock order and + # p2p_lock must not be acquired after _send_lock as it could result in deadlocks. + self._send_lock = threading.Lock() + self.v2_state = None # EncryptedP2PState object needed for v2 p2p connections + self.reconnect = False # set if reconnection needs to happen @property def is_connected(self): return self._transport is not None + @property + def supports_v2_p2p(self): + return self.v2_state is not None + def peer_connect_helper(self, dstaddr, dstport, net, timeout_factor): assert not self.is_connected self.timeout_factor = timeout_factor @@ -179,17 +189,23 @@ def peer_connect_helper(self, dstaddr, dstport, net, timeout_factor): self.on_connection_send_msg = None self.recvbuf = b"" self.magic_bytes = MAGIC_BYTES[net] + self.p2p_connected_to_node = dstport != 0 - def peer_connect(self, dstaddr, dstport, *, net, timeout_factor): + def peer_connect(self, dstaddr, dstport, *, net, timeout_factor, supports_v2_p2p): self.peer_connect_helper(dstaddr, dstport, net, timeout_factor) + if supports_v2_p2p: + self.v2_state = EncryptedP2PState(initiating=True, net=net) loop = NetworkThread.network_event_loop logger.debug('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport)) coroutine = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport) return lambda: loop.call_soon_threadsafe(loop.create_task, coroutine) - def peer_accept_connection(self, connect_id, connect_cb=lambda: None, *, net, timeout_factor): + def peer_accept_connection(self, connect_id, connect_cb=lambda: None, *, net, timeout_factor, supports_v2_p2p, reconnect): self.peer_connect_helper('0', 0, net, timeout_factor) + self.reconnect = reconnect + if supports_v2_p2p: + self.v2_state = EncryptedP2PState(initiating=False, net=net) logger.debug('Listening for Bitcoin Node with id: {}'.format(connect_id)) return lambda: NetworkThread.listen(self, connect_cb, idx=connect_id) @@ -203,16 +219,29 @@ def peer_disconnect(self): def connection_made(self, transport): """asyncio callback when a connection is opened.""" assert not self._transport - logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport)) + info = transport.get_extra_info("socket") + us = info.getsockname() + them = info.getpeername() + logger.debug(f"Connected: us={us[0]}:{us[1]}, them={them[0]}:{them[1]}") + self.dstaddr = them[0] + self.dstport = them[1] self._transport = transport - if self.on_connection_send_msg: - 
self.send_message(self.on_connection_send_msg) - self.on_connection_send_msg = None # Never used again + # in an inbound connection to the TestNode with P2PConnection as the initiator, [TestNode <---- P2PConnection] + # send the initial handshake immediately + if self.supports_v2_p2p and self.v2_state.initiating and not self.v2_state.tried_v2_handshake: + send_handshake_bytes = self.v2_state.initiate_v2_handshake() + logger.debug(f"sending {len(self.v2_state.sent_garbage)} bytes of garbage data") + self.send_raw_message(send_handshake_bytes) + # for v1 outbound connections, send version message immediately after opening + # (for v2 outbound connections, send it after the initial v2 handshake) + if self.p2p_connected_to_node and not self.supports_v2_p2p: + self.send_version() self.on_open() def connection_lost(self, exc): """asyncio callback when a connection is closed.""" - if exc: + # don't display warning if reconnection needs to be attempted using v1 P2P + if exc and not self.reconnect: logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc)) else: logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport)) @@ -220,13 +249,67 @@ def connection_lost(self, exc): self.recvbuf = b"" self.on_close() + # v2 handshake method + def _on_data_v2_handshake(self): + """v2 handshake performed before P2P messages are exchanged (see BIP324). P2PConnection is the initiator + (in inbound connections to TestNode) and the responder (in outbound connections from TestNode). + Performed by: + * initiator using `initiate_v2_handshake()`, `complete_handshake()` and `authenticate_handshake()` + * responder using `respond_v2_handshake()`, `complete_handshake()` and `authenticate_handshake()` + + `initiate_v2_handshake()` is immediately done by the initiator when the connection is established in + `connection_made()`. The rest of the initial v2 handshake functions are handled here. + """ + if not self.v2_state.peer: + if not self.v2_state.initiating and not self.v2_state.sent_garbage: + # if the responder hasn't sent garbage yet, the responder is still reading ellswift bytes + # reads ellswift bytes till the first mismatch from 12 bytes V1_PREFIX + length, send_handshake_bytes = self.v2_state.respond_v2_handshake(BytesIO(self.recvbuf)) + self.recvbuf = self.recvbuf[length:] + if send_handshake_bytes == -1: + self.v2_state = None + return + elif send_handshake_bytes: + logger.debug(f"sending {len(self.v2_state.sent_garbage)} bytes of garbage data") + self.send_raw_message(send_handshake_bytes) + elif send_handshake_bytes == b"": + return # only after send_handshake_bytes are sent can `complete_handshake()` be done + + # `complete_handshake()` reads the remaining ellswift bytes from recvbuf + # and sends response after deriving shared ECDH secret using received ellswift bytes + length, response = self.v2_state.complete_handshake(BytesIO(self.recvbuf)) + self.recvbuf = self.recvbuf[length:] + if response: + self.send_raw_message(response) + else: + return # only after response is sent can `authenticate_handshake()` be done + + # `self.v2_state.peer` is instantiated only after shared ECDH secret/BIP324 derived keys and ciphers + # is derived in `complete_handshake()`. + # so `authenticate_handshake()` which uses the BIP324 derived ciphers gets called after `complete_handshake()`. 
+ assert self.v2_state.peer + length, is_mac_auth = self.v2_state.authenticate_handshake(self.recvbuf) + if not is_mac_auth: + raise ValueError("invalid v2 mac tag in handshake authentication") + self.recvbuf = self.recvbuf[length:] + if self.v2_state.tried_v2_handshake: + # for v2 outbound connections, send version message immediately after v2 handshake + if self.p2p_connected_to_node: + self.send_version() + # process post-v2-handshake data immediately, if available + if len(self.recvbuf) > 0: + self._on_data() + # Socket read methods def data_received(self, t): """asyncio callback when data is read from the socket.""" if len(t) > 0: self.recvbuf += t - self._on_data() + if self.supports_v2_p2p and not self.v2_state.tried_v2_handshake: + self._on_data_v2_handshake() + else: + self._on_data() def _on_data(self): """Try to read P2P messages from the recv buffer. @@ -236,23 +319,48 @@ def _on_data(self): the on_message callback for processing.""" try: while True: - if len(self.recvbuf) < 4: - return - if self.recvbuf[:4] != self.magic_bytes: - raise ValueError("magic bytes mismatch: {} != {}".format(repr(self.magic_bytes), repr(self.recvbuf))) - if len(self.recvbuf) < 4 + 12 + 4 + 4: - return - msgtype = self.recvbuf[4:4+12].split(b"\x00", 1)[0] - msglen = struct.unpack("= MIN_P2P_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_P2P_VERSION_SUPPORTED) + # for inbound connections, reply to version with own version message + # (could be due to v1 reconnect after a failed v2 handshake) + if not self.p2p_connected_to_node: + self.send_version() + self.reconnect = False if message.nVersion >= 70016 and self.wtxidrelay: - self.send_message(msg_wtxidrelay()) + self.send_without_ping(msg_wtxidrelay()) if self.support_addrv2: - self.send_message(msg_sendaddrv2()) - self.send_message(msg_verack()) + self.send_without_ping(msg_sendaddrv2()) + self.send_without_ping(msg_verack()) self.nServices = message.nServices self.relay = message.relay - self.send_message(msg_getaddr()) + if self.p2p_connected_to_node: + self.send_without_ping(msg_getaddr()) # Connection helper methods - def wait_until(self, test_function_in, *, timeout=60, check_connected=True): + def wait_until(self, test_function_in, *, timeout=60, check_connected=True, check_interval=0.05): def test_function(): if check_connected: assert self.is_connected return test_function_in() - wait_until_helper_internal(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor) + wait_until_helper_internal(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor, check_interval=check_interval) - def wait_for_connect(self, timeout=60): + def wait_for_connect(self, *, timeout=60): test_function = lambda: self.is_connected self.wait_until(test_function, timeout=timeout, check_connected=False) - def wait_for_disconnect(self, timeout=60): + def wait_for_disconnect(self, *, timeout=60): test_function = lambda: not self.is_connected self.wait_until(test_function, timeout=timeout, check_connected=False) + def wait_for_reconnect(self, *, timeout=60): + def test_function(): + return self.is_connected and self.last_message.get('version') and not self.supports_v2_p2p + self.wait_until(test_function, timeout=timeout, check_connected=False) + # Message receiving helper methods - def wait_for_tx(self, txid, timeout=60): + def wait_for_tx(self, txid, *, timeout=60): def test_function(): if not self.last_message.get('tx'): return False - return 
self.last_message['tx'].tx.rehash() == txid + return self.last_message['tx'].tx.txid_hex == txid self.wait_until(test_function, timeout=timeout) - def wait_for_block(self, blockhash, timeout=60): + def wait_for_block(self, blockhash, *, timeout=60): def test_function(): - return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash + return self.last_message.get("block") and self.last_message["block"].block.hash_int == blockhash self.wait_until(test_function, timeout=timeout) - def wait_for_header(self, blockhash, timeout=60): + def wait_for_header(self, blockhash, *, timeout=60): def test_function(): last_headers = self.last_message.get('headers') if not last_headers: return False - return last_headers.headers[0].rehash() == int(blockhash, 16) + return last_headers.headers[0].hash_int == int(blockhash, 16) self.wait_until(test_function, timeout=timeout) - def wait_for_merkleblock(self, blockhash, timeout=60): + def wait_for_merkleblock(self, blockhash, *, timeout=60): def test_function(): last_filtered_block = self.last_message.get('merkleblock') if not last_filtered_block: return False - return last_filtered_block.merkleblock.header.rehash() == int(blockhash, 16) + return last_filtered_block.merkleblock.header.hash_int == int(blockhash, 16) self.wait_until(test_function, timeout=timeout) - def wait_for_getdata(self, hash_list, timeout=60): + def wait_for_getdata(self, hash_list, *, timeout=60): """Waits for a getdata message. The object hashes in the inventory vector must match the provided hash_list.""" @@ -522,19 +659,21 @@ def test_function(): self.wait_until(test_function, timeout=timeout) - def wait_for_getheaders(self, timeout=60): - """Waits for a getheaders message. + def wait_for_getheaders(self, block_hash=None, *, timeout=60): + """Waits for a getheaders message containing a specific block hash. - Receiving any getheaders message will satisfy the predicate. the last_message["getheaders"] - value must be explicitly cleared before calling this method, or this will return - immediately with success. 
TODO: change this method to take a hash value and only - return true if the correct block header has been requested.""" + If no block hash is provided, checks whether any getheaders message has been received by the node.""" def test_function(): - return self.last_message.get("getheaders") + last_getheaders = self.last_message.pop("getheaders", None) + if block_hash is None: + return last_getheaders + if last_getheaders is None: + return False + return block_hash == last_getheaders.locator.vHave[0] self.wait_until(test_function, timeout=timeout) - def wait_for_inv(self, expected_inv, timeout=60): + def wait_for_inv(self, expected_inv, *, timeout=60): """Waits for an INV message and checks that the first inv object in the message was as expected.""" if len(expected_inv) > 1: raise NotImplementedError("wait_for_inv() will only verify the first inv object") @@ -546,7 +685,7 @@ def test_function(): self.wait_until(test_function, timeout=timeout) - def wait_for_verack(self, timeout=60): + def wait_for_verack(self, *, timeout=60): def test_function(): return "verack" in self.last_message @@ -554,17 +693,22 @@ def test_function(): # Message sending helper functions - def send_and_ping(self, message, timeout=60): - self.send_message(message) + def send_version(self): + if self.on_connection_send_msg: + self.send_without_ping(self.on_connection_send_msg) + self.on_connection_send_msg = None # Never used again + + def send_and_ping(self, message, *, timeout=60): + self.send_without_ping(message) self.sync_with_ping(timeout=timeout) - def sync_with_ping(self, timeout=60): + def sync_with_ping(self, *, timeout=60): """Ensure ProcessMessages and SendMessages is called on this connection""" # Sending two pings back-to-back, requires that the node calls # `ProcessMessage` twice, and thus ensures `SendMessages` must have # been called at least once - self.send_message(msg_ping(nonce=0)) - self.send_message(msg_ping(nonce=self.ping_counter)) + self.send_without_ping(msg_ping(nonce=0)) + self.send_without_ping(msg_ping(nonce=self.ping_counter)) def test_function(): return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter @@ -591,7 +735,7 @@ def __init__(self): NetworkThread.listeners = {} NetworkThread.protos = {} - if sys.platform == 'win32': + if platform.system() == 'Windows': asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) NetworkThread.network_event_loop = asyncio.new_event_loop() @@ -599,7 +743,7 @@ def run(self): """Start the network thread.""" self.network_event_loop.run_forever() - def close(self, timeout=10): + def close(self, *, timeout=10): """Close the connections and network event loop.""" self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop) wait_until_helper_internal(lambda: not self.network_event_loop.is_running(), timeout=timeout) @@ -620,6 +764,11 @@ def listen(cls, p2p, callback, port=None, addr=None, idx=1): if addr is None: addr = '127.0.0.1' + def exception_handler(loop, context): + if not p2p.reconnect: + loop.default_exception_handler(context) + + cls.network_event_loop.set_exception_handler(exception_handler) coroutine = cls.create_listen_server(addr, port, callback, p2p) cls.network_event_loop.call_soon_threadsafe(cls.network_event_loop.create_task, coroutine) @@ -633,7 +782,9 @@ def peer_protocol(): protocol function from that dict, and returns it so the event loop can start executing it.""" response = cls.protos.get((addr, port)) - cls.protos[(addr, port)] = None + # remove protocol function from 
dict only when reconnection doesn't need to happen/already happened + if not proto.reconnect: + cls.protos[(addr, port)] = None return response if (addr, port) not in cls.listeners: @@ -665,13 +816,14 @@ def __init__(self): self.getdata_requests = [] def on_getdata(self, message): - """Check for the tx/block in our stores and if found, reply with an inv message.""" + """Check for the tx/block in our stores and if found, reply with MSG_TX or MSG_BLOCK.""" for inv in message.inv: self.getdata_requests.append(inv.hash) - if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys(): - self.send_message(msg_tx(self.tx_store[inv.hash])) - elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys(): - self.send_message(msg_block(self.block_store[inv.hash])) + invtype = inv.type & MSG_TYPE_MASK + if (invtype == MSG_TX or invtype == MSG_WTX) and inv.hash in self.tx_store.keys(): + self.send_without_ping(msg_tx(self.tx_store[inv.hash])) + elif invtype == MSG_BLOCK and inv.hash in self.block_store.keys(): + self.send_without_ping(msg_block(self.block_store[inv.hash])) else: logger.debug('getdata message type {} received.'.format(hex(inv.type))) @@ -685,14 +837,14 @@ def on_getheaders(self, message): return headers_list = [self.block_store[self.last_block_hash]] - while headers_list[-1].sha256 not in locator.vHave: + while headers_list[-1].hash_int not in locator.vHave: # Walk back through the block store, adding headers to headers_list # as we go. prev_block_hash = headers_list[-1].hashPrevBlock if prev_block_hash in self.block_store: prev_block_header = CBlockHeader(self.block_store[prev_block_hash]) headers_list.append(prev_block_header) - if prev_block_header.sha256 == hash_stop: + if prev_block_header.hash_int == hash_stop: # if this is the hashstop header, stop here break else: @@ -704,9 +856,9 @@ def on_getheaders(self, message): response = msg_headers(headers_list) if response is not None: - self.send_message(response) + self.send_without_ping(response) - def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60): + def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60, is_decoy=False): """Send blocks to test node and test whether the tip advances. 
- add all blocks to our block_store @@ -720,18 +872,20 @@ def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, with p2p_lock: for block in blocks: - self.block_store[block.sha256] = block - self.last_block_hash = block.sha256 + self.block_store[block.hash_int] = block + self.last_block_hash = block.hash_int reject_reason = [reject_reason] if reject_reason else [] with node.assert_debug_log(expected_msgs=reject_reason): + if is_decoy: # since decoy messages are ignored by the recipient - no need to wait for response + force_send = True if force_send: for b in blocks: - self.send_message(msg_block(block=b)) + self.send_without_ping(msg_block(block=b), is_decoy) else: - self.send_message(msg_headers([CBlockHeader(block) for block in blocks])) + self.send_without_ping(msg_headers([CBlockHeader(block) for block in blocks])) self.wait_until( - lambda: blocks[-1].sha256 in self.getdata_requests, + lambda: blocks[-1].hash_int in self.getdata_requests, timeout=timeout, check_connected=success, ) @@ -742,47 +896,43 @@ def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, self.sync_with_ping(timeout=timeout) if success: - self.wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout) + self.wait_until(lambda: node.getbestblockhash() == blocks[-1].hash_hex, timeout=timeout) else: - assert node.getbestblockhash() != blocks[-1].hash + assert_not_equal(node.getbestblockhash(), blocks[-1].hash_hex) - def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None): + def send_txs_and_test(self, txs, node, *, success=True, reject_reason=None): """Send txs to test node and test whether they're accepted to the mempool. - add all txs to our tx_store - send tx messages for all txs - if success is True/False: assert that the txs are/are not accepted to the mempool - - if expect_disconnect is True: Skip the sync with ping - if reject_reason is set: assert that the correct reject message is logged.""" with p2p_lock: for tx in txs: - self.tx_store[tx.sha256] = tx + self.tx_store[tx.txid_int] = tx reject_reason = [reject_reason] if reject_reason else [] with node.assert_debug_log(expected_msgs=reject_reason): for tx in txs: - self.send_message(msg_tx(tx)) + self.send_without_ping(msg_tx(tx)) - if expect_disconnect: - self.wait_for_disconnect() - else: - self.sync_with_ping() + self.sync_with_ping() raw_mempool = node.getrawmempool() if success: # Check that all txs are now in the mempool for tx in txs: - assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash) + assert tx.txid_hex in raw_mempool, "{} not found in mempool".format(tx.txid_hex) else: # Check that none of the txs are now in the mempool for tx in txs: - assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash) + assert tx.txid_hex not in raw_mempool, "{} tx found in mempool".format(tx.txid_hex) class P2PTxInvStore(P2PInterface): """A P2PInterface which stores a count of how many times each txid has been announced.""" - def __init__(self): - super().__init__() + def __init__(self, **kwargs): + super().__init__(**kwargs) self.tx_invs_received = defaultdict(int) def on_inv(self, message): @@ -797,7 +947,7 @@ def get_invs(self): with p2p_lock: return list(self.tx_invs_received.keys()) - def wait_for_broadcast(self, txns, timeout=60): + def wait_for_broadcast(self, txns, *, timeout=60): """Waits for the txns (list of txids) to complete initial broadcast. 
The mempool should mark unbroadcast=False for these transactions. """ diff --git a/resources/scenarios/test_framework/psbt.py b/resources/scenarios/test_framework/psbt.py index 1eff4a250..4fe688ec0 100644 --- a/resources/scenarios/test_framework/psbt.py +++ b/resources/scenarios/test_framework/psbt.py @@ -50,6 +50,9 @@ PSBT_IN_TAP_BIP32_DERIVATION = 0x16 PSBT_IN_TAP_INTERNAL_KEY = 0x17 PSBT_IN_TAP_MERKLE_ROOT = 0x18 +PSBT_IN_MUSIG2_PARTICIPANT_PUBKEYS = 0x1a +PSBT_IN_MUSIG2_PUB_NONCE = 0x1b +PSBT_IN_MUSIG2_PARTIAL_SIG = 0x1c PSBT_IN_PROPRIETARY = 0xfc # per-output types @@ -61,6 +64,7 @@ PSBT_OUT_TAP_INTERNAL_KEY = 0x05 PSBT_OUT_TAP_TREE = 0x06 PSBT_OUT_TAP_BIP32_DERIVATION = 0x07 +PSBT_OUT_MUSIG2_PARTICIPANT_PUBKEYS = 0x08 PSBT_OUT_PROPRIETARY = 0xfc @@ -88,6 +92,9 @@ def serialize(self): for k,v in self.map.items(): if isinstance(k, int) and 0 <= k and k <= 255: k = bytes([k]) + if isinstance(v, list): + assert all(type(elem) is bytes for elem in v) + v = b"".join(v) # simply concatenate the byte-strings w/o size prefixes m += ser_compact_size(len(k)) + k m += ser_compact_size(len(v)) + v m += b"\x00" diff --git a/resources/scenarios/test_framework/script.py b/resources/scenarios/test_framework/script.py index 17a954cb2..f3cee7c66 100644 --- a/resources/scenarios/test_framework/script.py +++ b/resources/scenarios/test_framework/script.py @@ -8,9 +8,7 @@ """ from collections import namedtuple -import struct import unittest -from typing import List, Dict from .key import TaggedHash, tweak_add_pubkey, compute_xonly_pubkey @@ -19,14 +17,13 @@ CTxOut, hash256, ser_string, - ser_uint256, sha256, - uint256_from_str, ) -from .ripemd160 import ripemd160 +from .crypto.ripemd160 import ripemd160 MAX_SCRIPT_ELEMENT_SIZE = 520 +MAX_SCRIPT_SIZE = 10000 MAX_PUBKEYS_PER_MULTI_A = 999 LOCKTIME_THRESHOLD = 500000000 ANNEX_TAG = 0x50 @@ -59,9 +56,9 @@ def encode_op_pushdata(d): elif len(d) <= 0xff: return b'\x4c' + bytes([len(d)]) + d # OP_PUSHDATA1 elif len(d) <= 0xffff: - return b'\x4d' + struct.pack(b' OP_PUSHDATA4: @@ -591,7 +588,7 @@ def GetSigOpCount(self, fAccurate): n += 1 elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY): if fAccurate and (OP_1 <= lastOpcode <= OP_16): - n += opcode.decode_op_n() + n += lastOpcode.decode_op_n() else: n += 20 lastOpcode = opcode @@ -671,7 +668,7 @@ def LegacySignatureMsg(script, txTo, inIdx, hashtype): txtmp.vin.append(tmp) s = txtmp.serialize_without_witness() - s += struct.pack(b" 16 + max_ms_script = keys_to_multisig_script([fake_pubkey]*20, k=19) + self.assertEqual(len(max_ms_script), 2 + 20*34 + 2 + 1) + self.assertTrue(max_ms_script.startswith(bytes([1, 19]))) # using OP_PUSH1 + self.assertTrue(max_ms_script.endswith(bytes([1, 20, OP_CHECKMULTISIG]))) diff --git a/resources/scenarios/test_framework/socks5.py b/resources/scenarios/test_framework/socks5.py index 0ca06a739..0cd16a3ff 100644 --- a/resources/scenarios/test_framework/socks5.py +++ b/resources/scenarios/test_framework/socks5.py @@ -4,11 +4,16 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
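[Reviewer note] Quick aside before the socks5.py changes, on the psbt.py hunk above: letting a PSBTMap value be a list of byte-strings (joined with no per-element size prefixes) is exactly what the new MuSig2 fields need, since their values are fixed-size concatenations such as 33-byte compressed pubkeys. A toy sketch of the effect, assuming upstream's PSBTMap, which accepts an initial dict; the bare keytype key is a simplification, real MuSig2 keys also carry keydata:

    from test_framework.psbt import PSBTMap, PSBT_IN_MUSIG2_PARTICIPANT_PUBKEYS

    m = PSBTMap({PSBT_IN_MUSIG2_PARTICIPANT_PUBKEYS: [b"\x02" * 33, b"\x03" * 33]})
    ser = m.serialize()
    # Layout: compact-size(1) + keytype byte, then compact-size(66) + the two
    # 33-byte chunks back to back, then the 0x00 map terminator.
    assert ser[-1] == 0x00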
"""Dummy Socks5 server for testing.""" +import select import socket import threading import queue import logging +from .netutil import ( + format_addr_port +) + logger = logging.getLogger("TestFramework.socks5") # Protocol constants @@ -32,6 +37,42 @@ def recvall(s, n): n -= len(d) return rv +def sendall(s, data): + """Send all data to a socket, or fail.""" + sent = 0 + while sent < len(data): + _, wlist, _ = select.select([], [s], []) + if len(wlist) > 0: + n = s.send(data[sent:]) + if n == 0: + raise IOError('send() on socket returned 0') + sent += n + +def forward_sockets(a, b): + """Forward data received on socket a to socket b and vice versa, until EOF is received on one of the sockets.""" + # Mark as non-blocking so that we do not end up in a deadlock-like situation + # where we block and wait on data from `a` while there is data ready to be + # received on `b` and forwarded to `a`. And at the same time the application + # at `a` is not sending anything because it waits for the data from `b` to + # respond. + a.setblocking(False) + b.setblocking(False) + sockets = [a, b] + done = False + while not done: + rlist, _, xlist = select.select(sockets, [], sockets) + if len(xlist) > 0: + raise IOError('Exceptional condition on socket') + for s in rlist: + data = s.recv(4096) + if data is None or len(data) == 0: + done = True + break + if s == a: + sendall(b, data) + else: + sendall(a, data) + # Implementation classes class Socks5Configuration(): """Proxy configuration.""" @@ -41,6 +82,19 @@ def __init__(self): self.unauth = False # Support unauthenticated self.auth = False # Support authentication self.keep_alive = False # Do not automatically close connections + # This function is called whenever a new connection arrives to the proxy + # and it decides where the connection is redirected to. It is passed: + # - the address the client requested to connect to + # - the port the client requested to connect to + # It is supposed to return an object like: + # { + # "actual_to_addr": "127.0.0.1" + # "actual_to_port": 28276 + # } + # or None. + # If it returns an object then the connection is redirected to actual_to_addr:actual_to_port. + # If it returns None, or destinations_factory itself is None then the connection is closed. 
+ self.destinations_factory = None class Socks5Command(): """Information about an incoming socks5 command.""" @@ -60,7 +114,7 @@ def __init__(self, serv, conn): self.conn = conn def handle(self): - """Handle socks5 request according to RFC192.""" + """Handle socks5 request according to RFC1928.""" try: # Verify socks version ver = recvall(self.conn, 1)[0] @@ -117,6 +171,22 @@ def handle(self): cmdin = Socks5Command(cmd, atyp, addr, port, username, password) self.serv.queue.put(cmdin) logger.debug('Proxy: %s', cmdin) + + requested_to_addr = addr.decode("utf-8") + requested_to = format_addr_port(requested_to_addr, port) + + if self.serv.conf.destinations_factory is not None: + dest = self.serv.conf.destinations_factory(requested_to_addr, port) + if dest is not None: + logger.debug(f"Serving connection to {requested_to}, will redirect it to " + f"{dest['actual_to_addr']}:{dest['actual_to_port']} instead") + with socket.create_connection((dest["actual_to_addr"], dest["actual_to_port"])) as conn_to: + forward_sockets(self.conn, conn_to) + else: + logger.debug(f"Can't serve the connection to {requested_to}: the destinations factory returned None") + else: + logger.debug(f"Can't serve the connection to {requested_to}: no destinations factory") + # Fall through to disconnect except Exception as e: logger.exception("socks5 request handling failed.") @@ -124,6 +194,8 @@ def handle(self): finally: if not self.serv.keep_alive: self.conn.close() + else: + logger.debug("Keeping client connection alive") class Socks5Server(): def __init__(self, conf): diff --git a/resources/scenarios/test_framework/test_framework.py b/resources/scenarios/test_framework/test_framework.py index 4e6d245b5..0b9295cef 100755 --- a/resources/scenarios/test_framework/test_framework.py +++ b/resources/scenarios/test_framework/test_framework.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2022 The Bitcoin Core developers +# Copyright (c) 2014-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Base class for RPC testing.""" @@ -7,19 +7,22 @@ import configparser from enum import Enum import argparse +from datetime import datetime, timezone +import json import logging import os import platform import pdb import random import re +import shlex import shutil import subprocess import sys import tempfile import time +import types -from typing import List from .address import create_deterministic_address_bcrt1_p2tr_op_true from .authproxy import JSONRPCException from . import coverage @@ -30,10 +33,12 @@ PortSeed, assert_equal, check_json_precision, + find_vout_for_address, get_datadir_path, initialize_datadir, p2p_port, wait_until_helper_internal, + wallet_importprivkey, ) @@ -56,6 +61,63 @@ def __init__(self, message): self.message = message +class Binaries: + """Helper class to provide information about bitcoin binaries + + Attributes: + paths: Object returned from get_binary_paths() containing information + which binaries and command lines to use from environment variables and + the config file. + bin_dir: An optional string containing a directory path to look for + binaries, which takes precedence over the paths above, if specified. + This is used by tests calling binaries from previous releases. 
+ """ + def __init__(self, paths, bin_dir): + self.paths = paths + self.bin_dir = bin_dir + + def node_argv(self, **kwargs): + "Return argv array that should be used to invoke bitcoind" + return self._argv("node", self.paths.bitcoind, **kwargs) + + def rpc_argv(self): + "Return argv array that should be used to invoke bitcoin-cli" + # Add -nonamed because "bitcoin rpc" enables -named by default, but bitcoin-cli doesn't + return self._argv("rpc", self.paths.bitcoincli) + ["-nonamed"] + + def tx_argv(self): + "Return argv array that should be used to invoke bitcoin-tx" + return self._argv("tx", self.paths.bitcointx) + + def util_argv(self): + "Return argv array that should be used to invoke bitcoin-util" + return self._argv("util", self.paths.bitcoinutil) + + def wallet_argv(self): + "Return argv array that should be used to invoke bitcoin-wallet" + return self._argv("wallet", self.paths.bitcoinwallet) + + def chainstate_argv(self): + "Return argv array that should be used to invoke bitcoin-chainstate" + return self._argv("chainstate", self.paths.bitcoinchainstate) + + def _argv(self, command, bin_path, need_ipc=False): + """Return argv array that should be used to invoke the command. It + either uses the bitcoin wrapper executable (if BITCOIN_CMD is set or + need_ipc is True), or the direct binary path (bitcoind, etc). When + bin_dir is set (by tests calling binaries from previous releases) it + always uses the direct path.""" + if self.bin_dir is not None: + return [os.path.join(self.bin_dir, os.path.basename(bin_path))] + elif self.paths.bitcoin_cmd is not None or need_ipc: + # If the current test needs IPC functionality, use the bitcoin + # wrapper binary and append -m so it calls multiprocess binaries. + bitcoin_cmd = self.paths.bitcoin_cmd or [self.paths.bitcoin_bin] + return bitcoin_cmd + (["-m"] if need_ipc else []) + [command] + else: + return [bin_path] + + class BitcoinTestMetaClass(type): """Metaclass for BitcoinTestFramework. @@ -92,18 +154,20 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): This class also contains various public and private helper methods.""" - def __init__(self) -> None: + def __init__(self, test_file) -> None: """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method""" self.chain: str = 'regtest' self.setup_clean_chain: bool = False - self.nodes: List[TestNode] = [] + self.noban_tx_relay: bool = False + self.nodes: list[TestNode] = [] self.extra_args = None + self.extra_init = None self.network_thread = None self.rpc_timeout = 60 # Wait for up to 60 seconds for the RPC server to respond self.supports_cli = True self.bind_to_localhost_only = True - self.parse_args() - self.default_wallet_name = "default_wallet" if self.options.descriptors else "" + self.parse_args(test_file) + self.default_wallet_name = "default_wallet" self.wallet_data_filename = "wallet.dat" # Optional list of wallet names that can be set in set_test_params to # create and import keys to. If unset, default is len(nodes) * @@ -112,8 +176,9 @@ def __init__(self) -> None: # are not imported. self.wallet_names = None # By default the wallet is not required. Set to true by skip_if_no_wallet(). - # When False, we ignore wallet_names regardless of what it is. - self._requires_wallet = False + # Can also be set to None to indicate that the wallet will be used if available. + # When False or None, we ignore wallet_names in setup_nodes(). 
+ self.uses_wallet = False # Disable ThreadOpenConnections by default, so that adding entries to # addrman will not result in automatic connections to them. self.disable_autoconnect = True @@ -128,42 +193,39 @@ def main(self): try: self.setup() - self.run_test() - except JSONRPCException: - self.log.exception("JSONRPC error") - self.success = TestStatus.FAILED + if self.options.test_methods: + self.run_test_methods() + else: + self.run_test() + except SkipTest as e: self.log.warning("Test Skipped: %s" % e.message) self.success = TestStatus.SKIPPED - except AssertionError: - self.log.exception("Assertion failed") - self.success = TestStatus.FAILED - except KeyError: - self.log.exception("Key error") - self.success = TestStatus.FAILED except subprocess.CalledProcessError as e: - self.log.exception("Called Process failed with '{}'".format(e.output)) + self.log.exception(f"Called Process failed with stdout='{e.stdout}'; stderr='{e.stderr}';") self.success = TestStatus.FAILED - except Exception: - self.log.exception("Unexpected exception caught during testing") - self.success = TestStatus.FAILED - except KeyboardInterrupt: - self.log.warning("Exiting after keyboard interrupt") + except BaseException: + self.log.exception("Unexpected exception") self.success = TestStatus.FAILED finally: exit_code = self.shutdown() sys.exit(exit_code) - def parse_args(self): + def run_test_methods(self): + for method_name in self.options.test_methods: + self.log.info(f"Attempting to execute method: {method_name}") + method = getattr(self, method_name) + method() + self.log.info(f"Method '{method_name}' executed successfully.") + + def parse_args(self, test_file): previous_releases_path = os.getenv("PREVIOUS_RELEASES_DIR") or os.getcwd() + "/releases" parser = argparse.ArgumentParser(usage="%(prog)s [options]") parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true", help="Leave bitcoinds and test.* datadir on exit or error") - parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true", - help="Don't stop bitcoinds after the test execution") - parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"), + parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(test_file) + "/../cache"), help="Directory for caching pregenerated datadirs (default: %(default)s)") - parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs") + parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs (must not exist)") parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO", help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. 
Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.") parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true", @@ -176,7 +238,7 @@ def parse_args(self): parser.add_argument("--coveragedir", dest="coveragedir", help="Write tested RPC commands into this directory") parser.add_argument("--configfile", dest="configfile", - default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"), + default=os.path.abspath(os.path.dirname(test_file) + "/../config.ini"), help="Location of the test framework config file (default: %(default)s)") parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true", help="Attach a python debugger if test fails") @@ -191,6 +253,10 @@ def parse_args(self): parser.add_argument("--timeout-factor", dest="timeout_factor", type=float, help="adjust test timeouts by a factor. Setting it to 0 disables all timeouts") parser.add_argument("--v2transport", dest="v2transport", default=False, action="store_true", help="use BIP324 v2 connections between all nodes by default") + parser.add_argument("--v1transport", dest="v1transport", default=False, action="store_true", + help="Explicitly use v1 transport (can be used to overwrite global --v2transport option)") + parser.add_argument("--test_methods", dest="test_methods", nargs='*', + help="Run specified test methods sequentially instead of the full test. Use only for methods that do not depend on any context set up in run_test or other methods.") self.add_options(parser) # Running TestShell in a Jupyter notebook causes an additional -f argument @@ -199,50 +265,47 @@ def parse_args(self): parser.add_argument("-f", "--fff", help="a dummy argument to fool ipython", default="1") self.options = parser.parse_args() if self.options.timeout_factor == 0: - self.options.timeout_factor = 99999 + self.options.timeout_factor = 999 self.options.timeout_factor = self.options.timeout_factor or (4 if self.options.valgrind else 1) self.options.previous_releases_path = previous_releases_path - config = configparser.ConfigParser() - config.read_file(open(self.options.configfile)) - self.config = config - - if "descriptors" not in self.options: - # Wallet is not required by the test at all and the value of self.options.descriptors won't matter. - # It still needs to exist and be None in order for tests to work however. - # So set it to None to force -disablewallet, because the wallet is not needed. - self.options.descriptors = None - elif self.options.descriptors is None: - # Some wallet is either required or optionally used by the test. - # Prefer SQLite unless it isn't available - if self.is_sqlite_compiled(): - self.options.descriptors = True - elif self.is_bdb_compiled(): - self.options.descriptors = False - else: - # If neither are compiled, tests requiring a wallet will be skipped and the value of self.options.descriptors won't matter - # It still needs to exist and be None in order for tests to work however. - # So set it to None, which will also set -disablewallet. 
- self.options.descriptors = None + self.config = configparser.ConfigParser() + self.config.read_file(open(self.options.configfile)) + self.binary_paths = self.get_binary_paths() + if self.options.v1transport: + self.options.v2transport=False PortSeed.n = self.options.port_seed - def set_binary_paths(self): - """Update self.options with the paths of all binaries from environment variables or their default values""" + def get_binary_paths(self): + """Get paths of all binaries from environment variables or their default values""" + paths = types.SimpleNamespace() binaries = { - "bitcoind": ("bitcoind", "BITCOIND"), - "bitcoin-cli": ("bitcoincli", "BITCOINCLI"), - "bitcoin-util": ("bitcoinutil", "BITCOINUTIL"), - "bitcoin-wallet": ("bitcoinwallet", "BITCOINWALLET"), + "bitcoin": "BITCOIN_BIN", + "bitcoind": "BITCOIND", + "bitcoin-cli": "BITCOINCLI", + "bitcoin-util": "BITCOINUTIL", + "bitcoin-tx": "BITCOINTX", + "bitcoin-chainstate": "BITCOINCHAINSTATE", + "bitcoin-wallet": "BITCOINWALLET", } - for binary, [attribute_name, env_variable_name] in binaries.items(): + # Set paths to bitcoin core binaries allowing overrides with environment + # variables. + for binary, env_variable_name in binaries.items(): default_filename = os.path.join( self.config["environment"]["BUILDDIR"], - "src", + "bin", binary + self.config["environment"]["EXEEXT"], ) - setattr(self.options, attribute_name, os.getenv(env_variable_name, default=default_filename)) + setattr(paths, env_variable_name.lower(), os.getenv(env_variable_name, default=default_filename)) + # BITCOIN_CMD environment variable can be specified to invoke bitcoin + # wrapper binary instead of other executables. + paths.bitcoin_cmd = shlex.split(os.getenv("BITCOIN_CMD", "")) or None + return paths + + def get_binaries(self, bin_dir=None): + return Binaries(self.binary_paths, bin_dir) def setup(self): """Call this method to start up the test framework object with options set.""" @@ -251,13 +314,9 @@ def setup(self): self.options.cachedir = os.path.abspath(self.options.cachedir) - config = self.config - - self.set_binary_paths() - os.environ['PATH'] = os.pathsep.join([ - os.path.join(config['environment']['BUILDDIR'], 'src'), - os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'), os.environ['PATH'] + os.path.join(self.config["environment"]["BUILDDIR"], "bin"), + os.environ['PATH'] ]) # Set up temp directory and start logging @@ -307,18 +366,15 @@ def shutdown(self): self.log.debug('Closing down network thread') self.network_thread.close() - if not self.options.noshutdown: + if self.success == TestStatus.FAILED: + self.log.info("Not stopping nodes as test failed. 
The dangling processes will be cleaned up later.") + else: self.log.info("Stopping nodes") if self.nodes: self.stop_nodes() - else: - for node in self.nodes: - node.cleanup_on_exit = False - self.log.info("Note: bitcoinds were not stopped and may still be running") should_clean_up = ( not self.options.nocleanup and - not self.options.noshutdown and self.success != TestStatus.FAILED and not self.options.perf ) @@ -344,7 +400,7 @@ def shutdown(self): self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir)) self.log.error("") self.log.error("If this failure happened unexpectedly or intermittently, please file a bug and provide a link or upload of the combined log.") - self.log.error(self.config['environment']['PACKAGE_BUGREPORT']) + self.log.error(self.config['environment']['CLIENT_BUGREPORT']) self.log.error("") exit_code = TEST_EXIT_FAILED # Logging.shutdown will not remove stream- and filehandlers, so we must @@ -411,7 +467,7 @@ def setup_nodes(self): """Override this method to customize test node setup""" self.add_nodes(self.num_nodes, self.extra_args) self.start_nodes() - if self._requires_wallet: + if self.uses_wallet: self.import_deterministic_coinbase_privkeys() if not self.setup_clean_chain: for n in self.nodes: @@ -436,8 +492,8 @@ def init_wallet(self, *, node): if wallet_name is not False: n = self.nodes[node] if wallet_name is not None: - n.createwallet(wallet_name=wallet_name, descriptors=self.options.descriptors, load_on_startup=True) - n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase', rescan=True) + n.createwallet(wallet_name=wallet_name, load_on_startup=True) + wallet_importprivkey(n.get_wallet_rpc(wallet_name), n.get_deterministic_priv_key().key, 0, label="coinbase") def run_test(self): """Tests must override this method to define test logic""" @@ -445,29 +501,14 @@ def run_test(self): # Public helper methods. These can be accessed by the subclass test scripts. - def add_wallet_options(self, parser, *, descriptors=True, legacy=True): - kwargs = {} - if descriptors + legacy == 1: - # If only one type can be chosen, set it as default - kwargs["default"] = descriptors - group = parser.add_mutually_exclusive_group( - # If only one type is allowed, require it to be set in test_runner.py - required=os.getenv("REQUIRE_WALLET_TYPE_SET") == "1" and "default" in kwargs) - if descriptors: - group.add_argument("--descriptors", action='store_const', const=True, **kwargs, - help="Run test using a descriptor wallet", dest='descriptors') - if legacy: - group.add_argument("--legacy-wallet", action='store_const', const=False, **kwargs, - help="Run test using legacy wallets", dest='descriptors') - - def add_nodes(self, num_nodes: int, extra_args=None, *, rpchost=None, binary=None, binary_cli=None, versions=None): + def add_nodes(self, num_nodes: int, extra_args=None, *, rpchost=None, versions=None): """Instantiate TestNode objects. Should only be called once after the nodes have been specified in set_test_params().""" - def get_bin_from_version(version, bin_name, bin_default): + def bin_dir_from_version(version): if not version: - return bin_default + return None if version > 219999: # Starting at client version 220000 the first two digits represent # the major version, e.g. v22.0 instead of v0.22.0. 
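
(Aside: a hedged sketch of the version-number convention the comment above describes. This illustrates the packed CLIENT_VERSION scheme behind the `version > 219999` check; it is not the elided body of `bin_dir_from_version`, and the helper name below is hypothetical.)

    def packed_version_to_string(version: int) -> str:
        # Versions are packed as integers, e.g. 220000 or 210100. From 220000
        # on, the leading two digits are the major version (v22.0); earlier
        # values carry an implicit leading zero (v0.21.1).
        major, minor = version // 10000, (version // 100) % 100
        return f"{major}.{minor}" if version > 219999 else f"0.{major}.{minor}"

    assert packed_version_to_string(220000) == "22.0"
    assert packed_version_to_string(210100) == "0.21.1"
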
@@ -485,7 +526,6 @@ def get_bin_from_version(version, bin_name, bin_default): ), ), 'bin', - bin_name, ) if self.bind_to_localhost_only: @@ -494,30 +534,43 @@ def get_bin_from_version(version, bin_name, bin_default): extra_confs = [[]] * num_nodes if extra_args is None: extra_args = [[]] * num_nodes + # Whitelist peers to speed up tx relay / mempool sync. Don't use it if testing tx relay or timing. + if self.noban_tx_relay: + for i in range(len(extra_args)): + extra_args[i] = extra_args[i] + ["-whitelist=noban,in,out@127.0.0.1"] if versions is None: versions = [None] * num_nodes - if binary is None: - binary = [get_bin_from_version(v, 'bitcoind', self.options.bitcoind) for v in versions] - if binary_cli is None: - binary_cli = [get_bin_from_version(v, 'bitcoin-cli', self.options.bitcoincli) for v in versions] + bin_dirs = [] + for v in versions: + bin_dir = bin_dir_from_version(v) + + # Fail test if any of the needed release binaries is missing + for bin_path in (argv[0] for binaries in (self.get_binaries(bin_dir),) + for argv in (binaries.node_argv(), binaries.rpc_argv())): + + if shutil.which(bin_path) is None: + self.log.error(f"Binary not found: {bin_path}") + if v is None: + raise AssertionError("At least one binary is missing, did you compile?") + raise AssertionError("At least one release binary is missing. " + "Previous releases binaries can be downloaded via `test/get_previous_releases.py`.") + + bin_dirs.append(bin_dir) + + extra_init = [{}] * num_nodes if self.extra_init is None else self.extra_init # type: ignore[var-annotated] + assert_equal(len(extra_init), num_nodes) assert_equal(len(extra_confs), num_nodes) assert_equal(len(extra_args), num_nodes) assert_equal(len(versions), num_nodes) - assert_equal(len(binary), num_nodes) - assert_equal(len(binary_cli), num_nodes) + assert_equal(len(bin_dirs), num_nodes) for i in range(num_nodes): args = list(extra_args[i]) - if self.options.v2transport and ("-v2transport=0" not in args): - args.append("-v2transport=1") - test_node_i = TestNode( - i, - get_datadir_path(self.options.tmpdir, i), + init = dict( chain=self.chain, rpchost=rpchost, timewait=self.rpc_timeout, timeout_factor=self.options.timeout_factor, - bitcoind=binary[i], - bitcoin_cli=binary_cli[i], + binaries=self.get_binaries(bin_dirs[i]), version=versions[i], coverage_dir=self.options.coveragedir, cwd=self.options.tmpdir, @@ -526,8 +579,14 @@ def get_bin_from_version(version, bin_name, bin_default): use_cli=self.options.usecli, start_perf=self.options.perf, use_valgrind=self.options.valgrind, - descriptors=self.options.descriptors, + v2transport=self.options.v2transport, + uses_wallet=self.uses_wallet, ) + init.update(extra_init[i]) + test_node_i = TestNode( + i, + get_datadir_path(self.options.tmpdir, i), + **init) self.nodes.append(test_node_i) if not test_node_i.version_is_at_least(170000): # adjust conf for pre 17 @@ -542,7 +601,7 @@ def start_node(self, i, *args, **kwargs): node.wait_for_rpc_connection() if self.options.coveragedir is not None: - coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc) + coverage.write_all_rpc_commands(self.options.coveragedir, node._rpc) def start_nodes(self, extra_args=None, *args, **kwargs): """Start multiple bitcoinds""" @@ -550,19 +609,14 @@ def start_nodes(self, extra_args=None, *args, **kwargs): if extra_args is None: extra_args = [None] * self.num_nodes assert_equal(len(extra_args), self.num_nodes) - try: - for i, node in enumerate(self.nodes): - node.start(extra_args[i], *args, **kwargs) - for node in self.nodes: - 
node.wait_for_rpc_connection() - except Exception: - # If one node failed to start, stop the others - self.stop_nodes() - raise + for i, node in enumerate(self.nodes): + node.start(extra_args[i], *args, **kwargs) + for node in self.nodes: + node.wait_for_rpc_connection() if self.options.coveragedir is not None: for node in self.nodes: - coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc) + coverage.write_all_rpc_commands(self.options.coveragedir, node._rpc) def stop_node(self, i, expected_stderr='', wait=0): """Stop a bitcoind test node""" @@ -578,10 +632,16 @@ def stop_nodes(self, wait=0): # Wait for nodes to stop node.wait_until_stopped() - def restart_node(self, i, extra_args=None): + def restart_node(self, i, extra_args=None, clear_addrman=False, *, expected_stderr=''): """Stop and start a test node""" - self.stop_node(i) - self.start_node(i, extra_args) + self.stop_node(i, expected_stderr=expected_stderr) + if clear_addrman: + peers_dat = self.nodes[i].chain_path / "peers.dat" + os.remove(peers_dat) + with self.nodes[i].assert_debug_log(expected_msgs=[f'Creating peers.dat because the file was not found ("{peers_dat}")']): + self.start_node(i, extra_args) + else: + self.start_node(i, extra_args) def wait_for_node_exit(self, i, timeout): self.nodes[i].process.wait(timeout) @@ -596,36 +656,43 @@ def connect_nodes(self, a, b, *, peer_advertises_v2=None, wait_for_connect: bool """ from_connection = self.nodes[a] to_connection = self.nodes[b] - from_num_peers = 1 + len(from_connection.getpeerinfo()) - to_num_peers = 1 + len(to_connection.getpeerinfo()) ip_port = "127.0.0.1:" + str(p2p_port(b)) if peer_advertises_v2 is None: - peer_advertises_v2 = self.options.v2transport + peer_advertises_v2 = from_connection.use_v2transport - if peer_advertises_v2: - from_connection.addnode(node=ip_port, command="onetry", v2transport=True) + if peer_advertises_v2 != from_connection.use_v2transport: + from_connection.addnode(node=ip_port, command="onetry", v2transport=peer_advertises_v2) else: - # skip the optional third argument (default false) for + # skip the optional third argument if it matches the default, for # compatibility with older clients from_connection.addnode(ip_port, "onetry") if not wait_for_connect: return - # poll until version handshake complete to avoid race conditions - # with transaction relaying - # See comments in net_processing: - # * Must have a version message before anything else - # * Must have a verack message before anything else - self.wait_until(lambda: sum(peer['version'] != 0 for peer in from_connection.getpeerinfo()) == from_num_peers) - self.wait_until(lambda: sum(peer['version'] != 0 for peer in to_connection.getpeerinfo()) == to_num_peers) - self.wait_until(lambda: sum(peer['bytesrecv_per_msg'].pop('verack', 0) >= 21 for peer in from_connection.getpeerinfo()) == from_num_peers) - self.wait_until(lambda: sum(peer['bytesrecv_per_msg'].pop('verack', 0) >= 21 for peer in to_connection.getpeerinfo()) == to_num_peers) - # The message bytes are counted before processing the message, so make - # sure it was fully processed by waiting for a ping. - self.wait_until(lambda: sum(peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 for peer in from_connection.getpeerinfo()) == from_num_peers) - self.wait_until(lambda: sum(peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 for peer in to_connection.getpeerinfo()) == to_num_peers) + # Use subversion as peer id. 
Test nodes have their node number appended to the user agent string + from_connection_subver = from_connection.getnetworkinfo()['subversion'] + to_connection_subver = to_connection.getnetworkinfo()['subversion'] + + def find_conn(node, peer_subversion, inbound): + return next(filter(lambda peer: peer['subver'] == peer_subversion and peer['inbound'] == inbound, node.getpeerinfo()), None) + + self.wait_until(lambda: find_conn(from_connection, to_connection_subver, inbound=False) is not None) + self.wait_until(lambda: find_conn(to_connection, from_connection_subver, inbound=True) is not None) + + def check_bytesrecv(peer, msg_type, min_bytes_recv): + assert peer is not None, "Error: peer disconnected" + return peer['bytesrecv_per_msg'].pop(msg_type, 0) >= min_bytes_recv + + # Poll until version handshake (fSuccessfullyConnected) is complete to + # avoid race conditions, because some message types are blocked from + # being sent or received before fSuccessfullyConnected. + # + # As the flag fSuccessfullyConnected is not exposed, check it by + # waiting for a pong, which can only happen after the flag was set. + self.wait_until(lambda: check_bytesrecv(find_conn(from_connection, to_connection_subver, inbound=False), 'pong', 29)) + self.wait_until(lambda: check_bytesrecv(find_conn(to_connection, from_connection_subver, inbound=True), 'pong', 29)) def disconnect_nodes(self, a, b): def disconnect_nodes_helper(node_a, node_b): @@ -678,25 +745,41 @@ def no_op(self): pass def generate(self, generator, *args, sync_fun=None, **kwargs): - blocks = generator.generate(*args, invalid_call=False, **kwargs) + blocks = generator.generate(*args, called_by_framework=True, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks def generateblock(self, generator, *args, sync_fun=None, **kwargs): - blocks = generator.generateblock(*args, invalid_call=False, **kwargs) + blocks = generator.generateblock(*args, called_by_framework=True, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks def generatetoaddress(self, generator, *args, sync_fun=None, **kwargs): - blocks = generator.generatetoaddress(*args, invalid_call=False, **kwargs) + blocks = generator.generatetoaddress(*args, called_by_framework=True, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks def generatetodescriptor(self, generator, *args, sync_fun=None, **kwargs): - blocks = generator.generatetodescriptor(*args, invalid_call=False, **kwargs) + blocks = generator.generatetodescriptor(*args, called_by_framework=True, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks + def create_outpoints(self, node, *, outputs): + """Send funds to a given list of `{address: amount}` targets using the bitcoind + wallet and return the corresponding outpoints as a list of dictionaries + `[{"txid": txid, "vout": vout1}, {"txid": txid, "vout": vout2}, ...]`. + The result can be used to specify inputs for RPCs like `createrawtransaction`, + `createpsbt`, `lockunspent` etc.""" + assert all(len(output.keys()) == 1 for output in outputs) + send_res = node.send(outputs) + assert send_res["complete"] + utxos = [] + for output in outputs: + address = list(output.keys())[0] + vout = find_vout_for_address(node, send_res["txid"], address) + utxos.append({"txid": send_res["txid"], "vout": vout}) + return utxos + def sync_blocks(self, nodes=None, wait=1, timeout=60): """ Wait until everybody has the same tip. 
@@ -746,8 +829,8 @@ def sync_all(self, nodes=None): self.sync_blocks(nodes) self.sync_mempools(nodes) - def wait_until(self, test_function, timeout=60): - return wait_until_helper_internal(test_function, timeout=timeout, timeout_factor=self.options.timeout_factor) + def wait_until(self, test_function, timeout=60, check_interval=0.05): + return wait_until_helper_internal(test_function, timeout=timeout, timeout_factor=self.options.timeout_factor, check_interval=check_interval) # Private helper methods. These should not be accessed by the subclass test scripts. @@ -763,9 +846,16 @@ def _start_logging(self): # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper() ch.setLevel(ll) + # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted) - formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S') - formatter.converter = time.gmtime + class MicrosecondFormatter(logging.Formatter): + def formatTime(self, record, _=None): + dt = datetime.fromtimestamp(record.created, timezone.utc) + return dt.strftime('%Y-%m-%dT%H:%M:%S.%f') + + formatter = MicrosecondFormatter( + fmt='%(asctime)sZ %(name)s (%(levelname)s): %(message)s', + ) fh.setFormatter(formatter) ch.setFormatter(formatter) # add the handlers to the logger @@ -799,15 +889,14 @@ def _initialize_chain(self): cache_node_dir, chain=self.chain, extra_conf=["bind=127.0.0.1"], - extra_args=['-disablewallet'], + extra_args=[], rpchost=None, timewait=self.rpc_timeout, timeout_factor=self.options.timeout_factor, - bitcoind=self.options.bitcoind, - bitcoin_cli=self.options.bitcoincli, + binaries=self.get_binaries(), coverage_dir=None, cwd=self.options.tmpdir, - descriptors=self.options.descriptors, + uses_wallet=self.uses_wallet, )) self.start_node(CACHE_NODE_ID) cache_node = self.nodes[CACHE_NODE_ID] @@ -875,6 +964,13 @@ def skip_if_no_py_sqlite3(self): except ImportError: raise SkipTest("sqlite3 module not available.") + def skip_if_no_py_capnp(self): + """Attempt to import the capnp package and skip the test if the import fails.""" + try: + import capnp # type: ignore[import] # noqa: F401 + except ImportError: + raise SkipTest("capnp module not available.") + def skip_if_no_python_bcc(self): """Attempt to import the bcc package and skip the tests if the import fails.""" try: @@ -910,39 +1006,40 @@ def skip_if_no_bitcoind_zmq(self): def skip_if_no_wallet(self): """Skip the running test if wallet has not been compiled.""" - self._requires_wallet = True + self.uses_wallet = True if not self.is_wallet_compiled(): raise SkipTest("wallet has not been compiled.") - if self.options.descriptors: - self.skip_if_no_sqlite() - else: - self.skip_if_no_bdb() - - def skip_if_no_sqlite(self): - """Skip the running test if sqlite has not been compiled.""" - if not self.is_sqlite_compiled(): - raise SkipTest("sqlite has not been compiled.") - - def skip_if_no_bdb(self): - """Skip the running test if BDB has not been compiled.""" - if not self.is_bdb_compiled(): - raise SkipTest("BDB has not been compiled.") def skip_if_no_wallet_tool(self): """Skip the running test if bitcoin-wallet has not been compiled.""" if not self.is_wallet_tool_compiled(): raise SkipTest("bitcoin-wallet has not been compiled") + def skip_if_no_bitcoin_tx(self): + """Skip the running test if 
bitcoin-tx has not been compiled.""" + if not self.is_bitcoin_tx_compiled(): + raise SkipTest("bitcoin-tx has not been compiled") + def skip_if_no_bitcoin_util(self): """Skip the running test if bitcoin-util has not been compiled.""" if not self.is_bitcoin_util_compiled(): raise SkipTest("bitcoin-util has not been compiled") + def skip_if_no_bitcoin_chainstate(self): + """Skip the running test if bitcoin-chainstate has not been compiled.""" + if not self.is_bitcoin_chainstate_compiled(): + raise SkipTest("bitcoin-chainstate has not been compiled") + def skip_if_no_cli(self): """Skip the running test if bitcoin-cli has not been compiled.""" if not self.is_cli_compiled(): raise SkipTest("bitcoin-cli has not been compiled.") + def skip_if_no_ipc(self): + """Skip the running test if ipc is not compiled.""" + if not self.is_ipc_compiled(): + raise SkipTest("ipc has not been compiled.") + def skip_if_no_previous_releases(self): """Skip the running test if previous releases are not available.""" if not self.has_previous_releases(): @@ -961,6 +1058,11 @@ def skip_if_no_external_signer(self): if not self.is_external_signer_compiled(): raise SkipTest("external signer support has not been compiled.") + def skip_if_running_under_valgrind(self): + """Skip the running test if Valgrind is being used.""" + if self.options.valgrind: + raise SkipTest("This test is not compatible with Valgrind.") + def is_cli_compiled(self): """Checks whether bitcoin-cli was compiled.""" return self.config["components"].getboolean("ENABLE_CLI") @@ -973,22 +1075,22 @@ def is_wallet_compiled(self): """Checks whether the wallet module was compiled.""" return self.config["components"].getboolean("ENABLE_WALLET") - def is_specified_wallet_compiled(self): - """Checks whether wallet support for the specified type - (legacy or descriptor wallet) was compiled.""" - if self.options.descriptors: - return self.is_sqlite_compiled() - else: - return self.is_bdb_compiled() - def is_wallet_tool_compiled(self): """Checks whether bitcoin-wallet was compiled.""" return self.config["components"].getboolean("ENABLE_WALLET_TOOL") + def is_bitcoin_tx_compiled(self): + """Checks whether bitcoin-tx was compiled.""" + return self.config["components"].getboolean("BUILD_BITCOIN_TX") + def is_bitcoin_util_compiled(self): """Checks whether bitcoin-util was compiled.""" return self.config["components"].getboolean("ENABLE_BITCOIN_UTIL") + def is_bitcoin_chainstate_compiled(self): + """Checks whether bitcoin-chainstate was compiled.""" + return self.config["components"].getboolean("ENABLE_BITCOIN_CHAINSTATE") + def is_zmq_compiled(self): """Checks whether the zmq module was compiled.""" return self.config["components"].getboolean("ENABLE_ZMQ") @@ -997,14 +1099,14 @@ def is_usdt_compiled(self): """Checks whether the USDT tracepoints were compiled.""" return self.config["components"].getboolean("ENABLE_USDT_TRACEPOINTS") - def is_sqlite_compiled(self): - """Checks whether the wallet module was compiled with Sqlite support.""" - return self.config["components"].getboolean("USE_SQLITE") - - def is_bdb_compiled(self): - """Checks whether the wallet module was compiled with BDB support.""" - return self.config["components"].getboolean("USE_BDB") + def is_ipc_compiled(self): + """Checks whether ipc was compiled.""" + return self.config["components"].getboolean("ENABLE_IPC") def has_blockfile(self, node, filenum: str): - blocksdir = node.datadir_path / self.chain / 'blocks' - return (blocksdir / f"blk{filenum}.dat").is_file() + return (node.blocks_path/ 
f"blk{filenum}.dat").is_file() + + def convert_to_json_for_cli(self, text): + if self.options.usecli: + return json.dumps(text) + return text diff --git a/resources/scenarios/test_framework/test_node.py b/resources/scenarios/test_framework/test_node.py index efbb9001d..1ec2fe8a6 100755 --- a/resources/scenarios/test_framework/test_node.py +++ b/resources/scenarios/test_framework/test_node.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2017-2022 The Bitcoin Core developers +# Copyright (c) 2017-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Class for bitcoind node under test""" @@ -8,10 +8,11 @@ import decimal import errno from enum import Enum -import http.client import json import logging import os +import pathlib +import platform import re import subprocess import tempfile @@ -19,6 +20,7 @@ import urllib.parse import collections import shlex +import shutil import sys from pathlib import Path @@ -26,11 +28,12 @@ JSONRPCException, serialization_fallback, ) -from .descriptors import descsum_create -from .p2p import P2P_SUBVERSION +from .messages import NODE_P2P_V2 +from .p2p import P2P_SERVICES, P2P_SUBVERSION from .util import ( MAX_NODES, assert_equal, + assert_not_equal, append_config, delete_cookie_file, get_auth_cookie, @@ -38,9 +41,30 @@ rpc_url, wait_until_helper_internal, p2p_port, + tor_port, ) BITCOIND_PROC_WAIT_TIMEOUT = 60 +# The size of the blocks xor key +# from InitBlocksdirXorKey::xor_key.size() +NUM_XOR_BYTES = 8 +# Many systems have a 128kB limit for a command size. Depending on the +# platform, this limit may be larger or smaller. Moreover, when using the +# 'bitcoin' command, it may internally insert more args, which must be +# accounted for. There is no need to pick the largest possible value here +# anyway and it should be fine to set it to 1kB in tests. 
+TEST_CLI_MAX_ARG_SIZE = 1024 + +# The null blocks key (all 0s) +NULL_BLK_XOR_KEY = bytes([0] * NUM_XOR_BYTES) +BITCOIN_PID_FILENAME_DEFAULT = "bitcoind.pid" + +if sys.platform.startswith("linux"): + UNIX_PATH_MAX = 108 # includes the trailing NUL +elif sys.platform.startswith(("darwin", "freebsd", "netbsd", "openbsd")): + UNIX_PATH_MAX = 104 +else: # safest portable value + UNIX_PATH_MAX = 92 class FailedToStartError(Exception): @@ -67,7 +91,7 @@ class TestNode(): To make things easier for the test writer, any unrecognised messages will be dispatched to the RPC connection.""" - def __init__(self, i, datadir_path, *, chain, rpchost, timewait, timeout_factor, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None, descriptors=False): + def __init__(self, i, datadir_path, *, chain, rpchost, timewait, timeout_factor, binaries, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None, v2transport=False, uses_wallet=False, ipcbind=False): """ Kwargs: start_perf (bool): If True, begin profiling the node with `perf` as soon as @@ -83,12 +107,14 @@ def __init__(self, i, datadir_path, *, chain, rpchost, timewait, timeout_factor, self.chain = chain self.rpchost = rpchost self.rpc_timeout = timewait - self.binary = bitcoind + self.binaries = binaries self.coverage_dir = coverage_dir self.cwd = cwd - self.descriptors = descriptors + self.has_explicit_bind = False if extra_conf is not None: append_config(self.datadir_path, extra_conf) + # Remember if there is bind=... in the config file. + self.has_explicit_bind = any(e.startswith("bind=") for e in extra_conf) # Most callers will just need to add extra args to the standard list below. # For those callers that need more flexibility, they can just set the args property directly. # Note that common args are set in the config file (see initialize_datadir) @@ -97,19 +123,29 @@ def __init__(self, i, datadir_path, *, chain, rpchost, timewait, timeout_factor, # Configuration for logging is set as command-line args rather than in the bitcoin.conf file. # This means that starting a bitcoind using the temp dir to debug a failed test won't # spam debug.log. - self.args = [ - self.binary, + self.args = self.binaries.node_argv(need_ipc=ipcbind) + [ f"-datadir={self.datadir_path}", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-debugexclude=rand", - "-uacomment=testnode%d" % i, + "-uacomment=testnode%d" % i, # required for subversion uniqueness across peers ] - if self.descriptors is None: + if uses_wallet is not None and not uses_wallet: self.args.append("-disablewallet") + self.ipc_tmp_dir = None + if ipcbind: + self.ipc_socket_path = self.chain_path / "node.sock" + if len(os.fsencode(self.ipc_socket_path)) < UNIX_PATH_MAX: + self.args.append("-ipcbind=unix") + else: + # Work around default CI path exceeding maximum socket path length. 
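
(Aside: the UNIX_PATH_MAX constants above exist because AF_UNIX socket paths are length-limited by the kernel, including the trailing NUL; that is what the `len(os.fsencode(...)) < UNIX_PATH_MAX` check above guards, with the workaround continuing below. A minimal self-contained mirror of that check, with a hypothetical path:)

    import os

    UNIX_PATH_MAX = 108  # Linux value from the table above; includes the trailing NUL

    def socket_path_fits(path: str) -> bool:
        # Compare the encoded byte length, as the kernel does; a strict '<'
        # leaves room for the NUL terminator counted in UNIX_PATH_MAX.
        return len(os.fsencode(path)) < UNIX_PATH_MAX

    assert socket_path_fits("/tmp/test-ipc-xyz/node.sock")  # hypothetical path
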
+                self.ipc_tmp_dir = pathlib.Path(tempfile.mkdtemp(prefix="test-ipc-"))
+                self.ipc_socket_path = self.ipc_tmp_dir / "node.sock"
+                self.args.append(f"-ipcbind=unix:{self.ipc_socket_path}")
+
         # Use valgrind, except for previous release binaries
         if use_valgrind and version is None:
             default_suppressions_file = Path(__file__).parents[3] / "contrib" / "valgrind.supp"
@@ -125,19 +161,31 @@
             self.args.append("-logsourcelocations")
         if self.version_is_at_least(239000):
             self.args.append("-loglevel=trace")
+        if self.version_is_at_least(299900):
+            self.args.append("-nologratelimit")
+
+        # Default behavior from global -v2transport flag is added to args to persist it over restarts.
+        # May be overwritten in individual tests, using extra_args.
+        self.default_to_v2 = v2transport
+        if self.version_is_at_least(260000):
+            # 26.0 and later support v2transport
+            if v2transport:
+                self.args.append("-v2transport=1")
+            else:
+                self.args.append("-v2transport=0")
+        # if v2transport is requested via global flag but not supported for node version, ignore it
-        self.cli = TestNodeCLI(bitcoin_cli, self.datadir_path)
+        self.cli = TestNodeCLI(binaries, self.datadir_path)
         self.use_cli = use_cli
         self.start_perf = start_perf
         self.running = False
         self.process = None
         self.rpc_connected = False
-        self.rpc = None
-        self.miniwallet = None
+        self._rpc = None  # Should usually not be accessed directly in tests to allow for --usecli mode
+        self.reuse_http_connections = True  # Must be set before calling get_rpc_proxy() i.e. before restarting node
         self.url = None
         self.log = logging.getLogger('TestFramework.node%d' % i)
-        self.cleanup_on_exit = True # Whether to kill the node when this object goes away
         # Cache perf subprocesses here by their data output filename.
         self.perf_subprocesses = {}
@@ -179,26 +227,41 @@ def _raise_assertion_error(self, msg: str):
     def __del__(self):
         # Ensure that we don't leave any bitcoind processes lying around after
         # the test ends
-        if self.process and self.cleanup_on_exit:
+        if self.process:  # Should only happen on test failure
             # Avoid using logger, as that may have already been shutdown when
             # this destructor is called.
-            print(self._node_msg("Cleaning up leftover process"))
+            print(self._node_msg("Cleaning up leftover process"), file=sys.stderr)
             self.process.kill()
+        if self.ipc_tmp_dir:
+            print(self._node_msg(f"Cleaning up ipc directory {str(self.ipc_tmp_dir)!r}"))
+            shutil.rmtree(self.ipc_tmp_dir)

     def __getattr__(self, name):
         """Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
         if self.use_cli:
-            return getattr(RPCOverloadWrapper(self.cli, True, self.descriptors), name)
+            return getattr(self.cli, name)
         else:
-            assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
-            return getattr(RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name)
+            assert self.rpc_connected and self._rpc is not None, self._node_msg("Error: no RPC connection")
+            return getattr(self._rpc, name)

     def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, env=None, **kwargs):
         """Start the node."""
         if extra_args is None:
             extra_args = self.extra_args
+        # If listening and no -bind is given, then bitcoind would bind P2P ports on
+        # 0.0.0.0:P and 127.0.0.1:P+1 (for incoming Tor connections), where P is
+        # a unique port chosen by the test framework and configured as port=P in
+        # bitcoin.conf. To avoid collisions, change it to 127.0.0.1:tor_port().
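
(Aside: `p2p_port()` and `tor_port()` are the framework's per-node port allocators. The explicit binds added just below avoid bitcoind's default Tor listener at P+1, which would collide with the next node's P2P port. A sketch with purely illustrative base numbers, not the real allocators:)

    def p2p_port(n, base=11000):   # illustrative only
        return base + n

    def tor_port(n, base=21000):   # separate range, so no P+1 collision
        return base + n

    # Node 0's default Tor listener would otherwise land on p2p_port(0) + 1,
    # which is exactly p2p_port(1):
    args = [f"-bind=0.0.0.0:{p2p_port(0)}", f"-bind=127.0.0.1:{tor_port(0)}=onion"]
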
+ will_listen = all(e != "-nolisten" and e != "-listen=0" for e in extra_args) + has_explicit_bind = self.has_explicit_bind or any(e.startswith("-bind=") for e in extra_args) + if will_listen and not has_explicit_bind: + extra_args.append(f"-bind=0.0.0.0:{p2p_port(self.index)}") + extra_args.append(f"-bind=127.0.0.1:{tor_port(self.index)}=onion") + + self.use_v2transport = "-v2transport=1" in extra_args or (self.default_to_v2 and "-v2transport=0" not in extra_args) + # Add a new stdout and stderr file each time bitcoind is started if stderr is None: stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) @@ -228,10 +291,17 @@ def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, env=None if self.start_perf: self._start_perf() - def wait_for_rpc_connection(self): + def wait_for_rpc_connection(self, *, wait_for_import=True): """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect.""" # Poll at a rate of four times per second poll_per_s = 4 + + suppressed_errors = collections.defaultdict(int) + latest_error = None + def suppress_error(category: str, e: Exception): + suppressed_errors[category] += 1 + return (category, repr(e)) + for _ in range(poll_per_s * self.rpc_timeout): if self.process.poll() is not None: # Attach abrupt shutdown error/s to the exception message @@ -248,12 +318,13 @@ def wait_for_rpc_connection(self): timeout=self.rpc_timeout // 2, # Shorter timeout to allow for one retry in case of ETIMEDOUT coveragedir=self.coverage_dir, ) + rpc.auth_service_proxy_instance.reuse_http_connections = self.reuse_http_connections rpc.getblockcount() # If the call to getblockcount() succeeds then the RPC connection is up - if self.version_is_at_least(190000): + if self.version_is_at_least(190000) and wait_for_import: # getmempoolinfo.loaded is available since commit # bb8ae2c (version 0.19.0) - wait_until_helper_internal(lambda: rpc.getmempoolinfo()['loaded'], timeout_factor=self.timeout_factor) + self.wait_until(lambda: rpc.getmempoolinfo()['loaded']) # Wait for the node to finish reindex, block import, and # loading the mempool. Usually importing happens fast or # even "immediate" when the node is started. However, there @@ -272,33 +343,43 @@ def wait_for_rpc_connection(self): # overhead is trivial, and the added guarantees are worth # the minimal performance cost. self.log.debug("RPC successfully started") + # Set rpc_connected even if we are in use_cli mode so that we know we can call self.stop() if needed. + self.rpc_connected = True if self.use_cli: return - self.rpc = rpc - self.rpc_connected = True - self.url = self.rpc.rpc_url + self._rpc = rpc + self.url = self._rpc.rpc_url return - except JSONRPCException as e: # Initialization phase + except JSONRPCException as e: + # Suppress these as they are expected during initialization. # -28 RPC in warmup - # -342 Service unavailable, RPC server started but is shutting down due to error - if e.error['code'] != -28 and e.error['code'] != -342: + # -342 Service unavailable, could be starting up or shutting down + if e.error['code'] not in [-28, -342]: raise # unknown JSON RPC exception - except ConnectionResetError: - # This might happen when the RPC server is in warmup, but shut down before the call to getblockcount - # succeeds. 
Try again to properly raise the FailedToStartError - pass + latest_error = suppress_error(f"JSONRPCException {e.error['code']}", e) except OSError as e: - if e.errno == errno.ETIMEDOUT: - pass # Treat identical to ConnectionResetError - elif e.errno == errno.ECONNREFUSED: - pass # Port not yet open? - else: + error_num = e.errno + # Work around issue where socket timeouts don't have errno set. + # https://github.com/python/cpython/issues/109601 + if error_num is None and isinstance(e, TimeoutError): + error_num = errno.ETIMEDOUT + + # Suppress similarly to the above JSONRPCException errors. + if error_num not in [ + errno.ECONNRESET, # This might happen when the RPC server is in warmup, + # but shut down before the call to getblockcount succeeds. + errno.ETIMEDOUT, # Treat identical to ECONNRESET + errno.ECONNREFUSED # Port not yet open? + ]: raise # unknown OS error - except ValueError as e: # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting + latest_error = suppress_error(f"OSError {errno.errorcode[error_num]}", e) + except ValueError as e: + # Suppress if cookie file isn't generated yet and no rpcuser or rpcpassword; bitcoind may be starting. if "No RPC credentials" not in str(e): raise + latest_error = suppress_error("missing_credentials", e) time.sleep(1.0 / poll_per_s) - self._raise_assertion_error("Unable to connect to bitcoind after {}s".format(self.rpc_timeout)) + self._raise_assertion_error(f"Unable to connect to bitcoind after {self.rpc_timeout}s (ignored errors: {dict(suppressed_errors)!s}{'' if latest_error is None else f', latest: {latest_error[0]!r}/{latest_error[1]}'})") def wait_for_cookie_credentials(self): """Ensures auth cookie credentials can be read, e.g. for testing CLI with -rpcwait before RPC connection is up.""" @@ -319,16 +400,16 @@ def generate(self, nblocks, maxtries=1000000, **kwargs): self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`") return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries, **kwargs) - def generateblock(self, *args, invalid_call, **kwargs): - assert not invalid_call + def generateblock(self, *args, called_by_framework, **kwargs): + assert called_by_framework, "Direct call of this mining RPC is discouraged. Please use one of the self.generate* methods on the test framework, which sync the nodes to avoid intermittent test issues. You may use sync_fun=self.no_op to disable the sync explicitly." return self.__getattr__('generateblock')(*args, **kwargs) - def generatetoaddress(self, *args, invalid_call, **kwargs): - assert not invalid_call + def generatetoaddress(self, *args, called_by_framework, **kwargs): + assert called_by_framework, "Direct call of this mining RPC is discouraged. Please use one of the self.generate* methods on the test framework, which sync the nodes to avoid intermittent test issues. You may use sync_fun=self.no_op to disable the sync explicitly." return self.__getattr__('generatetoaddress')(*args, **kwargs) - def generatetodescriptor(self, *args, invalid_call, **kwargs): - assert not invalid_call + def generatetodescriptor(self, *args, called_by_framework, **kwargs): + assert called_by_framework, "Direct call of this mining RPC is discouraged. Please use one of the self.generate* methods on the test framework, which sync the nodes to avoid intermittent test issues. You may use sync_fun=self.no_op to disable the sync explicitly." 
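
(Aside: a usage sketch of the discipline the `called_by_framework` assertions above enforce, inside a hypothetical subclass's `run_test`:)

    # Correct: the framework wrapper mines and then syncs all nodes.
    self.generate(self.nodes[0], 101)
    # Opting out of the sync explicitly, as the assertion message suggests:
    self.generate(self.nodes[0], 1, sync_fun=self.no_op)
    # Wrong: calling node.generatetoaddress(...) directly now trips the
    # assertion instead of silently skipping the sync.
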
return self.__getattr__('generatetodescriptor')(*args, **kwargs) def setmocktime(self, timestamp): @@ -342,11 +423,11 @@ def setmocktime(self, timestamp): def get_wallet_rpc(self, wallet_name): if self.use_cli: - return RPCOverloadWrapper(self.cli("-rpcwallet={}".format(wallet_name)), True, self.descriptors) + return self.cli("-rpcwallet={}".format(wallet_name)) else: - assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected") + assert self.rpc_connected and self._rpc, self._node_msg("RPC not connected") wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name)) - return RPCOverloadWrapper(self.rpc / wallet_path, descriptors=self.descriptors) + return self._rpc / wallet_path def version_is_at_least(self, ver): return self.version is None or self.version >= ver @@ -355,15 +436,15 @@ def stop_node(self, expected_stderr='', *, wait=0, wait_until_stopped=True): """Stop the node.""" if not self.running: return + assert self.rpc_connected, self._node_msg( + "Should only call stop_node() on a running node after wait_for_rpc_connection() succeeded. " + f"Did you forget to call the latter after start()? Not connected to process: {self.process.pid}") self.log.debug("Stopping node") - try: - # Do not use wait argument when testing older nodes, e.g. in wallet_backwards_compatibility.py - if self.version_is_at_least(180000): - self.stop(wait=wait) - else: - self.stop() - except http.client.CannotSendRequest: - self.log.exception("Unable to stop node.") + # Do not use wait argument when testing older nodes, e.g. in wallet_backwards_compatibility.py + if self.version_is_at_least(180000): + self.stop(wait=wait) + else: + self.stop() # If there are any running perf processes, stop them. for profile_name in tuple(self.perf_subprocesses.keys()): @@ -401,13 +482,19 @@ def is_node_stopped(self, *, expected_stderr="", expected_ret_code=0): self.running = False self.process = None self.rpc_connected = False - self.rpc = None + self._rpc = None self.log.debug("Node stopped") return True def wait_until_stopped(self, *, timeout=BITCOIND_PROC_WAIT_TIMEOUT, expect_error=False, **kwargs): - expected_ret_code = 1 if expect_error else 0 # Whether node shutdown return EXIT_FAILURE or EXIT_SUCCESS - wait_until_helper_internal(lambda: self.is_node_stopped(expected_ret_code=expected_ret_code, **kwargs), timeout=timeout, timeout_factor=self.timeout_factor) + if "expected_ret_code" not in kwargs: + kwargs["expected_ret_code"] = 1 if expect_error else 0 # Whether node shutdown return EXIT_FAILURE or EXIT_SUCCESS + self.wait_until(lambda: self.is_node_stopped(**kwargs), timeout=timeout) + + def kill_process(self): + self.process.kill() + self.wait_until_stopped(expected_ret_code=1 if platform.system() == "Windows" else -9) + assert self.is_node_stopped() def replace_in_config(self, replacements): """ @@ -436,6 +523,14 @@ def debug_log_path(self) -> Path: def blocks_path(self) -> Path: return self.chain_path / "blocks" + @property + def blocks_key_path(self) -> Path: + return self.blocks_path / "xor.dat" + + def read_xor_key(self) -> bytes: + with open(self.blocks_key_path, "rb") as xor_f: + return xor_f.read(NUM_XOR_BYTES) + @property def wallets_path(self) -> Path: return self.chain_path / "wallets" @@ -477,7 +572,7 @@ def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2): self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log)) @contextlib.contextmanager - def wait_for_debug_log(self, expected_msgs, 
timeout=60): + def busy_wait_for_debug_log(self, expected_msgs, timeout=60): """ Block until we see a particular debug log message fragment or until we exceed the timeout. Return: @@ -512,6 +607,23 @@ def wait_for_debug_log(self, expected_msgs, timeout=60): 'Expected messages "{}" does not partially match log:\n\n{}\n\n'.format( str(expected_msgs), print_log)) + @contextlib.contextmanager + def wait_for_new_peer(self, timeout=5): + """ + Wait until the node is connected to at least one new peer. We detect this + by watching for an increased highest peer id, using the `getpeerinfo` RPC call. + Note that the simpler approach of only accounting for the number of peers + suffers from race conditions, as disconnects from unrelated previous peers + could happen anytime in-between. + """ + def get_highest_peer_id(): + peer_info = self.getpeerinfo() + return peer_info[-1]["id"] if peer_info else -1 + + initial_peer_id = get_highest_peer_id() + yield + self.wait_until(lambda: get_highest_peer_id() > initial_peer_id, timeout=timeout) + @contextlib.contextmanager def profile_with_perf(self, profile_name: str): """ @@ -542,7 +654,7 @@ def test_success(cmd): cmd, shell=True, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0 - if not sys.platform.startswith('linux'): + if platform.system() != 'Linux': self.log.warning("Can't profile with perf; only available on Linux platforms") return None @@ -605,7 +717,7 @@ def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, mat self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs) ret = self.process.wait(timeout=self.rpc_timeout) self.log.debug(self._node_msg(f'bitcoind exited with status {ret} during initialization')) - assert ret != 0 # Exit code must indicate failure + assert_not_equal(ret, 0) # Exit code must indicate failure self.running = False self.process = None # Check stderr for expected message @@ -635,19 +747,39 @@ def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, mat assert_msg += "with expected error " + expected_msg self._raise_assertion_error(assert_msg) - def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs): + def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, send_version=True, supports_v2_p2p=None, wait_for_v2_handshake=True, expect_success=True, **kwargs): """Add an inbound p2p connection to the node. This method adds the p2p connection to the self.p2ps list and also - returns the connection to the caller.""" + returns the connection to the caller. 
+ + When self.use_v2transport is True, TestNode advertises NODE_P2P_V2 service flag + + An inbound connection is made from TestNode <------ P2PConnection + - if TestNode doesn't advertise NODE_P2P_V2 service, P2PConnection sends version message and v1 P2P is followed + - if TestNode advertises NODE_P2P_V2 service, (and if P2PConnections supports v2 P2P) + P2PConnection sends ellswift bytes and v2 P2P is followed + """ if 'dstport' not in kwargs: kwargs['dstport'] = p2p_port(self.index) if 'dstaddr' not in kwargs: kwargs['dstaddr'] = '127.0.0.1' + if supports_v2_p2p is None: + supports_v2_p2p = self.use_v2transport + + if self.use_v2transport: + kwargs['services'] = kwargs.get('services', P2P_SERVICES) | NODE_P2P_V2 + supports_v2_p2p = self.use_v2transport and supports_v2_p2p + p2p_conn.peer_connect(**kwargs, send_version=send_version, net=self.chain, timeout_factor=self.timeout_factor, supports_v2_p2p=supports_v2_p2p)() - p2p_conn.peer_connect(**kwargs, net=self.chain, timeout_factor=self.timeout_factor)() self.p2ps.append(p2p_conn) + if not expect_success: + return p2p_conn p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False) + if supports_v2_p2p and wait_for_v2_handshake: + p2p_conn.wait_until(lambda: p2p_conn.v2_state.tried_v2_handshake) + if send_version: + p2p_conn.wait_until(lambda: not p2p_conn.on_connection_send_msg) if wait_for_verack: # Wait for the node to send us the version and verack p2p_conn.wait_for_verack() @@ -674,7 +806,7 @@ def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs): return p2p_conn - def add_outbound_p2p_connection(self, p2p_conn, *, wait_for_verack=True, p2p_idx, connection_type="outbound-full-relay", **kwargs): + def add_outbound_p2p_connection(self, p2p_conn, *, wait_for_verack=True, wait_for_disconnect=False, p2p_idx, connection_type="outbound-full-relay", supports_v2_p2p=None, advertise_v2_p2p=None, **kwargs): """Add an outbound p2p connection from node. Must be an "outbound-full-relay", "block-relay-only", "addr-fetch" or "feeler" connection. @@ -684,15 +816,43 @@ def add_outbound_p2p_connection(self, p2p_conn, *, wait_for_verack=True, p2p_idx p2p_idx must be different for simultaneously connected peers. When reusing it for the next peer after disconnecting the previous one, it is necessary to wait for the disconnect to finish to avoid a race condition. 
+ + Parameters: + supports_v2_p2p: whether p2p_conn supports v2 P2P or not + advertise_v2_p2p: whether p2p_conn is advertised to support v2 P2P or not + + An outbound connection is made from TestNode -------> P2PConnection + - if P2PConnection doesn't advertise_v2_p2p, TestNode sends version message and v1 P2P is followed + - if P2PConnection both supports_v2_p2p and advertise_v2_p2p, TestNode sends ellswift bytes and v2 P2P is followed + - if P2PConnection doesn't supports_v2_p2p but advertise_v2_p2p, + TestNode sends ellswift bytes and P2PConnection disconnects, + TestNode reconnects by sending version message and v1 P2P is followed """ def addconnection_callback(address, port): self.log.debug("Connecting to %s:%d %s" % (address, port, connection_type)) - self.addconnection('%s:%d' % (address, port), connection_type) + self.addconnection('%s:%d' % (address, port), connection_type, advertise_v2_p2p) + + if supports_v2_p2p is None: + supports_v2_p2p = self.use_v2transport + if advertise_v2_p2p is None: + advertise_v2_p2p = self.use_v2transport - p2p_conn.peer_accept_connection(connect_cb=addconnection_callback, connect_id=p2p_idx + 1, net=self.chain, timeout_factor=self.timeout_factor, **kwargs)() + if advertise_v2_p2p: + kwargs['services'] = kwargs.get('services', P2P_SERVICES) | NODE_P2P_V2 + assert self.use_v2transport # only a v2 TestNode could make a v2 outbound connection - if connection_type == "feeler": + # if P2PConnection is advertised to support v2 P2P when it doesn't actually support v2 P2P, + # reconnection needs to be attempted using v1 P2P by sending version message + reconnect = advertise_v2_p2p and not supports_v2_p2p + # P2PConnection needs to be advertised to support v2 P2P so that ellswift bytes are sent instead of msg_version + supports_v2_p2p = supports_v2_p2p and advertise_v2_p2p + p2p_conn.peer_accept_connection(connect_cb=addconnection_callback, connect_id=p2p_idx + 1, net=self.chain, timeout_factor=self.timeout_factor, supports_v2_p2p=supports_v2_p2p, reconnect=reconnect, **kwargs)() + + if reconnect: + p2p_conn.wait_for_reconnect() + + if connection_type == "feeler" or wait_for_disconnect: # feeler connections are closed as soon as the node receives a `version` message p2p_conn.wait_until(lambda: p2p_conn.message_count["version"] == 1, check_connected=False) p2p_conn.wait_until(lambda: not p2p_conn.is_connected, check_connected=False) @@ -700,6 +860,9 @@ def addconnection_callback(address, port): p2p_conn.wait_for_connect() self.p2ps.append(p2p_conn) + if supports_v2_p2p: + p2p_conn.wait_until(lambda: p2p_conn.v2_state.tried_v2_handshake) + p2p_conn.wait_until(lambda: not p2p_conn.on_connection_send_msg) if wait_for_verack: p2p_conn.wait_for_verack() p2p_conn.sync_with_ping() @@ -712,12 +875,13 @@ def num_test_p2p_connections(self): def disconnect_p2ps(self): """Close all p2p connections to the node. - Use only after each p2p has sent a version message to ensure the wait works.""" + The state of the peers (such as txrequests) may not be fully cleared + yet, even after this method returns.""" for p in self.p2ps: p.peer_disconnect() del self.p2ps[:] - wait_until_helper_internal(lambda: self.num_test_p2p_connections() == 0, timeout_factor=self.timeout_factor) + self.wait_until(lambda: self.num_test_p2p_connections() == 0) def bumpmocktime(self, seconds): """Fast forward using setmocktime to self.mocktime + seconds. 
Requires setmocktime to have @@ -726,6 +890,9 @@ def bumpmocktime(self, seconds): self.mocktime += seconds self.setmocktime(self.mocktime) + def wait_until(self, test_function, timeout=60, check_interval=0.05): + return wait_until_helper_internal(test_function, timeout=timeout, timeout_factor=self.timeout_factor, check_interval=check_interval) + class TestNodeCLIAttr: def __init__(self, cli, command): @@ -744,7 +911,7 @@ def arg_to_cli(arg): return str(arg).lower() elif arg is None: return 'null' - elif isinstance(arg, dict) or isinstance(arg, list): + elif isinstance(arg, dict) or isinstance(arg, list) or isinstance(arg, tuple): return json.dumps(arg, default=serialization_fallback) else: return str(arg) @@ -752,16 +919,16 @@ def arg_to_cli(arg): class TestNodeCLI(): """Interface to bitcoin-cli for an individual node""" - def __init__(self, binary, datadir): + def __init__(self, binaries, datadir): self.options = [] - self.binary = binary + self.binaries = binaries self.datadir = datadir self.input = None self.log = logging.getLogger('TestFramework.bitcoincli') def __call__(self, *options, input=None): # TestNodeCLI is callable with bitcoin-cli command-line options - cli = TestNodeCLI(self.binary, self.datadir) + cli = TestNodeCLI(self.binaries, self.datadir) cli.options = [str(o) for o in options] cli.input = input return cli @@ -778,19 +945,35 @@ def batch(self, requests): results.append(dict(error=e)) return results - def send_cli(self, command=None, *args, **kwargs): + def send_cli(self, clicommand=None, *args, **kwargs): """Run bitcoin-cli command. Deserializes returned string as python object.""" pos_args = [arg_to_cli(arg) for arg in args] - named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()] - p_args = [self.binary, f"-datadir={self.datadir}"] + self.options + named_args = [key + "=" + arg_to_cli(value) for (key, value) in kwargs.items() if value is not None] + p_args = self.binaries.rpc_argv() + [f"-datadir={self.datadir}"] + self.options if named_args: p_args += ["-named"] - if command is not None: - p_args += [command] + base_arg_pos = len(p_args) + if clicommand is not None: + p_args += [clicommand] p_args += pos_args + named_args + + # TEST_CLI_MAX_ARG_SIZE is set low enough that checking the string + # length is enough and encoding to bytes is not needed before + # calculating the sum. 
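
(Aside, stepping back to `arg_to_cli()` above: a self-contained mirror of the conversions it performs, runnable on its own; `serialization_fallback` is elided here since plain `str()` covers these cases:)

    import json
    from decimal import Decimal

    def arg_to_cli_sketch(arg):
        if isinstance(arg, bool):
            return str(arg).lower()          # True -> "true"
        elif arg is None:
            return 'null'
        elif isinstance(arg, (dict, list, tuple)):
            return json.dumps(arg)           # tuples now serialize as JSON arrays
        else:
            return str(arg)

    assert arg_to_cli_sketch(True) == "true"
    assert arg_to_cli_sketch((1, 2)) == "[1, 2]"
    assert arg_to_cli_sketch(Decimal("0.1")) == "0.1"
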
+ sum_arg_size = sum(len(arg) for arg in p_args) + stdin_data = self.input + if sum_arg_size >= TEST_CLI_MAX_ARG_SIZE: + self.log.debug(f"Cli: Command size {sum_arg_size} too large, using stdin") + rpc_args = "\n".join([arg for arg in p_args[base_arg_pos:]]) + if stdin_data is not None: + stdin_data += "\n" + rpc_args + else: + stdin_data = rpc_args + p_args = p_args[:base_arg_pos] + ['-stdin'] + self.log.debug("Running bitcoin-cli {}".format(p_args[2:])) process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - cli_stdout, cli_stderr = process.communicate(input=self.input) + cli_stdout, cli_stderr = process.communicate(input=stdin_data) returncode = process.poll() if returncode: match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr) @@ -798,95 +981,10 @@ def send_cli(self, command=None, *args, **kwargs): code, message = match.groups() raise JSONRPCException(dict(code=int(code), message=message)) # Ignore cli_stdout, raise with cli_stderr - raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr) + raise subprocess.CalledProcessError(returncode, p_args, output=cli_stderr) try: + if not cli_stdout.strip(): + return None return json.loads(cli_stdout, parse_float=decimal.Decimal) except (json.JSONDecodeError, decimal.InvalidOperation): return cli_stdout.rstrip("\n") - -class RPCOverloadWrapper(): - def __init__(self, rpc, cli=False, descriptors=False): - self.rpc = rpc - self.is_cli = cli - self.descriptors = descriptors - - def __getattr__(self, name): - return getattr(self.rpc, name) - - def createwallet_passthrough(self, *args, **kwargs): - return self.__getattr__("createwallet")(*args, **kwargs) - - def createwallet(self, wallet_name, disable_private_keys=None, blank=None, passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None, external_signer=None): - if descriptors is None: - descriptors = self.descriptors - return self.__getattr__('createwallet')(wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors, load_on_startup, external_signer) - - def importprivkey(self, privkey, label=None, rescan=None): - wallet_info = self.getwalletinfo() - if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']): - return self.__getattr__('importprivkey')(privkey, label, rescan) - desc = descsum_create('combo(' + privkey + ')') - req = [{ - 'desc': desc, - 'timestamp': 0 if rescan else 'now', - 'label': label if label else '' - }] - import_res = self.importdescriptors(req) - if not import_res[0]['success']: - raise JSONRPCException(import_res[0]['error']) - - def addmultisigaddress(self, nrequired, keys, label=None, address_type=None): - wallet_info = self.getwalletinfo() - if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']): - return self.__getattr__('addmultisigaddress')(nrequired, keys, label, address_type) - cms = self.createmultisig(nrequired, keys, address_type) - req = [{ - 'desc': cms['descriptor'], - 'timestamp': 0, - 'label': label if label else '' - }] - import_res = self.importdescriptors(req) - if not import_res[0]['success']: - raise JSONRPCException(import_res[0]['error']) - return cms - - def importpubkey(self, pubkey, label=None, rescan=None): - wallet_info = self.getwalletinfo() - if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']): - return self.__getattr__('importpubkey')(pubkey, label, 
rescan) - desc = descsum_create('combo(' + pubkey + ')') - req = [{ - 'desc': desc, - 'timestamp': 0 if rescan else 'now', - 'label': label if label else '' - }] - import_res = self.importdescriptors(req) - if not import_res[0]['success']: - raise JSONRPCException(import_res[0]['error']) - - def importaddress(self, address, label=None, rescan=None, p2sh=None): - wallet_info = self.getwalletinfo() - if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']): - return self.__getattr__('importaddress')(address, label, rescan, p2sh) - is_hex = False - try: - int(address ,16) - is_hex = True - desc = descsum_create('raw(' + address + ')') - except Exception: - desc = descsum_create('addr(' + address + ')') - reqs = [{ - 'desc': desc, - 'timestamp': 0 if rescan else 'now', - 'label': label if label else '' - }] - if is_hex and p2sh: - reqs.append({ - 'desc': descsum_create('p2sh(raw(' + address + '))'), - 'timestamp': 0 if rescan else 'now', - 'label': label if label else '' - }) - import_res = self.importdescriptors(reqs) - for res in import_res: - if not res['success']: - raise JSONRPCException(res['error']) diff --git a/resources/scenarios/test_framework/test_shell.py b/resources/scenarios/test_framework/test_shell.py index 09ccec28a..9820a222b 100644 --- a/resources/scenarios/test_framework/test_shell.py +++ b/resources/scenarios/test_framework/test_shell.py @@ -2,9 +2,11 @@ # Copyright (c) 2019-2022 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. +import pathlib from test_framework.test_framework import BitcoinTestFramework + class TestShell: """Wrapper Class for BitcoinTestFramework. @@ -16,11 +18,8 @@ class TestShell: start a single TestShell at a time.""" class __TestShell(BitcoinTestFramework): - def add_options(self, parser): - self.add_wallet_options(parser) - def set_test_params(self): - pass + self.uses_wallet = None def run_test(self): pass @@ -59,7 +58,8 @@ def reset(self): print("Shutdown TestShell before resetting!") else: self.num_nodes = None - super().__init__() + dummy_testshell_file = pathlib.Path(__file__).absolute().parent.parent / "testshell_dummy.py" + super().__init__(dummy_testshell_file) instance = None @@ -67,7 +67,13 @@ def __new__(cls): # This implementation enforces singleton pattern, and will return the # previously initialized instance if available if not TestShell.instance: - TestShell.instance = TestShell.__TestShell() + # BitcoinTestFramework instances are supposed to be constructed with the path + # of the calling test in order to find shared data like configuration and the + # cache. Since TestShell is meant for interactive use, there is no concrete + # test; passing a dummy name is fine though, as only the containing directory + # is relevant for successful initialization. 
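
(Aside: a hypothetical REPL session showing the interactive TestShell workflow this hunk adjusts; `setup()` returning the instance matches the wrapper's documented usage, and the exact block count depends on whether a cached chain is used:)

    >>> from test_framework.test_shell import TestShell
    >>> shell = TestShell().setup(num_nodes=1, setup_clean_chain=True)
    >>> shell.nodes[0].getblockcount()
    0
    >>> shell.shutdown()
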
+ dummy_testshell_file = pathlib.Path(__file__).absolute().parent.parent / "testshell_dummy.py" + TestShell.instance = TestShell.__TestShell(dummy_testshell_file) TestShell.instance.running = False return TestShell.instance diff --git a/resources/scenarios/test_framework/util.py b/resources/scenarios/test_framework/util.py index 61346e9d1..e5a5938f0 100644 --- a/resources/scenarios/test_framework/util.py +++ b/resources/scenarios/test_framework/util.py @@ -5,7 +5,7 @@ """Helpful routines for regression testing.""" from base64 import b64encode -from decimal import Decimal, ROUND_DOWN +from decimal import Decimal from subprocess import CalledProcessError import hashlib import inspect @@ -13,14 +13,18 @@ import logging import os import pathlib +import platform import random import re -import sys import time from . import coverage from .authproxy import AuthServiceProxy, JSONRPCException -from typing import Callable, Optional, Tuple +from .descriptors import descsum_create +from collections.abc import Callable +from typing import Optional, Union + +SATOSHI_PRECISION = Decimal('0.00000001') logger = logging.getLogger("TestFramework.utils") @@ -52,10 +56,31 @@ def assert_fee_amount(fee, tx_size, feerate_BTC_kvB): raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee))) +def summarise_dict_differences(thing1, thing2): + if not isinstance(thing1, dict) or not isinstance(thing2, dict): + return thing1, thing2 + d1, d2 = {}, {} + for k in sorted(thing1.keys()): + if k not in thing2: + d1[k] = thing1[k] + elif thing1[k] != thing2[k]: + d1[k], d2[k] = summarise_dict_differences(thing1[k], thing2[k]) + for k in sorted(thing2.keys()): + if k not in thing1: + d2[k] = thing2[k] + return d1, d2 + def assert_equal(thing1, thing2, *args): + if thing1 != thing2 and not args and isinstance(thing1, dict) and isinstance(thing2, dict): + d1,d2 = summarise_dict_differences(thing1, thing2) + raise AssertionError("not(%s == %s)\n in particular not(%s == %s)" % (thing1, thing2, d1, d2)) if thing1 != thing2 or any(thing1 != arg for arg in args): raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args)) +def assert_not_equal(thing1, thing2, *, error_message=""): + if thing1 == thing2: + raise AssertionError(f"Both values are {thing1}{f', {error_message}' if error_message else ''}") + def assert_greater_than(thing1, thing2): if thing1 <= thing2: @@ -77,10 +102,9 @@ def assert_raises_message(exc, message, fun, *args, **kwds): except JSONRPCException: raise AssertionError("Use assert_raises_rpc_error() to test RPC failures") except exc as e: - if message is not None and message not in e.error['message']: - raise AssertionError( - "Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format( - message, e.error['message'])) + if message is not None and message not in str(e): + raise AssertionError("Expected substring not found in exception:\n" + f"substring: '{message}'\nexception: {e!r}.") except Exception as e: raise AssertionError("Unexpected exception raised: " + type(e).__name__) else: @@ -230,6 +254,12 @@ def ceildiv(a, b): return -(-a // b) +def random_bitflip(data): + data = list(data) + data[random.randrange(len(data))] ^= (1 << (random.randrange(8))) + return bytes(data) + + def get_fee(tx_size, feerate_btc_kvb): """Calculate the fee in BTC given a feerate is BTC/kvB. 
Reflects CFeeRate::GetFee""" feerate_sat_kvb = int(feerate_btc_kvb * Decimal(1e8)) # Fee in sat/kvb as an int to avoid float precision errors @@ -237,11 +267,33 @@ def get_fee(tx_size, feerate_btc_kvb): return target_fee_sat / Decimal(1e8) # Return result in BTC -def satoshi_round(amount): - return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) +def satoshi_round(amount: Union[int, float, str], *, rounding: str) -> Decimal: + """Rounds a Decimal amount to the nearest satoshi using the specified rounding mode.""" + return Decimal(amount).quantize(SATOSHI_PRECISION, rounding=rounding) + +def ensure_for(*, duration, f, check_interval=0.2): + """Check if the predicate keeps returning True for duration. -def wait_until_helper_internal(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None, timeout_factor=1.0): + check_interval can be used to configure the wait time between checks. + Setting check_interval to 0 will allow to have two checks: one in the + beginning and one after duration. + """ + # If check_interval is 0 or negative or larger than duration, we fall back + # to checking once in the beginning and once at the end of duration + if check_interval <= 0 or check_interval > duration: + check_interval = duration + time_end = time.time() + duration + predicate_source = "''''\n" + inspect.getsource(f) + "'''" + while True: + if not f(): + raise AssertionError(f"Predicate {predicate_source} became false within {duration} seconds") + if time.time() > time_end: + return + time.sleep(check_interval) + + +def wait_until_helper_internal(predicate, *, timeout=60, lock=None, timeout_factor=1.0, check_interval=0.05): """Sleep until the predicate resolves to be True. Warning: Note that this method is not recommended to be used in tests as it is @@ -250,13 +302,10 @@ def wait_until_helper_internal(predicate, *, attempts=float('inf'), timeout=floa properly scaled. Furthermore, `wait_until()` from `P2PInterface` class in `p2p.py` has a preset lock. """ - if attempts == float('inf') and timeout == float('inf'): - timeout = 60 timeout = timeout * timeout_factor - attempt = 0 time_end = time.time() + timeout - while attempt < attempts and time.time() < time_end: + while time.time() < time_end: if lock: with lock: if predicate(): @@ -264,17 +313,19 @@ def wait_until_helper_internal(predicate, *, attempts=float('inf'), timeout=floa else: if predicate(): return - attempt += 1 - time.sleep(0.05) + time.sleep(check_interval) # Print the cause of the timeout predicate_source = "''''\n" + inspect.getsource(predicate) + "'''" logger.error("wait_until() failed. 
Predicate: {}".format(predicate_source)) - if attempt >= attempts: - raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts)) - elif time.time() >= time_end: - raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout)) - raise RuntimeError('Unreachable') + raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout)) + + +def bpf_cflags(): + return [ + "-Wno-error=implicit-function-declaration", + "-Wno-duplicate-decl-specifier", # https://github.com/bitcoin/bitcoin/issues/32322 + ] def sha256sum_file(filename): @@ -287,10 +338,11 @@ def sha256sum_file(filename): return h.digest() -# TODO: Remove and use random.randbytes(n) instead, available in Python 3.9 -def random_bytes(n): - """Return a random bytes object of length n.""" - return bytes(random.getrandbits(8) for i in range(n)) +def util_xor(data, key, *, offset): + data = bytearray(data) + for i in range(len(data)): + data[i] ^= key[(i + offset) % len(key)] + return bytes(data) # RPC/P2P connection constants and functions @@ -298,9 +350,9 @@ def random_bytes(n): # The maximum number of nodes a single test can spawn MAX_NODES = 12 -# Don't assign rpc or p2p ports lower than this +# Don't assign p2p, rpc or tor ports lower than this PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=11000)) -# The number of ports to "reserve" for p2p and rpc, each +# The number of ports to "reserve" for p2p, rpc and tor, each PORT_RANGE = 5000 @@ -340,7 +392,11 @@ def p2p_port(n): def rpc_port(n): - return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) + return p2p_port(n) + PORT_RANGE + + +def tor_port(n): + return p2p_port(n) + PORT_RANGE * 2 def rpc_url(datadir, i, chain, rpchost): @@ -401,14 +457,23 @@ def write_config(config_path, *, n, chain, extra_config="", disable_autoconnect= # in tests. f.write("peertimeout=999999999\n") f.write("printtoconsole=0\n") - f.write("upnp=0\n") f.write("natpmp=0\n") f.write("shrinkdebugfile=0\n") - f.write("deprecatedrpc=create_bdb\n") # Required to run the tests # To improve SQLite wallet performance so that the tests don't timeout, use -unsafesqlitesync f.write("unsafesqlitesync=1\n") if disable_autoconnect: f.write("connect=0\n") + # Limit max connections to mitigate test failures on some systems caused by the warning: + # "Warning: Reducing -maxconnections from <...> to <...> due to system limitations". + # The value is calculated as follows: + # available_fds = 256 // Same as FD_SETSIZE on NetBSD. + # MIN_CORE_FDS = 151 // Number of file descriptors required for core functionality. + # MAX_ADDNODE_CONNECTIONS = 8 // Maximum number of -addnode outgoing nodes. + # nBind == 3 // Maximum number of bound interfaces used in a test. 
+    #
+    # min_required_fds = MIN_CORE_FDS + MAX_ADDNODE_CONNECTIONS + nBind = 151 + 8 + 3 = 162;
+    # nMaxConnections = available_fds - min_required_fds = 256 - 162 = 94;
+    f.write("maxconnections=94\n")
     f.write(extra_config)
 
 
@@ -416,16 +481,16 @@ def get_datadir_path(dirname, n):
     return pathlib.Path(dirname) / f"node{n}"
 
 
-def get_temp_default_datadir(temp_dir: pathlib.Path) -> Tuple[dict, pathlib.Path]:
+def get_temp_default_datadir(temp_dir: pathlib.Path) -> tuple[dict, pathlib.Path]:
     """Return os-specific environment variables that can be set to make the
     GetDefaultDataDir() function return a datadir path under the provided
     temp_dir, as well as the complete path it would return."""
-    if sys.platform == "win32":
+    if platform.system() == "Windows":
         env = dict(APPDATA=str(temp_dir))
         datadir = temp_dir / "Bitcoin"
     else:
         env = dict(HOME=str(temp_dir))
-        if sys.platform == "darwin":
+        if platform.system() == "Darwin":
             datadir = temp_dir / "Library/Application Support/Bitcoin"
         else:
             datadir = temp_dir / ".bitcoin"
@@ -490,18 +555,6 @@ def check_node_connections(*, node, num_in, num_out):
 #############################
 
 
-def find_output(node, txid, amount, *, blockhash=None):
-    """
-    Return index to output of txid with value amount
-    Raises exception if there is none.
-    """
-    txdata = node.getrawtransaction(txid, 1, blockhash)
-    for i in range(len(txdata["vout"])):
-        if txdata["vout"][i]["value"] == amount:
-            return i
-    raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
-
-
 # Create large OP_RETURN txouts that can be appended to a transaction
 # to make it large (helper for constructing large transactions). The
 # total serialized size of the txouts is about 66k vbytes.
@@ -549,3 +602,20 @@ def find_vout_for_address(node, txid, addr):
         if addr == tx["vout"][i]["scriptPubKey"]["address"]:
             return i
     raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
+
+
+def sync_txindex(test_framework, node):
+    test_framework.log.debug("Waiting for node txindex to sync")
+    sync_start = int(time.time())
+    test_framework.wait_until(lambda: node.getindexinfo("txindex")["txindex"]["synced"])
+    test_framework.log.debug(f"Synced in {time.time() - sync_start} seconds")
+
+def wallet_importprivkey(wallet_rpc, privkey, timestamp, *, label=""):
+    desc = descsum_create("combo(" + privkey + ")")
+    req = [{
+        "desc": desc,
+        "timestamp": timestamp,
+        "label": label,
+    }]
+    import_res = wallet_rpc.importdescriptors(req)
+    assert_equal(import_res[0]["success"], True)
diff --git a/resources/scenarios/test_framework/v2_p2p.py b/resources/scenarios/test_framework/v2_p2p.py
new file mode 100644
index 000000000..87600c36d
--- /dev/null
+++ b/resources/scenarios/test_framework/v2_p2p.py
@@ -0,0 +1,283 @@
+#!/usr/bin/env python3
+# Copyright (c) 2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Class for v2 P2P protocol (see BIP 324)""" + +import random + +from .crypto.bip324_cipher import FSChaCha20Poly1305 +from .crypto.chacha20 import FSChaCha20 +from .crypto.ellswift import ellswift_create, ellswift_ecdh_xonly +from .crypto.hkdf import hkdf_sha256 +from .key import TaggedHash +from .messages import MAGIC_BYTES + + +CHACHA20POLY1305_EXPANSION = 16 +HEADER_LEN = 1 +IGNORE_BIT_POS = 7 +LENGTH_FIELD_LEN = 3 +MAX_GARBAGE_LEN = 4095 + +SHORTID = { + 1: b"addr", + 2: b"block", + 3: b"blocktxn", + 4: b"cmpctblock", + 5: b"feefilter", + 6: b"filteradd", + 7: b"filterclear", + 8: b"filterload", + 9: b"getblocks", + 10: b"getblocktxn", + 11: b"getdata", + 12: b"getheaders", + 13: b"headers", + 14: b"inv", + 15: b"mempool", + 16: b"merkleblock", + 17: b"notfound", + 18: b"ping", + 19: b"pong", + 20: b"sendcmpct", + 21: b"tx", + 22: b"getcfilters", + 23: b"cfilter", + 24: b"getcfheaders", + 25: b"cfheaders", + 26: b"getcfcheckpt", + 27: b"cfcheckpt", + 28: b"addrv2", +} + +# Dictionary which contains short message type ID for the P2P message +MSGTYPE_TO_SHORTID = {msgtype: shortid for shortid, msgtype in SHORTID.items()} + + +class EncryptedP2PState: + """A class for managing the state when v2 P2P protocol is used. Performs initial v2 handshake and encrypts/decrypts + P2P messages. P2PConnection uses an object of this class. + + + Args: + initiating (bool): defines whether the P2PConnection is an initiator or responder. + - initiating = True for inbound connections in the test framework [TestNode <------- P2PConnection] + - initiating = False for outbound connections in the test framework [TestNode -------> P2PConnection] + + net (string): chain used (regtest, signet etc..) + + Methods: + perform an advanced form of diffie-hellman handshake to instantiate the encrypted transport. before exchanging + any P2P messages, 2 nodes perform this handshake in order to determine a shared secret that is unique to both + of them and use it to derive keys to encrypt/decrypt P2P messages. + - initial v2 handshakes is performed by: (see BIP324 section #overall-handshake-pseudocode) + 1. initiator using initiate_v2_handshake(), complete_handshake() and authenticate_handshake() + 2. responder using respond_v2_handshake(), complete_handshake() and authenticate_handshake() + - initialize_v2_transport() sets various BIP324 derived keys and ciphers. + + encrypt/decrypt v2 P2P messages using v2_enc_packet() and v2_receive_packet(). + """ + def __init__(self, *, initiating, net): + self.initiating = initiating # True if initiator + self.net = net + self.peer = {} # object with various BIP324 derived keys and ciphers + self.privkey_ours = None + self.ellswift_ours = None + self.sent_garbage = b"" + self.received_garbage = b"" + self.received_prefix = b"" # received ellswift bytes till the first mismatch from 16 bytes v1_prefix + self.tried_v2_handshake = False # True when the initial handshake is over + # stores length of packet contents to detect whether first 3 bytes (which contains length of packet contents) + # has been decrypted. set to -1 if decryption hasn't been done yet. + self.contents_len = -1 + self.found_garbage_terminator = False + self.transport_version = b'' + + @staticmethod + def v2_ecdh(priv, ellswift_theirs, ellswift_ours, initiating): + """Compute BIP324 shared secret. + + Returns: + bytes - BIP324 shared secret + """ + ecdh_point_x32 = ellswift_ecdh_xonly(ellswift_theirs, priv) + if initiating: + # Initiating, place our public key encoding first. 
+ return TaggedHash("bip324_ellswift_xonly_ecdh", ellswift_ours + ellswift_theirs + ecdh_point_x32) + else: + # Responding, place their public key encoding first. + return TaggedHash("bip324_ellswift_xonly_ecdh", ellswift_theirs + ellswift_ours + ecdh_point_x32) + + def generate_keypair_and_garbage(self, garbage_len=None): + """Generates ellswift keypair and 4095 bytes garbage at max""" + self.privkey_ours, self.ellswift_ours = ellswift_create() + if garbage_len is None: + garbage_len = random.randrange(MAX_GARBAGE_LEN + 1) + self.sent_garbage = random.randbytes(garbage_len) + return self.ellswift_ours + self.sent_garbage + + def initiate_v2_handshake(self): + """Initiator begins the v2 handshake by sending its ellswift bytes and garbage + + Returns: + bytes - bytes to be sent to the peer when starting the v2 handshake as an initiator + """ + return self.generate_keypair_and_garbage() + + def respond_v2_handshake(self, response): + """Responder begins the v2 handshake by sending its ellswift bytes and garbage. However, the responder + sends this after having received at least one byte that mismatches 16-byte v1_prefix. + + Returns: + 1. int - length of bytes that were consumed so that recvbuf can be updated + 2. bytes - bytes to be sent to the peer when starting the v2 handshake as a responder. + - returns b"" if more bytes need to be received before we can respond and start the v2 handshake. + - returns -1 to downgrade the connection to v1 P2P. + """ + v1_prefix = MAGIC_BYTES[self.net] + b'version\x00\x00\x00\x00\x00' + while len(self.received_prefix) < 16: + byte = response.read(1) + # return b"" if we need to receive more bytes + if not byte: + return len(self.received_prefix), b"" + self.received_prefix += byte + if self.received_prefix[-1] != v1_prefix[len(self.received_prefix) - 1]: + return len(self.received_prefix), self.generate_keypair_and_garbage() + # return -1 to decide v1 only after all 16 bytes processed + return len(self.received_prefix), -1 + + def complete_handshake(self, response): + """ Instantiates the encrypted transport and + sends garbage terminator + optional decoy packets + transport version packet. + Done by both initiator and responder. + + Returns: + 1. int - length of bytes that were consumed. returns 0 if all 64 bytes from ellswift haven't been received yet. + 2. bytes - bytes to be sent to the peer when completing the v2 handshake + """ + ellswift_theirs = self.received_prefix + response.read(64 - len(self.received_prefix)) + # return b"" if we need to receive more bytes + if len(ellswift_theirs) != 64: + return 0, b"" + ecdh_secret = self.v2_ecdh(self.privkey_ours, ellswift_theirs, self.ellswift_ours, self.initiating) + self.initialize_v2_transport(ecdh_secret) + # Send garbage terminator + msg_to_send = self.peer['send_garbage_terminator'] + # Optionally send decoy packets after garbage terminator. + aad = self.sent_garbage + for decoy_content_len in [random.randint(1, 100) for _ in range(random.randint(0, 10))]: + msg_to_send += self.v2_enc_packet(decoy_content_len * b'\x00', aad=aad, ignore=True) + aad = b'' + # Send version packet. + msg_to_send += self.v2_enc_packet(self.transport_version, aad=aad) + return 64 - len(self.received_prefix), msg_to_send + + def authenticate_handshake(self, response): + """ Ensures that the received optional decoy packets and transport version packet are authenticated. + Marks the v2 handshake as complete. Done by both initiator and responder. + + Returns: + 1. 
int - length of bytes that were processed so that recvbuf can be updated + 2. bool - True if the authentication was successful/more bytes need to be received and False otherwise + """ + processed_length = 0 + + # Detect garbage terminator in the received bytes + if not self.found_garbage_terminator: + received_garbage = response[:16] + response = response[16:] + processed_length = len(received_garbage) + for i in range(MAX_GARBAGE_LEN + 1): + if received_garbage[-16:] == self.peer['recv_garbage_terminator']: + # Receive, decode, and ignore version packet. + # This includes skipping decoys and authenticating the received garbage. + self.found_garbage_terminator = True + self.received_garbage = received_garbage[:-16] + break + else: + # don't update recvbuf since more bytes need to be received + if len(response) == 0: + return 0, True + received_garbage += response[:1] + processed_length += 1 + response = response[1:] + else: + # disconnect since garbage terminator was not seen after 4 KiB of garbage. + return processed_length, False + + # Process optional decoy packets and transport version packet + while not self.tried_v2_handshake: + length, contents = self.v2_receive_packet(response, aad=self.received_garbage) + if length == -1: + return processed_length, False + elif length == 0: + return processed_length, True + processed_length += length + self.received_garbage = b"" + # decoy packets have contents = None. v2 handshake is complete only when version packet + # (can be empty with contents = b"") with contents != None is received. + if contents is not None: + assert contents == b"" # currently TestNode sends an empty version packet + self.tried_v2_handshake = True + return processed_length, True + response = response[length:] + + def initialize_v2_transport(self, ecdh_secret): + """Sets the peer object with various BIP324 derived keys and ciphers.""" + peer = {} + salt = b'bitcoin_v2_shared_secret' + MAGIC_BYTES[self.net] + for name in ('initiator_L', 'initiator_P', 'responder_L', 'responder_P', 'garbage_terminators', 'session_id'): + peer[name] = hkdf_sha256(salt=salt, ikm=ecdh_secret, info=name.encode('utf-8'), length=32) + if self.initiating: + self.peer['send_L'] = FSChaCha20(peer['initiator_L']) + self.peer['send_P'] = FSChaCha20Poly1305(peer['initiator_P']) + self.peer['send_garbage_terminator'] = peer['garbage_terminators'][:16] + self.peer['recv_L'] = FSChaCha20(peer['responder_L']) + self.peer['recv_P'] = FSChaCha20Poly1305(peer['responder_P']) + self.peer['recv_garbage_terminator'] = peer['garbage_terminators'][16:] + else: + self.peer['send_L'] = FSChaCha20(peer['responder_L']) + self.peer['send_P'] = FSChaCha20Poly1305(peer['responder_P']) + self.peer['send_garbage_terminator'] = peer['garbage_terminators'][16:] + self.peer['recv_L'] = FSChaCha20(peer['initiator_L']) + self.peer['recv_P'] = FSChaCha20Poly1305(peer['initiator_P']) + self.peer['recv_garbage_terminator'] = peer['garbage_terminators'][:16] + self.peer['session_id'] = peer['session_id'] + + def v2_enc_packet(self, contents, aad=b'', ignore=False): + """Encrypt a BIP324 packet. 
+ + Returns: + bytes - encrypted packet contents + """ + assert len(contents) <= 2**24 - 1 + header = (ignore << IGNORE_BIT_POS).to_bytes(HEADER_LEN, 'little') + plaintext = header + contents + aead_ciphertext = self.peer['send_P'].encrypt(aad, plaintext) + enc_plaintext_len = self.peer['send_L'].crypt(len(contents).to_bytes(LENGTH_FIELD_LEN, 'little')) + return enc_plaintext_len + aead_ciphertext + + def v2_receive_packet(self, response, aad=b''): + """Decrypt a BIP324 packet + + Returns: + 1. int - number of bytes consumed (or -1 if error) + 2. bytes - contents of decrypted non-decoy packet if any (or None otherwise) + """ + if self.contents_len == -1: + if len(response) < LENGTH_FIELD_LEN: + return 0, None + enc_contents_len = response[:LENGTH_FIELD_LEN] + self.contents_len = int.from_bytes(self.peer['recv_L'].crypt(enc_contents_len), 'little') + response = response[LENGTH_FIELD_LEN:] + if len(response) < HEADER_LEN + self.contents_len + CHACHA20POLY1305_EXPANSION: + return 0, None + aead_ciphertext = response[:HEADER_LEN + self.contents_len + CHACHA20POLY1305_EXPANSION] + plaintext = self.peer['recv_P'].decrypt(aad, aead_ciphertext) + if plaintext is None: + return -1, None # disconnect + header = plaintext[:HEADER_LEN] + length = LENGTH_FIELD_LEN + HEADER_LEN + self.contents_len + CHACHA20POLY1305_EXPANSION + self.contents_len = -1 + return length, None if (header[0] & (1 << IGNORE_BIT_POS)) else plaintext[HEADER_LEN:] diff --git a/resources/scenarios/test_framework/wallet.py b/resources/scenarios/test_framework/wallet.py index 035a482f4..a47ccab01 100644 --- a/resources/scenarios/test_framework/wallet.py +++ b/resources/scenarios/test_framework/wallet.py @@ -9,7 +9,6 @@ from enum import Enum from typing import ( Any, - List, Optional, ) from test_framework.address import ( @@ -33,10 +32,10 @@ CTxIn, CTxInWitness, CTxOut, + hash256, ) from test_framework.script import ( CScript, - LEAF_VERSION_TAPSCRIPT, OP_NOP, OP_RETURN, OP_TRUE, @@ -44,6 +43,7 @@ taproot_construct, ) from test_framework.script_util import ( + bulk_vout, key_to_p2pk_script, key_to_p2pkh_script, key_to_p2sh_p2wpkh_script, @@ -52,6 +52,7 @@ from test_framework.util import ( assert_equal, assert_greater_than_or_equal, + get_fee, ) from test_framework.wallet_util import generate_keypair @@ -66,7 +67,10 @@ class MiniWalletMode(Enum): However, if the transactions need to be modified by the user (e.g. prepending scriptSig for testing opcodes that are activated by a soft-fork), or the txs should contain an actual signature, the raw modes RAW_OP_TRUE and RAW_P2PK - can be useful. Summary of modes: + can be useful. In order to avoid mixing of UTXOs between different MiniWallet + instances, a tag name can be passed to the default mode, to create different + output scripts. Note that the UTXOs from the pre-generated test chain can + only be spent if no tag is passed. 
Summary of modes: | output | | tx is | can modify | needs mode | description | address | standard | scriptSig | signing @@ -81,22 +85,25 @@ class MiniWalletMode(Enum): class MiniWallet: - def __init__(self, test_node, *, mode=MiniWalletMode.ADDRESS_OP_TRUE): + def __init__(self, test_node, *, mode=MiniWalletMode.ADDRESS_OP_TRUE, tag_name=None): self._test_node = test_node self._utxos = [] self._mode = mode assert isinstance(mode, MiniWalletMode) if mode == MiniWalletMode.RAW_OP_TRUE: + assert tag_name is None self._scriptPubKey = bytes(CScript([OP_TRUE])) elif mode == MiniWalletMode.RAW_P2PK: # use simple deterministic private key (k=1) + assert tag_name is None self._priv_key = ECKey() self._priv_key.set((1).to_bytes(32, 'big'), True) pub_key = self._priv_key.get_pubkey() self._scriptPubKey = key_to_p2pk_script(pub_key.get_bytes()) elif mode == MiniWalletMode.ADDRESS_OP_TRUE: - self._address, self._internal_key = create_deterministic_address_bcrt1_p2tr_op_true() + internal_key = None if tag_name is None else compute_xonly_pubkey(hash256(tag_name.encode()))[0] + self._address, self._taproot_info = create_deterministic_address_bcrt1_p2tr_op_true(internal_key) self._scriptPubKey = address_to_scriptpubkey(self._address) # When the pre-mined test framework chain is used, it contains coinbase @@ -109,17 +116,13 @@ def __init__(self, test_node, *, mode=MiniWalletMode.ADDRESS_OP_TRUE): def _create_utxo(self, *, txid, vout, value, height, coinbase, confirmations): return {"txid": txid, "vout": vout, "value": value, "height": height, "coinbase": coinbase, "confirmations": confirmations} - def _bulk_tx(self, tx, target_weight): - """Pad a transaction with extra outputs until it reaches a target weight (or higher). + def _bulk_tx(self, tx, target_vsize): + """Pad a transaction with extra outputs until it reaches a target vsize. 
returns the tx """ - tx.vout.append(CTxOut(nValue=0, scriptPubKey=CScript([OP_RETURN, b'a']))) - dummy_vbytes = (target_weight - tx.get_weight() + 3) // 4 - tx.vout[-1].scriptPubKey = CScript([OP_RETURN, b'a' * dummy_vbytes]) - # Lower bound should always be off by at most 3 - assert_greater_than_or_equal(tx.get_weight(), target_weight) - # Higher bound should always be off by at most 3 + 12 weight (for encoding the length) - assert_greater_than_or_equal(target_weight + 15, tx.get_weight()) + tx.vout.append(CTxOut(nValue=0, scriptPubKey=CScript([OP_RETURN]))) + bulk_vout(tx, target_vsize) + def get_balance(self): return sum(u['value'] for u in self._utxos) @@ -181,7 +184,12 @@ def sign_tx(self, tx, fixed_length=True): elif self._mode == MiniWalletMode.ADDRESS_OP_TRUE: tx.wit.vtxinwit = [CTxInWitness()] * len(tx.vin) for i in tx.wit.vtxinwit: - i.scriptWitness.stack = [CScript([OP_TRUE]), bytes([LEAF_VERSION_TAPSCRIPT]) + self._internal_key] + assert_equal(len(self._taproot_info.leaves), 1) + leaf_info = list(self._taproot_info.leaves.values())[0] + i.scriptWitness.stack = [ + leaf_info.script, + bytes([leaf_info.version | self._taproot_info.negflag]) + self._taproot_info.internal_pubkey, + ] else: assert False @@ -198,7 +206,7 @@ def generate(self, num_blocks, **kwargs): self.rescan_utxos() return blocks - def get_scriptPubKey(self): + def get_output_script(self): return self._scriptPubKey def get_descriptor(self): @@ -270,7 +278,7 @@ def send_to(self, *, from_node, scriptPubKey, amount, fee=1000): return { "sent_vout": 1, "txid": txid, - "wtxid": tx.getwtxid(), + "wtxid": tx.wtxid_hex, "hex": tx.serialize().hex(), "tx": tx, } @@ -284,14 +292,15 @@ def send_self_transfer_multi(self, *, from_node, **kwargs): def create_self_transfer_multi( self, *, - utxos_to_spend: Optional[List[dict]] = None, + utxos_to_spend: Optional[list[dict]] = None, num_outputs=1, amount_per_output=0, + version=2, locktime=0, sequence=0, fee_per_output=1000, - target_weight=0, - confirmed_only=False + target_vsize=0, + confirmed_only=False, ): """ Create and return a transaction that spends the given UTXOs and creates a @@ -314,14 +323,15 @@ def create_self_transfer_multi( tx = CTransaction() tx.vin = [CTxIn(COutPoint(int(utxo_to_spend['txid'], 16), utxo_to_spend['vout']), nSequence=seq) for utxo_to_spend, seq in zip(utxos_to_spend, sequence)] tx.vout = [CTxOut(amount_per_output, bytearray(self._scriptPubKey)) for _ in range(num_outputs)] + tx.version = version tx.nLockTime = locktime self.sign_tx(tx) - if target_weight: - self._bulk_tx(tx, target_weight) + if target_vsize: + self._bulk_tx(tx, target_vsize) - txid = tx.rehash() + txid = tx.txid_hex return { "new_utxos": [self._create_utxo( txid=txid, @@ -333,19 +343,20 @@ def create_self_transfer_multi( ) for i in range(len(tx.vout))], "fee": fee, "txid": txid, - "wtxid": tx.getwtxid(), + "wtxid": tx.wtxid_hex, "hex": tx.serialize().hex(), "tx": tx, } - def create_self_transfer(self, *, + def create_self_transfer( + self, + *, fee_rate=Decimal("0.003"), fee=Decimal("0"), utxo_to_spend=None, - locktime=0, - sequence=0, - target_weight=0, - confirmed_only=False + target_vsize=0, + confirmed_only=False, + **kwargs, ): """Create and return a tx with the specified fee. 
If fee is 0, use fee_rate, where the resulting fee may be exact or at most one satoshi higher than needed.""" utxo_to_spend = utxo_to_spend or self.get_utxo(confirmed_only=confirmed_only) @@ -358,11 +369,19 @@ def create_self_transfer(self, *, vsize = Decimal(168) # P2PK (73 bytes scriptSig + 35 bytes scriptPubKey + 60 bytes other) else: assert False + if target_vsize and not fee: # respect fee_rate if target vsize is passed + fee = get_fee(target_vsize, fee_rate) send_value = utxo_to_spend["value"] - (fee or (fee_rate * vsize / 1000)) - + if send_value <= 0: + raise RuntimeError(f"UTXO value {utxo_to_spend['value']} is too small to cover fees {(fee or (fee_rate * vsize / 1000))}") # create tx - tx = self.create_self_transfer_multi(utxos_to_spend=[utxo_to_spend], locktime=locktime, sequence=sequence, amount_per_output=int(COIN * send_value), target_weight=target_weight) - if not target_weight: + tx = self.create_self_transfer_multi( + utxos_to_spend=[utxo_to_spend], + amount_per_output=int(COIN * send_value), + target_vsize=target_vsize, + **kwargs, + ) + if not target_vsize: assert_equal(tx["tx"].get_vsize(), vsize) tx["new_utxo"] = tx.pop("new_utxos")[0] diff --git a/resources/scenarios/test_framework/wallet_util.py b/resources/scenarios/test_framework/wallet_util.py index 44811918b..2168e607b 100755 --- a/resources/scenarios/test_framework/wallet_util.py +++ b/resources/scenarios/test_framework/wallet_util.py @@ -4,6 +4,7 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Useful util functions for testing the wallet""" from collections import namedtuple +import unittest from test_framework.address import ( byte_to_base58, @@ -15,6 +16,11 @@ script_to_p2wsh, ) from test_framework.key import ECKey +from test_framework.messages import ( + CTxIn, + CTxInWitness, + WITNESS_SCALE_FACTOR, +) from test_framework.script_util import ( key_to_p2pkh_script, key_to_p2wpkh_script, @@ -123,6 +129,19 @@ def generate_keypair(compressed=True, wif=False): privkey = bytes_to_wif(privkey.get_bytes(), compressed) return privkey, pubkey +def calculate_input_weight(scriptsig_hex, witness_stack_hex=None): + """Given a scriptSig and a list of witness stack items for an input in hex format, + calculate the total input weight. If the input has no witness data, + `witness_stack_hex` can be set to None.""" + tx_in = CTxIn(scriptSig=bytes.fromhex(scriptsig_hex)) + witness_size = 0 + if witness_stack_hex is not None: + tx_inwit = CTxInWitness() + for witness_item_hex in witness_stack_hex: + tx_inwit.scriptWitness.stack.append(bytes.fromhex(witness_item_hex)) + witness_size = len(tx_inwit.serialize()) + return len(tx_in.serialize()) * WITNESS_SCALE_FACTOR + witness_size + class WalletUnlock(): """ A context manager for unlocking a wallet with a passphrase and automatically locking it afterward. 
@@ -141,3 +160,42 @@ def __enter__(self): def __exit__(self, *args): _ = args self.wallet.walletlock() + + +class TestFrameworkWalletUtil(unittest.TestCase): + def test_calculate_input_weight(self): + SKELETON_BYTES = 32 + 4 + 4 # prevout-txid, prevout-index, sequence + SMALL_LEN_BYTES = 1 # bytes needed for encoding scriptSig / witness item lengths < 253 + LARGE_LEN_BYTES = 3 # bytes needed for encoding scriptSig / witness item lengths >= 253 + + # empty scriptSig, no witness + self.assertEqual(calculate_input_weight(""), + (SKELETON_BYTES + SMALL_LEN_BYTES) * WITNESS_SCALE_FACTOR) + self.assertEqual(calculate_input_weight("", None), + (SKELETON_BYTES + SMALL_LEN_BYTES) * WITNESS_SCALE_FACTOR) + # small scriptSig, no witness + scriptSig_small = "00"*252 + self.assertEqual(calculate_input_weight(scriptSig_small, None), + (SKELETON_BYTES + SMALL_LEN_BYTES + 252) * WITNESS_SCALE_FACTOR) + # small scriptSig, empty witness stack + self.assertEqual(calculate_input_weight(scriptSig_small, []), + (SKELETON_BYTES + SMALL_LEN_BYTES + 252) * WITNESS_SCALE_FACTOR + SMALL_LEN_BYTES) + # large scriptSig, no witness + scriptSig_large = "00"*253 + self.assertEqual(calculate_input_weight(scriptSig_large, None), + (SKELETON_BYTES + LARGE_LEN_BYTES + 253) * WITNESS_SCALE_FACTOR) + # large scriptSig, empty witness stack + self.assertEqual(calculate_input_weight(scriptSig_large, []), + (SKELETON_BYTES + LARGE_LEN_BYTES + 253) * WITNESS_SCALE_FACTOR + SMALL_LEN_BYTES) + # empty scriptSig, 5 small witness stack items + self.assertEqual(calculate_input_weight("", ["00", "11", "22", "33", "44"]), + ((SKELETON_BYTES + SMALL_LEN_BYTES) * WITNESS_SCALE_FACTOR) + SMALL_LEN_BYTES + 5 * SMALL_LEN_BYTES + 5) + # empty scriptSig, 253 small witness stack items + self.assertEqual(calculate_input_weight("", ["00"]*253), + ((SKELETON_BYTES + SMALL_LEN_BYTES) * WITNESS_SCALE_FACTOR) + LARGE_LEN_BYTES + 253 * SMALL_LEN_BYTES + 253) + # small scriptSig, 3 large witness stack items + self.assertEqual(calculate_input_weight(scriptSig_small, ["00"*253]*3), + ((SKELETON_BYTES + SMALL_LEN_BYTES + 252) * WITNESS_SCALE_FACTOR) + SMALL_LEN_BYTES + 3 * LARGE_LEN_BYTES + 3*253) + # large scriptSig, 3 large witness stack items + self.assertEqual(calculate_input_weight(scriptSig_large, ["00"*253]*3), + ((SKELETON_BYTES + LARGE_LEN_BYTES + 253) * WITNESS_SCALE_FACTOR) + SMALL_LEN_BYTES + 3 * LARGE_LEN_BYTES + 3*253) diff --git a/resources/scenarios/test_scenarios/buggy_failure.py b/resources/scenarios/test_scenarios/buggy_failure.py index e982680d5..fbda306d7 100644 --- a/resources/scenarios/test_scenarios/buggy_failure.py +++ b/resources/scenarios/test_scenarios/buggy_failure.py @@ -21,7 +21,7 @@ def run_test(self): def main(): - Failure().main() + Failure("").main() if __name__ == "__main__": diff --git a/resources/scenarios/test_scenarios/connect_dag.py b/resources/scenarios/test_scenarios/connect_dag.py index 5747291cb..c45caef6f 100644 --- a/resources/scenarios/test_scenarios/connect_dag.py +++ b/resources/scenarios/test_scenarios/connect_dag.py @@ -118,7 +118,7 @@ def assert_connection(self, connector, connectee_index, connection_type: Connect def main(): - ConnectDag().main() + ConnectDag("").main() if __name__ == "__main__": diff --git a/resources/scenarios/test_scenarios/generate_one_allnodes.py b/resources/scenarios/test_scenarios/generate_one_allnodes.py index 30ea41445..6d70f2bd8 100644 --- a/resources/scenarios/test_scenarios/generate_one_allnodes.py +++ b/resources/scenarios/test_scenarios/generate_one_allnodes.py @@ -26,7 
+26,7 @@ def run_test(self): def main(): - GenOneAllNodes().main() + GenOneAllNodes("").main() if __name__ == "__main__": diff --git a/resources/scenarios/test_scenarios/p2p_interface.py b/resources/scenarios/test_scenarios/p2p_interface.py index 64636267c..61efa1012 100644 --- a/resources/scenarios/test_scenarios/p2p_interface.py +++ b/resources/scenarios/test_scenarios/p2p_interface.py @@ -18,8 +18,7 @@ def __init__(self): self.blocks = defaultdict(int) def on_block(self, message): - message.block.calc_sha256() - self.blocks[message.block.sha256] += 1 + self.blocks[message.block.hash_int] += 1 class GetdataTest(Commander): @@ -53,7 +52,7 @@ def run_test(self): def main(): - GetdataTest().main() + GetdataTest("").main() if __name__ == "__main__": diff --git a/resources/scenarios/test_scenarios/pyln_connect.py b/resources/scenarios/test_scenarios/pyln_connect.py index e089977a6..40e68d37d 100644 --- a/resources/scenarios/test_scenarios/pyln_connect.py +++ b/resources/scenarios/test_scenarios/pyln_connect.py @@ -57,7 +57,7 @@ def run_test(self): def main(): - PyLNConnect().main() + PyLNConnect("").main() if __name__ == "__main__": diff --git a/resources/scenarios/test_scenarios/signet_grinder.py b/resources/scenarios/test_scenarios/signet_grinder.py index 1bee09409..9830abea8 100644 --- a/resources/scenarios/test_scenarios/signet_grinder.py +++ b/resources/scenarios/test_scenarios/signet_grinder.py @@ -12,7 +12,7 @@ def run_test(self): def main(): - SignetGrinder().main() + SignetGrinder("").main() if __name__ == "__main__": diff --git a/resources/scenarios/tx_flood.py b/resources/scenarios/tx_flood.py index 7a60bccc5..7d46a6baa 100755 --- a/resources/scenarios/tx_flood.py +++ b/resources/scenarios/tx_flood.py @@ -68,7 +68,7 @@ def run_test(self): def main(): - TXFlood().main() + TXFlood("").main() if __name__ == "__main__":
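    main()

A quick usage sketch of the reworked util.py helpers above (not part of the
diff itself). The helper names and signatures are taken verbatim from the
hunks; the only assumption is that the snippet runs inside a scenario where
resources/scenarios is on sys.path, so the vendored test_framework package
is importable.

    from decimal import ROUND_DOWN
    from test_framework.util import assert_not_equal, ensure_for, satoshi_round

    # satoshi_round() no longer defaults to ROUND_DOWN; callers must pick a
    # rounding mode explicitly via the keyword-only argument.
    amount = satoshi_round("0.123456789", rounding=ROUND_DOWN)
    assert str(amount) == "0.12345678"

    # ensure_for() is the inverse of wait_until(): it asserts the predicate
    # *stays* true for the whole duration. It reads inspect.getsource(f),
    # so define the predicate in a real source file, not an interactive REPL.
    def stays_true():
        return True
    ensure_for(duration=1, f=stays_true, check_interval=0.2)

    # assert_not_equal() is new; error_message is keyword-only.
    assert_not_equal(1, 2, error_message="values must differ")

Dropping the implicit ROUND_DOWN default from satoshi_round() makes the
rounding direction explicit at every call site, which appears to be the
point of the new keyword-only argument.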