diff --git a/depend/bitcoin/.github/actions/clear-files/action.yml b/depend/bitcoin/.github/actions/clear-files/action.yml new file mode 100644 index 0000000..0008f82 --- /dev/null +++ b/depend/bitcoin/.github/actions/clear-files/action.yml @@ -0,0 +1,12 @@ +name: 'Clear unnecessary files' +description: 'Clear out unnecessary files to make space on the VM' +runs: + using: 'composite' + steps: + - name: Clear unnecessary files + shell: bash + env: + DEBIAN_FRONTEND: noninteractive + run: | + set +o errexit + sudo bash -c '(ionice -c 3 nice -n 19 rm -rf /usr/share/dotnet/ /usr/local/graalvm/ /usr/local/.ghcup/ /usr/local/share/powershell /usr/local/share/chromium /usr/local/lib/android /usr/local/lib/node_modules)&' diff --git a/depend/bitcoin/.github/workflows/ci.yml b/depend/bitcoin/.github/workflows/ci.yml index 880cc90..2563c2e 100644 --- a/depend/bitcoin/.github/workflows/ci.yml +++ b/depend/bitcoin/.github/workflows/ci.yml @@ -56,12 +56,12 @@ jobs: fi test-each-commit: - name: 'test each commit' + name: 'test max 6 ancestor commits' runs-on: ubuntu-24.04 if: github.event_name == 'pull_request' && github.event.pull_request.commits != 1 timeout-minutes: 360 # Use maximum time, see https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#jobsjob_idtimeout-minutes. Assuming a worst case time of 1 hour per commit, this leads to a --max-count=6 below. env: - MAX_COUNT: 6 + MAX_COUNT: 6 # Keep in sync with name above steps: - name: Determine fetch depth run: echo "FETCH_DEPTH=$((${{ github.event.pull_request.commits }} + 2))" >> "$GITHUB_ENV" @@ -155,11 +155,11 @@ jobs: - name: Clang version run: | - # Use the earliest Xcode supported by the version of macOS denoted in + # Use the latest Xcode supported by the version of macOS denoted in # doc/release-notes-empty-template.md and providing at least the # minimum clang version denoted in doc/dependencies.md. 
- # See: https://developer.apple.com/documentation/xcode-release-notes/xcode-16-release-notes - sudo xcode-select --switch /Applications/Xcode_16.0.app + # See: https://developer.apple.com/documentation/xcode-release-notes/xcode-16_2-release-notes + sudo xcode-select --switch /Applications/Xcode_16.2.app clang --version - name: Install Homebrew packages @@ -218,10 +218,10 @@ jobs: job-type: [standard, fuzz] include: - job-type: standard - generate-options: '-DBUILD_GUI=ON -DWITH_ZMQ=ON -DBUILD_BENCH=ON -DBUILD_KERNEL_LIB=ON -DBUILD_UTIL_CHAINSTATE=ON -DWERROR=ON' + generate-options: '-DBUILD_BENCH=ON -DBUILD_KERNEL_LIB=ON -DBUILD_UTIL_CHAINSTATE=ON -DWERROR=ON' job-name: 'Windows native, VS 2022' - job-type: fuzz - generate-options: '-DVCPKG_MANIFEST_NO_DEFAULT_FEATURES=ON -DVCPKG_MANIFEST_FEATURES="wallet" -DBUILD_GUI=OFF -DBUILD_FOR_FUZZING=ON -DWERROR=ON' + generate-options: '-DVCPKG_MANIFEST_NO_DEFAULT_FEATURES=ON -DVCPKG_MANIFEST_FEATURES="wallet" -DBUILD_GUI=OFF -DWITH_ZMQ=OFF -DBUILD_FOR_FUZZING=ON -DWERROR=ON' job-name: 'Windows native, fuzz, VS 2022' steps: @@ -344,13 +344,25 @@ jobs: py -3 test/fuzz/test_runner.py --par $NUMBER_OF_PROCESSORS --loglevel DEBUG "${RUNNER_TEMP}/qa-assets/fuzz_corpora" windows-cross: - name: 'Windows-cross to x86_64' + name: 'Windows-cross to x86_64, ${{ matrix.crt }}' needs: runners runs-on: ${{ needs.runners.outputs.provider == 'cirrus' && 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm' || 'ubuntu-24.04' }} if: ${{ vars.SKIP_BRANCH_PUSH != 'true' || github.event_name == 'pull_request' }} + strategy: + fail-fast: false + matrix: + crt: [msvcrt, ucrt] + include: + - crt: msvcrt + file-env: './ci/test/00_setup_env_win64_msvcrt.sh' + artifact-name: 'x86_64-w64-mingw32-executables' + - crt: ucrt + file-env: './ci/test/00_setup_env_win64.sh' + artifact-name: 'x86_64-w64-mingw32ucrt-executables' + env: - FILE_ENV: './ci/test/00_setup_env_win64.sh' + FILE_ENV: ${{ matrix.file-env }} DANGER_CI_ON_HOST_FOLDERS: 1 steps: @@ -379,7 +391,7 @@ jobs: - name: Upload built executables uses: actions/upload-artifact@v4 with: - name: x86_64-w64-mingw32-executables-${{ github.run_id }} + name: ${{ matrix.artifact-name }}-${{ github.run_id }} path: | ${{ env.BASE_BUILD_DIR }}/bin/*.dll ${{ env.BASE_BUILD_DIR }}/bin/*.exe @@ -388,10 +400,20 @@ jobs: ${{ env.BASE_BUILD_DIR }}/test/config.ini windows-native-test: - name: 'Windows, test cross-built' + name: 'Windows, ${{ matrix.crt }}, test cross-built' runs-on: windows-2022 needs: windows-cross + strategy: + fail-fast: false + matrix: + crt: [msvcrt, ucrt] + include: + - crt: msvcrt + artifact-name: 'x86_64-w64-mingw32-executables' + - crt: ucrt + artifact-name: 'x86_64-w64-mingw32ucrt-executables' + env: PYTHONUTF8: 1 TEST_RUNNER_TIMEOUT_FACTOR: 40 @@ -404,7 +426,7 @@ jobs: - name: Download built executables uses: actions/download-artifact@v5 with: - name: x86_64-w64-mingw32-executables-${{ github.run_id }} + name: ${{ matrix.artifact-name }}-${{ github.run_id }} - name: Run bitcoind.exe run: ./bin/bitcoind.exe -version @@ -433,6 +455,7 @@ jobs: - name: Run unit tests # Can't use ctest here like other jobs as we don't have a CMake build tree. run: | + ./bin/test_bitcoin-qt.exe ./bin/test_bitcoin.exe -l test_suite # Intentionally run sequentially here, to catch test case failures caused by dirty global state from prior test cases. 
./src/secp256k1/bin/exhaustive_tests.exe ./src/secp256k1/bin/noverify_tests.exe @@ -516,7 +539,7 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_i686_no_ipc.sh' - - name: 'fuzzer,address,undefined,integer, no depends' + - name: 'fuzzer,address,undefined,integer' cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' fallback-runner: 'ubuntu-24.04' timeout-minutes: 240 @@ -581,6 +604,10 @@ jobs: with: cache-provider: ${{ matrix.provider || needs.runners.outputs.provider }} + - name: Clear unnecessary files + if: ${{ needs.runners.outputs.provider == 'gha' && true || false }} # Only needed on GHA runners + uses: ./.github/actions/clear-files + - name: Enable bpfcc script if: ${{ env.CONTAINER_NAME == 'ci_native_asan' }} # In the image build step, no external environment variables are available, diff --git a/depend/bitcoin/CMakeLists.txt b/depend/bitcoin/CMakeLists.txt index f264acc..9e6c255 100644 --- a/depend/bitcoin/CMakeLists.txt +++ b/depend/bitcoin/CMakeLists.txt @@ -105,7 +105,7 @@ option(BUILD_UTIL "Build bitcoin-util executable." ${BUILD_TESTS}) option(BUILD_UTIL_CHAINSTATE "Build experimental bitcoin-chainstate executable." OFF) option(BUILD_KERNEL_LIB "Build experimental bitcoinkernel library." ${BUILD_UTIL_CHAINSTATE}) -option(BUILD_KERNEL_TEST "Build tests for the experimental bitcoinkernel library." ${BUILD_KERNEL_LIB}) +cmake_dependent_option(BUILD_KERNEL_TEST "Build tests for the experimental bitcoinkernel library." ON "BUILD_KERNEL_LIB" OFF) option(ENABLE_WALLET "Enable wallet." ON) if(ENABLE_WALLET) diff --git a/depend/bitcoin/CMakePresets.json b/depend/bitcoin/CMakePresets.json index d478af3..ae9d06d 100644 --- a/depend/bitcoin/CMakePresets.json +++ b/depend/bitcoin/CMakePresets.json @@ -14,7 +14,8 @@ "toolchainFile": "$env{VCPKG_ROOT}\\scripts\\buildsystems\\vcpkg.cmake", "cacheVariables": { "VCPKG_TARGET_TRIPLET": "x64-windows", - "BUILD_GUI": "ON" + "BUILD_GUI": "ON", + "WITH_ZMQ": "ON" } }, { @@ -30,7 +31,8 @@ "toolchainFile": "$env{VCPKG_ROOT}\\scripts\\buildsystems\\vcpkg.cmake", "cacheVariables": { "VCPKG_TARGET_TRIPLET": "x64-windows-static", - "BUILD_GUI": "ON" + "BUILD_GUI": "ON", + "WITH_ZMQ": "ON" } }, { diff --git a/depend/bitcoin/ci/README.md b/depend/bitcoin/ci/README.md index 9abfce1..293294a 100644 --- a/depend/bitcoin/ci/README.md +++ b/depend/bitcoin/ci/README.md @@ -43,7 +43,7 @@ into the local CI. To run the test stage with a specific configuration: ``` -env -i HOME="$HOME" PATH="$PATH" USER="$USER" bash -c 'FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh' +env -i HOME="$HOME" PATH="$PATH" USER="$USER" FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh ``` ## Configurations @@ -62,7 +62,7 @@ It is also possible to force a specific configuration without modifying the file. For example, ``` -env -i HOME="$HOME" PATH="$PATH" USER="$USER" bash -c 'MAKEJOBS="-j1" FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh' +env -i HOME="$HOME" PATH="$PATH" USER="$USER" MAKEJOBS="-j1" FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh ``` The files starting with `0n` (`n` greater than 0) are the scripts that are run diff --git a/depend/bitcoin/ci/test/00_setup_env.sh b/depend/bitcoin/ci/test/00_setup_env.sh index a8f8d0e..890bccd 100755 --- a/depend/bitcoin/ci/test/00_setup_env.sh +++ b/depend/bitcoin/ci/test/00_setup_env.sh @@ -6,7 +6,7 @@ export LC_ALL=C.UTF-8 -set -ex +set -o errexit -o pipefail -o xtrace # The source root dir, usually from git, usually read-only. 
# The ci system copies this folder. @@ -22,9 +22,6 @@ export DEPENDS_DIR=${DEPENDS_DIR:-$BASE_ROOT_DIR/depends} # A folder for the ci system to put temporary files (build result, datadirs for tests, ...) # This folder only exists on the ci guest. export BASE_SCRATCH_DIR=${BASE_SCRATCH_DIR:-$BASE_ROOT_DIR/ci/scratch} -# A folder for the ci system to put executables. -# This folder only exists on the ci guest. -export BINS_SCRATCH_DIR="${BASE_SCRATCH_DIR}/bins/" echo "Setting specific values in env" if [ -n "${FILE_ENV}" ]; then @@ -36,8 +33,6 @@ fi echo "Fallback to default values in env (if not yet set)" # The number of parallel jobs to pass down to make and test_runner.py export MAKEJOBS=${MAKEJOBS:--j$(if command -v nproc > /dev/null 2>&1; then nproc; else sysctl -n hw.logicalcpu; fi)} -# Whether to prefer BusyBox over GNU utilities -export USE_BUSY_BOX=${USE_BUSY_BOX:-false} export RUN_UNIT_TESTS=${RUN_UNIT_TESTS:-true} export RUN_FUNCTIONAL_TESTS=${RUN_FUNCTIONAL_TESTS:-true} diff --git a/depend/bitcoin/ci/test/00_setup_env_arm.sh b/depend/bitcoin/ci/test/00_setup_env_arm.sh index 46b3df8..c410a80 100755 --- a/depend/bitcoin/ci/test/00_setup_env_arm.sh +++ b/depend/bitcoin/ci/test/00_setup_env_arm.sh @@ -8,11 +8,10 @@ export LC_ALL=C.UTF-8 export HOST=arm-linux-gnueabihf export DPKG_ADD_ARCH="armhf" -export PACKAGES="python3-zmq g++-arm-linux-gnueabihf busybox libc6:armhf libstdc++6:armhf libfontconfig1:armhf libxcb1:armhf" +export PACKAGES="python3-zmq g++-arm-linux-gnueabihf libc6:armhf libstdc++6:armhf libfontconfig1:armhf libxcb1:armhf" export CONTAINER_NAME=ci_arm_linux export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" # Check that https://packages.ubuntu.com/noble/g++-arm-linux-gnueabihf (version 13.x, similar to guix) can cross-compile export CI_IMAGE_PLATFORM="linux/arm64" -export USE_BUSY_BOX=true export RUN_UNIT_TESTS=true export RUN_FUNCTIONAL_TESTS=false export GOAL="install" diff --git a/depend/bitcoin/ci/test/00_setup_env_win64.sh b/depend/bitcoin/ci/test/00_setup_env_win64.sh index 3a34910..dbbf55c 100755 --- a/depend/bitcoin/ci/test/00_setup_env_win64.sh +++ b/depend/bitcoin/ci/test/00_setup_env_win64.sh @@ -1,15 +1,15 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-present The Bitcoin Core developers +# Copyright (c) 2025-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_win64 -export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" # Check that https://packages.ubuntu.com/noble/g++-mingw-w64-x86-64-posix (version 13.x, similar to guix) can cross-compile -export HOST=x86_64-w64-mingw32 -export PACKAGES="g++-mingw-w64-x86-64-posix nsis" +export CI_IMAGE_NAME_TAG="mirror.gcr.io/debian:trixie" # Check that https://packages.debian.org/trixie/g++-mingw-w64-ucrt64 can cross-compile +export HOST=x86_64-w64-mingw32ucrt +export PACKAGES="g++-mingw-w64-ucrt64 nsis" export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false export GOAL="deploy" diff --git a/depend/bitcoin/ci/test/00_setup_env_win64_msvcrt.sh b/depend/bitcoin/ci/test/00_setup_env_win64_msvcrt.sh new file mode 100755 index 0000000..86e0ea9 --- /dev/null +++ b/depend/bitcoin/ci/test/00_setup_env_win64_msvcrt.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +export LC_ALL=C.UTF-8 + +export CONTAINER_NAME=ci_win64_msvcrt +export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" # Check that https://packages.ubuntu.com/noble/g++-mingw-w64-x86-64-posix (version 13.x, similar to guix) can cross-compile +export HOST=x86_64-w64-mingw32 +export PACKAGES="g++-mingw-w64-x86-64-posix nsis" +export RUN_UNIT_TESTS=false +export RUN_FUNCTIONAL_TESTS=false +export GOAL="deploy" +export BITCOIN_CONFIG="\ + --preset=dev-mode \ + -DENABLE_IPC=OFF \ + -DWITH_USDT=OFF \ + -DREDUCE_EXPORTS=ON \ + -DCMAKE_CXX_FLAGS='-Wno-error=maybe-uninitialized' \ +" diff --git a/depend/bitcoin/ci/test/01_base_install.sh b/depend/bitcoin/ci/test/01_base_install.sh index 9bb67aa..a0f4164 100755 --- a/depend/bitcoin/ci/test/01_base_install.sh +++ b/depend/bitcoin/ci/test/01_base_install.sh @@ -90,7 +90,7 @@ mkdir -p "${DEPENDS_DIR}/SDKs" "${DEPENDS_DIR}/sdk-sources" OSX_SDK_BASENAME="Xcode-${XCODE_VERSION}-${XCODE_BUILD_ID}-extracted-SDK-with-libcxx-headers" if [ -n "$XCODE_VERSION" ] && [ ! -d "${DEPENDS_DIR}/SDKs/${OSX_SDK_BASENAME}" ]; then - OSX_SDK_FILENAME="${OSX_SDK_BASENAME}.tar.gz" + OSX_SDK_FILENAME="${OSX_SDK_BASENAME}.tar" OSX_SDK_PATH="${DEPENDS_DIR}/sdk-sources/${OSX_SDK_FILENAME}" if [ ! -f "$OSX_SDK_PATH" ]; then ${CI_RETRY_EXE} curl --location --fail "${SDK_URL}/${OSX_SDK_FILENAME}" -o "$OSX_SDK_PATH" diff --git a/depend/bitcoin/ci/test/02_run_container.py b/depend/bitcoin/ci/test/02_run_container.py index 4d0bed2..311fe92 100755 --- a/depend/bitcoin/ci/test/02_run_container.py +++ b/depend/bitcoin/ci/test/02_run_container.py @@ -26,7 +26,6 @@ def main(): ["bash", "-c", "grep export ./ci/test/00_setup_env*.sh"], stdout=subprocess.PIPE, text=True, - encoding="utf8", ).stdout.splitlines() settings = set(l.split("=")[0].split("export ")[1] for l in settings) # Add "hidden" settings, which are never exported, manually. 
Otherwise, @@ -42,7 +41,7 @@ def main(): u=os.environ["USER"], c=os.environ["CONTAINER_NAME"], ) - with open(env_file, "w", encoding="utf8") as file: + with open(env_file, "w") as file: for k, v in os.environ.items(): if k in settings: file.write(f"{k}={v}\n") diff --git a/depend/bitcoin/ci/test/02_run_container.sh b/depend/bitcoin/ci/test/02_run_container.sh index 251b01c..41128b5 100755 --- a/depend/bitcoin/ci/test/02_run_container.sh +++ b/depend/bitcoin/ci/test/02_run_container.sh @@ -18,16 +18,13 @@ else fi CI_EXEC () { - $CI_EXEC_CMD_PREFIX bash -c "export PATH=\"/path_with space:${BINS_SCRATCH_DIR}:${BASE_ROOT_DIR}/ci/retry:\$PATH\" && cd \"${BASE_ROOT_DIR}\" && $*" + $CI_EXEC_CMD_PREFIX bash -c "export PATH=\"/path_with space:${BASE_ROOT_DIR}/ci/retry:\$PATH\" && cd \"${BASE_ROOT_DIR}\" && $*" } export -f CI_EXEC # Normalize all folders to BASE_ROOT_DIR CI_EXEC rsync --recursive --perms --stats --human-readable "${BASE_READ_ONLY_DIR}/" "${BASE_ROOT_DIR}" || echo "Nothing to copy from ${BASE_READ_ONLY_DIR}/" CI_EXEC "${BASE_ROOT_DIR}/ci/test/01_base_install.sh" - -CI_EXEC mkdir -p "${BINS_SCRATCH_DIR}" - CI_EXEC "${BASE_ROOT_DIR}/ci/test/03_test_script.sh" if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then diff --git a/depend/bitcoin/ci/test/03_test_script.sh b/depend/bitcoin/ci/test/03_test_script.sh index 09da802..e61dc33 100755 --- a/depend/bitcoin/ci/test/03_test_script.sh +++ b/depend/bitcoin/ci/test/03_test_script.sh @@ -85,15 +85,6 @@ elif [ "$RUN_UNIT_TESTS" = "true" ]; then fi fi -if [ "$USE_BUSY_BOX" = "true" ]; then - echo "Setup to use BusyBox utils" - # tar excluded for now because it requires passing in the exact archive type in ./depends (fixed in later BusyBox version) - # ar excluded for now because it does not recognize the -q option in ./depends (unknown if fixed) - for util in $(busybox --list | grep -v "^ar$" | grep -v "^tar$" ); do ln -s "$(command -v busybox)" "${BINS_SCRATCH_DIR}/$util"; done - # Print BusyBox version - patch --help -fi - # Make sure default datadir does not exist and is never read by creating a dummy file if [ "$CI_OS_NAME" == "macos" ]; then echo > "${HOME}/Library/Application Support/Bitcoin" diff --git a/depend/bitcoin/contrib/asmap/asmap-tool.py b/depend/bitcoin/contrib/asmap/asmap-tool.py index 33a380a..c679ab1 100755 --- a/depend/bitcoin/contrib/asmap/asmap-tool.py +++ b/depend/bitcoin/contrib/asmap/asmap-tool.py @@ -140,15 +140,19 @@ def main(): state1 = load_file(args.infile1) state2 = load_file(args.infile2) ipv4_changed = 0 + ipv4_entries_changed = 0 ipv6_changed = 0 + ipv6_entries_changed = 0 for prefix, old_asn, new_asn in state1.diff(state2): if args.ignore_unassigned and old_asn == 0: continue net = asmap.prefix_to_net(prefix) if isinstance(net, ipaddress.IPv4Network): - ipv4_changed += 1 << (32 - net.prefixlen) + ipv4_changed += net.num_addresses + ipv4_entries_changed += 1 elif isinstance(net, ipaddress.IPv6Network): - ipv6_changed += 1 << (128 - net.prefixlen) + ipv6_changed += net.num_addresses + ipv6_entries_changed += 1 if new_asn == 0: print(f"# {net} was AS{old_asn}") elif old_asn == 0: @@ -159,8 +163,9 @@ def main(): ipv6_change_str = "" if ipv6_changed == 0 else f" (2^{math.log2(ipv6_changed):.2f})" print( - f"# {ipv4_changed}{ipv4_change_str} IPv4 addresses changed; " - f"{ipv6_changed}{ipv6_change_str} IPv6 addresses changed" +f"""# Summary +IPv4: {ipv4_entries_changed} entries with {ipv4_changed}{ipv4_change_str} addresses changed +IPv6: {ipv6_entries_changed} entries with {ipv6_changed}{ipv6_change_str} addresses changed""" 
) elif args.subcommand == "diff_addrs": state1 = load_file(args.infile1) diff --git a/depend/bitcoin/contrib/devtools/circular-dependencies.py b/depend/bitcoin/contrib/devtools/circular-dependencies.py index b742a8c..af19b3a 100755 --- a/depend/bitcoin/contrib/devtools/circular-dependencies.py +++ b/depend/bitcoin/contrib/devtools/circular-dependencies.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2018-2020 The Bitcoin Core developers +# Copyright (c) 2018-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -49,7 +49,7 @@ def module_name(path): # TODO: implement support for multiple include directories for arg in sorted(files.keys()): module = files[arg] - with open(arg, 'r', encoding="utf8") as f: + with open(arg, 'r') as f: for line in f: match = RE.match(line) if match: diff --git a/depend/bitcoin/contrib/devtools/clang-format-diff.py b/depend/bitcoin/contrib/devtools/clang-format-diff.py index 30e804d..f351563 100755 --- a/depend/bitcoin/contrib/devtools/clang-format-diff.py +++ b/depend/bitcoin/contrib/devtools/clang-format-diff.py @@ -169,7 +169,7 @@ def main(): sys.exit(p.returncode) if not args.i: - with open(filename, encoding="utf8") as f: + with open(filename) as f: code = f.readlines() formatted_code = StringIO(stdout).readlines() diff = difflib.unified_diff( diff --git a/depend/bitcoin/contrib/devtools/copyright_header.py b/depend/bitcoin/contrib/devtools/copyright_header.py index 12d7276..aa77de8 100755 --- a/depend/bitcoin/contrib/devtools/copyright_header.py +++ b/depend/bitcoin/contrib/devtools/copyright_header.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2016-2022 The Bitcoin Core developers +# Copyright (c) 2016-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
@@ -54,12 +54,12 @@ def applies_to_file(filename): GIT_TOPLEVEL_CMD = 'git rev-parse --show-toplevel'.split(' ') def call_git_ls(base_directory): - out = subprocess.check_output([*GIT_LS_CMD, base_directory]) - return [f for f in out.decode("utf-8").split('\n') if f != ''] + out = subprocess.check_output([*GIT_LS_CMD, base_directory], text=True) + return [f for f in out.split('\n') if f != ''] def call_git_toplevel(): "Returns the absolute path to the project root" - return subprocess.check_output(GIT_TOPLEVEL_CMD).strip().decode("utf-8") + return subprocess.check_output(GIT_TOPLEVEL_CMD, text=True).strip() def get_filenames_to_examine(base_directory): "Returns an array of absolute paths to any project files in the base_directory that pass the include/exclude filters" @@ -140,7 +140,7 @@ def file_has_without_c_style_copyright_for_holder(contents, holder_name): ################################################################################ def read_file(filename): - return open(filename, 'r', encoding="utf8").read() + return open(filename, 'r').read() def gather_file_info(filename): info = {} @@ -298,8 +298,8 @@ def report_cmd(argv): GIT_LOG_CMD = "git log --pretty=format:%%ai %s" def call_git_log(filename): - out = subprocess.check_output((GIT_LOG_CMD % filename).split(' ')) - return out.decode("utf-8").split('\n') + out = subprocess.check_output((GIT_LOG_CMD % filename).split(' '), text=True) + return out.split('\n') def get_git_change_years(filename): git_log_lines = call_git_log(filename) @@ -316,12 +316,12 @@ def get_most_recent_git_change_year(filename): ################################################################################ def read_file_lines(filename): - with open(filename, 'r', encoding="utf8") as f: + with open(filename, 'r') as f: file_lines = f.readlines() return file_lines def write_file_lines(filename, file_lines): - with open(filename, 'w', encoding="utf8") as f: + with open(filename, 'w') as f: f.write(''.join(file_lines)) ################################################################################ diff --git a/depend/bitcoin/contrib/devtools/gen-manpages.py b/depend/bitcoin/contrib/devtools/gen-manpages.py index 12d1615..02e5f83 100755 --- a/depend/bitcoin/contrib/devtools/gen-manpages.py +++ b/depend/bitcoin/contrib/devtools/gen-manpages.py @@ -3,6 +3,7 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. import os +import re import subprocess import sys import tempfile @@ -59,10 +60,11 @@ print(f'{abspath} not found or not an executable', file=sys.stderr) sys.exit(1) # take first line (which must contain version) - verstr = r.stdout.splitlines()[0] - # last word of line is the actual version e.g. v22.99.0-5c6b3d5b3508 - verstr = verstr.split()[-1] - assert verstr.startswith('v') + output = r.stdout.splitlines()[0] + # find the version e.g. 
v30.99.0-ce771726f3e7 + search = re.search(r"v[0-9]\S+", output) + assert search + verstr = search.group(0) # remaining lines are copyright copyright = r.stdout.split('\n')[1:] assert copyright[0].startswith('Copyright (C)') diff --git a/depend/bitcoin/contrib/filter-lcov.py b/depend/bitcoin/contrib/filter-lcov.py index db780ad..1dedea6 100755 --- a/depend/bitcoin/contrib/filter-lcov.py +++ b/depend/bitcoin/contrib/filter-lcov.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2017-2020 The Bitcoin Core developers +# Copyright (c) 2017-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -16,8 +16,8 @@ outfile = args.outfile in_remove = False -with open(tracefile, 'r', encoding="utf8") as f: - with open(outfile, 'w', encoding="utf8") as wf: +with open(tracefile, 'r') as f: + with open(outfile, 'w') as wf: for line in f: for p in pattern: if line.startswith("SF:") and p in line: diff --git a/depend/bitcoin/contrib/guix/README.md b/depend/bitcoin/contrib/guix/README.md index 7f6b823..aadc231 100644 --- a/depend/bitcoin/contrib/guix/README.md +++ b/depend/bitcoin/contrib/guix/README.md @@ -37,7 +37,7 @@ You can then either point to the SDK using the `SDK_PATH` environment variable: ```sh # Extract the SDK tarball to /path/to/parent/dir/of/extracted/SDK/Xcode---extracted-SDK-with-libcxx-headers -tar -C /path/to/parent/dir/of/extracted/SDK -xaf /path/to/Xcode---extracted-SDK-with-libcxx-headers.tar.gz +tar -C /path/to/parent/dir/of/extracted/SDK -xaf /path/to/Xcode---extracted-SDK-with-libcxx-headers.tar # Indicate where to locate the SDK tarball export SDK_PATH=/path/to/parent/dir/of/extracted/SDK diff --git a/depend/bitcoin/contrib/guix/symbol-check.py b/depend/bitcoin/contrib/guix/symbol-check.py index 27483aa..c32c1b5 100755 --- a/depend/bitcoin/contrib/guix/symbol-check.py +++ b/depend/bitcoin/contrib/guix/symbol-check.py @@ -26,6 +26,13 @@ # # - libc version 2.34 (https://mirror.stream.centos.org/9-stream/AppStream/x86_64/os/Packages/) # +# bitcoin-qt +# +# Ubuntu 22.04 is currently the baseline for ELF_ALLOWED_LIBRARIES: +# +# libfontconfig version 2.13.1 (https://packages.ubuntu.com/jammy/libfontconfig1) +# +# libfreetype version 2.11.1 (https://packages.ubuntu.com/jammy/libfreetype6) MAX_VERSIONS = { 'GLIBC': { @@ -39,8 +46,7 @@ # Ignore symbols that are exported as part of every executable IGNORE_EXPORTS = { -'environ', '_environ', '__environ', '_fini', '_init', 'stdin', -'stdout', 'stderr', +'stdin', 'stdout', 'stderr', } # Expected linker-loader names can be found here: diff --git a/depend/bitcoin/contrib/linearize/linearize-data.py b/depend/bitcoin/contrib/linearize/linearize-data.py index 74d9830..9b29fe7 100755 --- a/depend/bitcoin/contrib/linearize/linearize-data.py +++ b/depend/bitcoin/contrib/linearize/linearize-data.py @@ -2,7 +2,7 @@ # # linearize-data.py: Construct a linear, no-fork version of the chain. # -# Copyright (c) 2013-2022 The Bitcoin Core developers +# Copyright (c) 2013-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # @@ -34,7 +34,7 @@ def get_blk_dt(blk_hdr): # When getting the list of block hashes, undo any byte reversals. 
def get_block_hashes(settings): blkindex = [] - with open(settings['hashlist'], "r", encoding="utf8") as f: + with open(settings['hashlist'], "r") as f: for line in f: line = line.rstrip() if settings['rev_hash_bytes'] == 'true': @@ -267,7 +267,7 @@ def run(self): print("Usage: linearize-data.py CONFIG-FILE") sys.exit(1) - with open(sys.argv[1], encoding="utf8") as f: + with open(sys.argv[1]) as f: for line in f: # skip comment lines m = re.search(r'^\s*#', line) diff --git a/depend/bitcoin/contrib/linearize/linearize-hashes.py b/depend/bitcoin/contrib/linearize/linearize-hashes.py index 695bafa..7e99787 100755 --- a/depend/bitcoin/contrib/linearize/linearize-hashes.py +++ b/depend/bitcoin/contrib/linearize/linearize-hashes.py @@ -2,7 +2,7 @@ # # linearize-hashes.py: List blocks in a linear, no-fork version of the chain. # -# Copyright (c) 2013-2022 The Bitcoin Core developers +# Copyright (c) 2013-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # @@ -87,7 +87,7 @@ def get_block_hashes(settings, max_blocks_per_call=10000): def get_rpc_cookie(): # Open the cookie file - with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r', encoding="ascii") as f: + with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r') as f: combined = f.readline() combined_split = combined.split(":") settings['rpcuser'] = combined_split[0] @@ -98,7 +98,7 @@ def get_rpc_cookie(): print("Usage: linearize-hashes.py CONFIG-FILE") sys.exit(1) - with open(sys.argv[1], encoding="utf8") as f: + with open(sys.argv[1]) as f: for line in f: # skip comment lines m = re.search(r'^\s*#', line) diff --git a/depend/bitcoin/contrib/macdeploy/README.md b/depend/bitcoin/contrib/macdeploy/README.md index d47ee67..1763c6c 100644 --- a/depend/bitcoin/contrib/macdeploy/README.md +++ b/depend/bitcoin/contrib/macdeploy/README.md @@ -44,15 +44,15 @@ xip -x Xcode_15.xip ### Step 2: Generating the SDK tarball from `Xcode.app` -To generate the SDK, run the script [`gen-sdk`](./gen-sdk) with the +To generate the SDK, run the script [`gen-sdk.py`](./gen-sdk.py) with the path to `Xcode.app` (extracted in the previous stage) as the first argument. ```bash -./contrib/macdeploy/gen-sdk '/path/to/Xcode.app' +./contrib/macdeploy/gen-sdk.py '/path/to/Xcode.app' ``` -The generated archive should be: `Xcode-15.0-15A240d-extracted-SDK-with-libcxx-headers.tar.gz`. -The `sha256sum` should be `c0c2e7bb92c1fee0c4e9f3a485e4530786732d6c6dd9e9f418c282aa6892f55d`. +The generated archive should be: `Xcode-15.0-15A240d-extracted-SDK-with-libcxx-headers.tar`. +The `sha256sum` should be `95b00dc41fa090747dc0a7907a5031a2fcb2d7f95c9584ba6bccdb99b6e3d498`. 
## Deterministic macOS App Notes diff --git a/depend/bitcoin/contrib/macdeploy/gen-sdk b/depend/bitcoin/contrib/macdeploy/gen-sdk.py similarity index 74% rename from depend/bitcoin/contrib/macdeploy/gen-sdk rename to depend/bitcoin/contrib/macdeploy/gen-sdk.py index f0bbabf..cf37929 100755 --- a/depend/bitcoin/contrib/macdeploy/gen-sdk +++ b/depend/bitcoin/contrib/macdeploy/gen-sdk.py @@ -2,9 +2,7 @@ import argparse import plistlib import pathlib -import sys import tarfile -import gzip import os import contextlib @@ -22,12 +20,12 @@ def run(): parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawTextHelpFormatter) - parser.add_argument('xcode_app', metavar='XCODEAPP', nargs=1) - parser.add_argument("-o", metavar='OUTSDKTGZ', nargs=1, dest='out_sdktgz', required=False) + parser.add_argument('xcode_app', metavar='XCODEAPP', type=pathlib.Path) + parser.add_argument("-o", metavar='OUTSDKTAR', dest='out_sdkt', type=pathlib.Path, required=False) args = parser.parse_args() - xcode_app = pathlib.Path(args.xcode_app[0]).resolve() + xcode_app = args.xcode_app.resolve() assert xcode_app.is_dir(), "The supplied Xcode.app path '{}' either does not exist or is not a directory".format(xcode_app) xcode_app_plist = xcode_app.joinpath("Contents/version.plist") @@ -47,11 +45,7 @@ def run(): out_name = "Xcode-{xcode_version}-{xcode_build_id}-extracted-SDK-with-libcxx-headers".format(xcode_version=xcode_version, xcode_build_id=xcode_build_id) - if args.out_sdktgz: - out_sdktgz_path = pathlib.Path(args.out_sdktgz_path) - else: - # Construct our own out_sdktgz if not specified on the command line - out_sdktgz_path = pathlib.Path("./{}.tar.gz".format(out_name)) + out_sdkt_path = args.out_sdkt or pathlib.Path("./{}.tar".format(out_name)) def tarfp_add_with_base_change(tarfp, dir_to_add, alt_base_dir): """Add all files in dir_to_add to tarfp, but prepend alt_base_dir to the files' @@ -68,6 +62,8 @@ def tarfp_add_with_base_change(tarfp, dir_to_add, alt_base_dir): """ def change_tarinfo_base(tarinfo): + if tarinfo.name and tarinfo.name.endswith((".swiftmodule", ".modulemap")): + return None if tarinfo.name and tarinfo.name.startswith("./"): tarinfo.name = str(pathlib.Path(alt_base_dir, tarinfo.name)) if tarinfo.linkname and tarinfo.linkname.startswith("./"): @@ -81,16 +77,17 @@ def change_tarinfo_base(tarinfo): return tarinfo with cd(dir_to_add): # recursion already adds entries in sorted order - tarfp.add(".", recursive=True, filter=change_tarinfo_base) - - print("Creating output .tar.gz file...") - with out_sdktgz_path.open("wb") as fp: - with gzip.GzipFile(fileobj=fp, mode='wb', compresslevel=9, mtime=0) as gzf: - with tarfile.open(mode="w", fileobj=gzf, format=tarfile.GNU_FORMAT) as tarfp: - print("Adding MacOSX SDK {} files...".format(sdk_version)) - tarfp_add_with_base_change(tarfp, sdk_dir, out_name) - print("Done! Find the resulting gzipped tarball at:") - print(out_sdktgz_path.resolve()) + tarfp.add("./usr/include", recursive=True, filter=change_tarinfo_base) + tarfp.add("./usr/lib", recursive=True, filter=change_tarinfo_base) + tarfp.add("./System/Library/Frameworks", recursive=True, filter=change_tarinfo_base) + + print("Creating output .tar file...") + with out_sdkt_path.open("wb") as fp: + with tarfile.open(mode="w", fileobj=fp, format=tarfile.PAX_FORMAT) as tarfp: + print("Adding MacOSX SDK {} files...".format(sdk_version)) + tarfp_add_with_base_change(tarfp, sdk_dir, out_name) + print("Done! 
Find the resulting tarball at:") + print(out_sdkt_path.resolve()) if __name__ == '__main__': run() diff --git a/depend/bitcoin/contrib/message-capture/message-capture-parser.py b/depend/bitcoin/contrib/message-capture/message-capture-parser.py index 0f40971..1d2d633 100755 --- a/depend/bitcoin/contrib/message-capture/message-capture-parser.py +++ b/depend/bitcoin/contrib/message-capture/message-capture-parser.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2020-2022 The Bitcoin Core developers +# Copyright (c) 2020-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Parse message capture binary files. To be used in conjunction with -capturemessages.""" @@ -205,7 +205,7 @@ def main(): jsonrep = json.dumps(messages) if output: - with open(str(output), 'w+', encoding="utf8") as f_out: + with open(str(output), 'w+') as f_out: f_out.write(jsonrep) else: print(jsonrep) diff --git a/depend/bitcoin/contrib/seeds/README.md b/depend/bitcoin/contrib/seeds/README.md index 63a51b5..2eb6846 100644 --- a/depend/bitcoin/contrib/seeds/README.md +++ b/depend/bitcoin/contrib/seeds/README.md @@ -10,14 +10,13 @@ to addrman with). Update `MIN_BLOCKS` in `makeseeds.py` and the `-m`/`--minblocks` arguments below, as needed. -The seeds compiled into the release are created from sipa's, achow101's and luke-jr's +The seeds compiled into the release are created from sipa's and achow101's DNS seed, virtu's crawler, and asmap community AS map data. Run the following commands from the `/contrib/seeds` directory: ``` curl https://bitcoin.sipa.be/seeds.txt.gz | gzip -dc > seeds_main.txt curl https://21.ninja/seeds.txt.gz | gzip -dc >> seeds_main.txt -curl https://luke.dashjr.org/programs/bitcoin/files/charts/seeds.txt >> seeds_main.txt curl https://mainnet.achownodes.xyz/seeds.txt.gz | gzip -dc >> seeds_main.txt curl https://signet.achownodes.xyz/seeds.txt.gz | gzip -dc > seeds_signet.txt curl https://testnet.achownodes.xyz/seeds.txt.gz | gzip -dc > seeds_test.txt diff --git a/depend/bitcoin/contrib/seeds/generate-seeds.py b/depend/bitcoin/contrib/seeds/generate-seeds.py index a3ce012..9ad4880 100755 --- a/depend/bitcoin/contrib/seeds/generate-seeds.py +++ b/depend/bitcoin/contrib/seeds/generate-seeds.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2021 The Bitcoin Core developers +# Copyright (c) 2014-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
''' @@ -168,16 +168,16 @@ def main(): g.write(' *\n') g.write(' * Each line contains a BIP155 serialized (networkID, addr, port) tuple.\n') g.write(' */\n') - with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f: + with open(os.path.join(indir,'nodes_main.txt'), 'r') as f: process_nodes(g, f, 'chainparams_seed_main') g.write('\n') - with open(os.path.join(indir,'nodes_signet.txt'), 'r', encoding="utf8") as f: + with open(os.path.join(indir,'nodes_signet.txt'), 'r') as f: process_nodes(g, f, 'chainparams_seed_signet') g.write('\n') - with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f: + with open(os.path.join(indir,'nodes_test.txt'), 'r') as f: process_nodes(g, f, 'chainparams_seed_test') g.write('\n') - with open(os.path.join(indir,'nodes_testnet4.txt'), 'r', encoding="utf8") as f: + with open(os.path.join(indir,'nodes_testnet4.txt'), 'r') as f: process_nodes(g, f, 'chainparams_seed_testnet4') g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n') diff --git a/depend/bitcoin/contrib/seeds/makeseeds.py b/depend/bitcoin/contrib/seeds/makeseeds.py index 0e5826e..0af614a 100755 --- a/depend/bitcoin/contrib/seeds/makeseeds.py +++ b/depend/bitcoin/contrib/seeds/makeseeds.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2013-2022 The Bitcoin Core developers +# Copyright (c) 2013-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # @@ -211,7 +211,7 @@ def main(): print('Done.', file=sys.stderr) print('Loading and parsing DNS seeds…', end='', file=sys.stderr, flush=True) - with open(args.seeds, 'r', encoding='utf8') as f: + with open(args.seeds, 'r') as f: lines = f.readlines() ips = [parseline(line) for line in lines] random.shuffle(ips) diff --git a/depend/bitcoin/contrib/verify-binaries/verify.py b/depend/bitcoin/contrib/verify-binaries/verify.py index 6c07b36..af89112 100755 --- a/depend/bitcoin/contrib/verify-binaries/verify.py +++ b/depend/bitcoin/contrib/verify-binaries/verify.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2020-2021 The Bitcoin Core developers +# Copyright (c) 2020-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Script for verifying Bitcoin Core release binaries. 
@@ -246,8 +246,8 @@ def files_are_equal(filename1, filename2): eq = contents1 == contents2 if not eq: - with open(filename1, 'r', encoding='utf-8') as f1, \ - open(filename2, 'r', encoding='utf-8') as f2: + with open(filename1, 'r') as f1, \ + open(filename2, 'r') as f2: f1lines = f1.readlines() f2lines = f2.readlines() @@ -426,7 +426,7 @@ def verify_shasums_signature( def parse_sums_file(sums_file_path: str, filename_filter: list[str]) -> list[list[str]]: # extract hashes/filenames of binaries to verify from hash file; # each line has the following format: " " - with open(sums_file_path, 'r', encoding='utf8') as hash_file: + with open(sums_file_path, 'r') as hash_file: return [line.split()[:2] for line in hash_file if len(filename_filter) == 0 or any(f in line for f in filename_filter)] diff --git a/depend/bitcoin/contrib/verify-commits/verify-commits.py b/depend/bitcoin/contrib/verify-commits/verify-commits.py index a1fe78a..1af6b03 100755 --- a/depend/bitcoin/contrib/verify-commits/verify-commits.py +++ b/depend/bitcoin/contrib/verify-commits/verify-commits.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2018-2022 The Bitcoin Core developers +# Copyright (c) 2018-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Verify commits against a trusted keys list.""" @@ -82,17 +82,17 @@ def main(): # get directory of this program and read data files dirname = os.path.dirname(os.path.abspath(__file__)) print("Using verify-commits data from " + dirname) - with open(dirname + "/trusted-git-root", "r", encoding="utf8") as f: + with open(dirname + "/trusted-git-root", "r") as f: verified_root = f.read().splitlines()[0] - with open(dirname + "/trusted-sha512-root-commit", "r", encoding="utf8") as f: + with open(dirname + "/trusted-sha512-root-commit", "r") as f: verified_sha512_root = f.read().splitlines()[0] - with open(dirname + "/allow-revsig-commits", "r", encoding="utf8") as f: + with open(dirname + "/allow-revsig-commits", "r") as f: revsig_allowed = f.read().splitlines() - with open(dirname + "/allow-unclean-merge-commits", "r", encoding="utf8") as f: + with open(dirname + "/allow-unclean-merge-commits", "r") as f: unclean_merge_allowed = f.read().splitlines() - with open(dirname + "/allow-incorrect-sha512-commits", "r", encoding="utf8") as f: + with open(dirname + "/allow-incorrect-sha512-commits", "r") as f: incorrect_sha512_allowed = f.read().splitlines() - with open(dirname + "/trusted-keys", "r", encoding="utf8") as f: + with open(dirname + "/trusted-keys", "r") as f: trusted_keys = f.read().splitlines() # Set commit and variables @@ -140,8 +140,8 @@ def main(): # Check that the commit (and parents) was signed with a trusted key valid_sig = False - verify_res = subprocess.run([GIT, '-c', 'gpg.program={}/gpg.sh'.format(dirname), 'verify-commit', "--raw", current_commit], capture_output=True) - for line in verify_res.stderr.decode().splitlines(): + verify_res = subprocess.run([GIT, '-c', 'gpg.program={}/gpg.sh'.format(dirname), 'verify-commit', "--raw", current_commit], text=True, capture_output=True) + for line in verify_res.stderr.splitlines(): if line.startswith("[GNUPG:] VALIDSIG "): key = line.split(" ")[-1] valid_sig = key in trusted_keys @@ -152,7 +152,7 @@ def main(): if prev_commit != "": print("No parent of {} was signed with a trusted key!".format(prev_commit), file=sys.stderr) print("Parents are:", file=sys.stderr) - parents = 
subprocess.check_output([GIT, 'show', '-s', '--format=format:%P', prev_commit]).decode('utf8').splitlines()[0].split(' ') + parents = subprocess.check_output([GIT, 'show', '-s', '--format=format:%P', prev_commit], text=True).splitlines()[0].split(' ') for parent in parents: subprocess.call([GIT, 'show', '-s', parent], stdout=sys.stderr) else: @@ -162,29 +162,29 @@ def main(): # Check the Tree-SHA512 if (verify_tree or prev_commit == "") and current_commit not in incorrect_sha512_allowed: tree_hash = tree_sha512sum(current_commit) - if ("Tree-SHA512: {}".format(tree_hash)) not in subprocess.check_output([GIT, 'show', '-s', '--format=format:%B', current_commit]).decode('utf8').splitlines(): + if ("Tree-SHA512: {}".format(tree_hash)) not in subprocess.check_output([GIT, 'show', '-s', '--format=format:%B', current_commit], text=True).splitlines(): print("Tree-SHA512 did not match for commit " + current_commit, file=sys.stderr) sys.exit(1) # Merge commits should only have two parents - parents = subprocess.check_output([GIT, 'show', '-s', '--format=format:%P', current_commit]).decode('utf8').splitlines()[0].split(' ') + parents = subprocess.check_output([GIT, 'show', '-s', '--format=format:%P', current_commit], text=True).splitlines()[0].split(' ') if len(parents) > 2: print("Commit {} is an octopus merge".format(current_commit), file=sys.stderr) sys.exit(1) # Check that the merge commit is clean - commit_time = int(subprocess.check_output([GIT, 'show', '-s', '--format=format:%ct', current_commit]).decode('utf8').splitlines()[0]) + commit_time = int(subprocess.check_output([GIT, 'show', '-s', '--format=format:%ct', current_commit], text=True).splitlines()[0]) check_merge = commit_time > time.time() - args.clean_merge * 24 * 60 * 60 # Only check commits in clean_merge days allow_unclean = current_commit in unclean_merge_allowed if len(parents) == 2 and check_merge and not allow_unclean: - current_tree = subprocess.check_output([GIT, 'show', '--format=%T', current_commit]).decode('utf8').splitlines()[0] + current_tree = subprocess.check_output([GIT, 'show', '--format=%T', current_commit], text=True).splitlines()[0] # This merge-tree functionality requires git >= 2.38. The # --write-tree option was added in order to opt-in to the new # behavior. Older versions of git will not recognize the option and # will instead exit with code 128. try: - recreated_tree = subprocess.check_output([GIT, "merge-tree", "--write-tree", parents[0], parents[1]]).decode('utf8').splitlines()[0] + recreated_tree = subprocess.check_output([GIT, "merge-tree", "--write-tree", parents[0], parents[1]], text=True).splitlines()[0] except subprocess.CalledProcessError as e: if e.returncode == 128: print("git v2.38+ is required for this functionality.", file=sys.stderr) diff --git a/depend/bitcoin/depends/README.md b/depend/bitcoin/depends/README.md index 71110e9..706c3db 100644 --- a/depend/bitcoin/depends/README.md +++ b/depend/bitcoin/depends/README.md @@ -122,7 +122,8 @@ Common `host-platform-triplet`s for cross compilation are: - `i686-pc-linux-gnu` for Linux x86 32 bit - `x86_64-pc-linux-gnu` for Linux x86 64 bit -- `x86_64-w64-mingw32` for Win64 +- `x86_64-w64-mingw32` for Windows using MSVCRT +- `x86_64-w64-mingw32ucrt` for Windows using UCRT - `x86_64-apple-darwin` for Intel macOS - `arm64-apple-darwin` for ARM macOS - `arm-linux-gnueabihf` for Linux ARM 32 bit @@ -144,10 +145,14 @@ proceeding with a cross-compile. Under the depends directory, create a subdirectory named `SDKs`. 
Then, place the extracted SDK under this new directory. For more information, see [SDK Extraction](../contrib/macdeploy/README.md#sdk-extraction). -#### For Win64 cross compilation +#### For Windows cross compilation using MSVCRT apt install g++-mingw-w64-x86-64-posix +#### For Windows cross compilation using UCRT + + apt install g++-mingw-w64-ucrt64 + #### For linux (including i386, ARM) cross compilation Common linux dependencies: diff --git a/depend/bitcoin/depends/config.guess b/depend/bitcoin/depends/config.guess index 48a6846..a9d01fd 100755 --- a/depend/bitcoin/depends/config.guess +++ b/depend/bitcoin/depends/config.guess @@ -1,10 +1,10 @@ #! /bin/sh # Attempt to guess a canonical system name. -# Copyright 1992-2024 Free Software Foundation, Inc. +# Copyright 1992-2025 Free Software Foundation, Inc. # shellcheck disable=SC2006,SC2268 # see below for rationale -timestamp='2024-07-27' +timestamp='2025-07-10' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -60,7 +60,7 @@ version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. -Copyright 1992-2024 Free Software Foundation, Inc. +Copyright 1992-2025 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." @@ -1597,8 +1597,11 @@ EOF *:Unleashed:*:*) GUESS=$UNAME_MACHINE-unknown-unleashed$UNAME_RELEASE ;; - *:Ironclad:*:*) - GUESS=$UNAME_MACHINE-unknown-ironclad + x86_64:[Ii]ronclad:*:*|i?86:[Ii]ronclad:*:*) + GUESS=$UNAME_MACHINE-pc-ironclad-mlibc + ;; + *:[Ii]ronclad:*:*) + GUESS=$UNAME_MACHINE-unknown-ironclad-mlibc ;; esac @@ -1808,8 +1811,8 @@ fi exit 1 # Local variables: -# eval: (add-hook 'before-save-hook 'time-stamp) +# eval: (add-hook 'before-save-hook 'time-stamp nil t) # time-stamp-start: "timestamp='" -# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-format: "%Y-%02m-%02d" # time-stamp-end: "'" # End: diff --git a/depend/bitcoin/depends/config.sub b/depend/bitcoin/depends/config.sub index 4aaae46..3d35cde 100755 --- a/depend/bitcoin/depends/config.sub +++ b/depend/bitcoin/depends/config.sub @@ -1,10 +1,10 @@ #! /bin/sh # Configuration validation subroutine script. -# Copyright 1992-2024 Free Software Foundation, Inc. +# Copyright 1992-2025 Free Software Foundation, Inc. # shellcheck disable=SC2006,SC2268,SC2162 # see below for rationale -timestamp='2024-05-27' +timestamp='2025-07-10' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -76,7 +76,7 @@ Report bugs and patches to ." version="\ GNU config.sub ($timestamp) -Copyright 1992-2024 Free Software Foundation, Inc. +Copyright 1992-2025 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." 
@@ -145,6 +145,7 @@ case $1 in | kfreebsd*-gnu* \ | knetbsd*-gnu* \ | kopensolaris*-gnu* \ + | ironclad-* \ | linux-* \ | managarm-* \ | netbsd*-eabi* \ @@ -242,7 +243,6 @@ case $1 in | rombug \ | semi \ | sequent* \ - | siemens \ | sgi* \ | siemens \ | sim \ @@ -261,7 +261,7 @@ case $1 in basic_machine=$field1-$field2 basic_os= ;; - zephyr*) + tock* | zephyr*) basic_machine=$field1-unknown basic_os=$field2 ;; @@ -1194,7 +1194,7 @@ case $cpu-$vendor in xscale-* | xscalee[bl]-*) cpu=`echo "$cpu" | sed 's/^xscale/arm/'` ;; - arm64-* | aarch64le-*) + arm64-* | aarch64le-* | arm64_32-*) cpu=aarch64 ;; @@ -1321,6 +1321,7 @@ case $cpu-$vendor in | i960 \ | ia16 \ | ia64 \ + | intelgt \ | ip2k \ | iq2000 \ | javascript \ @@ -1522,6 +1523,10 @@ EOF kernel=nto os=`echo "$basic_os" | sed -e 's|nto|qnx|'` ;; + ironclad*) + kernel=ironclad + os=`echo "$basic_os" | sed -e 's|ironclad|mlibc|'` + ;; linux*) kernel=linux os=`echo "$basic_os" | sed -e 's|linux|gnu|'` @@ -1976,6 +1981,7 @@ case $os in | atheos* \ | auroraux* \ | aux* \ + | banan_os* \ | beos* \ | bitrig* \ | bme* \ @@ -2022,7 +2028,6 @@ case $os in | ios* \ | iris* \ | irix* \ - | ironclad* \ | isc* \ | its* \ | l4re* \ @@ -2118,6 +2123,7 @@ case $os in | sysv* \ | tenex* \ | tirtos* \ + | tock* \ | toppers* \ | tops10* \ | tops20* \ @@ -2214,6 +2220,8 @@ case $kernel-$os-$obj in ;; uclinux-uclibc*- | uclinux-gnu*- ) ;; + ironclad-mlibc*-) + ;; managarm-mlibc*- | managarm-kernel*- ) ;; windows*-msvc*-) @@ -2249,6 +2257,8 @@ case $kernel-$os-$obj in ;; *-eabi*- | *-gnueabi*-) ;; + ios*-simulator- | tvos*-simulator- | watchos*-simulator- ) + ;; none--*) # None (no kernel, i.e. freestanding / bare metal), # can be paired with an machine code file format @@ -2347,8 +2357,8 @@ echo "$cpu-$vendor${kernel:+-$kernel}${os:+-$os}${obj:+-$obj}" exit # Local variables: -# eval: (add-hook 'before-save-hook 'time-stamp) +# eval: (add-hook 'before-save-hook 'time-stamp nil t) # time-stamp-start: "timestamp='" -# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-format: "%Y-%02m-%02d" # time-stamp-end: "'" # End: diff --git a/depend/bitcoin/depends/packages/freetype.mk b/depend/bitcoin/depends/packages/freetype.mk index a97f82e..b34edc6 100644 --- a/depend/bitcoin/depends/packages/freetype.mk +++ b/depend/bitcoin/depends/packages/freetype.mk @@ -1,8 +1,8 @@ package=freetype -$(package)_version=2.11.0 +$(package)_version=2.11.1 $(package)_download_path=https://download.savannah.gnu.org/releases/$(package) -$(package)_file_name=$(package)-$($(package)_version).tar.xz -$(package)_sha256_hash=8bee39bd3968c4804b70614a0a3ad597299ad0e824bc8aad5ce8aaf48067bde7 +$(package)_file_name=$(package)-$($(package)_version).tar.gz +$(package)_sha256_hash=f8db94d307e9c54961b39a1cc799a67d46681480696ed72ecf78d4473770f09b $(package)_build_subdir=build $(package)_patches += cmake_minimum.patch diff --git a/depend/bitcoin/depends/packages/qt.mk b/depend/bitcoin/depends/packages/qt.mk index 20de235..60d2b63 100644 --- a/depend/bitcoin/depends/packages/qt.mk +++ b/depend/bitcoin/depends/packages/qt.mk @@ -14,6 +14,7 @@ $(package)_patches := dont_hardcode_pwd.patch $(package)_patches += qtbase_avoid_qmain.patch $(package)_patches += qtbase_platformsupport.patch $(package)_patches += qtbase_plugins_cocoa.patch +$(package)_patches += qtbase_plugins_windows11style.patch $(package)_patches += qtbase_skip_tools.patch $(package)_patches += rcc_hardcode_timestamp.patch $(package)_patches += qttools_skip_dependencies.patch @@ -258,6 +259,7 @@ define $(package)_preprocess_cmds patch -p1 -i 
$($(package)_patch_dir)/qtbase_avoid_qmain.patch && \ patch -p1 -i $($(package)_patch_dir)/qtbase_platformsupport.patch && \ patch -p1 -i $($(package)_patch_dir)/qtbase_plugins_cocoa.patch && \ + patch -p1 -i $($(package)_patch_dir)/qtbase_plugins_windows11style.patch && \ patch -p1 -i $($(package)_patch_dir)/static_fixes.patch && \ patch -p1 -i $($(package)_patch_dir)/qtbase_skip_tools.patch && \ patch -p1 -i $($(package)_patch_dir)/rcc_hardcode_timestamp.patch diff --git a/depend/bitcoin/depends/packages/qt_details.mk b/depend/bitcoin/depends/packages/qt_details.mk index 3414481..eeea275 100644 --- a/depend/bitcoin/depends/packages/qt_details.mk +++ b/depend/bitcoin/depends/packages/qt_details.mk @@ -13,14 +13,14 @@ qt_details_qttools_sha256_hash := f03bb7df619cd9ac9dba110e30b7bcab5dd88eb8bdc9cc qt_details_patches_path := $(PATCHES_PATH)/qt -qt_details_top_download_path := https://code.qt.io/cgit/qt/qt5.git/plain +qt_details_top_download_path := https://raw.githubusercontent.com/qt/qt5/refs/heads/$(qt_details_version) qt_details_top_cmakelists_file_name := CMakeLists.txt -qt_details_top_cmakelists_download_file := $(qt_details_top_cmakelists_file_name)?h=$(qt_details_version) +qt_details_top_cmakelists_download_file := $(qt_details_top_cmakelists_file_name) qt_details_top_cmakelists_sha256_hash := 9fb720a633c0c0a21c31fe62a34bf617726fed72480d4064f29ca5d6973d513f qt_details_top_cmake_download_path := $(qt_details_top_download_path)/cmake qt_details_top_cmake_ecmoptionaladdsubdirectory_file_name := ECMOptionalAddSubdirectory.cmake -qt_details_top_cmake_ecmoptionaladdsubdirectory_download_file := $(qt_details_top_cmake_ecmoptionaladdsubdirectory_file_name)?h=$(qt_details_version) +qt_details_top_cmake_ecmoptionaladdsubdirectory_download_file := $(qt_details_top_cmake_ecmoptionaladdsubdirectory_file_name) qt_details_top_cmake_ecmoptionaladdsubdirectory_sha256_hash := 97ee8bbfcb0a4bdcc6c1af77e467a1da0c5b386c42be2aa97d840247af5f6f70 qt_details_top_cmake_qttoplevelhelpers_file_name := QtTopLevelHelpers.cmake -qt_details_top_cmake_qttoplevelhelpers_download_file := $(qt_details_top_cmake_qttoplevelhelpers_file_name)?h=$(qt_details_version) +qt_details_top_cmake_qttoplevelhelpers_download_file := $(qt_details_top_cmake_qttoplevelhelpers_file_name) qt_details_top_cmake_qttoplevelhelpers_sha256_hash := 5ac2a7159ee27b5b86d26ecff44922e7b8f319aa847b7b5766dc17932fd4a294 diff --git a/depend/bitcoin/depends/packages/sqlite.mk b/depend/bitcoin/depends/packages/sqlite.mk index 67b7719..632ead7 100644 --- a/depend/bitcoin/depends/packages/sqlite.mk +++ b/depend/bitcoin/depends/packages/sqlite.mk @@ -1,35 +1,35 @@ package=sqlite -$(package)_version=3460100 -$(package)_download_path=https://sqlite.org/2024/ +$(package)_version=3500400 +$(package)_download_path=https://sqlite.org/2025/ $(package)_file_name=sqlite-autoconf-$($(package)_version).tar.gz -$(package)_sha256_hash=67d3fe6d268e6eaddcae3727fce58fcc8e9c53869bdd07a0c61e38ddf2965071 +$(package)_sha256_hash=a3db587a1b92ee5ddac2f66b3edb41b26f9c867275782d46c3a088977d6a5b18 +$(package)_patches = autosetup-fixup.patch define $(package)_set_vars -$(package)_config_opts=--disable-shared --disable-readline --disable-dynamic-extensions --enable-option-checking -$(package)_config_opts+= --disable-rtree --disable-fts4 --disable-fts5 -# We avoid using `--enable-debug` because it overrides CFLAGS, a behavior we want to prevent. 
-$(package)_cppflags_debug += -DSQLITE_DEBUG -$(package)_cppflags+=-DSQLITE_DQS=0 -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_OMIT_DEPRECATED -$(package)_cppflags+=-DSQLITE_OMIT_SHARED_CACHE -DSQLITE_OMIT_JSON -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -$(package)_cppflags+=-DSQLITE_OMIT_DECLTYPE -DSQLITE_OMIT_PROGRESS_CALLBACK -DSQLITE_OMIT_AUTOINIT +$(package)_config_env := CC_FOR_BUILD="$$(build_CC)" +$(package)_config_opts = --disable-shared --disable-readline --disable-rtree +$(package)_config_opts += --disable-fts4 --disable-fts5 +$(package)_config_opts_debug += --debug +$(package)_cppflags += -DSQLITE_DQS=0 -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_OMIT_DEPRECATED +$(package)_cppflags += -DSQLITE_OMIT_SHARED_CACHE -DSQLITE_OMIT_JSON -DSQLITE_LIKE_DOESNT_MATCH_BLOBS +$(package)_cppflags += -DSQLITE_OMIT_DECLTYPE -DSQLITE_OMIT_PROGRESS_CALLBACK -DSQLITE_OMIT_AUTOINIT +$(package)_cppflags += -DSQLITE_OMIT_LOAD_EXTENSION endef define $(package)_preprocess_cmds - cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub . + patch -p1 < $($(package)_patch_dir)/autosetup-fixup.patch endef +# Remove --with-pic, which is applied globally to configure +# invocations but is incompatible with Autosetup define $(package)_config_cmds - $($(package)_autoconf) + $$(filter-out --with-pic,$($(package)_autoconf)) endef define $(package)_build_cmds - $(MAKE) libsqlite3.la + $(MAKE) libsqlite3.a endef define $(package)_stage_cmds - $(MAKE) DESTDIR=$($(package)_staging_dir) install-libLTLIBRARIES install-includeHEADERS -endef - -define $(package)_postprocess_cmds - rm lib/*.la + $(MAKE) DESTDIR=$($(package)_staging_dir) install-headers install-lib endef diff --git a/depend/bitcoin/depends/patches/freetype/cmake_minimum.patch b/depend/bitcoin/depends/patches/freetype/cmake_minimum.patch index 0a976f8..b0a3d10 100644 --- a/depend/bitcoin/depends/patches/freetype/cmake_minimum.patch +++ b/depend/bitcoin/depends/patches/freetype/cmake_minimum.patch @@ -2,7 +2,7 @@ build: set minimum required CMake to 3.12 --- a/CMakeLists.txt +++ b/CMakeLists.txt -@@ -97,7 +97,7 @@ +@@ -109,7 +109,7 @@ # FreeType explicitly marks the API to be exported and relies on the compiler # to hide all other symbols. CMake supports a C_VISBILITY_PRESET property # starting with 2.8.12. diff --git a/depend/bitcoin/depends/patches/qt/qtbase_plugins_windows11style.patch b/depend/bitcoin/depends/patches/qt/qtbase_plugins_windows11style.patch new file mode 100644 index 0000000..16d1d3a --- /dev/null +++ b/depend/bitcoin/depends/patches/qt/qtbase_plugins_windows11style.patch @@ -0,0 +1,113 @@ +QWindows11Style: Calculate Spinbox size based on CommonStyle size +Use the calculation from Commonstyle and add the increased padding and +horizontally layouted buttons to the horizontal size hint. 
+ +Fixes: QTBUG-130288 +Change-Id: I7932b782e7873a0178091a51379f17453eb585fd + +Upstream commits: + - Qt 6.8.1: 9107817eaceaacc968dbc767c24594566d637b8c + - Qt 6.9.0: 96d46cad43517adefa2eb7cb8819a0b2cc9241e6 + +--- a/qtbase/src/plugins/styles/modernwindows/qwindows11style.cpp ++++ b/qtbase/src/plugins/styles/modernwindows/qwindows11style.cpp +@@ -2048,39 +2048,22 @@ QSize QWindows11Style::sizeFromContents(ContentsType type, const QStyleOption *o + } + break; + #endif ++#if QT_CONFIG(spinbox) + case QStyle::CT_SpinBox: { + if (const auto *spinBoxOpt = qstyleoption_cast(option)) { + // Add button + frame widths +- int width = 0; +- +- if (const QDateTimeEdit *spinBox = qobject_cast(widget)) { +- const QSize textSizeMin = spinBoxOpt->fontMetrics.size(Qt::TextSingleLine, spinBox->minimumDateTime().toString(spinBox->displayFormat())); +- const QSize textSizeMax = spinBoxOpt->fontMetrics.size(Qt::TextSingleLine, spinBox->maximumDateTime().toString(spinBox->displayFormat())); +- width = qMax(textSizeMin.width(),textSizeMax.width()); +- } else if (const QSpinBox *spinBox = qobject_cast(widget)) { +- const QSize textSizeMin = spinBoxOpt->fontMetrics.size(Qt::TextSingleLine, QString::number(spinBox->minimum())); +- const QSize textSizeMax = spinBoxOpt->fontMetrics.size(Qt::TextSingleLine, QString::number(spinBox->maximum())); +- width = qMax(textSizeMin.width(),textSizeMax.width()); +- width += spinBoxOpt->fontMetrics.size(Qt::TextSingleLine, spinBox->prefix()).width(); +- width += spinBoxOpt->fontMetrics.size(Qt::TextSingleLine, spinBox->suffix()).width(); +- +- } else if (const QDoubleSpinBox *spinBox = qobject_cast(widget)) { +- const QSize textSizeMin = spinBoxOpt->fontMetrics.size(Qt::TextSingleLine, QString::number(spinBox->minimum())); +- const QSize textSizeMax = spinBoxOpt->fontMetrics.size(Qt::TextSingleLine, QString::number(spinBox->maximum())); +- width = qMax(textSizeMin.width(),textSizeMax.width()); +- width += spinBoxOpt->fontMetrics.size(Qt::TextSingleLine, spinBox->prefix()).width(); +- width += spinBoxOpt->fontMetrics.size(Qt::TextSingleLine, spinBox->suffix()).width(); +- } + const qreal dpi = QStyleHelper::dpi(option); + const bool hasButtons = (spinBoxOpt->buttonSymbols != QAbstractSpinBox::NoButtons); +- const int buttonWidth = hasButtons ? 2 * qRound(QStyleHelper::dpiScaled(16, dpi)) : 0; ++ const int margins = 8; ++ const int buttonWidth = hasButtons ? qRound(QStyleHelper::dpiScaled(16, dpi)) : 0; + const int frameWidth = spinBoxOpt->frame ? proxy()->pixelMetric(PM_SpinBoxFrameWidth, + spinBoxOpt, widget) : 0; +- contentSize.setWidth(2 * 12 + width); +- contentSize += QSize(buttonWidth + 2 * frameWidth, 2 * frameWidth); ++ ++ contentSize += QSize(2 * buttonWidth + 2 * frameWidth + 2 * margins, 2 * frameWidth); + } + break; + } ++#endif + default: + contentSize = QWindowsVistaStyle::sizeFromContents(type, option, size, widget); + break; + + +Windows11Style: don't set minimum width for QAbstractSpinBox + +There is no need to set a minimum width for QAbstractSpinBox in +QWindows11Style::polish() as this might override the user preferences. +Also the minimum size handling is now properly done within +sizeFromContents(). 
+ +Change-Id: Ibc1fd7a6f862fc85e3739025b9de581aa235d74c + +Upstream commits: + - Qt 6.8.3: f86da3d3f853adb1a5b823c1cc7be6db4a0265f3 + - Qt 6.9.0: b93a8dfdfe6900cb542fdc587dd2682007a6ac53 + - Qt 6.10.0: 2ec4c28470de115c16944653a5d4f6209452d56c + +--- a/qtbase/src/plugins/styles/modernwindows/qwindows11style.cpp ++++ b/qtbase/src/plugins/styles/modernwindows/qwindows11style.cpp +@@ -29,7 +29,6 @@ QT_BEGIN_NAMESPACE + + const static int topLevelRoundingRadius = 8; //Radius for toplevel items like popups for round corners + const static int secondLevelRoundingRadius = 4; //Radius for second level items like hovered menu item round corners +-constexpr QLatin1StringView originalWidthProperty("_q_windows11_style_original_width"); + + enum WINUI3Color { + subtleHighlightColor, //Subtle highlight based on alpha used for hovered elements +@@ -2140,13 +2139,6 @@ void QWindows11Style::polish(QWidget* widget) + pal.setColor(QPalette::ButtonText, pal.text().color()); + pal.setColor(QPalette::BrightText, pal.text().color()); + widget->setPalette(pal); +- } else if (widget->inherits("QAbstractSpinBox")) { +- const int minWidth = 2 * 24 + 40; +- const int originalWidth = widget->size().width(); +- if (originalWidth < minWidth) { +- widget->resize(minWidth, widget->size().height()); +- widget->setProperty(originalWidthProperty.constData(), originalWidth); +- } + } else if (widget->inherits("QAbstractButton") || widget->inherits("QToolButton")) { + widget->setAutoFillBackground(false); + auto pal = widget->palette(); +@@ -2191,13 +2183,6 @@ void QWindows11Style::unpolish(QWidget *widget) + scrollarea->viewport()->setPalette(pal); + scrollarea->viewport()->setProperty("_q_original_background_palette", QVariant()); + } +- if (widget->inherits("QAbstractSpinBox")) { +- const QVariant originalWidth = widget->property(originalWidthProperty.constData()); +- if (originalWidth.isValid()) { +- widget->resize(originalWidth.toInt(), widget->size().height()); +- widget->setProperty(originalWidthProperty.constData(), QVariant()); +- } +- } + } + + /* diff --git a/depend/bitcoin/depends/patches/sqlite/autosetup-fixup.patch b/depend/bitcoin/depends/patches/sqlite/autosetup-fixup.patch new file mode 100644 index 0000000..46fe5fd --- /dev/null +++ b/depend/bitcoin/depends/patches/sqlite/autosetup-fixup.patch @@ -0,0 +1,20 @@ +autosetup-find-tclsh: Quote CC_FOR_BUILD to prevent word splitting + +In some build environments, CC_FOR_BUILD can include essential compiler +flags. For example, in Guix it may have a value such as: +`/gnu/store/10krix03rl5hqjv2c0qmj44ic9bgd8rc-gcc-toolchain-13.3.0/bin/gcc -isystem /gnu/store/10krix03rl5hqjv2c0qmj44ic9bgd8rc-gcc-toolchain-13.3.0/include` + +See upstream: https://github.com/msteveb/autosetup/pull/81. + + +--- a/autosetup/autosetup-find-tclsh ++++ b/autosetup/autosetup-find-tclsh +@@ -8,7 +8,7 @@ + { $tclsh "$d/${1-autosetup-test-tclsh}"; } 2>/dev/null && exit 0 + done + echo 1>&2 "No installed jimsh or tclsh, building local bootstrap jimsh0" +-for cc in ${CC_FOR_BUILD:-cc} gcc; do ++for cc in "${CC_FOR_BUILD:-cc}" gcc; do + { $cc -o jimsh0 "$d/jimsh0.c"; } 2>/dev/null >/dev/null || continue + ./jimsh0 "$d/${1-autosetup-test-tclsh}" && exit 0 + done diff --git a/depend/bitcoin/doc/REST-interface.md b/depend/bitcoin/doc/REST-interface.md index 0fee781..ed46e22 100644 --- a/depend/bitcoin/doc/REST-interface.md +++ b/depend/bitcoin/doc/REST-interface.md @@ -47,6 +47,11 @@ The HTTP request and response are both handled entirely in-memory. 
With the /notxdetails/ option JSON response will only contain the transaction hash instead of the complete transaction details. The option only affects the JSON response. +- `GET /rest/blockpart/<BLOCK-HASH>.<bin|hex>?offset=<OFFSET>&size=<SIZE>` + +Given a block hash: returns a block part, in binary or hex-encoded binary formats. +Responds with 404 if the block or the byte range doesn't exist. + #### Blockheaders `GET /rest/headers/<BLOCK-HASH>.<bin|hex|json>?count=<COUNT=5>` diff --git a/depend/bitcoin/doc/build-windows-msvc.md b/depend/bitcoin/doc/build-windows-msvc.md index cf84cdd..5b75a62 100644 --- a/depend/bitcoin/doc/build-windows-msvc.md +++ b/depend/bitcoin/doc/build-windows-msvc.md @@ -97,7 +97,7 @@ cmake -B build --preset vs2022-static -DVCPKG_INSTALLED_DIR="C:\path_without_spa One can skip vcpkg manifest default features to speed up the configuration step. For example, the following invocation will skip all features except for "wallet" and "tests" and their dependencies: ``` -cmake -B build --preset vs2022 -DVCPKG_MANIFEST_NO_DEFAULT_FEATURES=ON -DVCPKG_MANIFEST_FEATURES="wallet;tests" -DBUILD_GUI=OFF +cmake -B build --preset vs2022 -DVCPKG_MANIFEST_NO_DEFAULT_FEATURES=ON -DVCPKG_MANIFEST_FEATURES="wallet;tests" -DBUILD_GUI=OFF -DWITH_ZMQ=OFF ``` Available features are listed in the [`vcpkg.json`](/vcpkg.json) file. diff --git a/depend/bitcoin/doc/developer-notes.md b/depend/bitcoin/doc/developer-notes.md index 106533b..d17f802 100644 --- a/depend/bitcoin/doc/developer-notes.md +++ b/depend/bitcoin/doc/developer-notes.md @@ -752,8 +752,7 @@ logging messages. They should be used as follows: messages or for infrequent and important events such as a new block tip being found or a new outbound connection being made. These log messages are unconditional, so care must be taken that they can't be used by an - attacker to fill up storage. Note that `LogPrintf(fmt, params...)` is - a deprecated alias for `LogInfo`. + attacker to fill up storage. - `LogError(fmt, params...)` should be used in place of `LogInfo` for severe problems that require the node (or a subsystem) to shut down diff --git a/depend/bitcoin/doc/fuzzing.md b/depend/bitcoin/doc/fuzzing.md index f4333a5..564245b 100644 --- a/depend/bitcoin/doc/fuzzing.md +++ b/depend/bitcoin/doc/fuzzing.md @@ -8,8 +8,6 @@ To quickly get started fuzzing Bitcoin Core using [libFuzzer](https://llvm.org/d $ git clone https://github.com/bitcoin/bitcoin $ cd bitcoin/ $ cmake --preset=libfuzzer -# macOS users: If you have problem with this step then make sure to read "macOS hints for -# libFuzzer" on https://github.com/bitcoin/bitcoin/blob/master/doc/fuzzing.md#macos-hints-for-libfuzzer $ cmake --build build_fuzz $ FUZZ=process_message build_fuzz/bin/fuzz # abort fuzzing using ctrl-c @@ -23,6 +21,9 @@ There is also a runner script to execute all fuzz targets. Refer to For source-based coverage reports, see [developer notes](/doc/developer-notes.md#compiling-for-fuzz-coverage). +macOS users: We recommend fuzzing on Linux, see [macOS notes](#macos-notes) for +more information. + ## Overview of Bitcoin Core fuzzing [Google](https://github.com/google/fuzzing/) has a good overview of fuzzing in general, with contributions from key architects of some of the most-used fuzzers. [This paper](https://agroce.github.io/bitcoin_report.pdf) includes an external overview of the status of Bitcoin Core fuzzing, as of summer 2021. [John Regehr](https://blog.regehr.org/archives/1687) provides good advice on writing code that assists fuzzers in finding bugs, which is useful for developers to keep in mind.
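As a usage illustration for the `/rest/blockpart` endpoint added to REST-interface.md above, the sketch below requests the first 80 bytes (the block header) of the genesis block with libcurl. This is not part of the change itself; the host/port (a default mainnet node started with `-rest=1`), the genesis hash, and the exact placeholder names in the URL are assumptions based on the endpoint description and the accompanying release note.

```cpp
// Minimal sketch (not from this patch set): fetch a byte range of a block via
// /rest/blockpart, assuming a local mainnet node running with -rest=1.
#include <curl/curl.h>
#include <cstdio>
#include <string>

static size_t AppendToString(char* data, size_t size, size_t nmemb, void* userp)
{
    static_cast<std::string*>(userp)->append(data, size * nmemb);
    return size * nmemb;
}

int main()
{
    // Genesis block hash; offset=0, size=80 selects the 80-byte block header.
    const std::string url =
        "http://127.0.0.1:8332/rest/blockpart/"
        "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f.bin"
        "?offset=0&size=80";

    curl_global_init(CURL_GLOBAL_DEFAULT);
    CURL* curl = curl_easy_init();
    if (!curl) return 1;

    std::string body;
    curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, AppendToString);
    curl_easy_setopt(curl, CURLOPT_WRITEDATA, &body);

    const CURLcode res = curl_easy_perform(curl);
    long status = 0;
    curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status);
    curl_easy_cleanup(curl);
    curl_global_cleanup();

    if (res != CURLE_OK || status != 200) return 1; // 404 if the block or range is unknown
    std::printf("received %zu bytes of raw block data\n", body.size());
    return 0;
}
```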
@@ -183,29 +184,14 @@ There are 3 ways fuzz tests can be built: tests would not be useful. This build is only useful for ensuring fuzz tests compile and link. -## macOS hints for libFuzzer - -The default Clang/LLVM version supplied by Apple on macOS does not include -fuzzing libraries, so macOS users will need to install a full version, for -example using `brew install llvm`. - -You may also need to take care of giving the correct path for `clang` and -`clang++`, like `CC=/path/to/clang CXX=/path/to/clang++` if the non-systems -`clang` does not come first in your path. - -Using `lld` is required due to issues with Apple's `ld` and `LLVM`. +## macOS notes -Full configuration step for macOS: - -```sh -$ brew install llvm lld -$ cmake --preset=libfuzzer \ - -DCMAKE_C_COMPILER="$(brew --prefix llvm)/bin/clang" \ - -DCMAKE_CXX_COMPILER="$(brew --prefix llvm)/bin/clang++" \ - -DCMAKE_EXE_LINKER_FLAGS="-fuse-ld=lld" -``` +Support for fuzzing on macOS is not officially maintained by this project. If +you are running into issues on macOS, we recommend fuzzing on Linux instead for +best results. On macOS this can be done within Docker or a virtual machine. -Read the [libFuzzer documentation](https://llvm.org/docs/LibFuzzer.html) for more information. This [libFuzzer tutorial](https://github.com/google/fuzzing/blob/master/tutorial/libFuzzerTutorial.md) might also be of interest. +Reproducing and debugging fuzz testcases on macOS is supported, by building the +fuzz binary without support for any specific fuzzing engine. # Fuzzing Bitcoin Core using afl++ @@ -225,10 +211,6 @@ $ cmake -B build_fuzz \ -DCMAKE_CXX_COMPILER="$(pwd)/AFLplusplus/afl-clang-lto++" \ -DBUILD_FOR_FUZZING=ON $ cmake --build build_fuzz -# For macOS you may need to ignore x86 compilation checks when running "cmake --build". If so, -# try compiling using: AFL_NO_X86=1 cmake --build build_fuzz -# Also, it might be required to run "afl-system-config" to adjust the shared -# memory parameters. $ mkdir -p inputs/ outputs/ $ echo A > inputs/thin-air-input $ FUZZ=bech32 ./AFLplusplus/afl-fuzz -i inputs/ -o outputs/ -- build_fuzz/bin/fuzz diff --git a/depend/bitcoin/doc/policy/README.md b/depend/bitcoin/doc/policy/README.md index 4392ffb..a03edc2 100644 --- a/depend/bitcoin/doc/policy/README.md +++ b/depend/bitcoin/doc/policy/README.md @@ -9,7 +9,7 @@ contents. Policy is *not* applied to transactions in blocks. This documentation is not an exhaustive list of all policy rules. -- [Mempool Limits](mempool-limits.md) +- [Mempool Design and Limits](mempool-design.md) - [Mempool Replacements](mempool-replacements.md) - [Packages](packages.md) diff --git a/depend/bitcoin/doc/policy/mempool-design.md b/depend/bitcoin/doc/policy/mempool-design.md new file mode 100644 index 0000000..52ebd1b --- /dev/null +++ b/depend/bitcoin/doc/policy/mempool-design.md @@ -0,0 +1,104 @@ +# Mempool design and limits + +## Definitions + +We view the unconfirmed transactions in the mempool as a directed graph, +with an edge from transaction B to transaction A if B spends an output created +by A (i.e., B is a **child** of A, and A is a **parent** of B). + +A transaction's **ancestors** include, recursively, its parents, the parents of +its parents, etc. A transaction's **descendants** include, recursively, its +children, the children of its children, etc. + +A **cluster** is a connected component of the graph, i.e., a set of +transactions where each transaction is reachable from any other transaction in +the set by following edges in either direction. 
The cluster corresponding to a +given transaction consists of that transaction, its ancestors and descendants, +and the ancestors and descendants of those transactions, and so on. + +Each cluster is **linearized**, or sorted, in a topologically valid order (i.e., +no transaction appears before any of its ancestors). Our goal is to construct a +linearization where the highest feerate subset of a cluster appears first, +followed by the next highest feerate subset of the remaining transactions, and +so on[1]. We call these subsets **chunks**, and the chunks of a linearization +have the property that they are always in monotonically decreasing feerate +order. + +Given two or more linearized clusters, we can construct a linearization of the +union by simply merge sorting the chunks of each cluster by feerate. + +For any set of linearized clusters, then, we can define the **feerate diagram** +of the set by plotting the cumulative fee (y-axis) against the cumulative size +(x-axis) as we progress from chunk to chunk. Given two linearizations for the +same set of transactions, we can compare their feerate diagrams by +comparing their cumulative fees at each size value. Two diagrams may be +**incomparable** if neither contains the other (i.e., there exist size values at +which each one has a greater cumulative fee than the other). Or, they may be +**equivalent** if they have identical cumulative fees at every size value; or +one may be **strictly better** than the other if they are comparable and there +exists at least one size value for which the cumulative fee is strictly higher +in one of them. + +For more background and rationale, see [2] and [3] below. + +## Mining/eviction + +As described above, the linearization of each cluster gives us a linearization +of the entire mempool. We use this ordering for both block building and +eviction, by selecting chunks at the front of the linearization when +constructing a block template, and by evicting chunks from the back of the +linearization when we need to free up space in the mempool. + +## Replace-by-fee + +Prior to the cluster mempool implementation, it was possible for replacements +to be prevented even if they would make the mempool more profitable for miners, +and it was possible for replacements to be permitted even if the newly accepted +transaction was less desirable to miners than the transactions it was +replacing. With the ability to construct linearizations of the mempool, we're +now able to compare the feerate diagram of the mempool before and after a +proposed replacement, and only accept the replacement if it makes the feerate +diagram strictly better. + +In simple cases, the intuition is that a replacement should have a higher +feerate and fee than the transaction(s) it replaces. But for more complex cases +(where some transactions may have unconfirmed parents), there may not be a +simple way to describe the fee that is needed to successfully replace a set of +transactions, other than to say that the overall feerate diagram of the +resulting mempool must improve somewhere and not be worse anywhere. + +## Mempool limits + +### Motivation + +Selecting chunks in decreasing feerate order when building a block template +will be close to optimal when the maximum size of any chunk is small compared +to the block size. And for mempool eviction, we don't wish to evict too much of +the mempool at once when a single (potentially small) transaction arrives that +takes us over our mempool size limit. 
For both of these reasons, it's desirable +to limit the maximum size of a cluster and thereby limit the maximum size of +any chunk (as a cluster may consist entirely of one chunk). + +The computation required to linearize a transaction grows (in polynomial time) +with the number of transactions in a cluster, so limiting the number of +transactions in a cluster is necessary to ensure that we're able to find good +(ideally, optimal) linearizations in a reasonable amount of time. + +### Limits + +Transactions submitted to the mempool must not result in clusters that would +exceed the cluster limits (64 transactions and 101 kvB total per cluster). + +## References/Notes +[1] This is an instance of the maximal-ratio closure problem, which is closely +related to the maximal-weight closure problem, as found in the field of mineral +extraction for open pit mining. + +[2] See +https://delvingbitcoin.org/t/an-overview-of-the-cluster-mempool-proposal/393 +for a high level overview of the cluster mempool implementation (PR#33629, +since v31.0) and its design rationale. + +[3] See https://delvingbitcoin.org/t/mempool-incentive-compatibility/553 for an +explanation of why and how we use feerate diagrams for mining, eviction, and +evaluating transaction replacements. diff --git a/depend/bitcoin/doc/policy/mempool-limits.md b/depend/bitcoin/doc/policy/mempool-limits.md deleted file mode 100644 index 73ab017..0000000 --- a/depend/bitcoin/doc/policy/mempool-limits.md +++ /dev/null @@ -1,65 +0,0 @@ -# Mempool Limits - -## Definitions - -Given any two transactions Tx0 and Tx1 where Tx1 spends an output of Tx0, -Tx0 is a *parent* of Tx1 and Tx1 is a *child* of Tx0. - -A transaction's *ancestors* include, recursively, its parents, the parents of its parents, etc. -A transaction's *descendants* include, recursively, its children, the children of its children, etc. - -A mempool entry's *ancestor count* is the total number of in-mempool (unconfirmed) transactions in -its ancestor set, including itself. -A mempool entry's *descendant count* is the total number of in-mempool (unconfirmed) transactions in -its descendant set, including itself. - -A mempool entry's *ancestor size* is the aggregated virtual size of in-mempool (unconfirmed) -transactions in its ancestor set, including itself. -A mempool entry's *descendant size* is the aggregated virtual size of in-mempool (unconfirmed) -transactions in its descendant set, including itself. - -Transactions submitted to the mempool must not exceed the ancestor and descendant limits (aka -mempool *package limits*) set by the node (see `-limitancestorcount`, `-limitancestorsize`, -`-limitdescendantcount`, `-limitdescendantsize`). - -## Exemptions - -### CPFP Carve Out - -**CPFP Carve Out** if a transaction candidate for submission to the -mempool would cause some mempool entry to exceed its descendant limits, an exemption is made if all -of the following conditions are met: - -1. The candidate transaction is no more than 10,000 virtual bytes. - -2. The candidate transaction has an ancestor count of 2 (itself and exactly 1 ancestor). - -3. The in-mempool transaction's descendant count, including the candidate transaction, would only - exceed the limit by 1. - -*Rationale*: this rule was introduced to prevent pinning by domination of a transaction's descendant -limits in two-party contract protocols such as LN. Also see the [mailing list -post](https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html). 
- -This rule was introduced in [PR #15681](https://github.com/bitcoin/bitcoin/pull/15681). - -### Single-Conflict RBF Carve Out - -When a candidate transaction for submission to the mempool would replace mempool entries, it may -also decrease the descendant count of other mempool entries. Since ancestor/descendant limits are -calculated prior to removing the would-be-replaced transactions, they may be overestimated. - -An exemption is given for a candidate transaction that would replace mempool transactions and meets -all of the following conditions: - -1. The candidate transaction has exactly 1 directly conflicting transaction. - -2. The candidate transaction does not spend any unconfirmed inputs that are not also spent by the - directly conflicting transaction. - -The following discounts are given to account for the would-be-replaced transaction(s): - -1. The descendant count limit is temporarily increased by 1. - -2. The descendant size limit temporarily is increased by the virtual size of the to-be-replaced - directly conflicting transaction. diff --git a/depend/bitcoin/doc/policy/mempool-replacements.md b/depend/bitcoin/doc/policy/mempool-replacements.md index 73682e2..422af9a 100644 --- a/depend/bitcoin/doc/policy/mempool-replacements.md +++ b/depend/bitcoin/doc/policy/mempool-replacements.md @@ -12,12 +12,7 @@ other consensus and policy rules, each of the following conditions are met: 1. (Removed) -2. The replacement transaction only include an unconfirmed input if that input was included in - one of the directly conflicting transactions. An unconfirmed input spends an output from a - currently-unconfirmed transaction. - - *Rationale*: When RBF was originally implemented, the mempool did not keep track of - ancestor feerates yet. This rule was suggested as a temporary restriction. +2. (Removed) 3. The replacement transaction pays an absolute fee of at least the sum paid by the original transactions. @@ -38,23 +33,16 @@ other consensus and policy rules, each of the following conditions are met: *Rationale*: Try to prevent DoS attacks where an attacker causes the network to repeatedly relay transactions each paying a tiny additional amount in fees, e.g. just 1 satoshi. -5. The number of original transactions does not exceed 100. More precisely, the sum of all - directly conflicting transactions' descendant counts (number of transactions inclusive of itself - and its descendants) must not exceed 100; it is possible that this overestimates the true number - of original transactions. +5. The number of distinct clusters corresponding to conflicting transactions does not exceed 100. - *Rationale*: Try to prevent DoS attacks where an attacker is able to easily occupy and flush out - significant portions of the node's mempool using replacements with multiple directly conflicting - transactions, each with large descendant sets. + *Rationale*: Limit CPU usage required to update the mempool for so many transactions being + removed at once. -6. The replacement transaction's feerate is greater than the feerates of all directly conflicting - transactions. +6. The feerate diagram of the mempool must be strictly improved by the replacement transaction. - *Rationale*: This rule was originally intended to ensure that the replacement transaction is - preferable for block-inclusion, compared to what would be removed from the mempool. This rule - predates ancestor feerate-based transaction selection. 
+ *Rationale*: This ensures that block fees in all future blocks will go up + after the replacement (ignoring tail effects at the end of a block). -This set of rules is similar but distinct from BIP125. ## History @@ -79,3 +67,5 @@ This set of rules is similar but distinct from BIP125. * Signaling for replace-by-fee is no longer required as of [PR 30592](https://github.com/bitcoin/bitcoin/pull/30592). * The incremental relay feerate default is 0.1sat/vB ([PR #33106](https://github.com/bitcoin/bitcoin/pull/33106)). + +* Feerate diagram policy enabled in conjunction with switch to cluster mempool as of **v31.0**. diff --git a/depend/bitcoin/doc/policy/mempool-terminology.md b/depend/bitcoin/doc/policy/mempool-terminology.md new file mode 100644 index 0000000..55c947e --- /dev/null +++ b/depend/bitcoin/doc/policy/mempool-terminology.md @@ -0,0 +1,19 @@ +## Fee and Size Terminology in Mempool Policy + + * Each transaction has a **weight** and virtual size as defined in BIP 141 (different from serialized size for witness transactions, as witness data is discounted and the value is rounded up to the nearest integer). + + * In the RPCs, "weight", refers to the weight as defined in BIP 141. + + * A transaction has a **sigops size**, defined as its sigop cost multiplied by the node's `-bytespersigop`, an adjustable policy. + + * A transaction's **virtual size (vsize)** refers to its **sigops-adjusted virtual size**: the maximum of its BIP 141 size and sigop size. This virtual size is used to simplify the process of building blocks that satisfy both the maximum weight limit and sigop limit. + + * In the RPCs, "vsize" refers to this sigops-adjusted virtual size. + + * Mempool entry data with the suffix "-size" (eg "ancestorsize") refer to the cumulative sigops-adjusted virtual size of the transactions in the associated set. + + * A transaction can also have a **sigops-adjusted weight**, defined similarly as the maximum of its BIP 141 weight and 4 times the sigops size. This value is used internally by the mempool to avoid losing precision, and mempool entry data with the suffix "-weight" (eg "chunkweight", "clusterweight") refer to this sigops-adjusted weight. + + * A transaction's **base fee** is the difference between its input and output values. + + * A transaction's **modified fee** is its base fee added to any **fee delta** introduced by using the `prioritisetransaction` RPC. Modified fee is used internally for all fee-related mempool policies and block building. diff --git a/depend/bitcoin/doc/policy/packages.md b/depend/bitcoin/doc/policy/packages.md index 7522a98..4795f71 100644 --- a/depend/bitcoin/doc/policy/packages.md +++ b/depend/bitcoin/doc/policy/packages.md @@ -38,9 +38,7 @@ The following rules are enforced for all packages: - Packages are 1-parent-1-child, with no in-mempool ancestors of the package. - - All conflicting clusters (connected components of mempool transactions) must be clusters of up to size 2. - - - No more than MAX_REPLACEMENT_CANDIDATES transactions can be replaced, analogous to + - The number of distinct clusters containing conflicting transactions can be no more than 100, analogous to regular [replacement rule](./mempool-replacements.md) 5). - Replacements must pay more total fees at the incremental relay fee (analogous to @@ -56,18 +54,6 @@ The following rules are enforced for all packages: result in more robust fee bumping. More general package RBF may be enabled in the future. 
-* When packages are evaluated against ancestor/descendant limits, the union of all transactions' - descendants and ancestors is considered. (#21800) - - - *Rationale*: This is essentially a "worst case" heuristic intended for packages that are - heavily connected, i.e. some transaction in the package is the ancestor or descendant of all - the other transactions. - -* [CPFP Carve Out](./mempool-limits.md#CPFP-Carve-Out) is disabled in packaged contexts. (#21800) - - - *Rationale*: This carve out cannot be accurately applied when there are multiple transactions' - ancestors and descendants being considered at the same time. - The following rules are only enforced for packages to be submitted to the mempool (not enforced for test accepts): diff --git a/depend/bitcoin/doc/release-notes-33629.md b/depend/bitcoin/doc/release-notes-33629.md new file mode 100644 index 0000000..16bdf0f --- /dev/null +++ b/depend/bitcoin/doc/release-notes-33629.md @@ -0,0 +1,43 @@ +Mempool +======= + +The mempool has been reimplemented with a new design ("cluster mempool"), to +facilitate better decision-making when constructing block templates, evicting +transactions, relaying transactions, and validating replacement transactions +(RBF). Most changes should be transparent to users, but some behavior changes +are noted: + +- The mempool no longer enforces ancestor or descendant size/count limits. + Instead, two new default policy limits are introduced governing connected + components, or clusters, in the mempool, limiting clusters to 64 transactions + and up to 101 kB in virtual size. Transactions are considered to be in the + same cluster if they are connected to each other via any combination of + parent/child relationships in the mempool. These limits can be overridden + using command line arguments; see the extended help (`-help-debug`) + for more information. + +- Within the mempool, transactions are ordered based on the feerate at which + they are expected to be mined, which takes into account the full set, or + "chunk", of transactions that would be included together (e.g., a parent and + its child, or more complicated subsets of transactions). This ordering is + utilized by the algorithms that implement transaction selection for + constructing block templates; eviction from the mempool when it is full; and + transaction relay announcements to peers. + +- The replace-by-fee validation logic has been updated so that transaction + replacements are only accepted if the resulting mempool's feerate diagram is + strictly better than before the replacement. This eliminates all known cases + of replacements occurring that make the mempool worse off, which was possible + under previous RBF rules. For singleton transactions (that are in clusters by + themselves) it's sufficient for a replacement to have a higher fee and + feerate than the original. See + [delvingbitcoin.org post](https://delvingbitcoin.org/t/an-overview-of-the-cluster-mempool-proposal/393#rbf-can-now-be-made-incentive-compatible-for-miners-11) + for more information. + +- Two new RPCs have been added: `getmempoolcluster` will provide the set of + transactions in the same cluster as the given transaction, along with the + ordering of those transactions and grouping into chunks; and + `getmempoolfeeratediagram` will return the feerate diagram of the entire + mempool. + +- Chunk size and chunk fees are now also included in the output of `getmempoolentry`. 
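The release notes above describe ordering the mempool by chunk feerates and using the resulting feerate diagram for block building, eviction, and RBF. The following is a small illustrative sketch, not Bitcoin Core code and not the algorithm used to choose linearizations: given one already-linearized cluster as hypothetical (fee, size) pairs, it merges trailing chunks until chunk feerates are monotonically decreasing, then prints the cumulative feerate-diagram points described in doc/policy/mempool-design.md.

```cpp
// Illustrative sketch only: chunk an already-linearized cluster and emit its
// feerate diagram.
#include <cstdint>
#include <cstdio>
#include <vector>

struct Chunk {
    int64_t fee{0};  // satoshis
    int64_t size{0}; // virtual bytes
};

// Merge transactions (taken in linearization order) into chunks whose feerates
// are monotonically decreasing.
std::vector<Chunk> Chunkify(const std::vector<Chunk>& linearized)
{
    std::vector<Chunk> chunks;
    for (const Chunk& tx : linearized) {
        chunks.push_back(tx);
        while (chunks.size() > 1) {
            const Chunk last = chunks.back();
            const Chunk& prev = chunks[chunks.size() - 2];
            // Compare last.fee/last.size >= prev.fee/prev.size by cross-multiplying,
            // which stays exact and avoids integer division.
            if (last.fee * prev.size < prev.fee * last.size) break;
            chunks.pop_back();
            chunks.back().fee += last.fee;
            chunks.back().size += last.size;
        }
    }
    return chunks;
}

int main()
{
    // Hypothetical cluster: a low-feerate parent, a high-feerate child paying
    // for it (CPFP), and a further low-feerate descendant.
    const std::vector<Chunk> linearized{{100, 200}, {900, 200}, {150, 300}};
    int64_t cum_fee = 0, cum_size = 0;
    for (const Chunk& c : Chunkify(linearized)) {
        cum_fee += c.fee;
        cum_size += c.size;
        // Each (cum_size, cum_fee) pair is one point of the feerate diagram.
        std::printf("chunk %lld sat / %lld vB -> diagram point (%lld, %lld)\n",
                    (long long)c.fee, (long long)c.size,
                    (long long)cum_size, (long long)cum_fee);
    }
    return 0;
}
```

Finding a good linearization in the first place, and keeping it updated as transactions enter and leave the mempool, is the expensive part; that is the motivation for the cluster count and size limits noted above.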
diff --git a/depend/bitcoin/doc/release-notes-33657.md b/depend/bitcoin/doc/release-notes-33657.md new file mode 100644 index 0000000..f9e6841 --- /dev/null +++ b/depend/bitcoin/doc/release-notes-33657.md @@ -0,0 +1,5 @@ +New REST API +------------ + +- A new REST API endpoint (`/rest/blockpart/BLOCKHASH.bin?offset=X&size=Y`) has been introduced + for efficiently fetching a range of bytes from block `BLOCKHASH`. diff --git a/depend/bitcoin/doc/release-notes-34031.md b/depend/bitcoin/doc/release-notes-34031.md new file mode 100644 index 0000000..c0f29a9 --- /dev/null +++ b/depend/bitcoin/doc/release-notes-34031.md @@ -0,0 +1,4 @@ +Net +--- +- `tor` has been removed as a network specification. It + was deprecated in favour of `onion` in v0.17.0. (#34031) diff --git a/depend/bitcoin/src/.clang-format b/depend/bitcoin/src/.clang-format index c1c0e89..c5fcd0b 100644 --- a/depend/bitcoin/src/.clang-format +++ b/depend/bitcoin/src/.clang-format @@ -99,17 +99,20 @@ IfMacros: - KJ_IF_MAYBE IncludeBlocks: Preserve IncludeCategories: - - Regex: '^"(llvm|llvm-c|clang|clang-c)/' + - Regex: '^' + Priority: -1 + CaseSensitive: true + - Regex: '^.]*>' Priority: 3 - SortPriority: 0 CaseSensitive: false - Regex: '.*' Priority: 1 - SortPriority: 0 CaseSensitive: false IncludeIsMainRegex: '(Test)?$' IncludeIsMainSourceRegex: '' diff --git a/depend/bitcoin/src/.clang-tidy b/depend/bitcoin/src/.clang-tidy index 544bd5a..da53a58 100644 --- a/depend/bitcoin/src/.clang-tidy +++ b/depend/bitcoin/src/.clang-tidy @@ -7,6 +7,7 @@ bugprone-string-constructor, bugprone-use-after-move, bugprone-lambda-function-name, bugprone-unhandled-self-assignment, +bugprone-unused-return-value, misc-unused-using-decls, misc-no-recursion, modernize-deprecated-headers, @@ -36,3 +37,5 @@ CheckOptions: value: false - key: bugprone-unhandled-self-assignment.WarnOnlyIfThisHasSuspiciousField value: false + - key: bugprone-unused-return-value.CheckedReturnTypes + value: '^::std::error_code$;^::std::error_condition$;^::std::errc$;^::std::expected$;^::util::Result$;^::util::Expected$' diff --git a/depend/bitcoin/src/CMakeLists.txt b/depend/bitcoin/src/CMakeLists.txt index 9df51eb..47aee93 100644 --- a/depend/bitcoin/src/CMakeLists.txt +++ b/depend/bitcoin/src/CMakeLists.txt @@ -325,22 +325,6 @@ if(ENABLE_IPC AND BUILD_DAEMON) install_binary_component(bitcoin-node INTERNAL) endif() -if(ENABLE_IPC AND BUILD_TESTS) - # bitcoin_ipc_test library target is defined here in src/CMakeLists.txt - # instead of src/test/CMakeLists.txt so capnp files in src/test/ are able to - # reference capnp files in src/ipc/capnp/ by relative path. The Cap'n Proto - # compiler only allows importing by relative path when the importing and - # imported files are underneath the same compilation source prefix, so the - # source prefix must be src/, not src/test/ - add_library(bitcoin_ipc_test STATIC EXCLUDE_FROM_ALL - test/ipc_test.cpp - ) - target_capnp_sources(bitcoin_ipc_test ${PROJECT_SOURCE_DIR} - test/ipc_test.capnp - ) - add_dependencies(bitcoin_ipc_test bitcoin_ipc_headers) -endif() - add_library(bitcoin_cli STATIC EXCLUDE_FROM_ALL compat/stdin.cpp diff --git a/depend/bitcoin/src/addrman.cpp b/depend/bitcoin/src/addrman.cpp index 9c3a24d..5cd1d41 100644 --- a/depend/bitcoin/src/addrman.cpp +++ b/depend/bitcoin/src/addrman.cpp @@ -1055,7 +1055,7 @@ void AddrManImpl::Check() const const int err{CheckAddrman()}; if (err) { - LogPrintf("ADDRMAN CONSISTENCY CHECK FAILED!!! err=%i\n", err); + LogError("ADDRMAN CONSISTENCY CHECK FAILED!!! 
err=%i", err); assert(false); } } diff --git a/depend/bitcoin/src/bench/blockencodings.cpp b/depend/bitcoin/src/bench/blockencodings.cpp index 8f66599..f12d22f 100644 --- a/depend/bitcoin/src/bench/blockencodings.cpp +++ b/depend/bitcoin/src/bench/blockencodings.cpp @@ -22,7 +22,7 @@ static void AddTx(const CTransactionRef& tx, const CAmount& fee, CTxMemPool& pool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs) { LockPoints lp; - AddToMempool(pool, CTxMemPoolEntry(tx, fee, /*time=*/0, /*entry_height=*/1, /*entry_sequence=*/0, /*spends_coinbase=*/false, /*sigops_cost=*/4, lp)); + TryAddToMempool(pool, CTxMemPoolEntry(TxGraph::Ref(), tx, fee, /*time=*/0, /*entry_height=*/1, /*entry_sequence=*/0, /*spends_coinbase=*/false, /*sigops_cost=*/4, lp)); } namespace { diff --git a/depend/bitcoin/src/bench/coin_selection.cpp b/depend/bitcoin/src/bench/coin_selection.cpp index dfefa1b..2150d80 100644 --- a/depend/bitcoin/src/bench/coin_selection.cpp +++ b/depend/bitcoin/src/bench/coin_selection.cpp @@ -130,7 +130,7 @@ static void BnBExhaustion(benchmark::Bench& bench) bench.run([&] { // Benchmark CAmount target = make_hard_case(17, utxo_pool); - SelectCoinsBnB(utxo_pool, target, 0, MAX_STANDARD_TX_WEIGHT); // Should exhaust + [[maybe_unused]] auto _{SelectCoinsBnB(utxo_pool, target, /*cost_of_change=*/0, MAX_STANDARD_TX_WEIGHT)}; // Should exhaust // Cleanup utxo_pool.clear(); diff --git a/depend/bitcoin/src/bench/crypto_hash.cpp b/depend/bitcoin/src/bench/crypto_hash.cpp index 2f1ff56..05c7788 100644 --- a/depend/bitcoin/src/bench/crypto_hash.cpp +++ b/depend/bitcoin/src/bench/crypto_hash.cpp @@ -193,13 +193,11 @@ static void SHA512(benchmark::Bench& bench) static void SipHash_32b(benchmark::Bench& bench) { FastRandomContext rng{/*fDeterministic=*/true}; - auto k0{rng.rand64()}, k1{rng.rand64()}; + PresaltedSipHasher presalted_sip_hasher(rng.rand64(), rng.rand64()); auto val{rng.rand256()}; auto i{0U}; bench.run([&] { - ankerl::nanobench::doNotOptimizeAway(SipHashUint256(k0, k1, val)); - ++k0; - ++k1; + ankerl::nanobench::doNotOptimizeAway(presalted_sip_hasher(val)); ++i; val.data()[i % uint256::size()] ^= i & 0xFF; }); diff --git a/depend/bitcoin/src/bench/mempool_ephemeral_spends.cpp b/depend/bitcoin/src/bench/mempool_ephemeral_spends.cpp index 8f29411..c973c78 100644 --- a/depend/bitcoin/src/bench/mempool_ephemeral_spends.cpp +++ b/depend/bitcoin/src/bench/mempool_ephemeral_spends.cpp @@ -29,7 +29,7 @@ static void AddTx(const CTransactionRef& tx, CTxMemPool& pool) EXCLUSIVE_LOCKS_R unsigned int sigOpCost{4}; uint64_t fee{0}; LockPoints lp; - AddToMempool(pool, CTxMemPoolEntry( + TryAddToMempool(pool, CTxMemPoolEntry(TxGraph::Ref(), tx, fee, nTime, nHeight, sequence, spendsCoinbase, sigOpCost, lp)); } diff --git a/depend/bitcoin/src/bench/mempool_eviction.cpp b/depend/bitcoin/src/bench/mempool_eviction.cpp index aa2e868..dcbe124 100644 --- a/depend/bitcoin/src/bench/mempool_eviction.cpp +++ b/depend/bitcoin/src/bench/mempool_eviction.cpp @@ -27,7 +27,7 @@ static void AddTx(const CTransactionRef& tx, const CAmount& nFee, CTxMemPool& po bool spendsCoinbase = false; unsigned int sigOpCost = 4; LockPoints lp; - AddToMempool(pool, CTxMemPoolEntry( + TryAddToMempool(pool, CTxMemPoolEntry(TxGraph::Ref(), tx, nFee, nTime, nHeight, sequence, spendsCoinbase, sigOpCost, lp)); } diff --git a/depend/bitcoin/src/bench/mempool_stress.cpp b/depend/bitcoin/src/bench/mempool_stress.cpp index fbac25d..8bdeb75 100644 --- a/depend/bitcoin/src/bench/mempool_stress.cpp +++ b/depend/bitcoin/src/bench/mempool_stress.cpp @@ 
-21,7 +21,7 @@ class CCoinsViewCache; -static void AddTx(const CTransactionRef& tx, CTxMemPool& pool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs) +static void AddTx(const CTransactionRef& tx, CTxMemPool& pool, FastRandomContext& det_rand) EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs) { int64_t nTime = 0; unsigned int nHeight = 1; @@ -29,7 +29,7 @@ static void AddTx(const CTransactionRef& tx, CTxMemPool& pool) EXCLUSIVE_LOCKS_R bool spendsCoinbase = false; unsigned int sigOpCost = 4; LockPoints lp; - AddToMempool(pool, CTxMemPoolEntry(tx, 1000, nTime, nHeight, sequence, spendsCoinbase, sigOpCost, lp)); + TryAddToMempool(pool, CTxMemPoolEntry(TxGraph::Ref(), tx, det_rand.randrange(10000)+1000, nTime, nHeight, sequence, spendsCoinbase, sigOpCost, lp)); } struct Available { @@ -39,15 +39,17 @@ struct Available { Available(CTransactionRef& ref, size_t tx_count) : ref(ref), tx_count(tx_count){} }; -static std::vector CreateOrderedCoins(FastRandomContext& det_rand, int childTxs, int min_ancestors) +// Create a cluster of transactions, randomly. +static std::vector CreateCoinCluster(FastRandomContext& det_rand, int childTxs, int min_ancestors) { std::vector available_coins; std::vector ordered_coins; // Create some base transactions size_t tx_counter = 1; - for (auto x = 0; x < 100; ++x) { + for (auto x = 0; x < 10; ++x) { CMutableTransaction tx = CMutableTransaction(); tx.vin.resize(1); + tx.vin[0].prevout = COutPoint(Txid::FromUint256(GetRandHash()), 1); tx.vin[0].scriptSig = CScript() << CScriptNum(tx_counter); tx.vin[0].scriptWitness.stack.push_back(CScriptNum(x).getvch()); tx.vout.resize(det_rand.randrange(10)+2); @@ -91,26 +93,106 @@ static std::vector CreateOrderedCoins(FastRandomContext& det_ra return ordered_coins; } +static void MemPoolAddTransactions(benchmark::Bench& bench) +{ + FastRandomContext det_rand{true}; + int childTxs = 50; + if (bench.complexityN() > 1) { + childTxs = static_cast(bench.complexityN()); + } + const auto testing_setup = MakeNoLogFileContext(ChainType::MAIN); + CTxMemPool& pool = *testing_setup.get()->m_node.mempool; + + std::vector transactions; + // Create 1000 clusters of 100 transactions each + for (int i=0; i<100; i++) { + auto new_txs = CreateCoinCluster(det_rand, childTxs, /*min_ancestors*/ 1); + transactions.insert(transactions.end(), new_txs.begin(), new_txs.end()); + } + + LOCK2(cs_main, pool.cs); + + bench.run([&]() NO_THREAD_SAFETY_ANALYSIS { + for (auto& tx : transactions) { + AddTx(tx, pool, det_rand); + } + pool.TrimToSize(0, nullptr); + }); +} + static void ComplexMemPool(benchmark::Bench& bench) { FastRandomContext det_rand{true}; - int childTxs = 800; + int childTxs = 50; if (bench.complexityN() > 1) { childTxs = static_cast(bench.complexityN()); } - std::vector ordered_coins = CreateOrderedCoins(det_rand, childTxs, /*min_ancestors=*/1); const auto testing_setup = MakeNoLogFileContext(ChainType::MAIN); CTxMemPool& pool = *testing_setup.get()->m_node.mempool; + + std::vector tx_remove_for_block; + std::vector hashes_remove_for_block; + LOCK2(cs_main, pool.cs); + + for (int i=0; i<1000; i++) { + std::vector transactions = CreateCoinCluster(det_rand, childTxs, /*min_ancestors=*/1); + + // Add all transactions to the mempool. + // Also store the first 10 transactions from each cluster as the + // transactions we'll "mine" in the the benchmark. 
+ int tx_count = 0; + for (auto& tx : transactions) { + if (tx_count < 10) { + tx_remove_for_block.push_back(tx); + ++tx_count; + hashes_remove_for_block.emplace_back(tx->GetHash()); + } + AddTx(tx, pool, det_rand); + } + } + + // Since the benchmark will be run repeatedly, we have to leave the mempool + // in the same state at the end of the function, so we benchmark both + // mining a block and reorging the block's contents back into the mempool. bench.run([&]() NO_THREAD_SAFETY_ANALYSIS { - for (auto& tx : ordered_coins) { - AddTx(tx, pool); + pool.removeForBlock(tx_remove_for_block, /*nBlockHeight*/100); + for (auto& tx: tx_remove_for_block) { + AddTx(tx, pool, det_rand); } - pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4); - pool.TrimToSize(GetVirtualTransactionSize(*ordered_coins.front())); + pool.UpdateTransactionsFromBlock(hashes_remove_for_block); }); } +static void MemPoolAncestorsDescendants(benchmark::Bench& bench) +{ + FastRandomContext det_rand{true}; + int childTxs = 50; + if (bench.complexityN() > 1) { + childTxs = static_cast(bench.complexityN()); + } + const auto testing_setup = MakeNoLogFileContext(ChainType::MAIN); + CTxMemPool& pool = *testing_setup.get()->m_node.mempool; + + LOCK2(cs_main, pool.cs); + + std::vector transactions = CreateCoinCluster(det_rand, childTxs, /*min_ancestors=*/1); + for (auto& tx : transactions) { + AddTx(tx, pool, det_rand); + } + + CTxMemPool::txiter first_tx = *pool.GetIter(transactions[0]->GetHash()); + CTxMemPool::txiter last_tx = *pool.GetIter(transactions.back()->GetHash()); + + bench.run([&]() NO_THREAD_SAFETY_ANALYSIS { + CTxMemPool::setEntries dummy; + ankerl::nanobench::doNotOptimizeAway(dummy); + pool.CalculateDescendants({first_tx}, dummy); + ankerl::nanobench::doNotOptimizeAway(pool.CalculateMemPoolAncestors(*last_tx)); + }); +} + + static void MempoolCheck(benchmark::Bench& bench) { FastRandomContext det_rand{true}; @@ -126,5 +208,7 @@ static void MempoolCheck(benchmark::Bench& bench) }); } +BENCHMARK(MemPoolAncestorsDescendants, benchmark::PriorityLevel::HIGH); +BENCHMARK(MemPoolAddTransactions, benchmark::PriorityLevel::HIGH); BENCHMARK(ComplexMemPool, benchmark::PriorityLevel::HIGH); BENCHMARK(MempoolCheck, benchmark::PriorityLevel::HIGH); diff --git a/depend/bitcoin/src/bench/readwriteblock.cpp b/depend/bitcoin/src/bench/readwriteblock.cpp index 4ca5b8e..a742a1e 100644 --- a/depend/bitcoin/src/bench/readwriteblock.cpp +++ b/depend/bitcoin/src/bench/readwriteblock.cpp @@ -57,11 +57,9 @@ static void ReadRawBlockBench(benchmark::Bench& bench) const auto testing_setup{MakeNoLogFileContext(ChainType::MAIN)}; auto& blockman{testing_setup->m_node.chainman->m_blockman}; const auto pos{blockman.WriteBlock(CreateTestBlock(), 413'567)}; - std::vector block_data; - blockman.ReadRawBlock(block_data, pos); // warmup bench.run([&] { - const auto success{blockman.ReadRawBlock(block_data, pos)}; - assert(success); + const auto res{blockman.ReadRawBlock(pos)}; + assert(res); }); } diff --git a/depend/bitcoin/src/bench/rpc_mempool.cpp b/depend/bitcoin/src/bench/rpc_mempool.cpp index a61c660..069230f 100644 --- a/depend/bitcoin/src/bench/rpc_mempool.cpp +++ b/depend/bitcoin/src/bench/rpc_mempool.cpp @@ -22,7 +22,7 @@ static void AddTx(const CTransactionRef& tx, const CAmount& fee, CTxMemPool& pool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs) { LockPoints lp; - AddToMempool(pool, CTxMemPoolEntry(tx, fee, /*time=*/0, /*entry_height=*/1, /*entry_sequence=*/0, /*spends_coinbase=*/false, /*sigops_cost=*/4, lp)); + TryAddToMempool(pool, 
CTxMemPoolEntry(TxGraph::Ref(), tx, fee, /*time=*/0, /*entry_height=*/1, /*entry_sequence=*/0, /*spends_coinbase=*/false, /*sigops_cost=*/4, lp)); } static void RpcMempool(benchmark::Bench& bench) diff --git a/depend/bitcoin/src/bitcoin-cli.cpp b/depend/bitcoin/src/bitcoin-cli.cpp index 279aa89..d6fcaa8 100644 --- a/depend/bitcoin/src/bitcoin-cli.cpp +++ b/depend/bitcoin/src/bitcoin-cli.cpp @@ -899,8 +899,7 @@ static UniValue CallRPC(BaseRequestHandler* rh, const std::string& strMethod, co throw CConnectionFailed("uri-encode failed"); } } - int r = evhttp_make_request(evcon.get(), req.get(), EVHTTP_REQ_POST, endpoint.c_str()); - req.release(); // ownership moved to evcon in above call + int r = evhttp_make_request(evcon.get(), req.release(), EVHTTP_REQ_POST, endpoint.c_str()); if (r != 0) { throw CConnectionFailed("send http request failed"); } diff --git a/depend/bitcoin/src/blockencodings.cpp b/depend/bitcoin/src/blockencodings.cpp index cf6da55..d48ba40 100644 --- a/depend/bitcoin/src/blockencodings.cpp +++ b/depend/bitcoin/src/blockencodings.cpp @@ -17,11 +17,14 @@ #include -CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block, const uint64_t nonce) : - nonce(nonce), - shorttxids(block.vtx.size() - 1), prefilledtxn(1), header(block) { +CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block, uint64_t nonce) + : nonce(nonce), + shorttxids(block.vtx.size() - 1), + prefilledtxn(1), + header(block) +{ FillShortTxIDSelector(); - //TODO: Use our mempool prior to block acceptance to predictively fill more than just the coinbase + // TODO: Use our mempool prior to block acceptance to predictively fill more than just the coinbase prefilledtxn[0] = {0, block.vtx[0]}; for (size_t i = 1; i < block.vtx.size(); i++) { const CTransaction& tx = *block.vtx[i]; @@ -29,20 +32,21 @@ CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block, const } } -void CBlockHeaderAndShortTxIDs::FillShortTxIDSelector() const { +void CBlockHeaderAndShortTxIDs::FillShortTxIDSelector() const +{ DataStream stream{}; stream << header << nonce; CSHA256 hasher; hasher.Write((unsigned char*)&(*stream.begin()), stream.end() - stream.begin()); uint256 shorttxidhash; hasher.Finalize(shorttxidhash.begin()); - shorttxidk0 = shorttxidhash.GetUint64(0); - shorttxidk1 = shorttxidhash.GetUint64(1); + m_hasher.emplace(shorttxidhash.GetUint64(0), shorttxidhash.GetUint64(1)); } -uint64_t CBlockHeaderAndShortTxIDs::GetShortID(const Wtxid& wtxid) const { +uint64_t CBlockHeaderAndShortTxIDs::GetShortID(const Wtxid& wtxid) const +{ static_assert(SHORTTXIDS_LENGTH == 6, "shorttxids calculation assumes 6-byte shorttxids"); - return SipHashUint256(shorttxidk0, shorttxidk1, wtxid.ToUint256()) & 0xffffffffffffL; + return (*Assert(m_hasher))(wtxid.ToUint256()) & 0xffffffffffffL; } /* Reconstructing a compact block is in the hot-path for block relay, diff --git a/depend/bitcoin/src/blockencodings.h b/depend/bitcoin/src/blockencodings.h index 133724b..124df50 100644 --- a/depend/bitcoin/src/blockencodings.h +++ b/depend/bitcoin/src/blockencodings.h @@ -5,6 +5,7 @@ #ifndef BITCOIN_BLOCKENCODINGS_H #define BITCOIN_BLOCKENCODINGS_H +#include #include #include @@ -87,8 +88,7 @@ typedef enum ReadStatus_t } ReadStatus; class CBlockHeaderAndShortTxIDs { -private: - mutable uint64_t shorttxidk0, shorttxidk1; + mutable std::optional m_hasher; uint64_t nonce; void FillShortTxIDSelector() const; @@ -112,7 +112,7 @@ class CBlockHeaderAndShortTxIDs { /** * @param[in] nonce This should be randomly generated, and 
is used for the siphash secret key */ - CBlockHeaderAndShortTxIDs(const CBlock& block, const uint64_t nonce); + CBlockHeaderAndShortTxIDs(const CBlock& block, uint64_t nonce); uint64_t GetShortID(const Wtxid& wtxid) const; diff --git a/depend/bitcoin/src/coins.cpp b/depend/bitcoin/src/coins.cpp index 554a3eb..1ca5fca 100644 --- a/depend/bitcoin/src/coins.cpp +++ b/depend/bitcoin/src/coins.cpp @@ -185,18 +185,16 @@ void CCoinsViewCache::SetBestBlock(const uint256 &hashBlockIn) { bool CCoinsViewCache::BatchWrite(CoinsViewCacheCursor& cursor, const uint256 &hashBlockIn) { for (auto it{cursor.Begin()}; it != cursor.End(); it = cursor.NextAndMaybeErase(*it)) { - // Ignore non-dirty entries (optimization). - if (!it->second.IsDirty()) { + if (!it->second.IsDirty()) { // TODO a cursor can only contain dirty entries continue; } - CCoinsMap::iterator itUs = cacheCoins.find(it->first); - if (itUs == cacheCoins.end()) { - // The parent cache does not have an entry, while the child cache does. - // We can ignore it if it's both spent and FRESH in the child - if (!(it->second.IsFresh() && it->second.coin.IsSpent())) { - // Create the coin in the parent cache, move the data up - // and mark it as dirty. - itUs = cacheCoins.try_emplace(it->first).first; + auto [itUs, inserted]{cacheCoins.try_emplace(it->first)}; + if (inserted) { + if (it->second.IsFresh() && it->second.coin.IsSpent()) { + cacheCoins.erase(itUs); // TODO fresh coins should have been removed at spend + } else { + // The parent cache does not have an entry, while the child cache does. + // Move the data up and mark it as dirty. CCoinsCacheEntry& entry{itUs->second}; assert(entry.coin.DynamicMemoryUsage() == 0); if (cursor.WillErase(*it)) { @@ -251,12 +249,14 @@ bool CCoinsViewCache::BatchWrite(CoinsViewCacheCursor& cursor, const uint256 &ha return true; } -bool CCoinsViewCache::Flush() { +bool CCoinsViewCache::Flush(bool will_reuse_cache) { auto cursor{CoinsViewCacheCursor(m_sentinel, cacheCoins, /*will_erase=*/true)}; bool fOk = base->BatchWrite(cursor, hashBlock); if (fOk) { cacheCoins.clear(); - ReallocateCache(); + if (will_reuse_cache) { + ReallocateCache(); + } cachedCoinsUsage = 0; } return fOk; diff --git a/depend/bitcoin/src/coins.h b/depend/bitcoin/src/coins.h index 2fcc764..8d07f7f 100644 --- a/depend/bitcoin/src/coins.h +++ b/depend/bitcoin/src/coins.h @@ -439,9 +439,11 @@ class CCoinsViewCache : public CCoinsViewBacked * Push the modifications applied to this cache to its base and wipe local state. * Failure to call this method or Sync() before destruction will cause the changes * to be forgotten. + * If will_reuse_cache is false, the cache will retain the same memory footprint + * after flushing and should be destroyed to deallocate. * If false is returned, the state of this cache (and its backing view) will be undefined. 
*/ - bool Flush(); + bool Flush(bool will_reuse_cache = true); /** * Push the modifications applied to this cache to its base while retaining diff --git a/depend/bitcoin/src/common/args.cpp b/depend/bitcoin/src/common/args.cpp index 50b9902..88a28ed 100644 --- a/depend/bitcoin/src/common/args.cpp +++ b/depend/bitcoin/src/common/args.cpp @@ -113,7 +113,7 @@ std::optional InterpretValue(const KeyInfo& key, const st } // Double negatives like -nofoo=0 are supported (but discouraged) if (value && !InterpretBool(*value)) { - LogPrintf("Warning: parsed potentially confusing double-negative -%s=%s\n", key.name, *value); + LogWarning("Parsed potentially confusing double-negative -%s=%s", key.name, *value); return true; } return false; @@ -398,7 +398,7 @@ static void SaveErrors(const std::vector errors, std::vectoremplace_back(error); } else { - LogPrintf("%s\n", error); + LogWarning("%s", error); } } } @@ -420,7 +420,7 @@ bool ArgsManager::ReadSettingsFile(std::vector* errors) for (const auto& setting : m_settings.rw_settings) { KeyInfo key = InterpretKey(setting.first); // Split setting key into section and argname if (!GetArgFlags('-' + key.name)) { - LogPrintf("Ignoring unknown rw_settings value %s\n", setting.first); + LogWarning("Ignoring unknown rw_settings value %s", setting.first); } } return true; @@ -860,7 +860,7 @@ void ArgsManager::logArgsPrefix( std::optional flags = GetArgFlags('-' + arg.first); if (flags) { std::string value_str = (*flags & SENSITIVE) ? "****" : value.write(); - LogPrintf("%s %s%s=%s\n", prefix, section_str, arg.first, value_str); + LogInfo("%s %s%s=%s\n", prefix, section_str, arg.first, value_str); } } } @@ -873,7 +873,7 @@ void ArgsManager::LogArgs() const logArgsPrefix("Config file arg:", section.first, section.second); } for (const auto& setting : m_settings.rw_settings) { - LogPrintf("Setting file arg: %s = %s\n", setting.first, setting.second.write()); + LogInfo("Setting file arg: %s = %s\n", setting.first, setting.second.write()); } logArgsPrefix("Command-line arg:", "", m_settings.command_line_options); } diff --git a/depend/bitcoin/src/common/config.cpp b/depend/bitcoin/src/common/config.cpp index 7216022..79c0424 100644 --- a/depend/bitcoin/src/common/config.cpp +++ b/depend/bitcoin/src/common/config.cpp @@ -84,7 +84,7 @@ bool IsConfSupported(KeyInfo& key, std::string& error) { if (key.name == "reindex") { // reindex can be set in a config file but it is strongly discouraged as this will cause the node to reindex on // every restart. Allow the config but throw a warning - LogPrintf("Warning: reindex=1 is set in the configuration file, which will significantly slow down startup. Consider removing or commenting out this option for better performance, unless there is currently a condition which makes rebuilding the indexes necessary\n"); + LogWarning("reindex=1 is set in the configuration file, which will significantly slow down startup. 
Consider removing or commenting out this option for better performance, unless there is currently a condition which makes rebuilding the indexes necessary"); return true; } return true; @@ -109,7 +109,7 @@ bool ArgsManager::ReadConfigStream(std::istream& stream, const std::string& file m_settings.ro_config[key.section][key.name].push_back(*value); } else { if (ignore_invalid_keys) { - LogPrintf("Ignoring unknown configuration value %s\n", option.first); + LogWarning("Ignoring unknown configuration value %s", option.first); } else { error = strprintf("Invalid configuration value %s", option.first); return false; @@ -192,7 +192,7 @@ bool ArgsManager::ReadConfigFiles(std::string& error, bool ignore_invalid_keys) if (!ReadConfigStream(conf_file_stream, conf_file_name, error, ignore_invalid_keys)) { return false; } - LogPrintf("Included configuration file %s\n", conf_file_name); + LogInfo("Included configuration file %s\n", conf_file_name); } else { error = "Failed to include configuration file " + conf_file_name; return false; diff --git a/depend/bitcoin/src/common/netif.cpp b/depend/bitcoin/src/common/netif.cpp index 378f0f5..ed891f1 100644 --- a/depend/bitcoin/src/common/netif.cpp +++ b/depend/bitcoin/src/common/netif.cpp @@ -73,7 +73,7 @@ std::optional QueryDefaultGatewayImpl(sa_family_t family) // Create a netlink socket. auto sock{CreateSock(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE)}; if (!sock) { - LogPrintLevel(BCLog::NET, BCLog::Level::Error, "socket(AF_NETLINK): %s\n", NetworkErrorString(errno)); + LogError("socket(AF_NETLINK): %s\n", NetworkErrorString(errno)); return std::nullopt; } @@ -110,7 +110,7 @@ std::optional QueryDefaultGatewayImpl(sa_family_t family) request.dst_hdr.nla_len = sizeof(nlattr) + dst_data_len; if (sock->Send(&request, request.hdr.nlmsg_len, 0) != static_cast(request.hdr.nlmsg_len)) { - LogPrintLevel(BCLog::NET, BCLog::Level::Error, "send() to netlink socket: %s\n", NetworkErrorString(errno)); + LogError("send() to netlink socket: %s\n", NetworkErrorString(errno)); return std::nullopt; } @@ -124,13 +124,13 @@ std::optional QueryDefaultGatewayImpl(sa_family_t family) recv_result = sock->Recv(response, sizeof(response), 0); } while (recv_result < 0 && (errno == EINTR || errno == EAGAIN)); if (recv_result < 0) { - LogPrintLevel(BCLog::NET, BCLog::Level::Error, "recv() from netlink socket: %s\n", NetworkErrorString(errno)); + LogError("recv() from netlink socket: %s\n", NetworkErrorString(errno)); return std::nullopt; } total_bytes_read += recv_result; if (total_bytes_read > NETLINK_MAX_RESPONSE_SIZE) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "Netlink response exceeded size limit (%zu bytes, family=%d)\n", NETLINK_MAX_RESPONSE_SIZE, family); + LogWarning("Netlink response exceeded size limit (%zu bytes, family=%d)\n", NETLINK_MAX_RESPONSE_SIZE, family); return std::nullopt; } @@ -200,7 +200,7 @@ std::optional QueryDefaultGatewayImpl(sa_family_t family) destination_address.si_family = family; status = GetBestInterfaceEx((sockaddr*)&destination_address, &best_if_idx); if (status != NO_ERROR) { - LogPrintLevel(BCLog::NET, BCLog::Level::Error, "Could not get best interface for default route: %s\n", NetworkErrorString(status)); + LogError("Could not get best interface for default route: %s\n", NetworkErrorString(status)); return std::nullopt; } @@ -208,7 +208,7 @@ std::optional QueryDefaultGatewayImpl(sa_family_t family) // Leave interface_luid at all-zeros to use interface index instead. 
status = GetBestRoute2(&interface_luid, best_if_idx, nullptr, &destination_address, 0, &best_route, &best_source_address); if (status != NO_ERROR) { - LogPrintLevel(BCLog::NET, BCLog::Level::Error, "Could not get best route for default route for interface index %d: %s\n", + LogError("Could not get best route for default route for interface index %d: %s\n", best_if_idx, NetworkErrorString(status)); return std::nullopt; } @@ -235,12 +235,12 @@ std::optional QueryDefaultGatewayImpl(sa_family_t family) // The size of the available data is determined by calling sysctl() with oldp=nullptr. See sysctl(3). size_t l = 0; if (sysctl(/*name=*/mib, /*namelen=*/sizeof(mib) / sizeof(int), /*oldp=*/nullptr, /*oldlenp=*/&l, /*newp=*/nullptr, /*newlen=*/0) < 0) { - LogPrintLevel(BCLog::NET, BCLog::Level::Error, "Could not get sysctl length of routing table: %s\n", SysErrorString(errno)); + LogError("Could not get sysctl length of routing table: %s\n", SysErrorString(errno)); return std::nullopt; } std::vector buf(l); if (sysctl(/*name=*/mib, /*namelen=*/sizeof(mib) / sizeof(int), /*oldp=*/buf.data(), /*oldlenp=*/&l, /*newp=*/nullptr, /*newlen=*/0) < 0) { - LogPrintLevel(BCLog::NET, BCLog::Level::Error, "Could not get sysctl data of routing table: %s\n", SysErrorString(errno)); + LogError("Could not get sysctl data of routing table: %s\n", SysErrorString(errno)); return std::nullopt; } // Iterate over messages (each message is a routing table entry). @@ -340,7 +340,7 @@ std::vector GetLocalAddresses() if (status != NO_ERROR) { // This includes ERROR_NO_DATA if there are no addresses and thus there's not even one PIP_ADAPTER_ADDRESSES // record in the returned structure. - LogPrintLevel(BCLog::NET, BCLog::Level::Error, "Could not get local adapter addresses: %s\n", NetworkErrorString(status)); + LogError("Could not get local adapter addresses: %s\n", NetworkErrorString(status)); return addresses; } diff --git a/depend/bitcoin/src/common/pcp.cpp b/depend/bitcoin/src/common/pcp.cpp index 6112dbf..4864136 100644 --- a/depend/bitcoin/src/common/pcp.cpp +++ b/depend/bitcoin/src/common/pcp.cpp @@ -228,11 +228,11 @@ std::optional> PCPSendRecv(Sock &sock, const std::string &p int recvsz = 0; for (int ntry = 0; !got_response && ntry < num_tries; ++ntry) { if (ntry > 0) { - LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "%s: Retrying (%d)\n", protocol, ntry); + LogDebug(BCLog::NET, "%s: Retrying (%d)\n", protocol, ntry); } // Dispatch packet to gateway. if (sock.Send(request.data(), request.size(), 0) != static_cast(request.size())) { - LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "%s: Could not send request: %s\n", protocol, NetworkErrorString(WSAGetLastError())); + LogDebug(BCLog::NET, "%s: Could not send request: %s\n", protocol, NetworkErrorString(WSAGetLastError())); return std::nullopt; // Network-level error, probably no use retrying. } @@ -243,21 +243,21 @@ std::optional> PCPSendRecv(Sock &sock, const std::string &p if (interrupt) return std::nullopt; Sock::Event occurred = 0; if (!sock.Wait(deadline - cur_time, Sock::RECV, &occurred)) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "%s: Could not wait on socket: %s\n", protocol, NetworkErrorString(WSAGetLastError())); + LogWarning("%s: Could not wait on socket: %s\n", protocol, NetworkErrorString(WSAGetLastError())); return std::nullopt; // Network-level error, probably no use retrying. } if (!occurred) { - LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "%s: Timeout\n", protocol); + LogDebug(BCLog::NET, "%s: Timeout\n", protocol); break; // Retry. 
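PCPSendRecv above wraps the request/response exchange in a bounded retry loop: each try re-sends the request, waits on the socket up to a per-try deadline, treats a timeout as "retry", and treats a socket-level failure as fatal. A stripped-down sketch of that shape, with placeholder callables rather than the actual PCP code (the real loop additionally keeps receiving unrelated packets until the deadline expires):

#include <chrono>

// Stripped-down shape of a bounded request/response retry loop
// (placeholder callables; not the actual PCP implementation).
template <typename Send, typename WaitReadable, typename Receive>
bool ExchangeWithRetries(int num_tries, std::chrono::milliseconds timeout_per_try,
                         Send send, WaitReadable wait_readable, Receive receive)
{
    for (int ntry = 0; ntry < num_tries; ++ntry) {
        if (!send()) return false;                      // network-level error: no point retrying
        if (!wait_readable(timeout_per_try)) continue;  // timed out: resend the request and retry
        return receive();                               // socket readable: parse and validate
    }
    return false;                                       // gave up after num_tries attempts
}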
} // Receive response. recvsz = sock.Recv(response, sizeof(response), MSG_DONTWAIT); if (recvsz < 0) { - LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "%s: Could not receive response: %s\n", protocol, NetworkErrorString(WSAGetLastError())); + LogDebug(BCLog::NET, "%s: Could not receive response: %s\n", protocol, NetworkErrorString(WSAGetLastError())); return std::nullopt; // Network-level error, probably no use retrying. } - LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "%s: Received response of %d bytes: %s\n", protocol, recvsz, HexStr(std::span(response, recvsz))); + LogDebug(BCLog::NET, "%s: Received response of %d bytes: %s\n", protocol, recvsz, HexStr(std::span(response, recvsz))); if (check_packet(std::span(response, recvsz))) { got_response = true; // Got expected response, break from receive loop as well as from retry loop. @@ -266,7 +266,7 @@ std::optional> PCPSendRecv(Sock &sock, const std::string &p } } if (!got_response) { - LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "%s: Giving up after %d tries\n", protocol, num_tries); + LogDebug(BCLog::NET, "%s: Giving up after %d tries\n", protocol, num_tries); return std::nullopt; } return std::vector(response, response + recvsz); @@ -279,7 +279,7 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g struct sockaddr_storage dest_addr; socklen_t dest_addrlen = sizeof(struct sockaddr_storage); - LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "natpmp: Requesting port mapping port %d from gateway %s\n", port, gateway.ToStringAddr()); + LogDebug(BCLog::NET, "natpmp: Requesting port mapping port %d from gateway %s\n", port, gateway.ToStringAddr()); // Validate gateway, make sure it's IPv4. NAT-PMP does not support IPv6. if (!CService(gateway, PCP_SERVER_PORT).GetSockAddr((struct sockaddr*)&dest_addr, &dest_addrlen)) return MappingError::NETWORK_ERROR; @@ -288,13 +288,13 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g // Create IPv4 UDP socket auto sock{CreateSock(AF_INET, SOCK_DGRAM, IPPROTO_UDP)}; if (!sock) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Could not create UDP socket: %s\n", NetworkErrorString(WSAGetLastError())); + LogWarning("natpmp: Could not create UDP socket: %s\n", NetworkErrorString(WSAGetLastError())); return MappingError::NETWORK_ERROR; } // Associate UDP socket to gateway. 
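Both the NAT-PMP and PCP paths rely on the same idiom to learn which local address the node should advertise: connect() the UDP socket to the gateway (which transmits nothing but makes the kernel select a source address) and then read that address back with getsockname(). A rough POSIX-style sketch of the idea, reusing the CreateSock/Sock wrapper calls shown above; the helper name is made up and error handling is shortened:

#include <netinet/in.h>
#include <sys/socket.h>
#include <optional>
// CreateSock and the Sock wrapper are the same helpers used in the code above;
// their Bitcoin Core headers are omitted here.

// Hypothetical helper sketching the connect()+getsockname() idiom.
static std::optional<sockaddr_in> LocalAddressTowards(const sockaddr* dest, socklen_t dest_len)
{
    auto sock{CreateSock(AF_INET, SOCK_DGRAM, IPPROTO_UDP)};
    if (!sock) return std::nullopt;
    // connect() on a UDP socket sends nothing, but makes the kernel pick the
    // source address it would route from for this destination...
    if (sock->Connect(dest, dest_len) != 0) return std::nullopt;
    // ...which getsockname() then reports back as the internal address.
    sockaddr_in local{};
    socklen_t local_len = sizeof(local);
    if (sock->GetSockName(reinterpret_cast<sockaddr*>(&local), &local_len) != 0) return std::nullopt;
    return local;
}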
if (sock->Connect((struct sockaddr*)&dest_addr, dest_addrlen) != 0) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Could not connect to gateway: %s\n", NetworkErrorString(WSAGetLastError())); + LogWarning("natpmp: Could not connect to gateway: %s\n", NetworkErrorString(WSAGetLastError())); return MappingError::NETWORK_ERROR; } @@ -302,7 +302,7 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g struct sockaddr_in internal; socklen_t internal_addrlen = sizeof(struct sockaddr_in); if (sock->GetSockName((struct sockaddr*)&internal, &internal_addrlen) != 0) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Could not get sock name: %s\n", NetworkErrorString(WSAGetLastError())); + LogWarning("natpmp: Could not get sock name: %s\n", NetworkErrorString(WSAGetLastError())); return MappingError::NETWORK_ERROR; } @@ -314,11 +314,11 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g auto recv_res = PCPSendRecv(*sock, "natpmp", request, num_tries, timeout_per_try, [&](const std::span response) -> bool { if (response.size() < NATPMP_GETEXTERNAL_RESPONSE_SIZE) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Response too small\n"); + LogWarning("natpmp: Response too small\n"); return false; // Wasn't response to what we expected, try receiving next packet. } if (response[NATPMP_HDR_VERSION_OFS] != NATPMP_VERSION || response[NATPMP_HDR_OP_OFS] != (NATPMP_RESPONSE | NATPMP_OP_GETEXTERNAL)) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Response to wrong command\n"); + LogWarning("natpmp: Response to wrong command\n"); return false; // Wasn't response to what we expected, try receiving next packet. } return true; @@ -332,7 +332,7 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g Assume(response.size() >= NATPMP_GETEXTERNAL_RESPONSE_SIZE); uint16_t result_code = ReadBE16(response.data() + NATPMP_RESPONSE_HDR_RESULT_OFS); if (result_code != NATPMP_RESULT_SUCCESS) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Getting external address failed with result %s\n", NATPMPResultString(result_code)); + LogWarning("natpmp: Getting external address failed with result %s\n", NATPMPResultString(result_code)); return MappingError::PROTOCOL_ERROR; } @@ -352,16 +352,16 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g recv_res = PCPSendRecv(*sock, "natpmp", request, num_tries, timeout_per_try, [&](const std::span response) -> bool { if (response.size() < NATPMP_MAP_RESPONSE_SIZE) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Response too small\n"); + LogWarning("natpmp: Response too small\n"); return false; // Wasn't response to what we expected, try receiving next packet. } if (response[0] != NATPMP_VERSION || response[1] != (NATPMP_RESPONSE | NATPMP_OP_MAP_TCP)) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Response to wrong command\n"); + LogWarning("natpmp: Response to wrong command\n"); return false; // Wasn't response to what we expected, try receiving next packet. } uint16_t internal_port = ReadBE16(response.data() + NATPMP_MAP_RESPONSE_INTERNAL_PORT_OFS); if (internal_port != port) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Response port doesn't match request\n"); + LogWarning("natpmp: Response port doesn't match request\n"); return false; // Wasn't response to what we expected, try receiving next packet. 
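The NAT-PMP and PCP replies are parsed by reading big-endian fields at fixed byte offsets with ReadBE16/ReadBE32, always after a size check has guaranteed the buffer is long enough. A small illustrative parser in the same style; the reply layout, offsets, and struct are invented for the example and are not part of this patch:

#include <crypto/common.h> // ReadBE16 / ReadBE32, as used in the code above
#include <cstddef>
#include <cstdint>
#include <optional>
#include <span>

// Hypothetical fixed-offset reply layout, for illustration only.
struct ExampleReply {
    uint16_t result_code;
    uint16_t external_port;
    uint32_t lifetime;
};

static std::optional<ExampleReply> ParseExampleReply(std::span<const uint8_t> response)
{
    constexpr size_t RESULT_OFS{2}, PORT_OFS{4}, LIFETIME_OFS{8}, MIN_SIZE{12};
    if (response.size() < MIN_SIZE) return std::nullopt; // size check first, as in the patch
    return ExampleReply{
        .result_code = ReadBE16(response.data() + RESULT_OFS),
        .external_port = ReadBE16(response.data() + PORT_OFS),
        .lifetime = ReadBE32(response.data() + LIFETIME_OFS),
    };
}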
} return true; @@ -374,7 +374,7 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g Assume(response.size() >= NATPMP_MAP_RESPONSE_SIZE); uint16_t result_code = ReadBE16(response.data() + NATPMP_RESPONSE_HDR_RESULT_OFS); if (result_code != NATPMP_RESULT_SUCCESS) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code)); + LogWarning("natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code)); if (result_code == NATPMP_RESULT_NO_RESOURCES) { return MappingError::NO_RESOURCES; } @@ -394,7 +394,7 @@ std::variant PCPRequestPortMap(const PCPMappingNonc struct sockaddr_storage dest_addr, bind_addr; socklen_t dest_addrlen = sizeof(struct sockaddr_storage), bind_addrlen = sizeof(struct sockaddr_storage); - LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "pcp: Requesting port mapping for addr %s port %d from gateway %s\n", bind.ToStringAddr(), port, gateway.ToStringAddr()); + LogDebug(BCLog::NET, "pcp: Requesting port mapping for addr %s port %d from gateway %s\n", bind.ToStringAddr(), port, gateway.ToStringAddr()); // Validate addresses, make sure they're the same network family. if (!CService(gateway, PCP_SERVER_PORT).GetSockAddr((struct sockaddr*)&dest_addr, &dest_addrlen)) return MappingError::NETWORK_ERROR; @@ -404,20 +404,20 @@ std::variant PCPRequestPortMap(const PCPMappingNonc // Create UDP socket (IPv4 or IPv6 based on provided gateway). auto sock{CreateSock(dest_addr.ss_family, SOCK_DGRAM, IPPROTO_UDP)}; if (!sock) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Could not create UDP socket: %s\n", NetworkErrorString(WSAGetLastError())); + LogWarning("pcp: Could not create UDP socket: %s\n", NetworkErrorString(WSAGetLastError())); return MappingError::NETWORK_ERROR; } // Make sure that we send from requested destination address, anything else will be // rejected by a security-conscious router. if (sock->Bind((struct sockaddr*)&bind_addr, bind_addrlen) != 0) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Could not bind to address: %s\n", NetworkErrorString(WSAGetLastError())); + LogWarning("pcp: Could not bind to address: %s\n", NetworkErrorString(WSAGetLastError())); return MappingError::NETWORK_ERROR; } // Associate UDP socket to gateway. if (sock->Connect((struct sockaddr*)&dest_addr, dest_addrlen) != 0) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Could not connect to gateway: %s\n", NetworkErrorString(WSAGetLastError())); + LogWarning("pcp: Could not connect to gateway: %s\n", NetworkErrorString(WSAGetLastError())); return MappingError::NETWORK_ERROR; } @@ -427,12 +427,12 @@ std::variant PCPRequestPortMap(const PCPMappingNonc struct sockaddr_storage internal_addr; socklen_t internal_addrlen = sizeof(struct sockaddr_storage); if (sock->GetSockName((struct sockaddr*)&internal_addr, &internal_addrlen) != 0) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Could not get sock name: %s\n", NetworkErrorString(WSAGetLastError())); + LogWarning("pcp: Could not get sock name: %s\n", NetworkErrorString(WSAGetLastError())); return MappingError::NETWORK_ERROR; } CService internal; if (!internal.SetSockAddr((struct sockaddr*)&internal_addr, internal_addrlen)) return MappingError::NETWORK_ERROR; - LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "pcp: Internal address after connect: %s\n", internal.ToStringAddr()); + LogDebug(BCLog::NET, "pcp: Internal address after connect: %s\n", internal.ToStringAddr()); // Build request packet. 
Make sure the packet is zeroed so that reserved fields are zero // as required by the spec (and not potentially leak data). @@ -469,23 +469,23 @@ std::variant PCPRequestPortMap(const PCPMappingNonc return true; // Let it through to caller. } if (response.size() < (PCP_HDR_SIZE + PCP_MAP_SIZE)) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Response too small\n"); + LogWarning("pcp: Response too small\n"); return false; // Wasn't response to what we expected, try receiving next packet. } if (response[PCP_HDR_VERSION_OFS] != PCP_VERSION || response[PCP_HDR_OP_OFS] != (PCP_RESPONSE | PCP_OP_MAP)) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Response to wrong command\n"); + LogWarning("pcp: Response to wrong command\n"); return false; // Wasn't response to what we expected, try receiving next packet. } // Handle MAP opcode response. See RFC6887 Figure 10. // Check that returned mapping nonce matches our request. if (!std::ranges::equal(response.subspan(PCP_HDR_SIZE + PCP_MAP_NONCE_OFS, PCP_MAP_NONCE_SIZE), nonce)) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Mapping nonce mismatch\n"); + LogWarning("pcp: Mapping nonce mismatch\n"); return false; // Wasn't response to what we expected, try receiving next packet. } uint8_t protocol = response[PCP_HDR_SIZE + 12]; uint16_t internal_port = ReadBE16(response.data() + PCP_HDR_SIZE + 16); if (protocol != PCP_PROTOCOL_TCP || internal_port != port) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Response protocol or port doesn't match request\n"); + LogWarning("pcp: Response protocol or port doesn't match request\n"); return false; // Wasn't response to what we expected, try receiving next packet. } return true; @@ -508,7 +508,7 @@ std::variant PCPRequestPortMap(const PCPMappingNonc uint16_t external_port = ReadBE16(response.data() + PCP_HDR_SIZE + PCP_MAP_EXTERNAL_PORT_OFS); CNetAddr external_addr{PCPUnwrapAddress(response.subspan(PCP_HDR_SIZE + PCP_MAP_EXTERNAL_IP_OFS, ADDR_IPV6_SIZE))}; if (result_code != PCP_RESULT_SUCCESS) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Mapping failed with result %s\n", PCPResultString(result_code)); + LogWarning("pcp: Mapping failed with result %s\n", PCPResultString(result_code)); if (result_code == PCP_RESULT_NO_RESOURCES) { return MappingError::NO_RESOURCES; } diff --git a/depend/bitcoin/src/common/system.cpp b/depend/bitcoin/src/common/system.cpp index 3833763..72e9de1 100644 --- a/depend/bitcoin/src/common/system.cpp +++ b/depend/bitcoin/src/common/system.cpp @@ -58,8 +58,9 @@ void runCommand(const std::string& strCommand) #else int nErr = ::_wsystem(std::wstring_convert,wchar_t>().from_bytes(strCommand).c_str()); #endif - if (nErr) - LogPrintf("runCommand error: system(%s) returned %d\n", strCommand, nErr); + if (nErr) { + LogWarning("runCommand error: system(%s) returned %d", strCommand, nErr); + } } #endif diff --git a/depend/bitcoin/src/consensus/merkle.cpp b/depend/bitcoin/src/consensus/merkle.cpp index e274ed8..703a824 100644 --- a/depend/bitcoin/src/consensus/merkle.cpp +++ b/depend/bitcoin/src/consensus/merkle.cpp @@ -73,7 +73,7 @@ uint256 BlockMerkleRoot(const CBlock& block, bool* mutated) return ComputeMerkleRoot(std::move(leaves), mutated); } -uint256 BlockWitnessMerkleRoot(const CBlock& block, bool* mutated) +uint256 BlockWitnessMerkleRoot(const CBlock& block) { std::vector leaves; leaves.resize(block.vtx.size()); @@ -81,20 +81,17 @@ uint256 BlockWitnessMerkleRoot(const CBlock& block, bool* mutated) for (size_t s = 1; s < 
block.vtx.size(); s++) { leaves[s] = block.vtx[s]->GetWitnessHash().ToUint256(); } - return ComputeMerkleRoot(std::move(leaves), mutated); + return ComputeMerkleRoot(std::move(leaves)); } -/* This implements a constant-space merkle root/path calculator, limited to 2^32 leaves. */ -static void MerkleComputation(const std::vector& leaves, uint256* proot, bool* pmutated, uint32_t leaf_pos, std::vector* path) +/* This implements a constant-space merkle path calculator, limited to 2^32 leaves. */ +static void MerkleComputation(const std::vector& leaves, uint32_t leaf_pos, std::vector& path) { - if (path) path->clear(); + path.clear(); Assume(leaves.size() <= UINT32_MAX); if (leaves.size() == 0) { - if (pmutated) *pmutated = false; - if (proot) *proot = uint256(); return; } - bool mutated = false; // count is the number of leaves processed so far. uint32_t count = 0; // inner is an array of eagerly computed subtree hashes, indexed by tree @@ -115,15 +112,12 @@ static void MerkleComputation(const std::vector& leaves, uint256* proot // corresponds to an inner value that existed before processing the // current leaf, and each needs a hash to combine it. for (level = 0; !(count & ((uint32_t{1}) << level)); level++) { - if (path) { - if (matchh) { - path->push_back(inner[level]); - } else if (matchlevel == level) { - path->push_back(h); - matchh = true; - } + if (matchh) { + path.push_back(inner[level]); + } else if (matchlevel == level) { + path.push_back(h); + matchh = true; } - mutated |= (inner[level] == h); h = Hash(inner[level], h); } // Store the resulting hash at inner position level. @@ -147,8 +141,8 @@ static void MerkleComputation(const std::vector& leaves, uint256* proot // If we reach this point, h is an inner value that is not the top. // We combine it with itself (Bitcoin's special rule for odd levels in // the tree) to produce a higher level one. - if (path && matchh) { - path->push_back(h); + if (matchh) { + path.push_back(h); } h = Hash(h, h); // Increment count to the value it would have if two entries at this @@ -157,26 +151,21 @@ static void MerkleComputation(const std::vector& leaves, uint256* proot level++; // And propagate the result upwards accordingly. while (!(count & ((uint32_t{1}) << level))) { - if (path) { - if (matchh) { - path->push_back(inner[level]); - } else if (matchlevel == level) { - path->push_back(h); - matchh = true; - } + if (matchh) { + path.push_back(inner[level]); + } else if (matchlevel == level) { + path.push_back(h); + matchh = true; } h = Hash(inner[level], h); level++; } } - // Return result. - if (pmutated) *pmutated = mutated; - if (proot) *proot = h; } static std::vector ComputeMerklePath(const std::vector& leaves, uint32_t position) { std::vector ret; - MerkleComputation(leaves, nullptr, nullptr, position, &ret); + MerkleComputation(leaves, position, ret); return ret; } diff --git a/depend/bitcoin/src/consensus/merkle.h b/depend/bitcoin/src/consensus/merkle.h index c722cbe..29282d2 100644 --- a/depend/bitcoin/src/consensus/merkle.h +++ b/depend/bitcoin/src/consensus/merkle.h @@ -20,9 +20,8 @@ uint256 BlockMerkleRoot(const CBlock& block, bool* mutated = nullptr); /* * Compute the Merkle root of the witness transactions in a block. - * *mutated is set to true if a duplicated subtree was found. 
*/ -uint256 BlockWitnessMerkleRoot(const CBlock& block, bool* mutated = nullptr); +uint256 BlockWitnessMerkleRoot(const CBlock& block); /** * Compute merkle path to the specified transaction diff --git a/depend/bitcoin/src/crypto/siphash.cpp b/depend/bitcoin/src/crypto/siphash.cpp index 1a9eb77..89dbad6 100644 --- a/depend/bitcoin/src/crypto/siphash.cpp +++ b/depend/bitcoin/src/crypto/siphash.cpp @@ -19,41 +19,33 @@ v2 = std::rotl(v2, 32); \ } while (0) -CSipHasher::CSipHasher(uint64_t k0, uint64_t k1) -{ - v[0] = 0x736f6d6570736575ULL ^ k0; - v[1] = 0x646f72616e646f6dULL ^ k1; - v[2] = 0x6c7967656e657261ULL ^ k0; - v[3] = 0x7465646279746573ULL ^ k1; - count = 0; - tmp = 0; -} +CSipHasher::CSipHasher(uint64_t k0, uint64_t k1) : m_state{k0, k1} {} CSipHasher& CSipHasher::Write(uint64_t data) { - uint64_t v0 = v[0], v1 = v[1], v2 = v[2], v3 = v[3]; + uint64_t v0 = m_state.v[0], v1 = m_state.v[1], v2 = m_state.v[2], v3 = m_state.v[3]; - assert(count % 8 == 0); + assert(m_count % 8 == 0); v3 ^= data; SIPROUND; SIPROUND; v0 ^= data; - v[0] = v0; - v[1] = v1; - v[2] = v2; - v[3] = v3; + m_state.v[0] = v0; + m_state.v[1] = v1; + m_state.v[2] = v2; + m_state.v[3] = v3; - count += 8; + m_count += 8; return *this; } CSipHasher& CSipHasher::Write(std::span data) { - uint64_t v0 = v[0], v1 = v[1], v2 = v[2], v3 = v[3]; - uint64_t t = tmp; - uint8_t c = count; + uint64_t v0 = m_state.v[0], v1 = m_state.v[1], v2 = m_state.v[2], v3 = m_state.v[3]; + uint64_t t = m_tmp; + uint8_t c = m_count; while (data.size() > 0) { t |= uint64_t{data.front()} << (8 * (c % 8)); @@ -68,21 +60,21 @@ CSipHasher& CSipHasher::Write(std::span data) data = data.subspan(1); } - v[0] = v0; - v[1] = v1; - v[2] = v2; - v[3] = v3; - count = c; - tmp = t; + m_state.v[0] = v0; + m_state.v[1] = v1; + m_state.v[2] = v2; + m_state.v[3] = v3; + m_count = c; + m_tmp = t; return *this; } uint64_t CSipHasher::Finalize() const { - uint64_t v0 = v[0], v1 = v[1], v2 = v[2], v3 = v[3]; + uint64_t v0 = m_state.v[0], v1 = m_state.v[1], v2 = m_state.v[2], v3 = m_state.v[3]; - uint64_t t = tmp | (((uint64_t)count) << 56); + uint64_t t = m_tmp | (((uint64_t)m_count) << 56); v3 ^= t; SIPROUND; @@ -96,15 +88,11 @@ uint64_t CSipHasher::Finalize() const return v0 ^ v1 ^ v2 ^ v3; } -uint64_t SipHashUint256(uint64_t k0, uint64_t k1, const uint256& val) +uint64_t PresaltedSipHasher::operator()(const uint256& val) const noexcept { - /* Specialized implementation for efficiency */ + uint64_t v0 = m_state.v[0], v1 = m_state.v[1], v2 = m_state.v[2], v3 = m_state.v[3]; uint64_t d = val.GetUint64(0); - - uint64_t v0 = 0x736f6d6570736575ULL ^ k0; - uint64_t v1 = 0x646f72616e646f6dULL ^ k1; - uint64_t v2 = 0x6c7967656e657261ULL ^ k0; - uint64_t v3 = 0x7465646279746573ULL ^ k1 ^ d; + v3 ^= d; SIPROUND; SIPROUND; @@ -136,16 +124,12 @@ uint64_t SipHashUint256(uint64_t k0, uint64_t k1, const uint256& val) return v0 ^ v1 ^ v2 ^ v3; } -uint64_t SipHashUint256Extra(uint64_t k0, uint64_t k1, const uint256& val, uint32_t extra) +/** Specialized implementation for efficiency */ +uint64_t PresaltedSipHasher::operator()(const uint256& val, uint32_t extra) const noexcept { - /* Specialized implementation for efficiency */ + uint64_t v0 = m_state.v[0], v1 = m_state.v[1], v2 = m_state.v[2], v3 = m_state.v[3]; uint64_t d = val.GetUint64(0); - - uint64_t v0 = 0x736f6d6570736575ULL ^ k0; - uint64_t v1 = 0x646f72616e646f6dULL ^ k1; - uint64_t v2 = 0x6c7967656e657261ULL ^ k0; - uint64_t v3 = 0x7465646279746573ULL ^ k1 ^ d; - + v3 ^= d; SIPROUND; SIPROUND; v0 ^= d; diff --git 
a/depend/bitcoin/src/crypto/siphash.h b/depend/bitcoin/src/crypto/siphash.h index 8d41a08..2f28473 100644 --- a/depend/bitcoin/src/crypto/siphash.h +++ b/depend/bitcoin/src/crypto/siphash.h @@ -5,23 +5,34 @@ #ifndef BITCOIN_CRYPTO_SIPHASH_H #define BITCOIN_CRYPTO_SIPHASH_H +#include #include #include class uint256; -/** SipHash-2-4 */ +/** Shared SipHash internal state v[0..3], initialized from (k0, k1). */ +class SipHashState +{ + static constexpr uint64_t C0{0x736f6d6570736575ULL}, C1{0x646f72616e646f6dULL}, C2{0x6c7967656e657261ULL}, C3{0x7465646279746573ULL}; + +public: + explicit SipHashState(uint64_t k0, uint64_t k1) noexcept : v{C0 ^ k0, C1 ^ k1, C2 ^ k0, C3 ^ k1} {} + + std::array v{}; +}; + +/** General SipHash-2-4 implementation. */ class CSipHasher { -private: - uint64_t v[4]; - uint64_t tmp; - uint8_t count; // Only the low 8 bits of the input size matter. + SipHashState m_state; + uint64_t m_tmp{0}; + uint8_t m_count{0}; //!< Only the low 8 bits of the input size matter. public: - /** Construct a SipHash calculator initialized with 128-bit key (k0, k1) */ + /** Construct a SipHash calculator initialized with 128-bit key (k0, k1). */ CSipHasher(uint64_t k0, uint64_t k1); - /** Hash a 64-bit integer worth of data + /** Hash a 64-bit integer worth of data. * It is treated as if this was the little-endian interpretation of 8 bytes. * This function can only be used when a multiple of 8 bytes have been written so far. */ @@ -32,17 +43,30 @@ class CSipHasher uint64_t Finalize() const; }; -/** Optimized SipHash-2-4 implementation for uint256. +/** + * Optimized SipHash-2-4 implementation for uint256. * - * It is identical to: - * SipHasher(k0, k1) - * .Write(val.GetUint64(0)) - * .Write(val.GetUint64(1)) - * .Write(val.GetUint64(2)) - * .Write(val.GetUint64(3)) - * .Finalize() + * This class caches the initial SipHash v[0..3] state derived from (k0, k1) + * and implements a specialized hashing path for uint256 values, with or + * without an extra 32-bit word. The internal state is immutable, so + * PresaltedSipHasher instances can be reused for multiple hashes with the + * same key. */ -uint64_t SipHashUint256(uint64_t k0, uint64_t k1, const uint256& val); -uint64_t SipHashUint256Extra(uint64_t k0, uint64_t k1, const uint256& val, uint32_t extra); +class PresaltedSipHasher +{ + const SipHashState m_state; + +public: + explicit PresaltedSipHasher(uint64_t k0, uint64_t k1) noexcept : m_state{k0, k1} {} + + /** Equivalent to CSipHasher(k0, k1).Write(val).Finalize(). */ + uint64_t operator()(const uint256& val) const noexcept; + + /** + * Equivalent to CSipHasher(k0, k1).Write(val).Write(extra).Finalize(), + * with `extra` encoded as 4 little-endian bytes. 
+ */ + uint64_t operator()(const uint256& val, uint32_t extra) const noexcept; +}; #endif // BITCOIN_CRYPTO_SIPHASH_H diff --git a/depend/bitcoin/src/dbwrapper.cpp b/depend/bitcoin/src/dbwrapper.cpp index cb9abee..608f61f 100644 --- a/depend/bitcoin/src/dbwrapper.cpp +++ b/depend/bitcoin/src/dbwrapper.cpp @@ -47,8 +47,8 @@ static void HandleError(const leveldb::Status& status) if (status.ok()) return; const std::string errmsg = "Fatal LevelDB error: " + status.ToString(); - LogPrintf("%s\n", errmsg); - LogPrintf("You can use -debug=leveldb to get more complete diagnostic messages\n"); + LogError("%s", errmsg); + LogInfo("You can use -debug=leveldb to get more complete diagnostic messages"); throw dbwrapper_error(errmsg); } @@ -309,7 +309,7 @@ std::optional CDBWrapper::ReadImpl(std::span key) if (!status.ok()) { if (status.IsNotFound()) return std::nullopt; - LogPrintf("LevelDB read failure: %s\n", status.ToString()); + LogError("LevelDB read failure: %s", status.ToString()); HandleError(status); } return strValue; @@ -324,7 +324,7 @@ bool CDBWrapper::ExistsImpl(std::span key) const if (!status.ok()) { if (status.IsNotFound()) return false; - LogPrintf("LevelDB read failure: %s\n", status.ToString()); + LogError("LevelDB read failure: %s", status.ToString()); HandleError(status); } return true; diff --git a/depend/bitcoin/src/flatfile.cpp b/depend/bitcoin/src/flatfile.cpp index df6596e..33d8baf 100644 --- a/depend/bitcoin/src/flatfile.cpp +++ b/depend/bitcoin/src/flatfile.cpp @@ -41,11 +41,11 @@ FILE* FlatFileSeq::Open(const FlatFilePos& pos, bool read_only) const if (!file && !read_only) file = fsbridge::fopen(path, "wb+"); if (!file) { - LogPrintf("Unable to open file %s\n", fs::PathToString(path)); + LogError("Unable to open file %s", fs::PathToString(path)); return nullptr; } if (pos.nPos && fseek(file, pos.nPos, SEEK_SET)) { - LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, fs::PathToString(path)); + LogError("Unable to seek to position %u of %s", pos.nPos, fs::PathToString(path)); if (fclose(file) != 0) { LogError("Unable to close file %s", fs::PathToString(path)); } diff --git a/depend/bitcoin/src/flatfile.h b/depend/bitcoin/src/flatfile.h index 3ec4eaf..e902fbc 100644 --- a/depend/bitcoin/src/flatfile.h +++ b/depend/bitcoin/src/flatfile.h @@ -29,10 +29,6 @@ struct FlatFilePos return (a.nFile == b.nFile && a.nPos == b.nPos); } - friend bool operator!=(const FlatFilePos &a, const FlatFilePos &b) { - return !(a == b); - } - bool IsNull() const { return (nFile == -1); } std::string ToString() const; diff --git a/depend/bitcoin/src/httprpc.cpp b/depend/bitcoin/src/httprpc.cpp index 947f1d8..1766d1a 100644 --- a/depend/bitcoin/src/httprpc.cpp +++ b/depend/bitcoin/src/httprpc.cpp @@ -120,7 +120,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) jreq.context = context; jreq.peerAddr = req->GetPeer().ToStringAddrPort(); if (!RPCAuthorized(authHeader.second, jreq.authUser)) { - LogPrintf("ThreadRPCServer incorrect password attempt from %s\n", jreq.peerAddr); + LogWarning("ThreadRPCServer incorrect password attempt from %s", jreq.peerAddr); /* Deter brute-forcing If this results in a DoS the user really @@ -144,7 +144,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) UniValue reply; bool user_has_whitelist = g_rpc_whitelist.count(jreq.authUser); if (!user_has_whitelist && g_rpc_whitelist_default) { - LogPrintf("RPC User %s not allowed to call any methods\n", jreq.authUser); + LogWarning("RPC User %s not allowed to call any 
methods", jreq.authUser); req->WriteReply(HTTP_FORBIDDEN); return false; @@ -152,7 +152,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) } else if (valRequest.isObject()) { jreq.parse(valRequest); if (user_has_whitelist && !g_rpc_whitelist[jreq.authUser].count(jreq.strMethod)) { - LogPrintf("RPC User %s not allowed to call method %s\n", jreq.authUser, jreq.strMethod); + LogWarning("RPC User %s not allowed to call method %s", jreq.authUser, jreq.strMethod); req->WriteReply(HTTP_FORBIDDEN); return false; } @@ -182,7 +182,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) // Parse method std::string strMethod = request.find_value("method").get_str(); if (!g_rpc_whitelist[jreq.authUser].count(strMethod)) { - LogPrintf("RPC User %s not allowed to call method %s\n", jreq.authUser, strMethod); + LogWarning("RPC User %s not allowed to call method %s", jreq.authUser, strMethod); req->WriteReply(HTTP_FORBIDDEN); return false; } @@ -297,7 +297,7 @@ static bool InitRPCAuthentication() fields.insert(fields.end(), salt_hmac.begin(), salt_hmac.end()); g_rpcauth.push_back(fields); } else { - LogPrintf("Invalid -rpcauth argument.\n"); + LogWarning("Invalid -rpcauth argument."); return false; } } diff --git a/depend/bitcoin/src/httpserver.cpp b/depend/bitcoin/src/httpserver.cpp index 684b85f..abfcb45 100644 --- a/depend/bitcoin/src/httpserver.cpp +++ b/depend/bitcoin/src/httpserver.cpp @@ -330,9 +330,9 @@ static void http_request_cb(struct evhttp_request* req, void* arg) std::unique_ptr item(new HTTPWorkItem(std::move(hreq), path, i->handler)); assert(g_work_queue); if (g_work_queue->Enqueue(item.get())) { - item.release(); /* if true, queue took ownership */ + [[maybe_unused]] auto _{item.release()}; /* if true, queue took ownership */ } else { - LogPrintf("WARNING: request rejected because http work queue depth exceeded, it can be increased with the -rpcworkqueue= setting\n"); + LogWarning("Request rejected because http work queue depth exceeded, it can be increased with the -rpcworkqueue= setting"); item->req->WriteReply(HTTP_SERVICE_UNAVAILABLE, "Work queue depth exceeded"); } } else { @@ -372,10 +372,10 @@ static bool HTTPBindAddresses(struct evhttp* http) endpoints.emplace_back("::1", http_port); endpoints.emplace_back("127.0.0.1", http_port); if (!gArgs.GetArgs("-rpcallowip").empty()) { - LogPrintf("WARNING: option -rpcallowip was specified without -rpcbind; this doesn't usually make sense\n"); + LogWarning("Option -rpcallowip was specified without -rpcbind; this doesn't usually make sense"); } if (!gArgs.GetArgs("-rpcbind").empty()) { - LogPrintf("WARNING: option -rpcbind was ignored because -rpcallowip was not specified, refusing to allow everyone to connect\n"); + LogWarning("Option -rpcbind was ignored because -rpcallowip was not specified, refusing to allow everyone to connect"); } } else { // Specific bind addresses for (const std::string& strRPCBind : gArgs.GetArgs("-rpcbind")) { @@ -396,7 +396,7 @@ static bool HTTPBindAddresses(struct evhttp* http) if (bind_handle) { const std::optional addr{LookupHost(i->first, false)}; if (i->first.empty() || (addr.has_value() && addr->IsBindAny())) { - LogPrintf("WARNING: the RPC server is not safe to expose to untrusted networks such as the public internet\n"); + LogWarning("The RPC server is not safe to expose to untrusted networks such as the public internet"); } // Set the no-delay option (disable Nagle's algorithm) on the TCP socket. 
evutil_socket_t fd = evhttp_bound_socket_get_fd(bind_handle); @@ -406,7 +406,7 @@ static bool HTTPBindAddresses(struct evhttp* http) } boundSockets.push_back(bind_handle); } else { - LogPrintf("Binding RPC on address %s port %i failed.\n", i->first, i->second); + LogWarning("Binding RPC on address %s port %i failed.", i->first, i->second); } } return !boundSockets.empty(); @@ -462,7 +462,7 @@ bool InitHTTPServer(const util::SignalInterrupt& interrupt) raii_evhttp http_ctr = obtain_evhttp(base_ctr.get()); struct evhttp* http = http_ctr.get(); if (!http) { - LogPrintf("couldn't create evhttp. Exiting.\n"); + LogError("Couldn't create evhttp. Exiting."); return false; } @@ -472,7 +472,7 @@ bool InitHTTPServer(const util::SignalInterrupt& interrupt) evhttp_set_gencb(http, http_request_cb, (void*)&interrupt); if (!HTTPBindAddresses(http)) { - LogPrintf("Unable to bind any endpoint for RPC server\n"); + LogError("Unable to bind any endpoint for RPC server"); return false; } @@ -602,7 +602,7 @@ HTTPRequest::~HTTPRequest() { if (!replySent) { // Keep track of whether reply was sent to avoid request leaks - LogPrintf("%s: Unhandled request\n", __func__); + LogWarning("Unhandled HTTP request"); WriteReply(HTTP_INTERNAL_SERVER_ERROR, "Unhandled request"); } // evhttpd cleans up the request, as long as a reply was sent. diff --git a/depend/bitcoin/src/i2p.cpp b/depend/bitcoin/src/i2p.cpp index 80f3bde..b435747 100644 --- a/depend/bitcoin/src/i2p.cpp +++ b/depend/bitcoin/src/i2p.cpp @@ -149,7 +149,7 @@ bool Session::Listen(Connection& conn) conn.sock = StreamAccept(); return true; } catch (const std::runtime_error& e) { - LogPrintLevel(BCLog::I2P, BCLog::Level::Error, "Couldn't listen: %s\n", e.what()); + LogError("Couldn't listen: %s\n", e.what()); CheckControlSock(); } return false; @@ -206,9 +206,9 @@ bool Session::Accept(Connection& conn) } if (m_interrupt->interrupted()) { - LogPrintLevel(BCLog::I2P, BCLog::Level::Debug, "Accept was interrupted\n"); + LogDebug(BCLog::I2P, "Accept was interrupted\n"); } else { - LogPrintLevel(BCLog::I2P, BCLog::Level::Debug, "Error accepting%s: %s\n", disconnect ? " (will close the session)" : "", errmsg); + LogDebug(BCLog::I2P, "Error accepting%s: %s\n", disconnect ? " (will close the session)" : "", errmsg); } if (disconnect) { LOCK(m_mutex); @@ -224,7 +224,7 @@ bool Session::Connect(const CService& to, Connection& conn, bool& proxy_error) // Refuse connecting to arbitrary ports. We don't specify any destination port to the SAM proxy // when connecting (SAM 3.1 does not use ports) and it forces/defaults it to I2P_SAM31_PORT. 
if (to.GetPort() != I2P_SAM31_PORT) { - LogPrintLevel(BCLog::I2P, BCLog::Level::Debug, "Error connecting to %s, connection refused due to arbitrary port %s\n", to.ToStringAddrPort(), to.GetPort()); + LogDebug(BCLog::I2P, "Error connecting to %s, connection refused due to arbitrary port %s\n", to.ToStringAddrPort(), to.GetPort()); proxy_error = false; return false; } @@ -272,7 +272,7 @@ bool Session::Connect(const CService& to, Connection& conn, bool& proxy_error) throw std::runtime_error(strprintf("\"%s\"", connect_reply.full)); } catch (const std::runtime_error& e) { - LogPrintLevel(BCLog::I2P, BCLog::Level::Debug, "Error connecting to %s: %s\n", to.ToStringAddrPort(), e.what()); + LogDebug(BCLog::I2P, "Error connecting to %s: %s\n", to.ToStringAddrPort(), e.what()); CheckControlSock(); return false; } @@ -345,7 +345,7 @@ void Session::CheckControlSock() std::string errmsg; if (m_control_sock && !m_control_sock->IsConnected(errmsg)) { - LogPrintLevel(BCLog::I2P, BCLog::Level::Debug, "Control socket error: %s\n", errmsg); + LogDebug(BCLog::I2P, "Control socket error: %s\n", errmsg); Disconnect(); } } @@ -415,7 +415,7 @@ void Session::CreateIfNotCreatedAlready() const auto session_type = m_transient ? "transient" : "persistent"; const auto session_id = GetRandHash().GetHex().substr(0, 10); // full is overkill, too verbose in the logs - LogPrintLevel(BCLog::I2P, BCLog::Level::Debug, "Creating %s SAM session %s with %s\n", session_type, session_id, m_control_host.ToString()); + LogDebug(BCLog::I2P, "Creating %s SAM session %s with %s\n", session_type, session_id, m_control_host.ToString()); auto sock = Hello(); diff --git a/depend/bitcoin/src/index/base.cpp b/depend/bitcoin/src/index/base.cpp index d5c4044..6d42480 100644 --- a/depend/bitcoin/src/index/base.cpp +++ b/depend/bitcoin/src/index/base.cpp @@ -73,13 +73,16 @@ BaseIndex::DB::DB(const fs::path& path, size_t n_cache_size, bool f_memory, bool .options = [] { DBOptions options; node::ReadDatabaseArgs(gArgs, options); return options; }()}} {} -bool BaseIndex::DB::ReadBestBlock(CBlockLocator& locator) const +CBlockLocator BaseIndex::DB::ReadBestBlock() const { + CBlockLocator locator; + bool success = Read(DB_BEST_BLOCK, locator); if (!success) { locator.SetNull(); } - return success; + + return locator; } void BaseIndex::DB::WriteBestBlock(CDBBatch& batch, const CBlockLocator& locator) @@ -111,10 +114,7 @@ bool BaseIndex::Init() // callbacks are not missed once m_synced is true. m_chain->context()->validation_signals->RegisterValidationInterface(this); - CBlockLocator locator; - if (!GetDB().ReadBestBlock(locator)) { - locator.SetNull(); - } + const auto locator{GetDB().ReadBestBlock()}; LOCK(cs_main); CChain& index_chain = m_chainstate->m_chain; diff --git a/depend/bitcoin/src/index/base.h b/depend/bitcoin/src/index/base.h index 5a6f0a4..cbd0211 100644 --- a/depend/bitcoin/src/index/base.h +++ b/depend/bitcoin/src/index/base.h @@ -68,7 +68,8 @@ class BaseIndex : public CValidationInterface bool f_memory = false, bool f_wipe = false, bool f_obfuscate = false); /// Read block locator of the chain that the index is in sync with. - bool ReadBestBlock(CBlockLocator& locator) const; + /// Note, the returned locator will be empty if no record exists. + CBlockLocator ReadBestBlock() const; /// Write block locator of the chain that the index is in sync with. 
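The BaseIndex::DB::ReadBestBlock change above replaces a bool-plus-out-parameter API with one that returns the locator directly, a missing record being represented by an empty locator (as the new base.h comment notes), so the caller's fallback branch disappears. The same shape in a self-contained toy form; the Store type, key strings, and method names here are made up for illustration:

#include <map>
#include <string>

// Hypothetical key/value store, illustrating the out-parameter -> return-value refactor.
struct Store {
    std::map<std::string, std::string> records;

    // Old style: bool plus out-parameter; every caller must handle the failure branch.
    bool Read(const std::string& key, std::string& out) const
    {
        auto it = records.find(key);
        if (it == records.end()) return false;
        out = it->second;
        return true;
    }

    // New style: a missing record is just a well-defined empty value.
    std::string ReadOrEmpty(const std::string& key) const
    {
        std::string value;
        if (!Read(key, value)) value.clear();
        return value;
    }
};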
void WriteBestBlock(CDBBatch& batch, const CBlockLocator& locator); diff --git a/depend/bitcoin/src/init.cpp b/depend/bitcoin/src/init.cpp index ecdce0c..e283e89 100644 --- a/depend/bitcoin/src/init.cpp +++ b/depend/bitcoin/src/init.cpp @@ -198,7 +198,7 @@ static void RemovePidFile(const ArgsManager& args) const auto pid_path{GetPidFile(args)}; if (std::error_code error; !fs::remove(pid_path, error)) { std::string msg{error ? error.message() : "File does not exist"}; - LogPrintf("Unable to remove PID file (%s): %s\n", fs::PathToString(pid_path), msg); + LogWarning("Unable to remove PID file (%s): %s", fs::PathToString(pid_path), msg); } } @@ -632,12 +632,17 @@ void SetupServerArgs(ArgsManager& argsman, bool can_listen_ipc) argsman.AddArg("-checkpoints", "", ArgsManager::ALLOW_ANY, OptionsCategory::HIDDEN); argsman.AddArg("-deprecatedrpc=", "Allows deprecated RPC method(s) to be used", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-stopafterblockimport", strprintf("Stop running after importing blocks from disk (default: %u)", DEFAULT_STOPAFTERBLOCKIMPORT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - argsman.AddArg("-stopatheight", strprintf("Stop running after reaching the given height in the main chain (default: %u)", DEFAULT_STOPATHEIGHT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - argsman.AddArg("-limitancestorcount=", strprintf("Do not accept transactions if number of in-mempool ancestors is or more (default: %u)", DEFAULT_ANCESTOR_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - argsman.AddArg("-limitancestorsize=", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT_KVB), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - argsman.AddArg("-limitdescendantcount=", strprintf("Do not accept transactions if any ancestor would have or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - argsman.AddArg("-limitdescendantsize=", strprintf("Do not accept transactions if any ancestor would have more than kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT_KVB), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-stopatheight", strprintf("Stop running after reaching the given height in the main chain (default: %u). Blocks after target height may be processed during shutdown.", DEFAULT_STOPATHEIGHT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-limitancestorcount=", strprintf("Deprecated setting to not accept transactions if number of in-mempool ancestors is or more (default: %u); replaced by cluster limits (see -limitclustercount) and only used by wallet for coin selection", DEFAULT_ANCESTOR_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + // Ancestor and descendant size limits were removed. We keep + // -limitancestorsize/-limitdescendantsize as hidden args to display a more + // user friendly error when set. 
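Keeping the removed size-limit options registered as hidden arguments (see the AddArg calls just below) means setting them is still detectable at startup, so the node can fail with a targeted message rather than a generic "unrecognized option" error. A hedged sketch of the kind of check this enables; the helper, its call site, and the error text are assumptions, not part of this hunk:

#include <common/args.h>
#include <string>

// Hypothetical startup check for options that are registered as hidden but no longer supported.
static bool CheckRemovedMempoolLimits(const ArgsManager& args, std::string& error)
{
    for (const char* opt : {"-limitancestorsize", "-limitdescendantsize"}) {
        if (args.IsArgSet(opt)) {
            error = std::string{opt} + " is no longer supported; mempool package size is bounded by cluster limits";
            return false;
        }
    }
    return true;
}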
+ argsman.AddArg("-limitancestorsize", "", ArgsManager::ALLOW_ANY, OptionsCategory::HIDDEN); + argsman.AddArg("-limitdescendantsize", "", ArgsManager::ALLOW_ANY, OptionsCategory::HIDDEN); + argsman.AddArg("-limitdescendantcount=", strprintf("Deprecated setting to not accept transactions if any ancestor would have or more in-mempool descendants (default: %u); replaced by cluster limits (see -limitclustercount) and only used by wallet for coin selection", DEFAULT_DESCENDANT_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-test=