From 10cbf2255d9e74166f5f41e82fefa4a7f890e73f Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Wed, 12 Feb 2025 17:39:52 +0100 Subject: [PATCH 001/115] ci: Use APT_LLVM_V in msan task Also, use update-alternatives to avoid having to manually specify clang-${APT_LLVM_V} or llvm-symbolizer-${APT_LLVM_V} everywhere. Github-Pull: #32999 Rebased-From: fad040a5787a8ac0a13aef5c54e5a675de239e92 --- ci/test/00_setup_env_native_asan.sh | 4 +-- ci/test/00_setup_env_native_fuzz.sh | 5 ++-- ci/test/00_setup_env_native_msan.sh | 3 ++- ci/test/00_setup_env_native_tsan.sh | 2 +- ci/test/01_base_install.sh | 40 ++++++++++++++++++----------- 5 files changed, 32 insertions(+), 22 deletions(-) diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh index ead550a43ced..9f562df4647a 100755 --- a/ci/test/00_setup_env_native_asan.sh +++ b/ci/test/00_setup_env_native_asan.sh @@ -26,8 +26,8 @@ export GOAL="install" export BITCOIN_CONFIG="\ -DWITH_USDT=ON -DWITH_ZMQ=ON -DWITH_BDB=ON -DWARN_INCOMPATIBLE_BDB=OFF -DBUILD_GUI=ON \ -DSANITIZERS=address,float-divide-by-zero,integer,undefined \ - -DCMAKE_C_COMPILER=clang-${APT_LLVM_V} \ - -DCMAKE_CXX_COMPILER=clang++-${APT_LLVM_V} \ + -DCMAKE_C_COMPILER=clang \ + -DCMAKE_CXX_COMPILER=clang++ \ -DCMAKE_C_FLAGS='-ftrivial-auto-var-init=pattern' \ -DCMAKE_CXX_FLAGS='-ftrivial-auto-var-init=pattern -Wno-error=deprecated-declarations' \ -DAPPEND_CXXFLAGS='-std=c++23' \ diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh index c5220211fc56..f80c4d988deb 100755 --- a/ci/test/00_setup_env_native_fuzz.sh +++ b/ci/test/00_setup_env_native_fuzz.sh @@ -19,9 +19,8 @@ export CI_CONTAINER_CAP="--cap-add SYS_PTRACE" # If run with (ASan + LSan), the export BITCOIN_CONFIG="\ -DBUILD_FOR_FUZZING=ON \ -DSANITIZERS=fuzzer,address,undefined,float-divide-by-zero,integer \ - -DCMAKE_C_COMPILER=clang-${APT_LLVM_V} \ - -DCMAKE_CXX_COMPILER=clang++-${APT_LLVM_V} \ + 
-DCMAKE_C_COMPILER=clang \ + -DCMAKE_CXX_COMPILER=clang++ \ -DCMAKE_C_FLAGS='-ftrivial-auto-var-init=pattern' \ -DCMAKE_CXX_FLAGS='-ftrivial-auto-var-init=pattern' \ " -export LLVM_SYMBOLIZER_PATH="/usr/bin/llvm-symbolizer-${APT_LLVM_V}" diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh index 8784aaa5b7bf..effd8a9413d9 100755 --- a/ci/test/00_setup_env_native_msan.sh +++ b/ci/test/00_setup_env_native_msan.sh @@ -7,13 +7,14 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" +export APT_LLVM_V="20" LIBCXX_DIR="/msan/cxx_build/" export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}" export CONTAINER_NAME="ci_native_msan" -export PACKAGES="ninja-build" +export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev ninja-build" # BDB generates false-positives and will be removed in future export DEP_OPTS="DEBUG=1 NO_BDB=1 NO_QT=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" export GOAL="install" diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh index b341adfec53c..c8d9c8455f40 100755 --- a/ci/test/00_setup_env_native_tsan.sh +++ b/ci/test/00_setup_env_native_tsan.sh @@ -10,7 +10,7 @@ export CONTAINER_NAME=ci_native_tsan export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" export APT_LLVM_V="20" export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev libc++abi-${APT_LLVM_V}-dev libc++-${APT_LLVM_V}-dev python3-zmq" -export DEP_OPTS="CC=clang-${APT_LLVM_V} CXX='clang++-${APT_LLVM_V} -stdlib=libc++'" +export 
DEP_OPTS="CC=clang CXX='clang++ -stdlib=libc++'" export GOAL="install" export BITCOIN_CONFIG="-DWITH_ZMQ=ON -DSANITIZERS=thread \ -DAPPEND_CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER -DDEBUG_LOCKCONTENTION -D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES'" diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh index 1344563268f6..36a7c43b3f1f 100755 --- a/ci/test/01_base_install.sh +++ b/ci/test/01_base_install.sh @@ -43,27 +43,37 @@ elif [ "$CI_OS_NAME" != "macos" ]; then ${CI_RETRY_EXE} bash -c "apt-get install --no-install-recommends --no-upgrade -y $PACKAGES $CI_BASE_PACKAGES" fi +if [ -n "${APT_LLVM_V}" ]; then + update-alternatives --install /usr/bin/clang++ clang++ "/usr/bin/clang++-${APT_LLVM_V}" 100 + update-alternatives --install /usr/bin/clang clang "/usr/bin/clang-${APT_LLVM_V}" 100 + update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer "/usr/bin/llvm-symbolizer-${APT_LLVM_V}" 100 +fi + if [ -n "$PIP_PACKAGES" ]; then # shellcheck disable=SC2086 ${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES fi if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then - ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-20.1.0" /msan/llvm-project - - cmake -G Ninja -B /msan/clang_build/ \ - -DLLVM_ENABLE_PROJECTS="clang" \ - -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_TARGETS_TO_BUILD=Native \ - -DLLVM_ENABLE_RUNTIMES="compiler-rt;libcxx;libcxxabi;libunwind" \ - -S /msan/llvm-project/llvm - - ninja -C /msan/clang_build/ "$MAKEJOBS" - ninja -C /msan/clang_build/ install-runtimes - - update-alternatives --install /usr/bin/clang++ clang++ /msan/clang_build/bin/clang++ 100 - update-alternatives --install /usr/bin/clang clang /msan/clang_build/bin/clang 100 - update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer /msan/clang_build/bin/llvm-symbolizer 100 + if [ -n "${APT_LLVM_V}" ]; then + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-$( clang --version | sed --silent 's@.*clang 
version \([0-9.]*\).*@\1@p' )" /msan/llvm-project + else + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-20.1.8" /msan/llvm-project + + cmake -G Ninja -B /msan/clang_build/ \ + -DLLVM_ENABLE_PROJECTS="clang" \ + -DCMAKE_BUILD_TYPE=Release \ + -DLLVM_TARGETS_TO_BUILD=Native \ + -DLLVM_ENABLE_RUNTIMES="compiler-rt;libcxx;libcxxabi;libunwind" \ + -S /msan/llvm-project/llvm + + ninja -C /msan/clang_build/ "$MAKEJOBS" + ninja -C /msan/clang_build/ install-runtimes + + update-alternatives --install /usr/bin/clang++ clang++ /msan/clang_build/bin/clang++ 100 + update-alternatives --install /usr/bin/clang clang /msan/clang_build/bin/clang 100 + update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer /msan/clang_build/bin/llvm-symbolizer 100 + fi cmake -G Ninja -B /msan/cxx_build/ \ -DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libunwind" \ From 0fba5ae02101b358aa4938d35471356b75e0e615 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 30 Jul 2025 11:10:16 +0100 Subject: [PATCH 002/115] ci: allow libc++ instrumentation other than msan Github-Pull: #33099 Rebased-From: 6653cafd0b70b0e7a29c6cfe236d3bf9d1bce91e --- ci/test/00_setup_env_native_fuzz_with_msan.sh | 4 +-- ci/test/00_setup_env_native_msan.sh | 4 +-- ci/test/01_base_install.sh | 32 +++++++++---------- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/ci/test/00_setup_env_native_fuzz_with_msan.sh b/ci/test/00_setup_env_native_fuzz_with_msan.sh index a6e53dc8a2fd..27b704017c42 100755 --- a/ci/test/00_setup_env_native_fuzz_with_msan.sh +++ b/ci/test/00_setup_env_native_fuzz_with_msan.sh @@ -7,7 +7,7 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" -LIBCXX_DIR="/msan/cxx_build/" +LIBCXX_DIR="/cxx_build/" export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib 
-Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}" @@ -27,7 +27,7 @@ export BITCOIN_CONFIG="\ -DSANITIZERS=fuzzer,memory \ -DAPPEND_CPPFLAGS='-DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE -U_FORTIFY_SOURCE' \ " -export USE_MEMORY_SANITIZER="true" +export USE_INSTRUMENTED_LIBCPP="MemoryWithOrigins" export RUN_UNIT_TESTS="false" export RUN_FUNCTIONAL_TESTS="false" export RUN_FUZZ_TESTS=true diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh index effd8a9413d9..b450a2ea1eb6 100755 --- a/ci/test/00_setup_env_native_msan.sh +++ b/ci/test/00_setup_env_native_msan.sh @@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" export APT_LLVM_V="20" -LIBCXX_DIR="/msan/cxx_build/" +LIBCXX_DIR="/cxx_build/" export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}" @@ -27,4 +27,4 @@ export BITCOIN_CONFIG="\ -DSANITIZERS=memory \ -DAPPEND_CPPFLAGS='-U_FORTIFY_SOURCE' \ " -export USE_MEMORY_SANITIZER="true" +export USE_INSTRUMENTED_LIBCPP="MemoryWithOrigins" diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh index 36a7c43b3f1f..4746a1f69d09 100755 --- a/ci/test/01_base_install.sh +++ b/ci/test/01_base_install.sh @@ -54,31 +54,31 @@ if [ -n "$PIP_PACKAGES" ]; then ${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES fi -if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then +if [[ -n "${USE_INSTRUMENTED_LIBCPP}" ]]; then if [ -n "${APT_LLVM_V}" ]; then - ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-$( clang --version | sed --silent 's@.*clang version 
\([0-9.]*\).*@\1@p' )" /msan/llvm-project + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-$( clang --version | sed --silent 's@.*clang version \([0-9.]*\).*@\1@p' )" /llvm-project else - ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-20.1.8" /msan/llvm-project + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-20.1.8" /llvm-project - cmake -G Ninja -B /msan/clang_build/ \ + cmake -G Ninja -B /clang_build/ \ -DLLVM_ENABLE_PROJECTS="clang" \ -DCMAKE_BUILD_TYPE=Release \ -DLLVM_TARGETS_TO_BUILD=Native \ -DLLVM_ENABLE_RUNTIMES="compiler-rt;libcxx;libcxxabi;libunwind" \ - -S /msan/llvm-project/llvm + -S /llvm-project/llvm - ninja -C /msan/clang_build/ "$MAKEJOBS" - ninja -C /msan/clang_build/ install-runtimes + ninja -C /clang_build/ "$MAKEJOBS" + ninja -C /clang_build/ install-runtimes - update-alternatives --install /usr/bin/clang++ clang++ /msan/clang_build/bin/clang++ 100 - update-alternatives --install /usr/bin/clang clang /msan/clang_build/bin/clang 100 - update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer /msan/clang_build/bin/llvm-symbolizer 100 + update-alternatives --install /usr/bin/clang++ clang++ /clang_build/bin/clang++ 100 + update-alternatives --install /usr/bin/clang clang /clang_build/bin/clang 100 + update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer /clang_build/bin/llvm-symbolizer 100 fi - cmake -G Ninja -B /msan/cxx_build/ \ + cmake -G Ninja -B /cxx_build/ \ -DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libunwind" \ -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_USE_SANITIZER=MemoryWithOrigins \ + -DLLVM_USE_SANITIZER="${USE_INSTRUMENTED_LIBCPP}" \ -DCMAKE_C_COMPILER=clang \ -DCMAKE_CXX_COMPILER=clang++ \ -DLLVM_TARGETS_TO_BUILD=Native \ @@ -86,13 +86,13 @@ if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then -DLIBCXXABI_USE_LLVM_UNWINDER=OFF \ 
-DLIBCXX_ABI_DEFINES="_LIBCPP_ABI_BOUNDED_ITERATORS;_LIBCPP_ABI_BOUNDED_ITERATORS_IN_STD_ARRAY;_LIBCPP_ABI_BOUNDED_ITERATORS_IN_STRING;_LIBCPP_ABI_BOUNDED_ITERATORS_IN_VECTOR;_LIBCPP_ABI_BOUNDED_UNIQUE_PTR" \ -DLIBCXX_HARDENING_MODE=debug \ - -S /msan/llvm-project/runtimes + -S /llvm-project/runtimes - ninja -C /msan/cxx_build/ "$MAKEJOBS" + ninja -C /cxx_build/ "$MAKEJOBS" # Clear no longer needed source folder - du -sh /msan/llvm-project - rm -rf /msan/llvm-project + du -sh /llvm-project + rm -rf /llvm-project fi if [[ "${RUN_TIDY}" == "true" ]]; then From f9939cdbe01fa090bd2ece90f5cbfb17120c2f24 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 30 Jul 2025 11:15:33 +0100 Subject: [PATCH 003/115] ci: instrument libc++ in TSAN job Qt is disabled, as the build is now taking a very long time. Github-Pull: #33099 Rebased-From: b09af2ce508185086bb551bfeb1409355c897e7b --- .cirrus.yml | 2 +- ci/test/00_setup_env_native_tsan.sh | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.cirrus.yml b/.cirrus.yml index 393237af66b7..6e70dc15fecc 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -158,7 +158,7 @@ task: FILE_ENV: "./ci/test/00_setup_env_native_previous_releases.sh" task: - name: 'TSan, depends, gui' + name: 'TSan, depends, no gui' << : *GLOBAL_TASK_TEMPLATE persistent_worker: labels: diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh index c8d9c8455f40..dce63d51515c 100755 --- a/ci/test/00_setup_env_native_tsan.sh +++ b/ci/test/00_setup_env_native_tsan.sh @@ -9,8 +9,11 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_tsan export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" export APT_LLVM_V="20" -export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev libc++abi-${APT_LLVM_V}-dev libc++-${APT_LLVM_V}-dev python3-zmq" -export DEP_OPTS="CC=clang CXX='clang++ -stdlib=libc++'" +LIBCXX_DIR="/cxx_build/" +LIBCXX_FLAGS="-fsanitize=thread -nostdinc++ -nostdlib++ -isystem 
${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" +export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev python3-zmq ninja-build" +export DEP_OPTS="CC=clang CXX=clang++ CXXFLAGS='${LIBCXX_FLAGS}' NO_QT=1" export GOAL="install" export BITCOIN_CONFIG="-DWITH_ZMQ=ON -DSANITIZERS=thread \ -DAPPEND_CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER -DDEBUG_LOCKCONTENTION -D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES'" +export USE_INSTRUMENTED_LIBCPP="Thread" From 5513516241463333548600f691a861dba4c1d5c5 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 30 Jul 2025 11:15:54 +0100 Subject: [PATCH 004/115] ci: remove DEBUG_LOCKORDER from TSAN job Github-Pull: #33099 Rebased-From: 7aa5b67132dfb71e915675a3dbcb806284e08197 --- ci/test/00_setup_env_native_tsan.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh index dce63d51515c..7d8d0cf203cb 100755 --- a/ci/test/00_setup_env_native_tsan.sh +++ b/ci/test/00_setup_env_native_tsan.sh @@ -15,5 +15,5 @@ export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev l export DEP_OPTS="CC=clang CXX=clang++ CXXFLAGS='${LIBCXX_FLAGS}' NO_QT=1" export GOAL="install" export BITCOIN_CONFIG="-DWITH_ZMQ=ON -DSANITIZERS=thread \ --DAPPEND_CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER -DDEBUG_LOCKCONTENTION -D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES'" +-DAPPEND_CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKCONTENTION -D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES'" export USE_INSTRUMENTED_LIBCPP="Thread" From ea40fa95d9af004d85187bee9d8efe278c888d8f Mon Sep 17 00:00:00 2001 From: fanquake Date: Tue, 26 Aug 2025 16:49:38 +0100 Subject: [PATCH 005/115] ci: use LLVM 21 Github-Pull: #33258 Rebased-From: 4cf0ae474ba03830c86653f1abae4ab4d38c94e4 --- ci/test/00_setup_env_native_asan.sh | 2 +- 
ci/test/00_setup_env_native_fuzz.sh | 2 +- ci/test/00_setup_env_native_msan.sh | 2 +- ci/test/00_setup_env_native_tsan.sh | 2 +- ci/test/01_base_install.sh | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh index 9f562df4647a..dbfcc259d661 100755 --- a/ci/test/00_setup_env_native_asan.sh +++ b/ci/test/00_setup_env_native_asan.sh @@ -19,7 +19,7 @@ else fi export CONTAINER_NAME=ci_native_asan -export APT_LLVM_V="20" +export APT_LLVM_V="21" export PACKAGES="systemtap-sdt-dev clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev python3-zmq qtbase5-dev qttools5-dev qttools5-dev-tools libevent-dev libboost-dev libdb5.3++-dev libzmq3-dev libqrencode-dev libsqlite3-dev ${BPFCC_PACKAGE}" export NO_DEPENDS=1 export GOAL="install" diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh index f80c4d988deb..d81cbcf228fd 100755 --- a/ci/test/00_setup_env_native_fuzz.sh +++ b/ci/test/00_setup_env_native_fuzz.sh @@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" export CONTAINER_NAME=ci_native_fuzz -export APT_LLVM_V="20" +export APT_LLVM_V="21" export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev libevent-dev libboost-dev libsqlite3-dev" export NO_DEPENDS=1 export RUN_UNIT_TESTS=false diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh index b450a2ea1eb6..879e82d55a46 100755 --- a/ci/test/00_setup_env_native_msan.sh +++ b/ci/test/00_setup_env_native_msan.sh @@ -7,7 +7,7 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" -export APT_LLVM_V="20" +export APT_LLVM_V="21" LIBCXX_DIR="/cxx_build/" export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 
-L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh index 7d8d0cf203cb..6286e39d8421 100755 --- a/ci/test/00_setup_env_native_tsan.sh +++ b/ci/test/00_setup_env_native_tsan.sh @@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_tsan export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" -export APT_LLVM_V="20" +export APT_LLVM_V="21" LIBCXX_DIR="/cxx_build/" LIBCXX_FLAGS="-fsanitize=thread -nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev python3-zmq ninja-build" diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh index 4746a1f69d09..25a03d5f5034 100755 --- a/ci/test/01_base_install.sh +++ b/ci/test/01_base_install.sh @@ -58,7 +58,7 @@ if [[ -n "${USE_INSTRUMENTED_LIBCPP}" ]]; then if [ -n "${APT_LLVM_V}" ]; then ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-$( clang --version | sed --silent 's@.*clang version \([0-9.]*\).*@\1@p' )" /llvm-project else - ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-20.1.8" /llvm-project + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-21.1.0" /llvm-project cmake -G Ninja -B /clang_build/ \ -DLLVM_ENABLE_PROJECTS="clang" \ From 7c6be9acae5a16956a7f8e53ae3f944a187a6713 Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 5 Sep 2025 12:04:09 +0100 Subject: [PATCH 006/115] doc: update release notes for 29.x --- doc/release-notes.md | 183 ++----------------------------------------- 1 file changed, 5 insertions(+), 178 deletions(-) diff --git a/doc/release-notes.md b/doc/release-notes.md index 
923d34a99bb1..b73e52dc570b 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -1,6 +1,6 @@ -Bitcoin Core version 29.1 is now available from: +Bitcoin Core version 29.x is now available from: - + This release includes various bug fixes and performance improvements, as well as updated translations. @@ -37,192 +37,19 @@ unsupported systems. Notable changes =============== -### Mempool Policy - -- The maximum number of potentially executed legacy signature operations in a - single standard transaction is now limited to 2500. Signature operations in all - previous output scripts, in all input scripts, as well as all P2SH redeem - scripts (if there are any) are counted toward the limit. The new limit is - assumed to not affect any known typically formed standard transactions. The - change was done to prepare for a possible BIP54 deployment in the future. - -- #32521 policy: make pathological transactions packed with legacy sigops non-standard - -- The minimum block feerate (`-blockmintxfee`) has been changed to 1 satoshi per kvB. It can still be changed using the -configuration option. - -- The default minimum relay feerate (`-minrelaytxfee`) and incremental relay feerate (`-incrementalrelayfee`) have been -changed to 100 satoshis per kvB. They can still be changed using their respective configuration options, but it is -recommended to change both together if you decide to do so. - - Other minimum feerates (e.g. the dust feerate, the minimum returned by the fee estimator, and all feerates used by - the wallet) remain unchanged. The mempool minimum feerate still changes in response to high volume. - - Note that unless these lower defaults are widely adopted across the network, transactions created with lower fee - rates are not guaranteed to propagate or confirm. The wallet feerates remain unchanged; `-mintxfee` must be changed - before attempting to create transactions with lower feerates using the wallet. 
- -- #33106 policy: lower the default blockmintxfee, incrementalrelayfee, minrelaytxfee - -### Logging - -Unconditional logging to disk is now rate limited by giving each source location -a quota of 1MiB per hour. Unconditional logging is any logging with a log level -higher than debug, that is `info`, `warning`, and `error`. All logs will be -prefixed with `[*]` if there is at least one source location that is currently -being suppressed. (#32604) - -When `-logsourcelocations` is enabled, the log output now contains the entire -function signature instead of just the function name. (#32604) - -### RPC - -- The `dumptxoutset` RPC now requires a `type` parameter to be specified. To maintain pre - v29.0 behavior, use the `latest` parameter. Documenting this change was missed in the v29.0 - release notes. (#30808) - -### Updated Settings - -- The `-maxmempool` and `-dbcache` startup parameters are now capped on - 32-bit systems to 500MB and 1GiB respectively. - -- #32530 node: cap -maxmempool and -dbcache values for 32-bit - -### Wallet - -- #31757 wallet: fix crash on double block disconnection -- #32553 wallet: Fix logging of wallet version - -### P2P - -- #32826 p2p: add more bad ports - -### Test - -- #32069 test: fix intermittent failure in wallet_reorgsrestore.py -- #32286 test: Handle empty string returned by CLI as None in RPC tests -- #32312 test: Fix feature_pruning test after nTime typo fix -- #32336 test: Suppress upstream -Wduplicate-decl-specifier in bpfcc -- #32463 test: fix an incorrect feature_fee_estimation.py subtest -- #32483 test: fix two intermittent failures in wallet_basic.py -- #32630 test: fix sync function in rpc_psbt.py -- #32765 test: Fix list index out of range error in feature_bip68_sequence.py -- #32742 test: fix catchup loop in outbound eviction functional test -- #32823 test: Fix wait_for_getheaders() call in test_outbound_eviction_blocks_relay_only() -- #32833 test: Add msgtype to msg_generic slots -- #32841 feature_taproot: sample tx 
version border values more -- #32850 test: check P2SH sigop count for coinbase tx -- #32859 test: correctly detect nonstd TRUC tx vsize in feature_taproot -- #33001 test: Do not pass tests on unhandled exceptions - -### Indexes - -- #33212 index: Don't commit state in BaseIndex::Rewind - -### Util - -- #32248 Remove support for RNDR/RNDRRS for aarch64 - -### Build - -- #32356 cmake: Respect user-provided configuration-specific flags -- #32437 crypto: disable ASan for sha256_sse4 with Clang -- #32469 cmake: Allow WITH_DBUS on all Unix-like systems -- #32439 guix: accomodate migration to codeberg -- #32551 cmake: Add missed SSE41_CXXFLAGS -- #32568 depends: use "mkdir -p" when installing xproto -- #32678 guix: warn and abort when SOURCE_DATE_EPOCH is set -- #32690 depends: fix SHA256SUM command on OpenBSD (use GNU mode output) -- #32716 depends: Override host compilers for FreeBSD and OpenBSD -- #32760 depends: capnp 1.2.0 -- #32798 build: add root dir to CMAKE_PREFIX_PATH in toolchain -- #32805 cmake: Use HINTS instead of PATHS in find_* commands -- #32814 cmake: Explicitly specify Boost_ROOT for Homebrew's package -- #32837 depends: fix libevent _WIN32_WINNT usage -- #32943 depends: Force CMAKE_EXPORT_NO_PACKAGE_REGISTRY=TRUE -- #32954 cmake: Drop no longer necessary "cmakeMinimumRequired" object -- #33073 guix: warn SOURCE_DATE_EPOCH set in guix-codesign - -### Gui - -- #864 Crash fix, disconnect numBlocksChanged() signal during shutdown -- #868 Replace stray tfm::format to cerr with qWarning - -### Doc - -- #32333 doc: Add missing top-level description to pruneblockchain RPC -- #32353 doc: Fix fuzz test_runner.py path -- #32389 doc: Fix test_bitcoin path -- #32607 rpc: Note in fundrawtransaction doc, fee rate is for package -- #32679 doc: update tor docs to use bitcoind binary from path -- #32693 depends: fix cmake compatibility error for freetype -- #32696 doc: make -DWITH_ZMQ=ON explicit on build-unix.md -- #32708 rpc, doc: update listdescriptors RCP help -- 
#32711 doc: add missing packages for BSDs (cmake, gmake, curl) to depends/README.md -- #32719 doc, windows: CompanyName "Bitcoin" => "Bitcoin Core project" -- #32776 doc: taproot became always active in v24.0 -- #32777 doc: fix Transifex 404s -- #32846 doc: clarify that the "-j N" goes after the "--build build" part -- #32858 doc: Add workaround for vcpkg issue with paths with embedded spaces -- #33070 doc/zmq: fix unix socket path example -- #33088 doc: move cmake -B build -LH up in Unix build docs -- #33133 rpc: fix getpeerinfo ping duration unit docs -- #33119 rpc: Fix 'getdescriptoractivity' RPCHelpMan, add test to verify fix -- #33236 doc: Remove wrong and redundant doxygen tag - ### CI -- #32184 ci: Add workaround for vcpkg's libevent package -- #33261 ci: return to using dash in CentOS job - -### Misc - -- #32187 refactor: Remove spurious virtual from final ~CZMQNotificationInterface -- #32454 tracing: fix invalid argument in mempool_monitor -- #32771 contrib: tracing: Fix read of pmsg_type in p2p_monitor.py -- #33086 contrib: [tracing] fix pointer argument handling in mempool_monitor.py +- #32999 ci: Use APT_LLVM_V in msan task +- #33099 ci: allow for any libc++ instrumentation & use it for TSAN +- #33258 ci: use LLVM 21 Credits ======= Thanks to everyone who directly contributed to this release: -- 0xB10C -- achow101 -- Antoine Poinsot -- benthecarman -- bigspider -- Brandon Odiwuor -- brunoerg -- Bufo -- Christewart -- Crypt-iQ -- davidgumberg -- deadmanoz -- dergoegge -- enirox001 - fanquake -- furszy -- glozow -- instagibbs -- Hennadii Stepanov -- hodlinator -- ismaelsadeeq -- jb55 -- jlopp -- josibake -- laanwj -- luisschwab - MarcoFalke -- Martin Zumsande -- monlovesmango -- nervana21 -- pablomartin4btc -- rkrux -- romanz -- ryanofsky -- Sjors -- theStack -- willcl-ark -- zaidmstrr As well as to everyone that helped with translations on [Transifex](https://explore.transifex.com/bitcoin/bitcoin/). 
From 2717331981ec94fd616a08f31e643391a2118639 Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Mon, 8 Sep 2025 12:11:47 +0100 Subject: [PATCH 007/115] Fix benchmark CSV output The `SHA256AutoDetect` return output is used, among other use cases, to name benchmarks. Using a comma breaks the CSV output. This change replaces the comma with a semicolon, which fixes the issue. Github-Pull: #33340 Rebased-From: 790b440197bde322432a5bab161f1869b667e681 --- src/crypto/sha256.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/crypto/sha256.cpp b/src/crypto/sha256.cpp index 09c5d3123e8b..c5f495708d64 100644 --- a/src/crypto/sha256.cpp +++ b/src/crypto/sha256.cpp @@ -627,7 +627,7 @@ std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implem Transform = sha256_x86_shani::Transform; TransformD64 = TransformD64Wrapper; TransformD64_2way = sha256d64_x86_shani::Transform_2way; - ret = "x86_shani(1way,2way)"; + ret = "x86_shani(1way;2way)"; have_sse4 = false; // Disable SSE4/AVX2; have_avx2 = false; } @@ -641,14 +641,14 @@ std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implem #endif #if defined(ENABLE_SSE41) TransformD64_4way = sha256d64_sse41::Transform_4way; - ret += ",sse41(4way)"; + ret += ";sse41(4way)"; #endif } #if defined(ENABLE_AVX2) if (have_avx2 && have_avx && enabled_avx) { TransformD64_8way = sha256d64_avx2::Transform_8way; - ret += ",avx2(8way)"; + ret += ";avx2(8way)"; } #endif #endif // defined(HAVE_GETCPUID) @@ -682,7 +682,7 @@ std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implem Transform = sha256_arm_shani::Transform; TransformD64 = TransformD64Wrapper; TransformD64_2way = sha256d64_arm_shani::Transform_2way; - ret = "arm_shani(1way,2way)"; + ret = "arm_shani(1way;2way)"; } #endif #endif // DISABLE_OPTIMIZED_SHA256 From 324caa84977cc74ac19df605503483e59739773e Mon Sep 17 00:00:00 2001 From: fanquake Date: 
Wed, 10 Sep 2025 09:12:40 +0100 Subject: [PATCH 008/115] ci: always use tag for LLVM checkout Rather than trying to match the apt installed clang version, which is prone to intermittent issues. i.e #33345. Github-Pull: #33364 Rebased-From: b736052e39f1f466f63f261ace3dd2deba171e8a --- ci/test/01_base_install.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh index 25a03d5f5034..1b624f389424 100755 --- a/ci/test/01_base_install.sh +++ b/ci/test/01_base_install.sh @@ -55,10 +55,9 @@ if [ -n "$PIP_PACKAGES" ]; then fi if [[ -n "${USE_INSTRUMENTED_LIBCPP}" ]]; then + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-21.1.1" /llvm-project + if [ -n "${APT_LLVM_V}" ]; then - ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-$( clang --version | sed --silent 's@.*clang version \([0-9.]*\).*@\1@p' )" /llvm-project - else - ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-21.1.0" /llvm-project cmake -G Ninja -B /clang_build/ \ -DLLVM_ENABLE_PROJECTS="clang" \ From e97588fc3d1e1a02382312ade7d529c5b4b60016 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 4 Sep 2025 19:25:33 +0000 Subject: [PATCH 009/115] trace: Workaround GCC bug compiling with old systemtap Github-Pull: #33310 Rebased-From: 93a29ff2830162c8129d35c7b9beb43fab984503 --- cmake/module/FindUSDT.cmake | 4 ++++ src/util/trace.h | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/cmake/module/FindUSDT.cmake b/cmake/module/FindUSDT.cmake index 0be7c28ff58b..234a099f3fd3 100644 --- a/cmake/module/FindUSDT.cmake +++ b/cmake/module/FindUSDT.cmake @@ -36,6 +36,10 @@ if(USDT_INCLUDE_DIR) include(CheckCXXSourceCompiles) set(CMAKE_REQUIRED_INCLUDES ${USDT_INCLUDE_DIR}) check_cxx_source_compiles(" + #if defined(__arm__) + # define STAP_SDT_ARG_CONSTRAINT g + #endif + // Setting SDT_USE_VARIADIC lets systemtap (sys/sdt.h) know that 
we want to use // the optional variadic macros to define tracepoints. #define SDT_USE_VARIADIC 1 diff --git a/src/util/trace.h b/src/util/trace.h index 3deefeade370..ab005dd8bce2 100644 --- a/src/util/trace.h +++ b/src/util/trace.h @@ -9,6 +9,13 @@ #ifdef ENABLE_TRACING +// Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103395 +// systemtap 4.6 on 32-bit ARM triggers internal compiler error +// (this workaround is included in systemtap 4.7+) +#if defined(__arm__) +# define STAP_SDT_ARG_CONSTRAINT g +#endif + // Setting SDT_USE_VARIADIC lets systemtap (sys/sdt.h) know that we want to use // the optional variadic macros to define tracepoints. #define SDT_USE_VARIADIC 1 From 9b95ab5e9db1691be5f26fc5bc1c186777d2dc5b Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Wed, 21 May 2025 13:36:43 -0400 Subject: [PATCH 010/115] p2p: Add witness mutation check inside FillBlock Since #29412, we have not allowed mutated blocks to continue being processed immediately the block is received, but this is only done for the legacy BLOCK message. Extend these checks as belt-and-suspenders to not allow similar mutation strategies to affect relay by honest peers by applying the check inside PartiallyDownloadedBlock::FillBlock, immediately before returning READ_STATUS_OK. This also removes the extraneous CheckBlock call. 
Github-Pull: #32646 Rebased-From: bac9ee4830664c86c1cb3d38a5b19c722aae2f54 --- src/blockencodings.cpp | 17 +++------ src/blockencodings.h | 7 ++-- src/net_processing.cpp | 10 ++++- src/test/blockencodings_tests.cpp | 16 ++++---- src/test/fuzz/partially_downloaded_block.cpp | 39 ++++++-------------- 5 files changed, 37 insertions(+), 52 deletions(-) diff --git a/src/blockencodings.cpp b/src/blockencodings.cpp index 5f4061a71dca..5975a99faab4 100644 --- a/src/blockencodings.cpp +++ b/src/blockencodings.cpp @@ -180,7 +180,7 @@ bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const return txn_available[index] != nullptr; } -ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector& vtx_missing) +ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector& vtx_missing, bool segwit_active) { if (header.IsNull()) return READ_STATUS_INVALID; @@ -205,16 +205,11 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector< if (vtx_missing.size() != tx_missing_offset) return READ_STATUS_INVALID; - BlockValidationState state; - CheckBlockFn check_block = m_check_block_mock ? m_check_block_mock : CheckBlock; - if (!check_block(block, state, Params().GetConsensus(), /*fCheckPoW=*/true, /*fCheckMerkleRoot=*/true)) { - // TODO: We really want to just check merkle tree manually here, - // but that is expensive, and CheckBlock caches a block's - // "checked-status" (in the CBlock?). CBlock should be able to - // check its own merkle root and cache that check. - if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED) - return READ_STATUS_FAILED; // Possible Short ID collision - return READ_STATUS_CHECKBLOCK_FAILED; + // Check for possible mutations early now that we have a seemingly good block + IsBlockMutatedFn check_mutated{m_check_block_mutated_mock ? 
m_check_block_mutated_mock : IsBlockMutated}; + if (check_mutated(/*block=*/block, + /*check_witness_root=*/segwit_active)) { + return READ_STATUS_FAILED; // Possible Short ID collision } LogDebug(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size()); diff --git a/src/blockencodings.h b/src/blockencodings.h index c92aa05e8057..b1f82d18c5dc 100644 --- a/src/blockencodings.h +++ b/src/blockencodings.h @@ -141,15 +141,16 @@ class PartiallyDownloadedBlock { CBlockHeader header; // Can be overridden for testing - using CheckBlockFn = std::function; - CheckBlockFn m_check_block_mock{nullptr}; + using IsBlockMutatedFn = std::function; + IsBlockMutatedFn m_check_block_mutated_mock{nullptr}; explicit PartiallyDownloadedBlock(CTxMemPool* poolIn) : pool(poolIn) {} // extra_txn is a list of extra orphan/conflicted/etc transactions to look at ReadStatus InitData(const CBlockHeaderAndShortTxIDs& cmpctblock, const std::vector& extra_txn); bool IsTxAvailable(size_t index) const; - ReadStatus FillBlock(CBlock& block, const std::vector& vtx_missing); + // segwit_active enforces witness mutation checks just before reporting a healthy status + ReadStatus FillBlock(CBlock& block, const std::vector& vtx_missing, bool segwit_active); }; #endif // BITCOIN_BLOCKENCODINGS_H diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 1da3ec9d211e..0f1d6d98aa44 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -3314,7 +3314,11 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl } PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock; - ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn); + + // We should not have gotten this far in compact block processing unless it's attached to a known 
header + const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(partialBlock.header.hashPrevBlock))}; + ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn, + /*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT)); if (status == READ_STATUS_INVALID) { RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect Misbehaving(peer, "invalid compact block/non-matching block transactions"); @@ -4462,7 +4466,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } std::vector dummy; - status = tempBlock.FillBlock(*pblock, dummy); + const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock))}; + status = tempBlock.FillBlock(*pblock, dummy, + /*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT)); if (status == READ_STATUS_OK) { fBlockReconstructed = true; } diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp index ed95a8831e36..d40a0a94aef1 100644 --- a/src/test/blockencodings_tests.cpp +++ b/src/test/blockencodings_tests.cpp @@ -95,21 +95,21 @@ BOOST_AUTO_TEST_CASE(SimpleRoundTripTest) CBlock block2; { PartiallyDownloadedBlock tmp = partialBlock; - BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_INVALID); // No transactions + BOOST_CHECK(partialBlock.FillBlock(block2, {}, /*segwit_active=*/true) == READ_STATUS_INVALID); // No transactions partialBlock = tmp; } // Wrong transaction { PartiallyDownloadedBlock tmp = partialBlock; - partialBlock.FillBlock(block2, {block.vtx[2]}); // Current implementation doesn't check txn here, but don't require that + partialBlock.FillBlock(block2, {block.vtx[2]}, /*segwit_active=*/true); // Current implementation doesn't check txn here, but don't require that partialBlock = tmp; } bool mutated; 
BOOST_CHECK(block.hashMerkleRoot != BlockMerkleRoot(block2, &mutated)); CBlock block3; - BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[1]}) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[1]}, /*segwit_active=*/true) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block3.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString()); BOOST_CHECK(!mutated); @@ -182,14 +182,14 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) CBlock block2; { PartiallyDownloadedBlock tmp = partialBlock; - BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_INVALID); // No transactions + BOOST_CHECK(partialBlock.FillBlock(block2, {}, /*segwit_active=*/true) == READ_STATUS_INVALID); // No transactions partialBlock = tmp; } // Wrong transaction { PartiallyDownloadedBlock tmp = partialBlock; - partialBlock.FillBlock(block2, {block.vtx[1]}); // Current implementation doesn't check txn here, but don't require that + partialBlock.FillBlock(block2, {block.vtx[1]}, /*segwit_active=*/true); // Current implementation doesn't check txn here, but don't require that partialBlock = tmp; } BOOST_CHECK_EQUAL(pool.get(block.vtx[2]->GetHash()).use_count(), SHARED_TX_OFFSET + 2); // +2 because of partialBlock and block2 @@ -198,7 +198,7 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) CBlock block3; PartiallyDownloadedBlock partialBlockCopy = partialBlock; - BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[0]}) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[0]}, /*segwit_active=*/true) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block3.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString()); BOOST_CHECK(!mutated); @@ -252,7 +252,7 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest) CBlock block2; PartiallyDownloadedBlock partialBlockCopy = partialBlock; - 
BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.FillBlock(block2, {}, /*segwit_active=*/true) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString()); bool mutated; BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString()); @@ -300,7 +300,7 @@ BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest) CBlock block2; std::vector vtx_missing; - BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing, /*segwit_active=*/true) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString()); BOOST_CHECK(!mutated); diff --git a/src/test/fuzz/partially_downloaded_block.cpp b/src/test/fuzz/partially_downloaded_block.cpp index 82d781cd53c2..1a06ef8b0afc 100644 --- a/src/test/fuzz/partially_downloaded_block.cpp +++ b/src/test/fuzz/partially_downloaded_block.cpp @@ -32,14 +32,10 @@ void initialize_pdb() g_setup = testing_setup.get(); } -PartiallyDownloadedBlock::CheckBlockFn FuzzedCheckBlock(std::optional result) +PartiallyDownloadedBlock::IsBlockMutatedFn FuzzedIsBlockMutated(bool result) { - return [result](const CBlock&, BlockValidationState& state, const Consensus::Params&, bool, bool) { - if (result) { - return state.Invalid(*result); - } - - return true; + return [result](const CBlock& block, bool) { + return result; }; } @@ -111,36 +107,23 @@ FUZZ_TARGET(partially_downloaded_block, .init = initialize_pdb) skipped_missing |= (!pdb.IsTxAvailable(i) && skip); } - // Mock CheckBlock - bool fail_check_block{fuzzed_data_provider.ConsumeBool()}; - auto validation_result = - fuzzed_data_provider.PickValueInArray( - {BlockValidationResult::BLOCK_RESULT_UNSET, - BlockValidationResult::BLOCK_CONSENSUS, - BlockValidationResult::BLOCK_CACHED_INVALID, - 
BlockValidationResult::BLOCK_INVALID_HEADER, - BlockValidationResult::BLOCK_MUTATED, - BlockValidationResult::BLOCK_MISSING_PREV, - BlockValidationResult::BLOCK_INVALID_PREV, - BlockValidationResult::BLOCK_TIME_FUTURE, - BlockValidationResult::BLOCK_CHECKPOINT, - BlockValidationResult::BLOCK_HEADER_LOW_WORK}); - pdb.m_check_block_mock = FuzzedCheckBlock( - fail_check_block ? - std::optional{validation_result} : - std::nullopt); + bool segwit_active{fuzzed_data_provider.ConsumeBool()}; + + // Mock IsBlockMutated + bool fail_block_mutated{fuzzed_data_provider.ConsumeBool()}; + pdb.m_check_block_mutated_mock = FuzzedIsBlockMutated(fail_block_mutated); CBlock reconstructed_block; - auto fill_status{pdb.FillBlock(reconstructed_block, missing)}; + auto fill_status{pdb.FillBlock(reconstructed_block, missing, segwit_active)}; switch (fill_status) { case READ_STATUS_OK: assert(!skipped_missing); - assert(!fail_check_block); + assert(!fail_block_mutated); assert(block->GetHash() == reconstructed_block.GetHash()); break; case READ_STATUS_CHECKBLOCK_FAILED: [[fallthrough]]; case READ_STATUS_FAILED: - assert(fail_check_block); + assert(fail_block_mutated); break; case READ_STATUS_INVALID: break; From 4c940d47897bc380d3387dd6663c37c46b4020ec Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Tue, 3 Jun 2025 10:29:00 -0400 Subject: [PATCH 011/115] p2p: remove vestigial READ_STATUS_CHECKBLOCK_FAILED Github-Pull: #32646 Rebased-From: 28299ce77636d7563ec545d043cf1b61bd2f01c1 --- src/blockencodings.h | 2 -- src/net_processing.cpp | 18 +----------------- src/test/fuzz/partially_downloaded_block.cpp | 1 - 3 files changed, 1 insertion(+), 20 deletions(-) diff --git a/src/blockencodings.h b/src/blockencodings.h index b1f82d18c5dc..fce59bc56149 100644 --- a/src/blockencodings.h +++ b/src/blockencodings.h @@ -84,8 +84,6 @@ typedef enum ReadStatus_t READ_STATUS_OK, READ_STATUS_INVALID, // Invalid object, peer is sending bogus crap READ_STATUS_FAILED, // Failed to process object - 
READ_STATUS_CHECKBLOCK_FAILED, // Used only by FillBlock to indicate a - // failure in CheckBlock. } ReadStatus; class CBlockHeaderAndShortTxIDs { diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 0f1d6d98aa44..fa27ceb38acd 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -3335,23 +3335,7 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl return; } } else { - // Block is either okay, or possibly we received - // READ_STATUS_CHECKBLOCK_FAILED. - // Note that CheckBlock can only fail for one of a few reasons: - // 1. bad-proof-of-work (impossible here, because we've already - // accepted the header) - // 2. merkleroot doesn't match the transactions given (already - // caught in FillBlock with READ_STATUS_FAILED, so - // impossible here) - // 3. the block is otherwise invalid (eg invalid coinbase, - // block is too big, too many legacy sigops, etc). - // So if CheckBlock failed, #3 is the only possibility. - // Under BIP 152, we don't discourage the peer unless proof of work is - // invalid (we don't require all the stateless checks to have - // been run). This is handled below, so just treat this as - // though the block was successfully read, and rely on the - // handling in ProcessNewBlock to ensure the block index is - // updated, etc. 
+ // Block is okay for further processing RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // it is now an empty pointer fBlockRead = true; // mapBlockSource is used for potentially punishing peers and diff --git a/src/test/fuzz/partially_downloaded_block.cpp b/src/test/fuzz/partially_downloaded_block.cpp index 1a06ef8b0afc..c9635cae8cd6 100644 --- a/src/test/fuzz/partially_downloaded_block.cpp +++ b/src/test/fuzz/partially_downloaded_block.cpp @@ -121,7 +121,6 @@ FUZZ_TARGET(partially_downloaded_block, .init = initialize_pdb) assert(!fail_block_mutated); assert(block->GetHash() == reconstructed_block.GetHash()); break; - case READ_STATUS_CHECKBLOCK_FAILED: [[fallthrough]]; case READ_STATUS_FAILED: assert(fail_block_mutated); break; From 569ceb0df46fc619eed33f56b5b36f617c37bae7 Mon Sep 17 00:00:00 2001 From: Eugene Siegel Date: Wed, 3 Sep 2025 12:44:23 -0400 Subject: [PATCH 012/115] net: check for empty header before calling FillBlock Previously in debug builds, this would cause an Assume crash if FillBlock had been called previously. This could happen when multiple blocktxn messages were received. Co-Authored-By: Greg Sanders Github-Pull: #33296 Rebased-From: 5e585a0fc4fd68dd7b4982054b34deae2e7aeb89 --- src/net_processing.cpp | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index fa27ceb38acd..d9c2163c1d28 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -3315,6 +3315,16 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock; + if (partialBlock.header.IsNull()) { + // It is possible for the header to be empty if a previous call to FillBlock wiped the header, but left + // the PartiallyDownloadedBlock pointer around (i.e. did not call RemoveBlockRequest). In this case, we + // should not call LookupBlockIndex below. 
+ RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); + Misbehaving(peer, "previous compact block reconstruction attempt failed"); + LogDebug(BCLog::NET, "Peer %d sent compact block transactions multiple times", pfrom.GetId()); + return; + } + // We should not have gotten this far in compact block processing unless it's attached to a known header const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(partialBlock.header.hashPrevBlock))}; ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn, @@ -3326,6 +3336,9 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl } else if (status == READ_STATUS_FAILED) { if (first_in_flight) { // Might have collided, fall back to getdata now :( + // We keep the failed partialBlock to disallow processing another compact block announcement from the same + // peer for the same block. We let the full block download below continue under the same m_downloading_since + // timer. std::vector invs; invs.emplace_back(MSG_BLOCK | GetFetchFlags(peer), block_transactions.blockhash); MakeAndPushMessage(pfrom, NetMsgType::GETDATA, invs); From 1288d44804cd6ecd8601d0aef55e6fbf500d2f31 Mon Sep 17 00:00:00 2001 From: Eugene Siegel Date: Wed, 3 Sep 2025 12:44:52 -0400 Subject: [PATCH 013/115] test: send duplicate blocktxn message in p2p_compactblocks.py Add test_multiple_blocktxn_response that checks that the peer is disconnected. 
Github-Pull: #33296 Rebased-From: 8b6264768030db1840041abeeaeefd6c227a2644 --- test/functional/p2p_compactblocks.py | 42 ++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py index ca36b2fbc069..da8a0aed9ac5 100755 --- a/test/functional/p2p_compactblocks.py +++ b/test/functional/p2p_compactblocks.py @@ -566,6 +566,42 @@ def test_incorrect_blocktxn_response(self, test_node): test_node.send_and_ping(msg_block(block)) assert_equal(int(node.getbestblockhash(), 16), block.sha256) + # Multiple blocktxn responses will cause a node to get disconnected. + def test_multiple_blocktxn_response(self, test_node): + node = self.nodes[0] + utxo = self.utxos[0] + + block = self.build_block_with_transactions(node, utxo, 2) + + # Send compact block + comp_block = HeaderAndShortIDs() + comp_block.initialize_from_block(block, prefill_list=[0], use_witness=True) + test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) + absolute_indexes = [] + with p2p_lock: + assert "getblocktxn" in test_node.last_message + absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute() + assert_equal(absolute_indexes, [1, 2]) + + # Send a blocktxn that does not succeed in reconstruction, triggering + # getdata fallback. + msg = msg_blocktxn() + msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[2]] + [block.vtx[1]]) + test_node.send_and_ping(msg) + + # Tip should not have updated + assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) + + # We should receive a getdata request + test_node.wait_for_getdata([block.sha256], timeout=10) + assert test_node.last_message["getdata"].inv[0].type == MSG_BLOCK or \ + test_node.last_message["getdata"].inv[0].type == MSG_BLOCK | MSG_WITNESS_FLAG + + # Send the same blocktxn and assert the sender gets disconnected. 
+ with node.assert_debug_log(['previous compact block reconstruction attempt failed']): + test_node.send_message(msg) + test_node.wait_for_disconnect() + def test_getblocktxn_handler(self, test_node): node = self.nodes[0] # bitcoind will not send blocktxn responses for blocks whose height is @@ -957,6 +993,12 @@ def run_test(self): self.log.info("Testing handling of invalid compact blocks...") self.test_invalid_tx_in_compactblock(self.segwit_node) + self.log.info("Testing handling of multiple blocktxn responses...") + self.test_multiple_blocktxn_response(self.segwit_node) + + # The previous test will lead to a disconnection. Reconnect before continuing. + self.segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn()) + self.log.info("Testing invalid index in cmpctblock message...") self.test_invalid_cmpctblock_message() From 61cdc04a832cc5dfe98c48f8592c4de513258304 Mon Sep 17 00:00:00 2001 From: Martin Zumsande Date: Fri, 12 Sep 2025 17:29:04 -0400 Subject: [PATCH 014/115] net: Do not apply whitelist permission to onion inbounds Tor inbound connections do not reveal the peer's actual network address. Therefore do not apply whitelist permissions to them. 
Co-authored-by: Vasil Dimov Github-Pull: #33395 Rebased-From: f563ce90818d486d2a199439d2f6ba39cd106352 --- src/net.cpp | 11 +++++++---- src/net.h | 2 +- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index 735985a84145..7684877ec352 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -575,9 +575,9 @@ void CNode::CloseSocketDisconnect() m_i2p_sam_session.reset(); } -void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr, const std::vector& ranges) const { +void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags& flags, std::optional addr, const std::vector& ranges) const { for (const auto& subnet : ranges) { - if (subnet.m_subnet.Match(addr)) { + if (addr.has_value() && subnet.m_subnet.Match(addr.value())) { NetPermissions::AddFlag(flags, subnet.m_flags); } } @@ -1767,7 +1767,11 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr&& sock, { int nInbound = 0; - AddWhitelistPermissionFlags(permission_flags, addr, vWhitelistedRangeIncoming); + const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end(); + + // Tor inbound connections do not reveal the peer's actual network address. + // Therefore do not apply address-based whitelist permissions to them. + AddWhitelistPermissionFlags(permission_flags, inbound_onion ? std::optional{} : addr, vWhitelistedRangeIncoming); { LOCK(m_nodes_mutex); @@ -1822,7 +1826,6 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr&& sock, NodeId id = GetNewNodeId(); uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize(); - const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end(); // The V2Transport transparently falls back to V1 behavior when an incoming V1 connection is // detected, so use it whenever we signal NODE_P2P_V2. 
ServiceFlags local_services = GetLocalServices(); diff --git a/src/net.h b/src/net.h index e64d9a67f460..e025b20bcdef 100644 --- a/src/net.h +++ b/src/net.h @@ -1364,7 +1364,7 @@ class CConnman bool AttemptToEvictConnection(); CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type, bool use_v2transport) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex); - void AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr, const std::vector& ranges) const; + void AddWhitelistPermissionFlags(NetPermissionFlags& flags, std::optional addr, const std::vector& ranges) const; void DeleteNode(CNode* pnode); From 9bc4afb62cf04a41b62fe279f0db3d87e700cb3d Mon Sep 17 00:00:00 2001 From: fanquake Date: Tue, 9 Sep 2025 10:15:08 +0100 Subject: [PATCH 015/115] doc: update release notes for 29.x --- doc/release-notes.md | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/doc/release-notes.md b/doc/release-notes.md index b73e52dc570b..0325d3a3e288 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -1,6 +1,6 @@ -Bitcoin Core version 29.x is now available from: +Bitcoin Core version 29.2rc1 is now available from: - + This release includes various bug fixes and performance improvements, as well as updated translations. @@ -37,19 +37,37 @@ unsupported systems. 
Notable changes =============== +### P2P + +- #32646 p2p: Add witness mutation check inside FillBlock +- #33296 net: check for empty header before calling FillBlock +- #33395 net: do not apply whitelist permissions to onion inbounds + ### CI - #32999 ci: Use APT_LLVM_V in msan task - #33099 ci: allow for any libc++ intrumentation & use it for TSAN - #33258 ci: use LLVM 21 +- #33364 ci: always use tag for LLVM checkout + +### Misc + +- #33310 trace: Workaround GCC bug compiling with old systemtap +- #33340 Fix benchmark CSV output Credits ======= Thanks to everyone who directly contributed to this release: +- Eugene Siegel - fanquake +- Greg Sanders +- Hennadii Stepanov +- Luke Dashjr - MarcoFalke +- Martin Zumsande +- Vasil Dimov As well as to everyone that helped with translations on [Transifex](https://explore.transifex.com/bitcoin/bitcoin/). From 461dd13fafa6f8175e2be4d96e8728e667ba4d69 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 17 Sep 2025 15:47:34 +0100 Subject: [PATCH 016/115] build: bump version to v29.2rc1 --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index dc613a7655f3..05a86a1d97ff 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -28,9 +28,9 @@ get_directory_property(precious_variables CACHE_VARIABLES) #============================= set(CLIENT_NAME "Bitcoin Core") set(CLIENT_VERSION_MAJOR 29) -set(CLIENT_VERSION_MINOR 1) +set(CLIENT_VERSION_MINOR 2) set(CLIENT_VERSION_BUILD 0) -set(CLIENT_VERSION_RC 0) +set(CLIENT_VERSION_RC 1) set(CLIENT_VERSION_IS_RELEASE "true") set(COPYRIGHT_YEAR "2025") From f2bd79f80c74a2b77f14954ac65679417697a332 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 17 Sep 2025 15:54:29 +0100 Subject: [PATCH 017/115] doc: update manual pages for v29.2rc1 --- doc/man/bitcoin-cli.1 | 6 +++--- doc/man/bitcoin-qt.1 | 6 +++--- doc/man/bitcoin-tx.1 | 6 +++--- doc/man/bitcoin-util.1 | 6 +++--- doc/man/bitcoin-wallet.1 | 6 +++--- doc/man/bitcoind.1 | 6 
+++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/doc/man/bitcoin-cli.1 b/doc/man/bitcoin-cli.1 index 428ddd3e2a24..a8dc092a6ccc 100644 --- a/doc/man/bitcoin-cli.1 +++ b/doc/man/bitcoin-cli.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-CLI "1" "September 2025" "bitcoin-cli v29.1.0" "User Commands" +.TH BITCOIN-CLI "1" "September 2025" "bitcoin-cli v29.2.0rc1" "User Commands" .SH NAME -bitcoin-cli \- manual page for bitcoin-cli v29.1.0 +bitcoin-cli \- manual page for bitcoin-cli v29.2.0rc1 .SH SYNOPSIS .B bitcoin-cli [\fI\,options\/\fR] \fI\, \/\fR[\fI\,params\/\fR] @@ -15,7 +15,7 @@ bitcoin-cli \- manual page for bitcoin-cli v29.1.0 .B bitcoin-cli [\fI\,options\/\fR] \fI\,help \/\fR .SH DESCRIPTION -Bitcoin Core RPC client version v29.1.0 +Bitcoin Core RPC client version v29.2.0rc1 .PP The bitcoin\-cli utility provides a command line interface to interact with a Bitcoin Core RPC server. .PP diff --git a/doc/man/bitcoin-qt.1 b/doc/man/bitcoin-qt.1 index 3665a6a48ae8..7821b8fb440f 100644 --- a/doc/man/bitcoin-qt.1 +++ b/doc/man/bitcoin-qt.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-QT "1" "September 2025" "bitcoin-qt v29.1.0" "User Commands" +.TH BITCOIN-QT "1" "September 2025" "bitcoin-qt v29.2.0rc1" "User Commands" .SH NAME -bitcoin-qt \- manual page for bitcoin-qt v29.1.0 +bitcoin-qt \- manual page for bitcoin-qt v29.2.0rc1 .SH SYNOPSIS .B bitcoin-qt [\fI\,options\/\fR] [\fI\,URI\/\fR] .SH DESCRIPTION -Bitcoin Core version v29.1.0 +Bitcoin Core version v29.2.0rc1 .PP The bitcoin\-qt application provides a graphical interface for interacting with Bitcoin Core. .PP diff --git a/doc/man/bitcoin-tx.1 b/doc/man/bitcoin-tx.1 index 16058f1bf941..a14a6be60243 100644 --- a/doc/man/bitcoin-tx.1 +++ b/doc/man/bitcoin-tx.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-TX "1" "September 2025" "bitcoin-tx v29.1.0" "User Commands" +.TH BITCOIN-TX "1" "September 2025" "bitcoin-tx v29.2.0rc1" "User Commands" .SH NAME -bitcoin-tx \- manual page for bitcoin-tx v29.1.0 +bitcoin-tx \- manual page for bitcoin-tx v29.2.0rc1 .SH SYNOPSIS .B bitcoin-tx [\fI\,options\/\fR] \fI\, \/\fR[\fI\,commands\/\fR] @@ -9,7 +9,7 @@ bitcoin-tx \- manual page for bitcoin-tx v29.1.0 .B bitcoin-tx [\fI\,options\/\fR] \fI\,-create \/\fR[\fI\,commands\/\fR] .SH DESCRIPTION -Bitcoin Core bitcoin\-tx utility version v29.1.0 +Bitcoin Core bitcoin\-tx utility version v29.2.0rc1 .PP The bitcoin\-tx tool is used for creating and modifying bitcoin transactions. .PP diff --git a/doc/man/bitcoin-util.1 b/doc/man/bitcoin-util.1 index a103bf40a5bd..e0cc27e2d772 100644 --- a/doc/man/bitcoin-util.1 +++ b/doc/man/bitcoin-util.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-UTIL "1" "September 2025" "bitcoin-util v29.1.0" "User Commands" +.TH BITCOIN-UTIL "1" "September 2025" "bitcoin-util v29.2.0rc1" "User Commands" .SH NAME -bitcoin-util \- manual page for bitcoin-util v29.1.0 +bitcoin-util \- manual page for bitcoin-util v29.2.0rc1 .SH SYNOPSIS .B bitcoin-util [\fI\,options\/\fR] [\fI\,command\/\fR] @@ -9,7 +9,7 @@ bitcoin-util \- manual page for bitcoin-util v29.1.0 .B bitcoin-util [\fI\,options\/\fR] \fI\,grind \/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-util utility version v29.1.0 +Bitcoin Core bitcoin\-util utility version v29.2.0rc1 .PP The bitcoin\-util tool provides bitcoin related functionality that does not rely on the ability to access a running node. Available [commands] are listed below. .SH OPTIONS diff --git a/doc/man/bitcoin-wallet.1 b/doc/man/bitcoin-wallet.1 index b63494dc479b..58bbf2715b3b 100644 --- a/doc/man/bitcoin-wallet.1 +++ b/doc/man/bitcoin-wallet.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-WALLET "1" "September 2025" "bitcoin-wallet v29.1.0" "User Commands" +.TH BITCOIN-WALLET "1" "September 2025" "bitcoin-wallet v29.2.0rc1" "User Commands" .SH NAME -bitcoin-wallet \- manual page for bitcoin-wallet v29.1.0 +bitcoin-wallet \- manual page for bitcoin-wallet v29.2.0rc1 .SH SYNOPSIS .B bitcoin-wallet [\fI\,options\/\fR] \fI\,\/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-wallet utility version v29.1.0 +Bitcoin Core bitcoin\-wallet utility version v29.2.0rc1 .PP bitcoin\-wallet is an offline tool for creating and interacting with Bitcoin Core wallet files. .PP diff --git a/doc/man/bitcoind.1 b/doc/man/bitcoind.1 index b8ee6bab52a1..0846f3e0619e 100644 --- a/doc/man/bitcoind.1 +++ b/doc/man/bitcoind.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIND "1" "September 2025" "bitcoind v29.1.0" "User Commands" +.TH BITCOIND "1" "September 2025" "bitcoind v29.2.0rc1" "User Commands" .SH NAME -bitcoind \- manual page for bitcoind v29.1.0 +bitcoind \- manual page for bitcoind v29.2.0rc1 .SH SYNOPSIS .B bitcoind [\fI\,options\/\fR] .SH DESCRIPTION -Bitcoin Core daemon version v29.1.0 +Bitcoin Core daemon version v29.2.0rc1 .PP The Bitcoin Core daemon (bitcoind) is a headless program that connects to the Bitcoin network to validate and relay transactions and blocks, as well as relaying addresses. 
.PP From f63b8e960d5d06cdbbc360aaf781c13fd5aca172 Mon Sep 17 00:00:00 2001 From: will Date: Sat, 26 Jul 2025 09:41:10 +0100 Subject: [PATCH 018/115] ci: add configure environment action Github-Pull: #32989 Rebased-From: b8fcc9fcbcd --- .../actions/configure-environment/action.yml | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 .github/actions/configure-environment/action.yml diff --git a/.github/actions/configure-environment/action.yml b/.github/actions/configure-environment/action.yml new file mode 100644 index 000000000000..aae5016bdce9 --- /dev/null +++ b/.github/actions/configure-environment/action.yml @@ -0,0 +1,27 @@ +name: 'Configure environment' +description: 'Configure CI, cache and container name environment variables' +runs: + using: 'composite' + steps: + - name: Set CI and cache directories + shell: bash + run: | + echo "BASE_ROOT_DIR=${{ runner.temp }}" >> "$GITHUB_ENV" + echo "BASE_BUILD_DIR=${{ runner.temp }}/build" >> "$GITHUB_ENV" + echo "CCACHE_DIR=${{ runner.temp }}/ccache_dir" >> $GITHUB_ENV + echo "DEPENDS_DIR=${{ runner.temp }}/depends" >> "$GITHUB_ENV" + echo "BASE_CACHE=${{ runner.temp }}/depends/built" >> $GITHUB_ENV + echo "SOURCES_PATH=${{ runner.temp }}/depends/sources" >> $GITHUB_ENV + echo "PREVIOUS_RELEASES_DIR=${{ runner.temp }}/previous_releases" >> $GITHUB_ENV + + - name: Set cache hashes + shell: bash + run: | + echo "DEPENDS_HASH=$(git ls-tree HEAD depends "ci/test/$FILE_ENV" | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV + echo "PREVIOUS_RELEASES_HASH=$(git ls-tree HEAD test/get_previous_releases.py | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV + + - name: Get container name + shell: bash + run: | + source $FILE_ENV + echo "CONTAINER_NAME=$CONTAINER_NAME" >> "$GITHUB_ENV" From 301aa5d814b620287b65d93399a20a794659cc79 Mon Sep 17 00:00:00 2001 From: will Date: Mon, 16 Jun 2025 20:02:20 +0100 Subject: [PATCH 019/115] ci: add caching actions Github-Pull: #32989 Rebased-From: b232b0fa5e9 Add "Restore" and 
"Save" caching actions. These actions reduce boilerplate in the main ci.yml configuration file. These actions are implemented so that caches will be saved on `push` only. When a pull request is opened it will cache hit on the caches from the lastest push, or in the case of depends will hit on any matching depends hash, falling back to partial matches. Depends caches are hashed using `$(git ls-tree HEAD depends "ci/test/$FILE_ENV" | sha256sum | cut -d' ' -f1)` and this hash is passed in as an input to the actions. This means we direct cache hit in cases where depends would not be re-built, otherwise falling back to a partial match. Previous releases cache is hashed similarly to depends, but using the test/get_previous_releases.py file. The cirruslabs cache action will fallback transparently to GitHub's cache in the case that the job is not being run on a Cirrus Runner, making these compatible with running on forks (on free GH hardware). --- .github/actions/restore-caches/action.yml | 47 +++++++++++++++++++++++ .github/actions/save-caches/action.yml | 39 +++++++++++++++++++ 2 files changed, 86 insertions(+) create mode 100644 .github/actions/restore-caches/action.yml create mode 100644 .github/actions/save-caches/action.yml diff --git a/.github/actions/restore-caches/action.yml b/.github/actions/restore-caches/action.yml new file mode 100644 index 000000000000..8dc35d4902ed --- /dev/null +++ b/.github/actions/restore-caches/action.yml @@ -0,0 +1,47 @@ +name: 'Restore Caches' +description: 'Restore ccache, depends sources, and built depends caches' +runs: + using: 'composite' + steps: + - name: Restore Ccache cache + id: ccache-cache + uses: cirruslabs/cache/restore@v4 + with: + path: ${{ env.CCACHE_DIR }} + key: ccache-${{ env.CONTAINER_NAME }}-${{ github.run_id }} + restore-keys: | + ccache-${{ env.CONTAINER_NAME }}- + + - name: Restore depends sources cache + id: depends-sources + uses: cirruslabs/cache/restore@v4 + with: + path: ${{ env.SOURCES_PATH }} + key: 
depends-sources-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }} + restore-keys: | + depends-sources-${{ env.CONTAINER_NAME }}- + + - name: Restore built depends cache + id: depends-built + uses: cirruslabs/cache/restore@v4 + with: + path: ${{ env.BASE_CACHE }} + key: depends-built-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }} + restore-keys: | + depends-built-${{ env.CONTAINER_NAME }}- + + - name: Restore previous releases cache + id: previous-releases + uses: cirruslabs/cache/restore@v4 + with: + path: ${{ env.PREVIOUS_RELEASES_DIR }} + key: previous-releases-${{ env.CONTAINER_NAME }}-${{ env.PREVIOUS_RELEASES_HASH }} + restore-keys: | + previous-releases-${{ env.CONTAINER_NAME }}- + + - name: export cache hits + shell: bash + run: | + echo "depends-sources-cache-hit=${{ steps.depends-sources.outputs.cache-hit }}" >> $GITHUB_ENV + echo "depends-built-cache-hit=${{ steps.depends-built.outputs.cache-hit }}" >> $GITHUB_ENV + echo "previous-releases-cache-hit=${{ steps.previous-releases.outputs.cache-hit }}" >> $GITHUB_ENV diff --git a/.github/actions/save-caches/action.yml b/.github/actions/save-caches/action.yml new file mode 100644 index 000000000000..0e3b31246c61 --- /dev/null +++ b/.github/actions/save-caches/action.yml @@ -0,0 +1,39 @@ +name: 'Save Caches' +description: 'Save ccache, depends sources, and built depends caches' +runs: + using: 'composite' + steps: + - name: debug cache hit inputs + shell: bash + run: | + echo "depends sources direct cache hit to primary key: ${{ env.depends-sources-cache-hit }}" + echo "depends built direct cache hit to primary key: ${{ env.depends-built-cache-hit }}" + echo "previous releases direct cache hit to primary key: ${{ env.previous-releases-cache-hit }}" + + - name: Save Ccache cache + uses: cirruslabs/cache/save@v4 + if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) }} + with: + path: ${{ env.CCACHE_DIR }} + key: ccache-${{ env.CONTAINER_NAME }}-${{ github.run_id 
}} + + - name: Save depends sources cache + uses: cirruslabs/cache/save@v4 + if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) && (env.depends-sources-cache-hit != 'true') }} + with: + path: ${{ env.SOURCES_PATH }} + key: depends-sources-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }} + + - name: Save built depends cache + uses: cirruslabs/cache/save@v4 + if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) && (env.depends-built-cache-hit != 'true' )}} + with: + path: ${{ env.BASE_CACHE }} + key: depends-built-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }} + + - name: Save previous releases cache + uses: cirruslabs/cache/save@v4 + if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) && (env.previous-releases-cache-hit != 'true' )}} + with: + path: ${{ env.PREVIOUS_RELEASES_DIR }} + key: previous-releases-${{ env.CONTAINER_NAME }}-${{ env.PREVIOUS_RELEASES_HASH }} From 954c1a55e4a6322267071f5bffeb3188a6ac7d59 Mon Sep 17 00:00:00 2001 From: will Date: Mon, 28 Jul 2025 16:05:32 +0100 Subject: [PATCH 020/115] ci: add REPO_USE_CIRRUS_RUNNERS Github-Pull: #32989 Rebased-From: 33ba073df7a If set, Cirrus runners will be used on pushes to, and pull requests against, this repository. Forks can set this if they have their own cirrus runners. 
--- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2cac4eab0b31..3ce17dff3e5d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,6 +20,7 @@ concurrency: env: CI_FAILFAST_TEST_LEAVE_DANGLING: 1 # GHA does not care about dangling processes and setting this variable avoids killing the CI script itself on error MAKEJOBS: '-j10' + REPO_USE_CIRRUS_RUNNERS: 'bitcoin/bitcoin' # Use cirrus runners and cache for this repo, instead of falling back to the slow GHA runners jobs: test-each-commit: From 1faf918a169b76e69a486eb7fc8d88429b77b4b6 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:37:26 +0100 Subject: [PATCH 021/115] ci: add configure-docker action Github-Pull: #32989 Rebased-From: fdf64e55324 Another action to reduce boilerplate in the main ci.yml file. This action will set up a docker builder compatible with caching build layers to a container registry using the `gha` build driver. It will then configure the docker build cache args. 
--- .github/actions/configure-docker/action.yml | 52 +++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 .github/actions/configure-docker/action.yml diff --git a/.github/actions/configure-docker/action.yml b/.github/actions/configure-docker/action.yml new file mode 100644 index 000000000000..c78df86b6cf1 --- /dev/null +++ b/.github/actions/configure-docker/action.yml @@ -0,0 +1,52 @@ +name: 'Configure Docker' +description: 'Set up Docker build driver and configure build cache args' +inputs: + use-cirrus: + description: 'Use cirrus cache' + required: true +runs: + using: 'composite' + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + # Use host network to allow access to cirrus gha cache running on the host + driver-opts: | + network=host + + # This is required to allow buildkit to access the actions cache + - name: Expose actions cache variables + uses: actions/github-script@v6 + with: + script: | + core.exportVariable('ACTIONS_CACHE_URL', process.env['ACTIONS_CACHE_URL']) + core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env['ACTIONS_RUNTIME_TOKEN']) + + - name: Construct docker build cache args + shell: bash + run: | + # Configure docker build cache backend + # + # On forks the gha cache will work but will use Github's cache backend. 
+ # Docker will check for variables $ACTIONS_CACHE_URL, $ACTIONS_RESULTS_URL and $ACTIONS_RUNTIME_TOKEN + # which are set automatically when running on GitHub infra: https://docs.docker.com/build/cache/backends/gha/#synopsis + + # Use cirrus cache host + if [[ ${{ inputs.use-cirrus }} == 'true' ]]; then + url_args="url=${CIRRUS_CACHE_HOST},url_v2=${CIRRUS_CACHE_HOST}" + else + url_args="" + fi + + # Always optimistically --cache‑from in case a cache blob exists + args=(--cache-from "type=gha${url_args:+,${url_args}},scope=${CONTAINER_NAME}") + + # If this is a push to the default branch, also add --cache‑to to save the cache + if [[ ${{ github.event_name }} == "push" && ${{ github.ref_name }} == ${{ github.event.repository.default_branch }} ]]; then + args+=(--cache-to "type=gha${url_args:+,${url_args}},mode=max,ignore-error=true,scope=${CONTAINER_NAME}") + fi + + # Always `--load` into docker images (needed when using the `docker-container` build driver). + args+=(--load) + + echo "DOCKER_BUILD_CACHE_ARG=${args[*]}" >> $GITHUB_ENV From f3089fb2cfdba533fba1298e909628e5fe7dabb9 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 022/115] ci: use buildx in ci Github-Pull: #32989 Rebased-From: 94a09325475 Using buildx is required to properly load the correct driver, for use with registry caching. Neither build, nor BUILDKIT=1 currently do this properly. Use of `docker buildx build` is compatible with podman. 
--- ci/test/02_run_container.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ci/test/02_run_container.sh b/ci/test/02_run_container.sh index 8351fd4e02ad..087b4c67809c 100755 --- a/ci/test/02_run_container.sh +++ b/ci/test/02_run_container.sh @@ -46,8 +46,10 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then DOCKER_BUILD_CACHE_ARG="--cache-from type=local,src=${DOCKER_BUILD_CACHE_OLD_DIR} --cache-to type=local,dest=${DOCKER_BUILD_CACHE_NEW_DIR},mode=max" fi + # Use buildx unconditionally + # Using buildx is required to properly load the correct driver, for use with registry caching. Neither build, nor BUILDKIT=1 currently do this properly # shellcheck disable=SC2086 - DOCKER_BUILDKIT=1 docker build \ + docker buildx build \ --file "${BASE_READ_ONLY_DIR}/ci/test_imagefile" \ --build-arg "CI_IMAGE_NAME_TAG=${CI_IMAGE_NAME_TAG}" \ --build-arg "FILE_ENV=${FILE_ENV}" \ From 0a649d07c994b1a6957131c8bb3a1d2e8d53e559 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 023/115] ci: use docker build cache arg directly Github-Pull: #32989 Rebased-From: 18f6be09d02 Reverts: e87429a2d0f23eb59526d335844fa5ff5b50b21f This was added in PR #31545 with the intention that self-hosted runners might use it to save build cache. As we are not using hosted runners with a registry build cache, the bulk of this commit can be reverted, simply using the value of $DOCKER_BUILD_CACHE_ARG in the script. 
link: https://github.com/bitcoin/bitcoin/pull/31545 --- ci/test/02_run_container.sh | 32 -------------------------------- 1 file changed, 32 deletions(-) diff --git a/ci/test/02_run_container.sh b/ci/test/02_run_container.sh index 087b4c67809c..2031dbd85a55 100755 --- a/ci/test/02_run_container.sh +++ b/ci/test/02_run_container.sh @@ -23,29 +23,6 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then fi echo "Creating $CI_IMAGE_NAME_TAG container to run in" - DOCKER_BUILD_CACHE_ARG="" - DOCKER_BUILD_CACHE_TEMPDIR="" - DOCKER_BUILD_CACHE_OLD_DIR="" - DOCKER_BUILD_CACHE_NEW_DIR="" - # If set, use an `docker build` cache directory on the CI host - # to cache docker image layers for the CI container image. - # This cache can be multiple GB in size. Prefixed with DANGER - # as setting it removes (old cache) files from the host. - if [ "$DANGER_DOCKER_BUILD_CACHE_HOST_DIR" ]; then - # Directory where the current cache for this run could be. If not existing - # or empty, "docker build" will warn, but treat it as cache-miss and continue. - DOCKER_BUILD_CACHE_OLD_DIR="${DANGER_DOCKER_BUILD_CACHE_HOST_DIR}/${CONTAINER_NAME}" - # Temporary directory for a newly created cache. We can't write the new - # cache into OLD_DIR directly, as old cache layers would not be removed. - # The NEW_DIR contents are moved to OLD_DIR after OLD_DIR has been cleared. - # This happens after `docker build`. If a task fails or is aborted, the - # DOCKER_BUILD_CACHE_TEMPDIR might be retained on the host. If the host isn't - # ephemeral, it has to take care of cleaning old TEMPDIR's up. 
- DOCKER_BUILD_CACHE_TEMPDIR="$(mktemp --directory ci-docker-build-cache-XXXXXXXXXX)" - DOCKER_BUILD_CACHE_NEW_DIR="${DOCKER_BUILD_CACHE_TEMPDIR}/${CONTAINER_NAME}" - DOCKER_BUILD_CACHE_ARG="--cache-from type=local,src=${DOCKER_BUILD_CACHE_OLD_DIR} --cache-to type=local,dest=${DOCKER_BUILD_CACHE_NEW_DIR},mode=max" - fi - # Use buildx unconditionally # Using buildx is required to properly load the correct driver, for use with registry caching. Neither build, nor BUILDKIT=1 currently do this properly # shellcheck disable=SC2086 @@ -60,15 +37,6 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then $DOCKER_BUILD_CACHE_ARG \ "${BASE_READ_ONLY_DIR}" - if [ "$DANGER_DOCKER_BUILD_CACHE_HOST_DIR" ]; then - if [ -e "${DOCKER_BUILD_CACHE_NEW_DIR}/index.json" ]; then - echo "Removing the existing docker build cache in ${DOCKER_BUILD_CACHE_OLD_DIR}" - rm -rf "${DOCKER_BUILD_CACHE_OLD_DIR}" - echo "Moving the contents of ${DOCKER_BUILD_CACHE_NEW_DIR} to ${DOCKER_BUILD_CACHE_OLD_DIR}" - mv "${DOCKER_BUILD_CACHE_NEW_DIR}" "${DOCKER_BUILD_CACHE_OLD_DIR}" - fi - fi - docker volume create "${CONTAINER_NAME}_ccache" || true docker volume create "${CONTAINER_NAME}_depends" || true docker volume create "${CONTAINER_NAME}_depends_sources" || true From af086431e86c82a5e40b05270f39c70cfe413c7b Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 024/115] ci: have base install run in right dir Github-Pull: #32989 Rebased-From: 9c2b96e0d03 This sets the build dir at build time so that Apple SDK gets installed in the correct/expected location for the runtime to find it. 
Co-authored-by: Max Edwards --- ci/test/02_run_container.sh | 1 + ci/test_imagefile | 3 +++ 2 files changed, 4 insertions(+) diff --git a/ci/test/02_run_container.sh b/ci/test/02_run_container.sh index 2031dbd85a55..131b3c614810 100755 --- a/ci/test/02_run_container.sh +++ b/ci/test/02_run_container.sh @@ -30,6 +30,7 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then --file "${BASE_READ_ONLY_DIR}/ci/test_imagefile" \ --build-arg "CI_IMAGE_NAME_TAG=${CI_IMAGE_NAME_TAG}" \ --build-arg "FILE_ENV=${FILE_ENV}" \ + --build-arg "BASE_ROOT_DIR=${BASE_ROOT_DIR}" \ $MAYBE_CPUSET \ --platform="${CI_IMAGE_PLATFORM}" \ --label="${CI_IMAGE_LABEL}" \ diff --git a/ci/test_imagefile b/ci/test_imagefile index f8b5eea1c88a..224141b138f5 100644 --- a/ci/test_imagefile +++ b/ci/test_imagefile @@ -10,6 +10,9 @@ FROM ${CI_IMAGE_NAME_TAG} ARG FILE_ENV ENV FILE_ENV=${FILE_ENV} +ARG BASE_ROOT_DIR +ENV BASE_ROOT_DIR=${BASE_ROOT_DIR} + COPY ./ci/retry/retry /usr/bin/retry COPY ./ci/test/00_setup_env.sh ./${FILE_ENV} ./ci/test/01_base_install.sh /ci_container_base/ci/test/ From f9f3e8b68616dfb9e18082d191b87a457c4100da Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 025/115] ci: add Cirrus cache host Github-Pull: #32989 Rebased-From: 020069e6b71 Whilst the action cirruslabs/actions/cache will automatically set this host, the docker `gha` build cache backend will not be aware of it. Set the value here, which will later be used in the docker build args to enable docker build cache on the cirrus cache. 
--- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3ce17dff3e5d..cf4c02e1d7e7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,6 +19,7 @@ concurrency: env: CI_FAILFAST_TEST_LEAVE_DANGLING: 1 # GHA does not care about dangling processes and setting this variable avoids killing the CI script itself on error + CIRRUS_CACHE_HOST: http://127.0.0.1:12321/ # When using Cirrus Runners this host can be used by the docker `gha` build cache type. MAKEJOBS: '-j10' REPO_USE_CIRRUS_RUNNERS: 'bitcoin/bitcoin' # Use cirrus runners and cache for this repo, instead of falling back to the slow GHA runners From 849993377d76c64cc5ea14336e6523434608deb3 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 026/115] ci: add job to determine runner type Github-Pull: #32989 Rebased-From: cc1735d7771 To remove multiple occurances of the respository name, against which we compare `${{ github.repository }}` to check if we should use Cirrus Runners, introduce a helper job which can check a single environment variable and output this as an input to subsequent jobs. Forks can maintain a trivial patch of their repo name against the `REPO_USE_CIRRUS_RUNNERS` variable in ci.yml if they have Cirrus Runners of their own, which will then enable cache actions and docker build cache to use Cirrus Cache. It's not possible to use `${{ env.USE_CIRRUS_RUNNERS }}` in the `runs-on:` directive as the context is not supported by GitHub. If it was, this job would no longer be necessary. 
--- .github/workflows/ci.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cf4c02e1d7e7..4bd496d7a849 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -24,6 +24,22 @@ env: REPO_USE_CIRRUS_RUNNERS: 'bitcoin/bitcoin' # Use cirrus runners and cache for this repo, instead of falling back to the slow GHA runners jobs: + runners: + name: 'determine runners' + runs-on: ubuntu-latest + outputs: + use-cirrus-runners: ${{ steps.runners.outputs.use-cirrus-runners }} + steps: + - id: runners + run: | + if [[ "${REPO_USE_CIRRUS_RUNNERS}" == "${{ github.repository }}" ]]; then + echo "use-cirrus-runners=true" >> "$GITHUB_OUTPUT" + echo "::notice title=Runner Selection::Using Cirrus Runners" + else + echo "use-cirrus-runners=false" >> "$GITHUB_OUTPUT" + echo "::notice title=Runner Selection::Using GitHub-hosted runners" + fi + test-each-commit: name: 'test each commit' runs-on: ubuntu-24.04 From 82c60a31515a2004976faaa26f8caad9e2bb022d Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 027/115] ci: port arm 32-bit job Github-Pull: #32989 Rebased-From: f253031cb8e Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 43 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4bd496d7a849..297f8c0eec52 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -324,3 +324,46 @@ jobs: path: ${{ env.CCACHE_DIR }} # https://github.com/actions/cache/blob/main/tips-and-workarounds.md#update-a-cache key: ${{ github.job }}-ccache-${{ github.run_id }} + + ci-matrix: + name: ${{ matrix.name }} + needs: runners + runs-on: ${{ needs.runners.outputs.use-cirrus-runners == 'true' && matrix.cirrus-runner || matrix.fallback-runner }} + if: ${{ vars.SKIP_BRANCH_PUSH != 'true' || github.event_name == 'pull_request' }} + timeout-minutes: ${{ 
matrix.timeout-minutes }} + + env: + DANGER_CI_ON_HOST_FOLDERS: 1 + FILE_ENV: ${{ matrix.file-env }} + + strategy: + fail-fast: false + matrix: + include: + - name: '32 bit ARM, unit tests, no functional tests' + cirrus-runner: 'ubuntu-24.04-arm' # Cirrus' Arm runners are Apple (with virtual Linux aarch64), which doesn't support 32-bit mode + fallback-runner: 'ubuntu-24.04-arm' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_arm.sh' + + steps: + - name: Checkout + uses: actions/checkout@v5 + + - name: Configure environment + uses: ./.github/actions/configure-environment + + - name: Restore caches + id: restore-cache + uses: ./.github/actions/restore-caches + + - name: Configure Docker + uses: ./.github/actions/configure-docker + with: + use-cirrus: ${{ needs.runners.outputs.use-cirrus-runners }} + + - name: CI script + run: ./ci/test_run_all.sh + + - name: Save caches + uses: ./.github/actions/save-caches From 894a3cbe42bf900788b858faf59b3d97412e7d47 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 028/115] ci: update windows-cross job Github-Pull: #32989 Rebased-From: 04e7bfbceb0 Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 297f8c0eec52..d88bb462e3cc 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -346,6 +346,11 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_arm.sh' + - name: 'win64 Cross' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_win64.sh' steps: - name: Checkout uses: actions/checkout@v5 From 819ee09af31687dedd38de68aef98b0ecc19608f Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 029/115] ci: update asan-lsan-ubsan Github-Pull: #32989 Rebased-From: 884251441bb Co-authored-by: Max Edwards --- 
.github/workflows/ci.yml | 55 ++++++++++------------------------------ 1 file changed, 13 insertions(+), 42 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d88bb462e3cc..0dab0e8a5f76 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -283,48 +283,6 @@ jobs: run: | py -3 test\fuzz\test_runner.py --par %NUMBER_OF_PROCESSORS% --loglevel DEBUG %RUNNER_TEMP%\qa-assets\fuzz_corpora - asan-lsan-ubsan-integer-no-depends-usdt: - name: 'ASan + LSan + UBSan + integer, no depends, USDT' - runs-on: ubuntu-24.04 # has to match container in ci/test/00_setup_env_native_asan.sh for tracing tools - if: ${{ vars.SKIP_BRANCH_PUSH != 'true' || github.event_name == 'pull_request' }} - timeout-minutes: 120 - env: - FILE_ENV: "./ci/test/00_setup_env_native_asan.sh" - DANGER_CI_ON_HOST_FOLDERS: 1 - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Set CI directories - run: | - echo "CCACHE_DIR=${{ runner.temp }}/ccache_dir" >> "$GITHUB_ENV" - echo "BASE_ROOT_DIR=${{ runner.temp }}" >> "$GITHUB_ENV" - echo "BASE_BUILD_DIR=${{ runner.temp }}/build-asan" >> "$GITHUB_ENV" - - - name: Restore Ccache cache - id: ccache-cache - uses: actions/cache/restore@v4 - with: - path: ${{ env.CCACHE_DIR }} - key: ${{ github.job }}-ccache-${{ github.run_id }} - restore-keys: ${{ github.job }}-ccache- - - - name: Enable bpfcc script - # In the image build step, no external environment variables are available, - # so any settings will need to be written to the settings env file: - run: sed -i "s|\${INSTALL_BCC_TRACING_TOOLS}|true|g" ./ci/test/00_setup_env_native_asan.sh - - - name: CI script - run: ./ci/test_run_all.sh - - - name: Save Ccache cache - uses: actions/cache/save@v4 - if: github.event_name != 'pull_request' && steps.ccache-cache.outputs.cache-hit != 'true' - with: - path: ${{ env.CCACHE_DIR }} - # https://github.com/actions/cache/blob/main/tips-and-workarounds.md#update-a-cache - key: ${{ github.job }}-ccache-${{ github.run_id 
}} - ci-matrix: name: ${{ matrix.name }} needs: runners @@ -351,6 +309,13 @@ jobs: fallback-runner: 'ubuntu-24.04' timeout-minutes: 120 file-env: './ci/test/00_setup_env_win64.sh' + + - name: 'ASan + LSan + UBSan + integer, no depends, USDT' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' # has to match container in ci/test/00_setup_env_native_asan.sh for tracing tools + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_asan.sh' + steps: - name: Checkout uses: actions/checkout@v5 @@ -367,6 +332,12 @@ jobs: with: use-cirrus: ${{ needs.runners.outputs.use-cirrus-runners }} + - name: Enable bpfcc script + if: ${{ env.CONTAINER_NAME == 'ci_native_asan' }} + # In the image build step, no external environment variables are available, + # so any settings will need to be written to the settings env file: + run: sed -i "s|\${INSTALL_BCC_TRACING_TOOLS}|true|g" ./ci/test/00_setup_env_native_asan.sh + - name: CI script run: ./ci/test_run_all.sh From a91567a980adb93a05f12ec63b628ee3faaa4681 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 030/115] ci: force reinstall of kernel headers in asan Github-Pull: #32989 Rebased-From: 2c990d84a3d When using hosted runners in combination with cached docker images, there is the possibility that the host runner image is updated, rendering the linux-headers package (stored in the cached docker image) incompatible. Fix this by doing a re-install of the headers package in 03_test_script.sh. If the underlying runner kernel has not changed thie has no effect, but prevents the job from failing if it has. 
--- ci/test/03_test_script.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ci/test/03_test_script.sh b/ci/test/03_test_script.sh index b218e7b9d1a1..c2ef2291bd55 100755 --- a/ci/test/03_test_script.sh +++ b/ci/test/03_test_script.sh @@ -24,6 +24,14 @@ fi echo "Free disk space:" df -h +# We force an install of linux-headers again here via $PACKAGES to fix any +# kernel mismatch between a cached docker image and the underlying host. +# This can happen occasionally on hosted runners if the runner image is updated. +if [[ "$CONTAINER_NAME" == "ci_native_asan" ]]; then + $CI_RETRY_EXE apt-get update + ${CI_RETRY_EXE} bash -c "apt-get install --no-install-recommends --no-upgrade -y $PACKAGES" +fi + # What host to compile for. See also ./depends/README.md # Tests that need cross-compilation export the appropriate HOST. # Tests that run natively guess the host From 835b5b8bb18a318026ada74d3c63b89d6aab742b Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 031/115] ci: port mac-cross-gui-notests Github-Pull: #32989 Rebased-From: 9c2514de534 Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0dab0e8a5f76..941ab095095f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -316,6 +316,12 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_asan.sh' + - name: 'macOS-cross, gui, no tests' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_mac_cross.sh' + steps: - name: Checkout uses: actions/checkout@v5 From e826c3daa55d3b4cbd0e2c13765f9158eb225bfd Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 032/115] ci: port nowallet-libbitcoinkernel Github-Pull: #32989 Rebased-From: 2a00b12d73b Co-authored-by: Max Edwards --- 
.github/workflows/ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 941ab095095f..37556d42c973 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -322,6 +322,12 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_mac_cross.sh' + - name: 'No wallet, libbitcoinkernel' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh' + steps: - name: Checkout uses: actions/checkout@v5 From 544f902b2a9cf14cd0445f27802cd11c5c945b00 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 033/115] ci: port i686-multiprocess-DEBUG Github-Pull: #32989 Rebased-From: f2068f26c12 Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 37556d42c973..5b15fdb26529 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -328,6 +328,12 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh' + - name: 'i686, multiprocess, DEBUG' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_i686_multiprocess.sh' + steps: - name: Checkout uses: actions/checkout@v5 From 85ec6c6882b40adb35c9cb88d37d22e3e58eaa68 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 034/115] ci: port fuzzer-address-undefined-integer-nodepends Github-Pull: #32989 Rebased-From: 341196d75c3 Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5b15fdb26529..1eabc0ec8a91 100644 --- a/.github/workflows/ci.yml +++ 
b/.github/workflows/ci.yml @@ -334,6 +334,12 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_i686_multiprocess.sh' + - name: 'fuzzer,address,undefined,integer, no depends' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 240 + file-env: './ci/test/00_setup_env_native_fuzz.sh' + steps: - name: Checkout uses: actions/checkout@v5 From 5057b9a6ffd360dbd96ad8585e10852961392361 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 035/115] ci: port previous-releases-depends-debug Github-Pull: #32989 Rebased-From: 58e38c3a042 Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1eabc0ec8a91..ca8748fd80a9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -340,6 +340,12 @@ jobs: timeout-minutes: 240 file-env: './ci/test/00_setup_env_native_fuzz.sh' + - name: 'previous releases, depends DEBUG' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_previous_releases.sh' + steps: - name: Checkout uses: actions/checkout@v5 From b4286cf354a8111ed54fb63547dc1a7be7257b92 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 036/115] ci: port centos-depends-gui Github-Pull: #32989 Rebased-From: 549074bc643 Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ca8748fd80a9..69fe9f0f978e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -346,6 +346,12 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_previous_releases.sh' + - name: 'CentOS, depends, gui' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' + 
fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_centos.sh' + steps: - name: Checkout uses: actions/checkout@v5 From 3b2dcc8b9aea8706a25690a0cd08ba60896d3542 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 037/115] ci: port tidy Github-Pull: #32989 Rebased-From: bf7d5364527 Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 69fe9f0f978e..65903b40fd0e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -352,6 +352,12 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_centos.sh' + - name: 'tidy' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_tidy.sh' + steps: - name: Checkout uses: actions/checkout@v5 From 643385b22d9908f7665bf2addc734ba0323967b0 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 038/115] ci: port tsan-depends Github-Pull: #32989 Rebased-From: 9bbae61e3b4 Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 65903b40fd0e..95d5686f2f2c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -358,6 +358,12 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_tidy.sh' + - name: 'TSan, depends, no gui' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_tsan.sh' + steps: - name: Checkout uses: actions/checkout@v5 @@ -380,6 +386,11 @@ jobs: # so any settings will need to be written to the settings env file: run: sed -i "s|\${INSTALL_BCC_TRACING_TOOLS}|true|g" 
./ci/test/00_setup_env_native_asan.sh + - name: Set mmap_rnd_bits + if: ${{ env.CONTAINER_NAME == 'ci_native_tsan' }} + # Prevents crashes due to high ASLR entropy + run: sudo sysctl -w vm.mmap_rnd_bits=28 + - name: CI script run: ./ci/test_run_all.sh From 0f0378fe3c590e835aa30be092f37109ddd63b86 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 039/115] ci: port msan-depends Github-Pull: #32989 Rebased-From: d290a8e6eab Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 95d5686f2f2c..607d96f96639 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -364,6 +364,12 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_tsan.sh' + - name: 'MSan, depends' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_msan.sh' + steps: - name: Checkout uses: actions/checkout@v5 @@ -387,7 +393,7 @@ jobs: run: sed -i "s|\${INSTALL_BCC_TRACING_TOOLS}|true|g" ./ci/test/00_setup_env_native_asan.sh - name: Set mmap_rnd_bits - if: ${{ env.CONTAINER_NAME == 'ci_native_tsan' }} + if: ${{ env.CONTAINER_NAME == 'ci_native_tsan' || env.CONTAINER_NAME == 'ci_native_msan' }} # Prevents crashes due to high ASLR entropy run: sudo sysctl -w vm.mmap_rnd_bits=28 From 06424fb004f916b06e4f0ab90fd6f7623049a360 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 040/115] ci: port lint Github-Pull: #32989 Rebased-From: bc41848d00f Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 607d96f96639..6d9f920e68b1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -402,3 +402,32 @@ jobs: - 
name: Save caches uses: ./.github/actions/save-caches + + lint: + name: 'lint' + needs: runners + runs-on: ${{ needs.runners.outputs.use-cirrus-runners == 'true' && 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm' || 'ubuntu-24.04' }} + if: ${{ vars.SKIP_BRANCH_PUSH != 'true' || github.event_name == 'pull_request' }} + timeout-minutes: 20 + env: + CONTAINER_NAME: "bitcoin-linter" + steps: + - name: Checkout + uses: actions/checkout@v5 + with: + fetch-depth: 0 + + - name: Configure Docker + uses: ./.github/actions/configure-docker + with: + use-cirrus: ${{ needs.runners.outputs.use-cirrus-runners }} + + - name: CI script + run: | + set -o xtrace + docker buildx build -t "$CONTAINER_NAME" $DOCKER_BUILD_CACHE_ARG --file "./ci/lint_imagefile" . + CIRRUS_PR_FLAG="" + if [ "${{ github.event_name }}" = "pull_request" ]; then + CIRRUS_PR_FLAG="-e CIRRUS_PR=1" + fi + docker run --rm $CIRRUS_PR_FLAG -v "$(pwd)":/bitcoin "$CONTAINER_NAME" From a08c3cc51c6875ba67f25c85143fdb61a8ba3e03 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 041/115] ci: remove .cirrus.yml Github-Pull: #32989 Rebased-From: 4393ffdd837 Removed as unused. --- .cirrus.yml | 214 ---------------------------------------------------- 1 file changed, 214 deletions(-) delete mode 100644 .cirrus.yml diff --git a/.cirrus.yml b/.cirrus.yml deleted file mode 100644 index 6e70dc15fecc..000000000000 --- a/.cirrus.yml +++ /dev/null @@ -1,214 +0,0 @@ -env: # Global defaults - CIRRUS_CLONE_DEPTH: 1 - CIRRUS_LOG_TIMESTAMP: true - MAKEJOBS: "-j10" - TEST_RUNNER_PORT_MIN: "14000" # Must be larger than 12321, which is used for the http cache. See https://cirrus-ci.org/guide/writing-tasks/#http-cache - CI_FAILFAST_TEST_LEAVE_DANGLING: "1" # Cirrus CI does not care about dangling processes and setting this variable avoids killing the CI script itself on error - -# A self-hosted machine(s) can be used via Cirrus CI. It can be configured with -# multiple users to run tasks in parallel. 
No sudo permission is required. -# -# https://cirrus-ci.org/guide/persistent-workers/ -# -# Generally, a persistent worker must run Ubuntu 23.04+ or Debian 12+. -# -# The following specific types should exist, with the following requirements: -# - small: For an x86_64 machine, with at least 2 vCPUs and 8 GB of memory. -# - medium: For an x86_64 machine, with at least 4 vCPUs and 16 GB of memory. -# - arm64: For an aarch64 machine, with at least 2 vCPUs and 8 GB of memory. -# -# CI jobs for the latter configuration can be run on x86_64 hardware -# by installing qemu-user-static, which works out of the box with -# podman or docker. Background: https://stackoverflow.com/a/72890225/313633 -# -# The above machine types are matched to each task by their label. Refer to the -# Cirrus CI docs for more details. -# -# When a contributor maintains a fork of the repo, any pull request they make -# to their own fork, or to the main repository, will trigger two CI runs: -# one for the branch push and one for the pull request. -# This can be avoided by setting SKIP_BRANCH_PUSH=true as a custom env variable -# in Cirrus repository settings, accessible from -# https://cirrus-ci.com/github/my-organization/my-repository -# -# On machines that are persisted between CI jobs, RESTART_CI_DOCKER_BEFORE_RUN=1 -# ensures that previous containers and artifacts are cleared before each run. -# This requires installing Podman instead of Docker. -# -# Futhermore: -# - podman-docker-4.1+ is required due to the bugfix in 4.1 -# (https://github.com/bitcoin/bitcoin/pull/21652#issuecomment-1657098200) -# - The ./ci/ dependencies (with cirrus-cli) should be installed. 
One-liner example -# for a single user setup with sudo permission: -# -# ``` -# apt update && apt install git screen python3 bash podman-docker uidmap slirp4netns curl -y && curl -L -o cirrus "https://github.com/cirruslabs/cirrus-cli/releases/latest/download/cirrus-linux-$(dpkg --print-architecture)" && mv cirrus /usr/local/bin/cirrus && chmod +x /usr/local/bin/cirrus -# ``` -# -# - There are no strict requirements on the hardware. Having fewer CPU threads -# than recommended merely causes the CI script to run slower. -# To avoid rare and intermittent OOM due to short memory usage spikes, -# it is recommended to add (and persist) swap: -# -# ``` -# fallocate -l 16G /swapfile_ci && chmod 600 /swapfile_ci && mkswap /swapfile_ci && swapon /swapfile_ci && ( echo '/swapfile_ci none swap sw 0 0' | tee -a /etc/fstab ) -# ``` -# -# - To register the persistent worker, open a `screen` session and run: -# -# ``` -# RESTART_CI_DOCKER_BEFORE_RUN=1 screen cirrus worker run --labels type=todo_fill_in_type --token todo_fill_in_token -# ``` - -# https://cirrus-ci.org/guide/tips-and-tricks/#sharing-configuration-between-tasks -filter_template: &FILTER_TEMPLATE - # Allow forks to specify SKIP_BRANCH_PUSH=true and skip CI runs when a branch is pushed, - # but still run CI when a PR is created. - # https://cirrus-ci.org/guide/writing-tasks/#conditional-task-execution - skip: $SKIP_BRANCH_PUSH == "true" && $CIRRUS_PR == "" - stateful: false # https://cirrus-ci.org/guide/writing-tasks/#stateful-tasks - -base_template: &BASE_TEMPLATE - << : *FILTER_TEMPLATE - merge_base_script: - # Require git (used in fingerprint_script). 
- - git --version || ( apt-get update && apt-get install -y git ) - - if [ "$CIRRUS_PR" = "" ]; then exit 0; fi - - git fetch --depth=1 $CIRRUS_REPO_CLONE_URL "pull/${CIRRUS_PR}/merge" - - git checkout FETCH_HEAD # Use merged changes to detect silent merge conflicts - # Also, the merge commit is used to lint COMMIT_RANGE="HEAD~..HEAD" - -main_template: &MAIN_TEMPLATE - timeout_in: 120m # https://cirrus-ci.org/faq/#instance-timed-out - ci_script: - - ./ci/test_run_all.sh - -global_task_template: &GLOBAL_TASK_TEMPLATE - << : *BASE_TEMPLATE - << : *MAIN_TEMPLATE - -compute_credits_template: &CREDITS_TEMPLATE - # https://cirrus-ci.org/pricing/#compute-credits - # Only use credits for pull requests to the main repo - use_compute_credits: $CIRRUS_REPO_FULL_NAME == 'bitcoin/bitcoin' && $CIRRUS_PR != "" - -task: - name: 'lint' - << : *BASE_TEMPLATE - container: - image: debian:bookworm - cpu: 1 - memory: 1G - # For faster CI feedback, immediately schedule the linters - << : *CREDITS_TEMPLATE - test_runner_cache: - folder: "/lint_test_runner" - fingerprint_script: echo $CIRRUS_TASK_NAME $(git rev-parse HEAD:test/lint/test_runner) - python_cache: - folder: "/python_build" - fingerprint_script: cat .python-version /etc/os-release - unshallow_script: - - git fetch --unshallow --no-tags - lint_script: - - ./ci/lint_run_all.sh - -task: - name: 'tidy' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: medium - env: - FILE_ENV: "./ci/test/00_setup_env_native_tidy.sh" - -task: - name: 'ARM, unit tests, no functional tests' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: arm64 # Use arm64 worker to sidestep qemu and avoid a slow CI: https://github.com/bitcoin/bitcoin/pull/28087#issuecomment-1649399453 - env: - FILE_ENV: "./ci/test/00_setup_env_arm.sh" - -task: - name: 'Win64-cross' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - env: - FILE_ENV: "./ci/test/00_setup_env_win64.sh" - -task: - name: 'CentOS, depends, gui' 
- << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - env: - FILE_ENV: "./ci/test/00_setup_env_native_centos.sh" - -task: - name: 'previous releases, depends DEBUG' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - env: - FILE_ENV: "./ci/test/00_setup_env_native_previous_releases.sh" - -task: - name: 'TSan, depends, no gui' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: medium - env: - FILE_ENV: "./ci/test/00_setup_env_native_tsan.sh" - -task: - name: 'MSan, depends' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - timeout_in: 300m # Use longer timeout for the *rare* case where a full build (llvm + msan + depends + ...) needs to be done. - env: - FILE_ENV: "./ci/test/00_setup_env_native_msan.sh" - -task: - name: 'fuzzer,address,undefined,integer, no depends' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: medium - timeout_in: 240m # larger timeout, due to the high CPU demand - env: - FILE_ENV: "./ci/test/00_setup_env_native_fuzz.sh" - -task: - name: 'multiprocess, i686, DEBUG' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: medium - env: - FILE_ENV: "./ci/test/00_setup_env_i686_multiprocess.sh" - -task: - name: 'no wallet, libbitcoinkernel' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - env: - FILE_ENV: "./ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh" - -task: - name: 'macOS-cross, gui, no tests' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - env: - FILE_ENV: "./ci/test/00_setup_env_mac_cross.sh" From c7f290b826fc4928c6e1e0a9649da85d4752717b Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 042/115] ci: dynamically match makejobs with cores Github-Pull: #32989 Rebased-From: 3f339e99e00 Previously jobs were running on a large multi-core server where 10 jobs as default made sense (or may even have been on the low side). 
Using hosted runners with fixed (and lower) numbers of vCPUs we should adapt compilation to match the number of cpus we have dynamically. This is cross-platform compatible with macos and linux only. --- .github/workflows/ci.yml | 1 - ci/test/00_setup_env.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6d9f920e68b1..60cb41b8f98e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,7 +20,6 @@ concurrency: env: CI_FAILFAST_TEST_LEAVE_DANGLING: 1 # GHA does not care about dangling processes and setting this variable avoids killing the CI script itself on error CIRRUS_CACHE_HOST: http://127.0.0.1:12321/ # When using Cirrus Runners this host can be used by the docker `gha` build cache type. - MAKEJOBS: '-j10' REPO_USE_CIRRUS_RUNNERS: 'bitcoin/bitcoin' # Use cirrus runners and cache for this repo, instead of falling back to the slow GHA runners jobs: diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh index 9f794c252358..8a5cd4b24357 100755 --- a/ci/test/00_setup_env.sh +++ b/ci/test/00_setup_env.sh @@ -35,7 +35,7 @@ fi echo "Fallback to default values in env (if not yet set)" # The number of parallel jobs to pass down to make and test_runner.py -export MAKEJOBS=${MAKEJOBS:--j4} +export MAKEJOBS=${MAKEJOBS:--j$(if command -v nproc > /dev/null 2>&1; then nproc; else sysctl -n hw.logicalcpu; fi)} # Whether to prefer BusyBox over GNU utilities export USE_BUSY_BOX=${USE_BUSY_BOX:-false} From 4339787379d2d246846e60f10ab9582805a6845e Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 043/115] doc: Detail configuration of hosted CI runners Github-Pull: #32989 Rebased-From: f4272844833dd660c2b9db587856baa408889302 --- ci/README.md | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/ci/README.md b/ci/README.md index 377aae7fa0b1..81e048ce687c 100644 --- a/ci/README.md +++ 
b/ci/README.md @@ -1,8 +1,8 @@ -## CI Scripts +# CI Scripts This directory contains scripts for each build step in each build stage. -### Running a Stage Locally +## Running a Stage Locally Be aware that the tests will be built and run in-place, so please run at your own risk. If the repository is not a fresh git clone, you might have to clean files from previous builds or test runs first. @@ -27,7 +27,7 @@ with a specific configuration, env -i HOME="$HOME" PATH="$PATH" USER="$USER" bash -c 'FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh' ``` -### Configurations +## Configurations The test files (`FILE_ENV`) are constructed to test a wide range of configurations, rather than a single pass/fail. This helps to catch build @@ -49,8 +49,32 @@ env -i HOME="$HOME" PATH="$PATH" USER="$USER" bash -c 'MAKEJOBS="-j1" FILE_ENV=" The files starting with `0n` (`n` greater than 0) are the scripts that are run in order. -### Cache +## Cache In order to avoid rebuilding all dependencies for each build, the binaries are cached and reused when possible. Changes in the dependency-generator will trigger cache-invalidation and rebuilds as necessary. + +## Configuring a repository for CI + +### Primary repository + +To configure the primary repository, follow these steps: + +1. Register with [Cirrus Runners](https://cirrus-runners.app/) and purchase runners. +2. Install the Cirrus Runners GitHub app against the GitHub organization. +3. Enable organisation-level runners to be used in public repositories: + 1. `Org settings -> Actions -> Runner Groups -> Default -> Allow public repos` +4. Permit the following actions to run: + 1. cirruslabs/cache/restore@\* + 1. cirruslabs/cache/save@\* + 1. docker/setup-buildx-action@\* + 1. actions/github-script@\* + +### Forked repositories + +When used in a fork the CI will run on GitHub's free hosted runners by default. 
+In this case, due to GitHub's 10GB-per-repo cache size limitations caches will be frequently evicted and missed, but the workflows will run (slowly). + +It is also possible to use your own Cirrus Runners in your own fork with an appropriate patch to the `REPO_USE_CIRRUS_RUNNERS` variable in ../.github/workflows/ci.yml +NB that Cirrus Runners only work at an organisation level, therefore in order to use your own Cirrus Runners, *the fork must be within your own organisation*. From 773e4cda9446a03c0b23468ef8a9e38496b4566b Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 044/115] ci: add ccache hit-rate warning when < 75% Github-Pull: #32989 Rebased-From: dd1c5903e8d Print the ccache hit-rate for the job using a GitHub annotation if it was below 75%. --- ci/test/00_setup_env_mac_native.sh | 1 + ci/test/00_setup_env_mac_native_fuzz.sh | 1 + ci/test/03_test_script.sh | 6 ++++++ 3 files changed, 8 insertions(+) diff --git a/ci/test/00_setup_env_mac_native.sh b/ci/test/00_setup_env_mac_native.sh index e01a56895bfb..9de51f93291e 100755 --- a/ci/test/00_setup_env_mac_native.sh +++ b/ci/test/00_setup_env_mac_native.sh @@ -8,6 +8,7 @@ export LC_ALL=C.UTF-8 # Homebrew's python@3.12 is marked as externally managed (PEP 668). # Therefore, `--break-system-packages` is needed. 
+export CONTAINER_NAME="ci_mac_native" # macos does not use a container, but the env var is needed for logging export PIP_PACKAGES="--break-system-packages zmq" export GOAL="install" export CMAKE_GENERATOR="Ninja" diff --git a/ci/test/00_setup_env_mac_native_fuzz.sh b/ci/test/00_setup_env_mac_native_fuzz.sh index cacf2423ac30..22b6bc97ab3e 100755 --- a/ci/test/00_setup_env_mac_native_fuzz.sh +++ b/ci/test/00_setup_env_mac_native_fuzz.sh @@ -6,6 +6,7 @@ export LC_ALL=C.UTF-8 +export CONTAINER_NAME="ci_mac_native_fuzz" # macos does not use a container, but the env var is needed for logging export CMAKE_GENERATOR="Ninja" export BITCOIN_CONFIG="-DBUILD_FOR_FUZZING=ON" export CI_OS_NAME="macos" diff --git a/ci/test/03_test_script.sh b/ci/test/03_test_script.sh index c2ef2291bd55..36f8b9dfc2d1 100755 --- a/ci/test/03_test_script.sh +++ b/ci/test/03_test_script.sh @@ -137,6 +137,12 @@ bash -c "cmake -S $BASE_ROOT_DIR $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( (cat $ bash -c "cmake --build . $MAKEJOBS --target all $GOAL" || ( echo "Build failure. Verbose build follows." && cmake --build . --target all "$GOAL" --verbose ; false ) bash -c "${PRINT_CCACHE_STATISTICS}" +if [ "$CI" = "true" ]; then + hit_rate=$(ccache -s | grep "Hits:" | head -1 | sed 's/.*(\(.*\)%).*/\1/') + if [ "${hit_rate%.*}" -lt 75 ]; then + echo "::notice title=low ccache hitrate::Ccache hit-rate in $CONTAINER_NAME was $hit_rate%" + fi +fi du -sh "${DEPENDS_DIR}"/*/ du -sh "${PREVIOUS_RELEASES_DIR}" From 4e8b64b181e1bb7d82789699eaac24dc1242afa3 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 045/115] ci: fix annoying docker warning Github-Pull: #32989 Rebased-From: 2aa288efdda Docker currently warns that we are missing a default value. Set this to scratch which will error if an appropriate image tag is not passed in to silence the warning. 
--- ci/test_imagefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ci/test_imagefile b/ci/test_imagefile index 224141b138f5..f9cf3187a252 100644 --- a/ci/test_imagefile +++ b/ci/test_imagefile @@ -4,7 +4,8 @@ # See ci/README.md for usage. -ARG CI_IMAGE_NAME_TAG +# We never want scratch, but default arg silences a Warning +ARG CI_IMAGE_NAME_TAG=scratch FROM ${CI_IMAGE_NAME_TAG} ARG FILE_ENV From 6ded1fe11752372c52169d2aec7f0658bf9b0455 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Fri, 8 Aug 2025 10:31:56 +0200 Subject: [PATCH 046/115] ci: remove un-needed lint_run*.sh files Github-Pull: #32989 Rebased-From: 3c5da69a232 ci/lint_run_all.sh: Only used in .cirrus.yml. Refer to test/lint/README.md on how to run locally. --- ci/lint_run_all.sh | 17 ----------------- 1 file changed, 17 deletions(-) delete mode 100755 ci/lint_run_all.sh diff --git a/ci/lint_run_all.sh b/ci/lint_run_all.sh deleted file mode 100755 index c57261d21a69..000000000000 --- a/ci/lint_run_all.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2019-present The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -export LC_ALL=C.UTF-8 - -# Only used in .cirrus.yml. Refer to test/lint/README.md on how to run locally. - -cp "./ci/retry/retry" "/ci_retry" -cp "./.python-version" "/.python-version" -mkdir --parents "/test/lint" -cp --recursive "./test/lint/test_runner" "/test/lint/" -set -o errexit; source ./ci/lint/04_install.sh -set -o errexit -./ci/lint/06_script.sh From 4a034cbeb42763c6b7a82089973c4a30cb0cd1c4 Mon Sep 17 00:00:00 2001 From: will Date: Thu, 4 Sep 2025 19:53:45 +0100 Subject: [PATCH 047/115] ci: reduce runner sizes on various jobs Github-Pull: #33319 Rebased-From: 5eeb2facbbbbf68a2c30ef9e6747e39c85d7b116 These jobs can use reduced runner size to avoid wasting CPU, as much of the long-running part of the job is single-threaded. 
Suggested in: https://github.com/bitcoin/bitcoin/pull/32989#discussion_r2321775620 Co-authored-by: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> --- .github/workflows/ci.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 60cb41b8f98e..2b774b6afde4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -310,7 +310,7 @@ jobs: file-env: './ci/test/00_setup_env_win64.sh' - name: 'ASan + LSan + UBSan + integer, no depends, USDT' - cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' # has to match container in ci/test/00_setup_env_native_asan.sh for tracing tools + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' # has to match container in ci/test/00_setup_env_native_asan.sh for tracing tools fallback-runner: 'ubuntu-24.04' timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_asan.sh' @@ -322,7 +322,7 @@ jobs: file-env: './ci/test/00_setup_env_mac_cross.sh' - name: 'No wallet, libbitcoinkernel' - cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm' fallback-runner: 'ubuntu-24.04' timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh' @@ -358,7 +358,7 @@ jobs: file-env: './ci/test/00_setup_env_native_tidy.sh' - name: 'TSan, depends, no gui' - cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' fallback-runner: 'ubuntu-24.04' timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_tsan.sh' @@ -405,7 +405,7 @@ jobs: lint: name: 'lint' needs: runners - runs-on: ${{ needs.runners.outputs.use-cirrus-runners == 'true' && 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm' || 'ubuntu-24.04' }} + runs-on: ${{ needs.runners.outputs.use-cirrus-runners == 'true' && 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-xs' || 'ubuntu-24.04' }} if: ${{ 
vars.SKIP_BRANCH_PUSH != 'true' || github.event_name == 'pull_request' }} timeout-minutes: 20 env: From 78d93effd03278b46e21ae8ef79f61f4ec32f855 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Thu, 4 Sep 2025 11:21:45 +0200 Subject: [PATCH 048/115] ci: Checkout latest merged pulls Github-Pull: #33303 Rebased-From: fa8f081af31 --- .github/workflows/ci.yml | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2b774b6afde4..ae614d5bb299 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -123,8 +123,12 @@ jobs: BASE_ROOT_DIR: ${{ github.workspace }} steps: - - name: Checkout - uses: actions/checkout@v4 + - &CHECKOUT + name: Checkout + uses: actions/checkout@v5 + with: + # Ensure the latest merged pull request state is used, even on re-runs. + ref: &CHECKOUT_REF_TMPL ${{ github.event_name == 'pull_request' && github.ref || '' }} - name: Clang version run: | @@ -192,8 +196,7 @@ jobs: job-name: 'Win64 native fuzz, VS 2022' steps: - - name: Checkout - uses: actions/checkout@v4 + - *CHECKOUT - name: Configure Developer Command Prompt for Microsoft Visual C++ # Using microsoft/setup-msbuild is not enough. 
@@ -370,8 +373,7 @@ jobs: file-env: './ci/test/00_setup_env_native_msan.sh' steps: - - name: Checkout - uses: actions/checkout@v5 + - *CHECKOUT - name: Configure environment uses: ./.github/actions/configure-environment @@ -414,6 +416,7 @@ jobs: - name: Checkout uses: actions/checkout@v5 with: + ref: *CHECKOUT_REF_TMPL fetch-depth: 0 - name: Configure Docker From 5750355139eb7fc2bd11124adf46bf053be6b690 Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 1 Aug 2025 09:48:30 +0100 Subject: [PATCH 049/115] ci: link against -lstdc++ in native fuzz with msan job Github-Pull: #33425 Rebased-From: b77137a5644e09a08442aed7d8a4a9290fb53526 --- ci/test/00_setup_env_native_fuzz_with_msan.sh | 6 ++++-- ci/test/01_base_install.sh | 17 ----------------- 2 files changed, 4 insertions(+), 19 deletions(-) diff --git a/ci/test/00_setup_env_native_fuzz_with_msan.sh b/ci/test/00_setup_env_native_fuzz_with_msan.sh index 27b704017c42..655fe609c0d0 100755 --- a/ci/test/00_setup_env_native_fuzz_with_msan.sh +++ b/ci/test/00_setup_env_native_fuzz_with_msan.sh @@ -7,14 +7,16 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" +export APT_LLVM_V="21" LIBCXX_DIR="/cxx_build/" export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" -LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" +# -lstdc++ to resolve link issues due to upstream packaging +LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument -lstdc++" export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}" export CONTAINER_NAME="ci_native_fuzz_msan" -export PACKAGES="ninja-build" # BDB generates false-positives and will be removed in future +export PACKAGES="ninja-build 
clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev" export DEP_OPTS="DEBUG=1 NO_BDB=1 NO_QT=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" export GOAL="all" # Setting CMAKE_{C,CXX}_FLAGS_DEBUG flags to an empty string ensures that the flags set in MSAN_FLAGS remain unaltered. diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh index 1b624f389424..65f68351c899 100755 --- a/ci/test/01_base_install.sh +++ b/ci/test/01_base_install.sh @@ -57,23 +57,6 @@ fi if [[ -n "${USE_INSTRUMENTED_LIBCPP}" ]]; then ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-21.1.1" /llvm-project - if [ -n "${APT_LLVM_V}" ]; then - - cmake -G Ninja -B /clang_build/ \ - -DLLVM_ENABLE_PROJECTS="clang" \ - -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_TARGETS_TO_BUILD=Native \ - -DLLVM_ENABLE_RUNTIMES="compiler-rt;libcxx;libcxxabi;libunwind" \ - -S /llvm-project/llvm - - ninja -C /clang_build/ "$MAKEJOBS" - ninja -C /clang_build/ install-runtimes - - update-alternatives --install /usr/bin/clang++ clang++ /clang_build/bin/clang++ 100 - update-alternatives --install /usr/bin/clang clang /clang_build/bin/clang 100 - update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer /clang_build/bin/llvm-symbolizer 100 - fi - cmake -G Ninja -B /cxx_build/ \ -DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libunwind" \ -DCMAKE_BUILD_TYPE=Release \ From 118abf4c305c01e6359a8588327a0b011ca52944 Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Sat, 20 Sep 2025 21:32:41 +0200 Subject: [PATCH 050/115] test: add block 2016 to mock mainnet The next commit requires an additional mainnet block which changes the difficulty. Also fix a few minor mistakes in the test (suite): - rename the create_coinbase retarger_period argument to halving_period. Before bitcoin#31583 this was hardcoded for regtest where these values are the same. 
- drop unused fees argument from mine helper Finally the CPU miner instructions for generating the alternative mainnet chain are expanded. Github-Pull: #33446 Rebased-From: 4c3c1f42cf705e039751395799240da33ca969bd --- test/functional/data/README.md | 14 +++++++++++--- test/functional/data/mainnet_alt.json | 6 ++++-- test/functional/mining_mainnet.py | 19 +++++++++++++------ test/functional/test_framework/blocktools.py | 4 ++-- 4 files changed, 30 insertions(+), 13 deletions(-) diff --git a/test/functional/data/README.md b/test/functional/data/README.md index bb03422f95ff..956394e385cc 100644 --- a/test/functional/data/README.md +++ b/test/functional/data/README.md @@ -11,9 +11,10 @@ The alternate mainnet chain was generated as follows: - restart node with a faketime 2 minutes later ```sh -for i in {1..2015} +for i in {1..2016} do - faketime "`date -d @"$(( 1231006505 + $i * 120 ))" +'%Y-%m-%d %H:%M:%S'`" \ + t=$(( 1231006505 + $i * 120 )) + faketime "`date -d @$t +'%Y-%m-%d %H:%M:%S'`" \ bitcoind -connect=0 -nocheckpoints -stopatheight=$i done ``` @@ -21,7 +22,9 @@ done The CPU miner is kept running as follows: ```sh -./minerd --coinbase-addr 1NQpH6Nf8QtR2HphLRcvuVqfhXBXsiWn8r --no-stratum --algo sha256d --no-longpoll --scantime 3 --retry-pause 1 +./minerd -u ... -p ... -o http://127.0.0.1:8332 --no-stratum \ + --coinbase-addr 1NQpH6Nf8QtR2HphLRcvuVqfhXBXsiWn8r \ + --algo sha256d --no-longpoll --scantime 3 --retry-pause 1 ``` The payout address is derived from first BIP32 test vector master key: @@ -40,3 +43,8 @@ The timestamp was not kept constant because at difficulty 1 it's not sufficient to only grind the nonce. Grinding the extra_nonce or version field instead would have required additional (stratum) software. It would also make it more complicated to reconstruct the blocks in this test. + +The `getblocktemplate` RPC code needs to be patched to ignore not being connected +to any peers, and to ignore the IBD status check. 
+ +On macOS use `faketime "@$t"` instead. diff --git a/test/functional/data/mainnet_alt.json b/test/functional/data/mainnet_alt.json index a4a072d2c5b0..96821a36f41c 100644 --- a/test/functional/data/mainnet_alt.json +++ b/test/functional/data/mainnet_alt.json @@ -2014,7 +2014,8 @@ 1231247971, 1231248071, 1231248198, - 1231248322 + 1231248322, + 1231248621 ], "nonces": [ 2345621585, @@ -4031,6 +4032,7 @@ 3658502865, 2519048297, 1915965760, - 1183846025 + 1183846025, + 2713372123 ] } diff --git a/test/functional/mining_mainnet.py b/test/functional/mining_mainnet.py index c2757b61574f..c58c4784b4d1 100755 --- a/test/functional/mining_mainnet.py +++ b/test/functional/mining_mainnet.py @@ -54,15 +54,15 @@ def add_options(self, parser): self.add_wallet_options(parser) - def mine(self, height, prev_hash, blocks, node, fees=0): + def mine(self, height, prev_hash, blocks, node): self.log.debug(f"height={height}") block = CBlock() block.nVersion = 0x20000000 block.hashPrevBlock = int(prev_hash, 16) block.nTime = blocks['timestamps'][height - 1] - block.nBits = DIFF_1_N_BITS + block.nBits = DIFF_1_N_BITS if height < 2016 else DIFF_4_N_BITS block.nNonce = blocks['nonces'][height - 1] - block.vtx = [create_coinbase(height=height, script_pubkey=bytes.fromhex(COINBASE_SCRIPT_PUBKEY), retarget_period=2016)] + block.vtx = [create_coinbase(height=height, script_pubkey=bytes.fromhex(COINBASE_SCRIPT_PUBKEY), halving_period=210000)] block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block_hex = block.serialize(with_witness=False).hex() @@ -81,12 +81,15 @@ def run_test(self): self.log.info("Load alternative mainnet blocks") path = os.path.join(os.path.dirname(os.path.realpath(__file__)), self.options.datafile) prev_hash = node.getbestblockhash() + blocks = None with open(path, encoding='utf-8') as f: blocks = json.load(f) n_blocks = len(blocks['timestamps']) - assert_equal(n_blocks, 2015) - for i in range(2015): - prev_hash = self.mine(i + 1, prev_hash, blocks, node) + 
assert_equal(n_blocks, 2016) + + # Mine up to the last block of the first retarget period + for i in range(2015): + prev_hash = self.mine(i + 1, prev_hash, blocks, node) assert_equal(node.getblockcount(), 2015) @@ -101,5 +104,9 @@ def run_test(self): assert_equal(mining_info['next']['bits'], nbits_str(DIFF_4_N_BITS)) assert_equal(mining_info['next']['target'], target_str(DIFF_4_TARGET)) + # Mine first block of the second retarget period + height = 2016 + prev_hash = self.mine(height, prev_hash, blocks, node) + assert_equal(node.getblockcount(), height) if __name__ == '__main__': MiningMainnetTest(__file__).main() diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py index 38600bc005a1..49e2518887f7 100644 --- a/test/functional/test_framework/blocktools.py +++ b/test/functional/test_framework/blocktools.py @@ -143,7 +143,7 @@ def script_BIP34_coinbase_height(height): return CScript([CScriptNum(height)]) -def create_coinbase(height, pubkey=None, *, script_pubkey=None, extra_output_script=None, fees=0, nValue=50, retarget_period=REGTEST_RETARGET_PERIOD): +def create_coinbase(height, pubkey=None, *, script_pubkey=None, extra_output_script=None, fees=0, nValue=50, halving_period=REGTEST_RETARGET_PERIOD): """Create a coinbase transaction. 
If pubkey is passed in, the coinbase output will be a P2PK output; @@ -156,7 +156,7 @@ def create_coinbase(height, pubkey=None, *, script_pubkey=None, extra_output_scr coinbaseoutput = CTxOut() coinbaseoutput.nValue = nValue * COIN if nValue == 50: - halvings = int(height / retarget_period) + halvings = int(height / halving_period) coinbaseoutput.nValue >>= halvings coinbaseoutput.nValue += fees if pubkey is not None: From 22ab141243eeb4a929e589ef70a6f54a5aaed3ba Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Sat, 20 Sep 2025 21:33:13 +0200 Subject: [PATCH 051/115] rpc: fix getblock(header) returns target for tip A target field was added to the getblock and getblockheader RPC calls in bitcoin#31583, but it mistakingly always used the tip value. Because regtest does not have difficulty adjustment, a test is added for mainnet instead. Github-Pull: #33446 Rebased-From: bf7996cbc3becf329d8b1cd2f1007fec9b3a3188 --- src/rpc/blockchain.cpp | 2 +- test/functional/mining_mainnet.py | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 8cbca51ccbf4..edda17d3697a 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -164,7 +164,7 @@ UniValue blockheaderToJSON(const CBlockIndex& tip, const CBlockIndex& blockindex result.pushKV("mediantime", blockindex.GetMedianTimePast()); result.pushKV("nonce", blockindex.nNonce); result.pushKV("bits", strprintf("%08x", blockindex.nBits)); - result.pushKV("target", GetTarget(tip, pow_limit).GetHex()); + result.pushKV("target", GetTarget(blockindex, pow_limit).GetHex()); result.pushKV("difficulty", GetDifficulty(blockindex)); result.pushKV("chainwork", blockindex.nChainWork.GetHex()); result.pushKV("nTx", blockindex.nTx); diff --git a/test/functional/mining_mainnet.py b/test/functional/mining_mainnet.py index c58c4784b4d1..456381af55ab 100755 --- a/test/functional/mining_mainnet.py +++ b/test/functional/mining_mainnet.py @@ -108,5 +108,17 @@ def 
run_test(self): height = 2016 prev_hash = self.mine(height, prev_hash, blocks, node) assert_equal(node.getblockcount(), height) + + mining_info = node.getmininginfo() + assert_equal(mining_info['difficulty'], 4) + + self.log.info("getblock RPC should show historical target") + block_info = node.getblock(node.getblockhash(1)) + + assert_equal(block_info['difficulty'], 1) + assert_equal(block_info['bits'], nbits_str(DIFF_1_N_BITS)) + assert_equal(block_info['target'], target_str(DIFF_1_TARGET)) + + if __name__ == '__main__': MiningMainnetTest(__file__).main() From 9d9baafc6f9357179e57fdcc6cf2ce36d65dd16d Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Fri, 26 Sep 2025 19:25:26 +0200 Subject: [PATCH 052/115] doc: rpc: fix case typo in `finalizepsbt` help (final_scriptwitness) Github-Pull: #33484 Rebased-From: ff05bebcc4262966b117082a67dc4c63a3f67d2d --- src/rpc/rawtransaction.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 421656152cba..77e8fd49e11b 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -1494,7 +1494,7 @@ static RPCHelpMan finalizepsbt() return RPCHelpMan{"finalizepsbt", "Finalize the inputs of a PSBT. If the transaction is fully signed, it will produce a\n" "network serialized transaction which can be broadcast with sendrawtransaction. 
Otherwise a PSBT will be\n" - "created which has the final_scriptSig and final_scriptWitness fields filled for inputs that are complete.\n" + "created which has the final_scriptSig and final_scriptwitness fields filled for inputs that are complete.\n" "Implements the Finalizer and Extractor roles.\n", { {"psbt", RPCArg::Type::STR, RPCArg::Optional::NO, "A base64 string of a PSBT"}, From 6f23ead4a2d97e245f4fc1824b1dd956dc06cc42 Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Mon, 29 Sep 2025 15:47:07 -0400 Subject: [PATCH 053/115] fuzz: don't bypass_limits for most mempool harnesses Using bypass_limits=true is essentially fuzzing part of a reorg only, and results in TRUC invariants unable to be checked. Remove most instances of bypassing limits, leaving one harness able to do so. Github-Pull: #33504 Rebased-From: bbe8e9063c15dc230553e0cbf16d603f5ad0e4cf --- src/test/fuzz/package_eval.cpp | 2 +- src/test/fuzz/tx_pool.cpp | 14 ++++++++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/src/test/fuzz/package_eval.cpp b/src/test/fuzz/package_eval.cpp index 8e3d84a9e635..37b18a59414b 100644 --- a/src/test/fuzz/package_eval.cpp +++ b/src/test/fuzz/package_eval.cpp @@ -324,7 +324,7 @@ FUZZ_TARGET(ephemeral_package_eval, .init = initialize_tx_pool) return ProcessNewPackage(chainstate, tx_pool, txs, /*test_accept=*/single_submit, /*client_maxfeerate=*/{})); const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, txs.back(), GetTime(), - /*bypass_limits=*/fuzzed_data_provider.ConsumeBool(), /*test_accept=*/!single_submit)); + /*bypass_limits=*/false, /*test_accept=*/!single_submit)); if (!single_submit && result_package.m_state.GetResult() != PackageValidationResult::PCKG_POLICY) { // We don't know anything about the validity since transactions were randomly generated, so diff --git a/src/test/fuzz/tx_pool.cpp b/src/test/fuzz/tx_pool.cpp index a697ee9d8384..98feadf516e2 100644 --- a/src/test/fuzz/tx_pool.cpp +++ 
b/src/test/fuzz/tx_pool.cpp @@ -295,7 +295,6 @@ FUZZ_TARGET(tx_pool_standard, .init = initialize_tx_pool) std::set added; auto txr = std::make_shared(removed, added); node.validation_signals->RegisterSharedValidationInterface(txr); - const bool bypass_limits = fuzzed_data_provider.ConsumeBool(); // Make sure ProcessNewPackage on one transaction works. // The result is not guaranteed to be the same as what is returned by ATMP. @@ -310,7 +309,7 @@ FUZZ_TARGET(tx_pool_standard, .init = initialize_tx_pool) it->second.m_result_type == MempoolAcceptResult::ResultType::INVALID); } - const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, tx, GetTime(), bypass_limits, /*test_accept=*/false)); + const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, tx, GetTime(), /*bypass_limits=*/false, /*test_accept=*/false)); const bool accepted = res.m_result_type == MempoolAcceptResult::ResultType::VALID; node.validation_signals->SyncWithValidationInterfaceQueue(); node.validation_signals->UnregisterSharedValidationInterface(txr); @@ -393,6 +392,9 @@ FUZZ_TARGET(tx_pool, .init = initialize_tx_pool) chainstate.SetMempool(&tx_pool); + // If we ever bypass limits, do not do TRUC invariants checks + bool ever_bypassed_limits{false}; + LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 300) { const auto mut_tx = ConsumeTransaction(fuzzed_data_provider, txids); @@ -411,13 +413,17 @@ FUZZ_TARGET(tx_pool, .init = initialize_tx_pool) tx_pool.PrioritiseTransaction(txid.ToUint256(), delta); } + const bool bypass_limits{fuzzed_data_provider.ConsumeBool()}; + ever_bypassed_limits |= bypass_limits; + const auto tx = MakeTransactionRef(mut_tx); - const bool bypass_limits = fuzzed_data_provider.ConsumeBool(); const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, tx, GetTime(), bypass_limits, /*test_accept=*/false)); const bool accepted = res.m_result_type == MempoolAcceptResult::ResultType::VALID; if (accepted) { 
txids.push_back(tx->GetHash()); - CheckMempoolTRUCInvariants(tx_pool); + if (!ever_bypassed_limits) { + CheckMempoolTRUCInvariants(tx_pool); + } } } Finish(fuzzed_data_provider, tx_pool, chainstate); From 666aec7d49506c587ecbbcd71f6e8f1e7bb4e4cd Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Fri, 26 Sep 2025 14:47:47 -0400 Subject: [PATCH 054/115] Mempool: Do not enforce TRUC checks on reorg Not enforcing TRUC topology on reorg was the intended behavior, but the appropriate bypass argument was not checked. This mistake means we could potentially invalidate a long chain of perfectly incentive-compatible transactions that were made historically, including subsequent non-TRUC transactions, all of which may have been very high feerate. Lastly, it wastes CPU cycles doing topology checks since this behavior cannot actually enforce the topology in general for the reorg setting. Github-Pull: #33504 Rebased-From: 26e71c237d9d2197824b547f55ee3a0a60149f92 --- src/validation.cpp | 42 ++++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index fde064458dc6..85504d1e2907 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1025,26 +1025,28 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // Even though just checking direct mempool parents for inheritance would be sufficient, we // check using the full ancestor set here because it's more convenient to use what we have // already calculated. - if (const auto err{SingleTRUCChecks(ws.m_ptx, ws.m_ancestors, ws.m_conflicts, ws.m_vsize)}) { - // Single transaction contexts only. - if (args.m_allow_sibling_eviction && err->second != nullptr) { - // We should only be considering where replacement is considered valid as well. - Assume(args.m_allow_replacement); - - // Potential sibling eviction. Add the sibling to our list of mempool conflicts to be - // included in RBF checks. 
- ws.m_conflicts.insert(err->second->GetHash()); - // Adding the sibling to m_iters_conflicting here means that it doesn't count towards - // RBF Carve Out above. This is correct, since removing to-be-replaced transactions from - // the descendant count is done separately in SingleTRUCChecks for TRUC transactions. - ws.m_iters_conflicting.insert(m_pool.GetIter(err->second->GetHash()).value()); - ws.m_sibling_eviction = true; - // The sibling will be treated as part of the to-be-replaced set in ReplacementChecks. - // Note that we are not checking whether it opts in to replaceability via BIP125 or TRUC - // (which is normally done in PreChecks). However, the only way a TRUC transaction can - // have a non-TRUC and non-BIP125 descendant is due to a reorg. - } else { - return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "TRUC-violation", err->first); + if (!args.m_bypass_limits) { + if (const auto err{SingleTRUCChecks(ws.m_ptx, ws.m_ancestors, ws.m_conflicts, ws.m_vsize)}) { + // Single transaction contexts only. + if (args.m_allow_sibling_eviction && err->second != nullptr) { + // We should only be considering where replacement is considered valid as well. + Assume(args.m_allow_replacement); + + // Potential sibling eviction. Add the sibling to our list of mempool conflicts to be + // included in RBF checks. + ws.m_conflicts.insert(err->second->GetHash()); + // Adding the sibling to m_iters_conflicting here means that it doesn't count towards + // RBF Carve Out above. This is correct, since removing to-be-replaced transactions from + // the descendant count is done separately in SingleTRUCChecks for TRUC transactions. + ws.m_iters_conflicting.insert(m_pool.GetIter(err->second->GetHash()).value()); + ws.m_sibling_eviction = true; + // The sibling will be treated as part of the to-be-replaced set in ReplacementChecks. + // Note that we are not checking whether it opts in to replaceability via BIP125 or TRUC + // (which is normally done in PreChecks). 
However, the only way a TRUC transaction can + // have a non-TRUC and non-BIP125 descendant is due to a reorg. + } else { + return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "TRUC-violation", err->first); + } } } From a8bb76b61f49e1abd681f21a754f970eef206ced Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Fri, 26 Sep 2025 14:49:06 -0400 Subject: [PATCH 055/115] test: add more TRUC reorg coverge Github-Pull: #33504 Rebased-From: 06df14ba75be5f48cf9c417424900ace17d1cf4d --- test/functional/mempool_truc.py | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/test/functional/mempool_truc.py b/test/functional/mempool_truc.py index 8850ba800289..d095033a847b 100755 --- a/test/functional/mempool_truc.py +++ b/test/functional/mempool_truc.py @@ -164,23 +164,36 @@ def test_truc_replacement(self): def test_truc_reorg(self): node = self.nodes[0] self.log.info("Test that, during a reorg, TRUC rules are not enforced") - tx_v2_block = self.wallet.send_self_transfer(from_node=node, version=2) - tx_v3_block = self.wallet.send_self_transfer(from_node=node, version=3) - tx_v3_block2 = self.wallet.send_self_transfer(from_node=node, version=3) - self.check_mempool([tx_v3_block["txid"], tx_v2_block["txid"], tx_v3_block2["txid"]]) + self.check_mempool([]) + + # Testing 2<-3 versions allowed + tx_v2_block = self.wallet.create_self_transfer(version=2) + + # Testing 3<-2 versions allowed + tx_v3_block = self.wallet.create_self_transfer(version=3) + + # Testing overly-large child size + tx_v3_block2 = self.wallet.create_self_transfer(version=3) + + # Also create a linear chain of 3 TRUC transactions that will be directly mined, followed by one v2 in-mempool after block is made + tx_chain_1 = self.wallet.create_self_transfer(version=3) + tx_chain_2 = self.wallet.create_self_transfer(utxo_to_spend=tx_chain_1["new_utxo"], version=3) + tx_chain_3 = self.wallet.create_self_transfer(utxo_to_spend=tx_chain_2["new_utxo"], version=3) + + 
tx_to_mine = [tx_v3_block["hex"], tx_v2_block["hex"], tx_v3_block2["hex"], tx_chain_1["hex"], tx_chain_2["hex"], tx_chain_3["hex"]] + block = self.generateblock(node, output="raw(42)", transactions=tx_to_mine) - block = self.generate(node, 1) self.check_mempool([]) tx_v2_from_v3 = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v3_block["new_utxo"], version=2) tx_v3_from_v2 = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v2_block["new_utxo"], version=3) tx_v3_child_large = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v3_block2["new_utxo"], target_vsize=1250, version=3) assert_greater_than(node.getmempoolentry(tx_v3_child_large["txid"])["vsize"], TRUC_CHILD_MAX_VSIZE) - self.check_mempool([tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"]]) - node.invalidateblock(block[0]) - self.check_mempool([tx_v3_block["txid"], tx_v2_block["txid"], tx_v3_block2["txid"], tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"]]) - # This is needed because generate() will create the exact same block again. 
- node.reconsiderblock(block[0]) + tx_chain_4 = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_chain_3["new_utxo"], version=2) + self.check_mempool([tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"], tx_chain_4["txid"]]) + # Reorg should have all block transactions re-accepted, ignoring TRUC enforcement + node.invalidateblock(block["hash"]) + self.check_mempool([tx_v3_block["txid"], tx_v2_block["txid"], tx_v3_block2["txid"], tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"], tx_chain_1["txid"], tx_chain_2["txid"], tx_chain_3["txid"], tx_chain_4["txid"]]) @cleanup(extra_args=["-limitdescendantsize=10", "-datacarriersize=40000"]) def test_nondefault_package_limits(self): From 2d7ebd2d913ea63c1a23fefa0a09ee06fb069161 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 24 Sep 2025 10:35:55 -0400 Subject: [PATCH 056/115] doc: update release notes for 29.x --- doc/release-notes.md | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/doc/release-notes.md b/doc/release-notes.md index 0325d3a3e288..8a79e99ad271 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -1,6 +1,6 @@ -Bitcoin Core version 29.2rc1 is now available from: +Bitcoin Core version 29.2rc2 is now available from: - + This release includes various bug fixes and performance improvements, as well as updated translations. 
@@ -43,6 +43,14 @@ Notable changes - #33296 net: check for empty header before calling FillBlock - #33395 net: do not apply whitelist permissions to onion inbounds +### Mempool + +- #33504 mempool: Do not enforce TRUC checks on reorg + +### RPC + +- #33446 rpc: fix getblock(header) returns target for tip + ### CI - #32999 ci: Use APT_LLVM_V in msan task @@ -50,6 +58,10 @@ Notable changes - #33258 ci: use LLVM 21 - #33364 ci: always use tag for LLVM checkout +### Doc + +- #33484 doc: rpc: fix case typo in `finalizepsbt` help + ### Misc - #33310 trace: Workaround GCC bug compiling with old systemtap @@ -67,6 +79,8 @@ Thanks to everyone who directly contributed to this release: - Luke Dashjr - MarcoFalke - Martin Zumsande +- Sebastian Falbesoner +- Sjors Provoost - Vasil Dimov As well as to everyone that helped with translations on From 6b3c1dbc5c0df4357ee7f57ac238bcdff55526af Mon Sep 17 00:00:00 2001 From: amisha Date: Wed, 10 Sep 2025 21:04:57 +0530 Subject: [PATCH 057/115] contrib: fix using macdploy script without translations. QT translations are optional, but the script would error when 'translations_dir' falls back to its default value NULL. This PR fixes it by moving the set-up of QT translations under the check for 'translations_dir' presence. 
Github-Pull: #33482 Rebased-From: 7b5261f7ef3d88361204c40eb10c0d9dc44f5ed7 --- contrib/macdeploy/macdeployqtplus | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/contrib/macdeploy/macdeployqtplus b/contrib/macdeploy/macdeployqtplus index eaa7b896be9f..512053762699 100755 --- a/contrib/macdeploy/macdeployqtplus +++ b/contrib/macdeploy/macdeployqtplus @@ -465,18 +465,18 @@ if config.translations_dir: sys.stderr.write(f"Error: Could not find translation dir \"{config.translations_dir[0]}\"\n") sys.exit(1) -print("+ Adding Qt translations +") + print("+ Adding Qt translations +") -translations = Path(config.translations_dir[0]) + translations = Path(config.translations_dir[0]) -regex = re.compile('qt_[a-z]*(.qm|_[A-Z]*.qm)') + regex = re.compile('qt_[a-z]*(.qm|_[A-Z]*.qm)') -lang_files = [x for x in translations.iterdir() if regex.match(x.name)] + lang_files = [x for x in translations.iterdir() if regex.match(x.name)] -for file in lang_files: - if verbose: - print(file.as_posix(), "->", os.path.join(applicationBundle.resourcesPath, file.name)) - shutil.copy2(file.as_posix(), os.path.join(applicationBundle.resourcesPath, file.name)) + for file in lang_files: + if verbose: + print(file.as_posix(), "->", os.path.join(applicationBundle.resourcesPath, file.name)) + shutil.copy2(file.as_posix(), os.path.join(applicationBundle.resourcesPath, file.name)) # ------------------------------------------------ From eea16f7de7c4382e4491f3b018ecd0c36678affb Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 3 Oct 2025 14:27:43 +0100 Subject: [PATCH 058/115] build: bump version to v29.2rc2 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 05a86a1d97ff..681926a1e4b2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -30,7 +30,7 @@ set(CLIENT_NAME "Bitcoin Core") set(CLIENT_VERSION_MAJOR 29) set(CLIENT_VERSION_MINOR 2) set(CLIENT_VERSION_BUILD 0) -set(CLIENT_VERSION_RC 1) 
+set(CLIENT_VERSION_RC 2) set(CLIENT_VERSION_IS_RELEASE "true") set(COPYRIGHT_YEAR "2025") From 513cef75ee06bc5d310a22d366a5f3c815aa1499 Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 3 Oct 2025 14:39:17 +0100 Subject: [PATCH 059/115] doc: update manual pages for v29.2rc2 --- doc/man/bitcoin-cli.1 | 6 +++--- doc/man/bitcoin-qt.1 | 6 +++--- doc/man/bitcoin-tx.1 | 6 +++--- doc/man/bitcoin-util.1 | 6 +++--- doc/man/bitcoin-wallet.1 | 6 +++--- doc/man/bitcoind.1 | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/doc/man/bitcoin-cli.1 b/doc/man/bitcoin-cli.1 index a8dc092a6ccc..707ccfc322ba 100644 --- a/doc/man/bitcoin-cli.1 +++ b/doc/man/bitcoin-cli.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-CLI "1" "September 2025" "bitcoin-cli v29.2.0rc1" "User Commands" +.TH BITCOIN-CLI "1" "October 2025" "bitcoin-cli v29.2.0rc2" "User Commands" .SH NAME -bitcoin-cli \- manual page for bitcoin-cli v29.2.0rc1 +bitcoin-cli \- manual page for bitcoin-cli v29.2.0rc2 .SH SYNOPSIS .B bitcoin-cli [\fI\,options\/\fR] \fI\, \/\fR[\fI\,params\/\fR] @@ -15,7 +15,7 @@ bitcoin-cli \- manual page for bitcoin-cli v29.2.0rc1 .B bitcoin-cli [\fI\,options\/\fR] \fI\,help \/\fR .SH DESCRIPTION -Bitcoin Core RPC client version v29.2.0rc1 +Bitcoin Core RPC client version v29.2.0rc2 .PP The bitcoin\-cli utility provides a command line interface to interact with a Bitcoin Core RPC server. .PP diff --git a/doc/man/bitcoin-qt.1 b/doc/man/bitcoin-qt.1 index 7821b8fb440f..a02b52ea2e70 100644 --- a/doc/man/bitcoin-qt.1 +++ b/doc/man/bitcoin-qt.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-QT "1" "September 2025" "bitcoin-qt v29.2.0rc1" "User Commands" +.TH BITCOIN-QT "1" "October 2025" "bitcoin-qt v29.2.0rc2" "User Commands" .SH NAME -bitcoin-qt \- manual page for bitcoin-qt v29.2.0rc1 +bitcoin-qt \- manual page for bitcoin-qt v29.2.0rc2 .SH SYNOPSIS .B bitcoin-qt [\fI\,options\/\fR] [\fI\,URI\/\fR] .SH DESCRIPTION -Bitcoin Core version v29.2.0rc1 +Bitcoin Core version v29.2.0rc2 .PP The bitcoin\-qt application provides a graphical interface for interacting with Bitcoin Core. .PP diff --git a/doc/man/bitcoin-tx.1 b/doc/man/bitcoin-tx.1 index a14a6be60243..a7df27545c2e 100644 --- a/doc/man/bitcoin-tx.1 +++ b/doc/man/bitcoin-tx.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-TX "1" "September 2025" "bitcoin-tx v29.2.0rc1" "User Commands" +.TH BITCOIN-TX "1" "October 2025" "bitcoin-tx v29.2.0rc2" "User Commands" .SH NAME -bitcoin-tx \- manual page for bitcoin-tx v29.2.0rc1 +bitcoin-tx \- manual page for bitcoin-tx v29.2.0rc2 .SH SYNOPSIS .B bitcoin-tx [\fI\,options\/\fR] \fI\, \/\fR[\fI\,commands\/\fR] @@ -9,7 +9,7 @@ bitcoin-tx \- manual page for bitcoin-tx v29.2.0rc1 .B bitcoin-tx [\fI\,options\/\fR] \fI\,-create \/\fR[\fI\,commands\/\fR] .SH DESCRIPTION -Bitcoin Core bitcoin\-tx utility version v29.2.0rc1 +Bitcoin Core bitcoin\-tx utility version v29.2.0rc2 .PP The bitcoin\-tx tool is used for creating and modifying bitcoin transactions. .PP diff --git a/doc/man/bitcoin-util.1 b/doc/man/bitcoin-util.1 index e0cc27e2d772..d127ddf4cb38 100644 --- a/doc/man/bitcoin-util.1 +++ b/doc/man/bitcoin-util.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-UTIL "1" "September 2025" "bitcoin-util v29.2.0rc1" "User Commands" +.TH BITCOIN-UTIL "1" "October 2025" "bitcoin-util v29.2.0rc2" "User Commands" .SH NAME -bitcoin-util \- manual page for bitcoin-util v29.2.0rc1 +bitcoin-util \- manual page for bitcoin-util v29.2.0rc2 .SH SYNOPSIS .B bitcoin-util [\fI\,options\/\fR] [\fI\,command\/\fR] @@ -9,7 +9,7 @@ bitcoin-util \- manual page for bitcoin-util v29.2.0rc1 .B bitcoin-util [\fI\,options\/\fR] \fI\,grind \/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-util utility version v29.2.0rc1 +Bitcoin Core bitcoin\-util utility version v29.2.0rc2 .PP The bitcoin\-util tool provides bitcoin related functionality that does not rely on the ability to access a running node. Available [commands] are listed below. .SH OPTIONS diff --git a/doc/man/bitcoin-wallet.1 b/doc/man/bitcoin-wallet.1 index 58bbf2715b3b..b471ac1a246e 100644 --- a/doc/man/bitcoin-wallet.1 +++ b/doc/man/bitcoin-wallet.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-WALLET "1" "September 2025" "bitcoin-wallet v29.2.0rc1" "User Commands" +.TH BITCOIN-WALLET "1" "October 2025" "bitcoin-wallet v29.2.0rc2" "User Commands" .SH NAME -bitcoin-wallet \- manual page for bitcoin-wallet v29.2.0rc1 +bitcoin-wallet \- manual page for bitcoin-wallet v29.2.0rc2 .SH SYNOPSIS .B bitcoin-wallet [\fI\,options\/\fR] \fI\,\/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-wallet utility version v29.2.0rc1 +Bitcoin Core bitcoin\-wallet utility version v29.2.0rc2 .PP bitcoin\-wallet is an offline tool for creating and interacting with Bitcoin Core wallet files. .PP diff --git a/doc/man/bitcoind.1 b/doc/man/bitcoind.1 index 0846f3e0619e..3e7e394b1af7 100644 --- a/doc/man/bitcoind.1 +++ b/doc/man/bitcoind.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIND "1" "September 2025" "bitcoind v29.2.0rc1" "User Commands" +.TH BITCOIND "1" "October 2025" "bitcoind v29.2.0rc2" "User Commands" .SH NAME -bitcoind \- manual page for bitcoind v29.2.0rc1 +bitcoind \- manual page for bitcoind v29.2.0rc2 .SH SYNOPSIS .B bitcoind [\fI\,options\/\fR] .SH DESCRIPTION -Bitcoin Core daemon version v29.2.0rc1 +Bitcoin Core daemon version v29.2.0rc2 .PP The Bitcoin Core daemon (bitcoind) is a headless program that connects to the Bitcoin network to validate and relay transactions and blocks, as well as relaying addresses. .PP From d82fc69829cd8cabbaf2c3a969597b40c32edc86 Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 3 Oct 2025 18:04:23 +0100 Subject: [PATCH 060/115] doc: update release notes for 29.2rc2 --- doc/release-notes.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/release-notes.md b/doc/release-notes.md index 8a79e99ad271..30cb763a0c76 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -53,6 +53,7 @@ Notable changes ### CI +- #32989 ci: Migrate CI to hosted Cirrus Runners - #32999 ci: Use APT_LLVM_V in msan task - #33099 ci: allow for any libc++ intrumentation & use it for TSAN - #33258 ci: use LLVM 21 @@ -66,12 +67,14 @@ Notable changes - #33310 trace: Workaround GCC bug compiling with old systemtap - #33340 Fix benchmark CSV output +- #33482 contrib: fix macOS deployment with no translations Credits ======= Thanks to everyone who directly contributed to this release: +- Amisha Chhajed - Eugene Siegel - fanquake - Greg Sanders @@ -82,6 +85,7 @@ Thanks to everyone who directly contributed to this release: - Sebastian Falbesoner - Sjors Provoost - Vasil Dimov +- Will Clark As well as to everyone that helped with translations on [Transifex](https://explore.transifex.com/bitcoin/bitcoin/). 
From abf4a6eeaee116917dafd56eb9caee03e13048d2 Mon Sep 17 00:00:00 2001 From: fanquake Date: Tue, 7 Oct 2025 13:31:04 +0100 Subject: [PATCH 061/115] build: fix depends Qt download link --- depends/packages/qt.mk | 2 +- doc/dependencies.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index d41ac4e784eb..abd8a6fa8d26 100644 --- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -1,6 +1,6 @@ package=qt $(package)_version=5.15.16 -$(package)_download_path=https://download.qt.io/official_releases/qt/5.15/$($(package)_version)/submodules +$(package)_download_path=https://download.qt.io/archive/qt/5.15/$($(package)_version)/submodules $(package)_suffix=everywhere-opensource-src-$($(package)_version).tar.xz $(package)_file_name=qtbase-$($(package)_suffix) $(package)_sha256_hash=b04815058c18058b6ba837206756a2c87d1391f07a0dcb0dd314f970fd041592 diff --git a/doc/dependencies.md b/doc/dependencies.md index 7c866a433db7..d3f6b74367ba 100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -30,7 +30,7 @@ Bitcoin Core requires one of the following compilers. 
| [Fontconfig](../depends/packages/fontconfig.mk) (gui) | [link](https://www.freedesktop.org/wiki/Software/fontconfig/) | [2.12.6](https://github.com/bitcoin/bitcoin/pull/23495) | 2.6 | Yes | | [FreeType](../depends/packages/freetype.mk) (gui) | [link](https://freetype.org) | [2.11.0](https://github.com/bitcoin/bitcoin/commit/01544dd78ccc0b0474571da854e27adef97137fb) | 2.3.0 | Yes | | [qrencode](../depends/packages/qrencode.mk) (gui) | [link](https://fukuchi.org/works/qrencode/) | [4.1.1](https://github.com/bitcoin/bitcoin/pull/27312) | N/A | No | -| [Qt](../depends/packages/qt.mk) (gui) | [link](https://download.qt.io/official_releases/qt/) | [5.15.16](https://github.com/bitcoin/bitcoin/pull/30774) | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No | +| [Qt](../depends/packages/qt.mk) (gui) | [link](https://download.qt.io/archive/qt/) | [5.15.16](https://github.com/bitcoin/bitcoin/pull/30774) | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No | | [ZeroMQ](../depends/packages/zeromq.mk) (notifications) | [link](https://github.com/zeromq/libzmq/releases) | [4.3.4](https://github.com/bitcoin/bitcoin/pull/23956) | 4.0.0 | No | | [Berkeley DB](../depends/packages/bdb.mk) (legacy wallet) | [link](https://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html) | 4.8.30 | 4.8.x | No | | [SQLite](../depends/packages/sqlite.mk) (wallet) | [link](https://sqlite.org) | [3.38.5](https://github.com/bitcoin/bitcoin/pull/25378) | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | No | From 3226616493289b111997bb107e569fef54386743 Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 6 Oct 2025 16:35:18 +0100 Subject: [PATCH 062/115] doc: update release notes for 29.2 --- doc/release-notes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/release-notes.md b/doc/release-notes.md index 30cb763a0c76..b981b8a7f546 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -1,6 +1,6 @@ 
-Bitcoin Core version 29.2rc2 is now available from: +Bitcoin Core version 29.2 is now available from: - + This release includes various bug fixes and performance improvements, as well as updated translations. From b2026fa290f0aef9a0dcfe45750121f113e2ce7d Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 6 Oct 2025 16:35:38 +0100 Subject: [PATCH 063/115] build: bump version to v29.2 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 681926a1e4b2..70f672132b41 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -30,7 +30,7 @@ set(CLIENT_NAME "Bitcoin Core") set(CLIENT_VERSION_MAJOR 29) set(CLIENT_VERSION_MINOR 2) set(CLIENT_VERSION_BUILD 0) -set(CLIENT_VERSION_RC 2) +set(CLIENT_VERSION_RC 0) set(CLIENT_VERSION_IS_RELEASE "true") set(COPYRIGHT_YEAR "2025") From 46d9b9091baa096da30da5e14329a32f1264229a Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 6 Oct 2025 16:39:57 +0100 Subject: [PATCH 064/115] doc: update manual pages for v29.2 --- doc/man/bitcoin-cli.1 | 6 +++--- doc/man/bitcoin-qt.1 | 6 +++--- doc/man/bitcoin-tx.1 | 6 +++--- doc/man/bitcoin-util.1 | 6 +++--- doc/man/bitcoin-wallet.1 | 6 +++--- doc/man/bitcoind.1 | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/doc/man/bitcoin-cli.1 b/doc/man/bitcoin-cli.1 index 707ccfc322ba..ce6f35c198e6 100644 --- a/doc/man/bitcoin-cli.1 +++ b/doc/man/bitcoin-cli.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-CLI "1" "October 2025" "bitcoin-cli v29.2.0rc2" "User Commands" +.TH BITCOIN-CLI "1" "October 2025" "bitcoin-cli v29.2.0" "User Commands" .SH NAME -bitcoin-cli \- manual page for bitcoin-cli v29.2.0rc2 +bitcoin-cli \- manual page for bitcoin-cli v29.2.0 .SH SYNOPSIS .B bitcoin-cli [\fI\,options\/\fR] \fI\, \/\fR[\fI\,params\/\fR] @@ -15,7 +15,7 @@ bitcoin-cli \- manual page for bitcoin-cli v29.2.0rc2 .B bitcoin-cli [\fI\,options\/\fR] \fI\,help \/\fR .SH DESCRIPTION -Bitcoin Core RPC client version v29.2.0rc2 +Bitcoin Core RPC client version v29.2.0 .PP The bitcoin\-cli utility provides a command line interface to interact with a Bitcoin Core RPC server. .PP diff --git a/doc/man/bitcoin-qt.1 b/doc/man/bitcoin-qt.1 index a02b52ea2e70..5efc9e96172e 100644 --- a/doc/man/bitcoin-qt.1 +++ b/doc/man/bitcoin-qt.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-QT "1" "October 2025" "bitcoin-qt v29.2.0rc2" "User Commands" +.TH BITCOIN-QT "1" "October 2025" "bitcoin-qt v29.2.0" "User Commands" .SH NAME -bitcoin-qt \- manual page for bitcoin-qt v29.2.0rc2 +bitcoin-qt \- manual page for bitcoin-qt v29.2.0 .SH SYNOPSIS .B bitcoin-qt [\fI\,options\/\fR] [\fI\,URI\/\fR] .SH DESCRIPTION -Bitcoin Core version v29.2.0rc2 +Bitcoin Core version v29.2.0 .PP The bitcoin\-qt application provides a graphical interface for interacting with Bitcoin Core. .PP diff --git a/doc/man/bitcoin-tx.1 b/doc/man/bitcoin-tx.1 index a7df27545c2e..90a233619fa7 100644 --- a/doc/man/bitcoin-tx.1 +++ b/doc/man/bitcoin-tx.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-TX "1" "October 2025" "bitcoin-tx v29.2.0rc2" "User Commands" +.TH BITCOIN-TX "1" "October 2025" "bitcoin-tx v29.2.0" "User Commands" .SH NAME -bitcoin-tx \- manual page for bitcoin-tx v29.2.0rc2 +bitcoin-tx \- manual page for bitcoin-tx v29.2.0 .SH SYNOPSIS .B bitcoin-tx [\fI\,options\/\fR] \fI\, \/\fR[\fI\,commands\/\fR] @@ -9,7 +9,7 @@ bitcoin-tx \- manual page for bitcoin-tx v29.2.0rc2 .B bitcoin-tx [\fI\,options\/\fR] \fI\,-create \/\fR[\fI\,commands\/\fR] .SH DESCRIPTION -Bitcoin Core bitcoin\-tx utility version v29.2.0rc2 +Bitcoin Core bitcoin\-tx utility version v29.2.0 .PP The bitcoin\-tx tool is used for creating and modifying bitcoin transactions. .PP diff --git a/doc/man/bitcoin-util.1 b/doc/man/bitcoin-util.1 index d127ddf4cb38..4186bd3f5a79 100644 --- a/doc/man/bitcoin-util.1 +++ b/doc/man/bitcoin-util.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-UTIL "1" "October 2025" "bitcoin-util v29.2.0rc2" "User Commands" +.TH BITCOIN-UTIL "1" "October 2025" "bitcoin-util v29.2.0" "User Commands" .SH NAME -bitcoin-util \- manual page for bitcoin-util v29.2.0rc2 +bitcoin-util \- manual page for bitcoin-util v29.2.0 .SH SYNOPSIS .B bitcoin-util [\fI\,options\/\fR] [\fI\,command\/\fR] @@ -9,7 +9,7 @@ bitcoin-util \- manual page for bitcoin-util v29.2.0rc2 .B bitcoin-util [\fI\,options\/\fR] \fI\,grind \/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-util utility version v29.2.0rc2 +Bitcoin Core bitcoin\-util utility version v29.2.0 .PP The bitcoin\-util tool provides bitcoin related functionality that does not rely on the ability to access a running node. Available [commands] are listed below. .SH OPTIONS diff --git a/doc/man/bitcoin-wallet.1 b/doc/man/bitcoin-wallet.1 index b471ac1a246e..97c6144f81a0 100644 --- a/doc/man/bitcoin-wallet.1 +++ b/doc/man/bitcoin-wallet.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-WALLET "1" "October 2025" "bitcoin-wallet v29.2.0rc2" "User Commands" +.TH BITCOIN-WALLET "1" "October 2025" "bitcoin-wallet v29.2.0" "User Commands" .SH NAME -bitcoin-wallet \- manual page for bitcoin-wallet v29.2.0rc2 +bitcoin-wallet \- manual page for bitcoin-wallet v29.2.0 .SH SYNOPSIS .B bitcoin-wallet [\fI\,options\/\fR] \fI\,\/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-wallet utility version v29.2.0rc2 +Bitcoin Core bitcoin\-wallet utility version v29.2.0 .PP bitcoin\-wallet is an offline tool for creating and interacting with Bitcoin Core wallet files. .PP diff --git a/doc/man/bitcoind.1 b/doc/man/bitcoind.1 index 3e7e394b1af7..82804a50c83e 100644 --- a/doc/man/bitcoind.1 +++ b/doc/man/bitcoind.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIND "1" "October 2025" "bitcoind v29.2.0rc2" "User Commands" +.TH BITCOIND "1" "October 2025" "bitcoind v29.2.0" "User Commands" .SH NAME -bitcoind \- manual page for bitcoind v29.2.0rc2 +bitcoind \- manual page for bitcoind v29.2.0 .SH SYNOPSIS .B bitcoind [\fI\,options\/\fR] .SH DESCRIPTION -Bitcoin Core daemon version v29.2.0rc2 +Bitcoin Core daemon version v29.2.0 .PP The Bitcoin Core daemon (bitcoind) is a headless program that connects to the Bitcoin network to validate and relay transactions and blocks, as well as relaying addresses. .PP From c7979f429a86a2971a4ff024bd0e9cd7a6b7222f Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Wed, 8 Oct 2025 14:47:00 -0700 Subject: [PATCH 065/115] ci: Properly include $FILE_ENV in DEPENDS_HASH $FILE_ENV has a full relative path already, prepending with ci/test/ results in a non-existent path which means that DEPENDS_HASH was not actually committing to the test's environment file. 
Github-Pull: #33581 Rebased-From: ceeb53adcd0a6a87a65c8ebbb20472c15c502dfd --- .github/actions/configure-environment/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/configure-environment/action.yml b/.github/actions/configure-environment/action.yml index aae5016bdce9..e2a26b7184d1 100644 --- a/.github/actions/configure-environment/action.yml +++ b/.github/actions/configure-environment/action.yml @@ -17,7 +17,7 @@ runs: - name: Set cache hashes shell: bash run: | - echo "DEPENDS_HASH=$(git ls-tree HEAD depends "ci/test/$FILE_ENV" | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV + echo "DEPENDS_HASH=$(git ls-tree HEAD depends "$FILE_ENV" | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV echo "PREVIOUS_RELEASES_HASH=$(git ls-tree HEAD test/get_previous_releases.py | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV - name: Get container name From 16e10f928cc65b2096046c8c5e0fe715fc5b9d72 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 30 Sep 2025 11:00:26 +0100 Subject: [PATCH 066/115] ci: expose all ACTIONS_* vars When using `docker buildx build` in conjunction with the `gha` backend cache type, it's important to specify the URL and TOKEN needed to authenticate. On Cirrus runners this is working with only `ACTIONS_CACHE_URL` and `ACTIONS_RUNTIME_TOKEN`, but this is not enough for the GitHub backend. Fix this by exporting all `ACTIONS_*` variables. This fixes cache restore/save on forks or where GH-hosted runners are being used. 
Github-Pull: #33508 Rebased-From: bc706955d740f8a59bec78e44d33e80d1cca373b --- .github/actions/configure-docker/action.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/actions/configure-docker/action.yml b/.github/actions/configure-docker/action.yml index c78df86b6cf1..131fdb1ccc37 100644 --- a/.github/actions/configure-docker/action.yml +++ b/.github/actions/configure-docker/action.yml @@ -19,8 +19,12 @@ runs: uses: actions/github-script@v6 with: script: | - core.exportVariable('ACTIONS_CACHE_URL', process.env['ACTIONS_CACHE_URL']) - core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env['ACTIONS_RUNTIME_TOKEN']) + Object.keys(process.env).forEach(function (key) { + if (key.startsWith('ACTIONS_')) { + core.info(`Exporting ${key}`); + core.exportVariable(key, process.env[key]); + } + }); - name: Construct docker build cache args shell: bash From 554ff3f7f33651db3a7071d6b8dc5438a303ac03 Mon Sep 17 00:00:00 2001 From: Eugene Siegel Date: Mon, 13 Oct 2025 11:29:19 -0400 Subject: [PATCH 067/115] test: change log rate limit version gate from 299900 to 290100 Github-Pull: #33612 Rebased-From: 7b544341c0021dd713f05bc439ee190de911930c --- test/functional/test_framework/test_node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 47ae2cc22da1..919d48b37aeb 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -137,7 +137,7 @@ def __init__(self, i, datadir_path, *, chain, rpchost, timewait, timeout_factor, self.args.append("-logsourcelocations") if self.version_is_at_least(239000): self.args.append("-loglevel=trace") - if self.version_is_at_least(299900): + if self.version_is_at_least(290100): self.args.append("-nologratelimit") # Default behavior from global -v2transport flag is added to args to persist it over restarts. 
From 4917d0c0de50da204b002bd4ae0c53cafd268f0c Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 13 Oct 2025 16:21:50 +0100 Subject: [PATCH 068/115] doc: update release notes for 29.x --- doc/release-notes.md | 48 ++++++++------------------------------------ 1 file changed, 8 insertions(+), 40 deletions(-) diff --git a/doc/release-notes.md b/doc/release-notes.md index b981b8a7f546..4e2071dfa675 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -1,6 +1,6 @@ -Bitcoin Core version 29.2 is now available from: +Bitcoin Core version 29.x is now available from: - + This release includes various bug fixes and performance improvements, as well as updated translations. @@ -37,55 +37,23 @@ unsupported systems. Notable changes =============== -### P2P +### Test -- #32646 p2p: Add witness mutation check inside FillBlock -- #33296 net: check for empty header before calling FillBlock -- #33395 net: do not apply whitelist permissions to onion inbounds - -### Mempool - -- #33504 mempool: Do not enforce TRUC checks on reorg - -### RPC - -- #33446 rpc: fix getblock(header) returns target for tip - -### CI - -- #32989 ci: Migrate CI to hosted Cirrus Runners -- #32999 ci: Use APT_LLVM_V in msan task -- #33099 ci: allow for any libc++ intrumentation & use it for TSAN -- #33258 ci: use LLVM 21 -- #33364 ci: always use tag for LLVM checkout - -### Doc - -- #33484 doc: rpc: fix case typo in `finalizepsbt` help +- #33612 test: change log rate limit version gate ### Misc -- #33310 trace: Workaround GCC bug compiling with old systemtap -- #33340 Fix benchmark CSV output -- #33482 contrib: fix macOS deployment with no translations +- #33508 ci: fix buildx gha cache authentication on forks +- #33581 ci: Properly include $FILE_ENV in DEPENDS_HASH Credits ======= Thanks to everyone who directly contributed to this release: -- Amisha Chhajed +- Ava Chow - Eugene Siegel -- fanquake -- Greg Sanders -- Hennadii Stepanov -- Luke Dashjr -- MarcoFalke -- Martin Zumsande -- Sebastian 
Falbesoner -- Sjors Provoost -- Vasil Dimov -- Will Clark +- willcl-ark As well as to everyone that helped with translations on [Transifex](https://explore.transifex.com/bitcoin/bitcoin/). From 97088fa75aa0af5355587ce3522320f459e35204 Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Mon, 4 Aug 2025 14:06:27 -0400 Subject: [PATCH 069/115] qa: test witness stripping in p2p_segwit A stripped witness is detected as a special case in mempool acceptance to make sure we do not add the wtxid (which is =txid since witness is stripped) to the reject filter. This is because it may interfere with 1p1c parent relay which currently uses orphan reconciliation (and originally it was until wtxid-relay was widely adopted on the network. This commit adds a test for this special case in the p2p_segwit function test, both when spending a native segwit output and when spending a p2sh-wrapped segwit output. Thanks to Eugene Siegel for pointing out the p2sh-wrapped detection did not have test coverage by finding a bug in a related patch of mine. Github-Pull: #33105 Rebased-From: eb073209db9efdbc2c94bc1f535a27ec6b20d954 --- test/functional/p2p_segwit.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index 9caf5a19aadf..e8f7f7e0f4e7 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -707,6 +707,12 @@ def test_p2sh_witness(self): expected_msgs=[spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Witness program was passed an empty witness)']): test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False) + # The transaction was detected as witness stripped above and not added to the reject + # filter. Trying again will check it again and result in the same error. 
+ with self.nodes[0].assert_debug_log( + expected_msgs=[spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Witness program was passed an empty witness)']): + test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False) + # Try to put the witness script in the scriptSig, should also fail. spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a']) spend_tx.rehash() @@ -1282,6 +1288,13 @@ def test_tx_relay_after_segwit_activation(self): test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True) test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False) + # Now do the opposite: strip the witness entirely. This will be detected as witness stripping and + # the (w)txid won't be added to the reject filter: we can try again and get the same error. + tx3.wit.vtxinwit[0].scriptWitness.stack = [] + reason = "was not accepted: mandatory-script-verify-flag-failed (Witness program was passed an empty witness)" + test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=False, accepted=False, reason=reason) + test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=False, accepted=False, reason=reason) + # Get rid of the extra witness, and verify acceptance. tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_script] # Also check that old_node gets a tx announcement, even though this is From 56626300b80dced9e111a39d5c560b0b81276cb8 Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Mon, 4 Aug 2025 13:11:33 -0400 Subject: [PATCH 070/115] policy: introduce a helper to detect whether a transaction spends Segwit outputs We will use this helper in later commits to detect witness stripping without having to execute every input Script three times in a row. 
Github-Pull: #33105 Rebased-From: 2907b58834ab011f7dd0c42d323e440abd227c25 --- src/policy/policy.cpp | 36 ++++++++ src/policy/policy.h | 5 ++ src/test/transaction_tests.cpp | 155 +++++++++++++++++++++++++++++++++ 3 files changed, 196 insertions(+) diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp index 545387d150ce..5942747d6099 100644 --- a/src/policy/policy.cpp +++ b/src/policy/policy.cpp @@ -344,6 +344,42 @@ bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) return true; } +bool SpendsNonAnchorWitnessProg(const CTransaction& tx, const CCoinsViewCache& prevouts) +{ + if (tx.IsCoinBase()) { + return false; + } + + int version; + std::vector program; + for (const auto& txin: tx.vin) { + const auto& prev_spk{prevouts.AccessCoin(txin.prevout).out.scriptPubKey}; + + // Note this includes not-yet-defined witness programs. + if (prev_spk.IsWitnessProgram(version, program) && !prev_spk.IsPayToAnchor(version, program)) { + return true; + } + + // For P2SH extract the redeem script and check if it spends a non-Taproot witness program. Note + // this is fine to call EvalScript (as done in AreInputsStandard/IsWitnessStandard) because this + // function is only ever called after IsStandardTx, which checks the scriptsig is pushonly. + if (prev_spk.IsPayToScriptHash()) { + // If EvalScript fails or results in an empty stack, the transaction is invalid by consensus. 
+ std::vector > stack; + if (!EvalScript(stack, txin.scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker{}, SigVersion::BASE) + || stack.empty()) { + continue; + } + const CScript redeem_script{stack.back().begin(), stack.back().end()}; + if (redeem_script.IsWitnessProgram(version, program)) { + return true; + } + } + } + + return false; +} + int64_t GetVirtualTransactionSize(int64_t nWeight, int64_t nSigOpCost, unsigned int bytes_per_sigop) { return (std::max(nWeight, nSigOpCost * bytes_per_sigop) + WITNESS_SCALE_FACTOR - 1) / WITNESS_SCALE_FACTOR; diff --git a/src/policy/policy.h b/src/policy/policy.h index bf6224af3db6..a6ce608bcfad 100644 --- a/src/policy/policy.h +++ b/src/policy/policy.h @@ -167,6 +167,11 @@ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) * Also enforce a maximum stack item size limit and no annexes for tapscript spends. */ bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs); +/** + * Check whether this transaction spends any witness program but P2A, including not-yet-defined ones. + * May return `false` early for consensus-invalid transactions. + */ +bool SpendsNonAnchorWitnessProg(const CTransaction& tx, const CCoinsViewCache& prevouts); /** Compute the virtual transaction size (weight reinterpreted as bytes). */ int64_t GetVirtualTransactionSize(int64_t nWeight, int64_t nSigOpCost, unsigned int bytes_per_sigop); diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp index 2db30e203317..5844ab23bc8d 100644 --- a/src/test/transaction_tests.cpp +++ b/src/test/transaction_tests.cpp @@ -1144,4 +1144,159 @@ BOOST_AUTO_TEST_CASE(max_standard_legacy_sigops) BOOST_CHECK(!::AreInputsStandard(CTransaction(tx_max_sigops), coins)); } +/** Sanity check the return value of SpendsNonAnchorWitnessProg for various output types. 
*/ +BOOST_AUTO_TEST_CASE(spends_witness_prog) +{ + CCoinsView coins_dummy; + CCoinsViewCache coins(&coins_dummy); + CKey key; + key.MakeNewKey(true); + const CPubKey pubkey{key.GetPubKey()}; + CMutableTransaction tx_create{}, tx_spend{}; + tx_create.vout.emplace_back(0, CScript{}); + tx_spend.vin.emplace_back(Txid{}, 0); + std::vector> sol_dummy; + + // CNoDestination, PubKeyDestination, PKHash, ScriptHash, WitnessV0ScriptHash, WitnessV0KeyHash, + // WitnessV1Taproot, PayToAnchor, WitnessUnknown. + static_assert(std::variant_size_v == 9); + + // Go through all defined output types and sanity check SpendsNonAnchorWitnessProg. + + // P2PK + tx_create.vout[0].scriptPubKey = GetScriptForDestination(PubKeyDestination{pubkey}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::PUBKEY); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2PKH + tx_create.vout[0].scriptPubKey = GetScriptForDestination(PKHash{pubkey}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::PUBKEYHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH + auto redeem_script{CScript{} << OP_1 << OP_CHECKSIG}; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash{redeem_script}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << OP_0 << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + + // native P2WSH + const auto witness_script{CScript{} << OP_12 << 
OP_HASH160 << OP_DUP << OP_EQUAL}; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessV0ScriptHash{witness_script}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_V0_SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH-wrapped P2WSH + redeem_script = tx_create.vout[0].scriptPubKey; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // native P2WPKH + tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessV0KeyHash{pubkey}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_V0_KEYHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH-wrapped P2WPKH + redeem_script = tx_create.vout[0].scriptPubKey; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + 
BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2TR + tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessV1Taproot{XOnlyPubKey{pubkey}}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_V1_TAPROOT); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH-wrapped P2TR (undefined, non-standard) + redeem_script = tx_create.vout[0].scriptPubKey; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2A + tx_create.vout[0].scriptPubKey = GetScriptForDestination(PayToAnchor{}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::ANCHOR); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH-wrapped P2A (undefined, non-standard) + redeem_script = tx_create.vout[0].scriptPubKey; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + 
BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + + // Undefined version 1 witness program + tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessUnknown{1, {0x42, 0x42}}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_UNKNOWN); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH-wrapped undefined version 1 witness program + redeem_script = tx_create.vout[0].scriptPubKey; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // Various undefined version >1 32-byte witness programs. + const auto program{ToByteVector(XOnlyPubKey{pubkey})}; + for (int i{2}; i <= 16; ++i) { + tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessUnknown{i, program}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_UNKNOWN); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // It's also detected within P2SH. 
+ redeem_script = tx_create.vout[0].scriptPubKey; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + } +} + BOOST_AUTO_TEST_SUITE_END() From 020ed613bed1148888692cb37e3522202bfca44e Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Wed, 30 Jul 2025 15:56:57 -0400 Subject: [PATCH 071/115] validation: detect witness stripping without re-running Script checks Since it was introduced in 4eb515574e1012bc8ea5dafc3042dcdf4c766f26 (#18044), the detection of a stripped witness relies on running the Script checks 3 times. In the worst case, this consists in running Script validation 3 times for every single input. Detection of a stripped witness is necessary because in this case wtxid==txid, and the transaction's wtxid must not be added to the reject filter or it could allow a malicious peer to interfere with txid-based orphan resolution as used in 1p1c package relay. However it is not necessary to run Script validation to detect a stripped witness (much less so doing it 3 times in a row). There are 3 types of witness program: defined program types (Taproot, P2WPKH, P2WSH), undefined types, and the Pay-to-anchor carve-out. For defined program types, Script validation with an empty witness will always fail (by consensus). For undefined program types, Script validation is always going to fail regardless of the witness (by standardness). For P2A, an empty witness is never going to lead to a failure. 
Therefore it holds that we can always detect a stripped witness without re-running Script validation. However this might lead to more "false positives" (cases where we return witness stripping for an otherwise invalid transaction) than the existing implementation. For instance a transaction with one P2PKH input with an invalid signature and one P2WPKH input with its witness stripped. The existing implementation would treat it as consensus invalid while the implementation in this commit would always consider it witness stripped. Github-Pull: #33105 Rebased-From: 27aefac42505e9c083fa131d3d7edbec7803f3c0 --- src/validation.cpp | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index fde064458dc6..36734bc61222 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1236,13 +1236,8 @@ bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws) // Check input scripts and signatures. // This is done last to help prevent CPU exhaustion denial-of-service attacks. if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, ws.m_precomputed_txdata, GetValidationCache())) { - // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we - // need to turn both off, and compare against just turning off CLEANSTACK - // to see if the failure is specifically due to witness validation. - TxValidationState state_dummy; // Want reported failures to be from first CheckInputScripts - if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, ws.m_precomputed_txdata, GetValidationCache()) && - !CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, ws.m_precomputed_txdata, GetValidationCache())) { - // Only the witness is missing, so the transaction itself may be fine. 
+ // Detect a failure due to a missing witness so that p2p code can handle rejection caching appropriately. + if (!tx.HasWitness() && SpendsNonAnchorWitnessProg(tx, m_view)) { state.Invalid(TxValidationResult::TX_WITNESS_STRIPPED, state.GetRejectReason(), state.GetDebugMessage()); } From 5a0506eea03e423121dd2112c2ba5fb4320022e3 Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Fri, 25 Apr 2025 16:13:25 -0400 Subject: [PATCH 072/115] tests: add sighash caching tests to feature_taproot Github-Pull: #32473 Rebased-From: 9014d4016ad9351cb59b587541895e55f5d589cc --- test/functional/feature_taproot.py | 98 ++++++++++++++++++++++++++++-- 1 file changed, 93 insertions(+), 5 deletions(-) diff --git a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py index 4acb7524fba8..198bec7df530 100755 --- a/test/functional/feature_taproot.py +++ b/test/functional/feature_taproot.py @@ -71,6 +71,7 @@ OP_PUSHDATA1, OP_RETURN, OP_SWAP, + OP_TUCK, OP_VERIFY, SIGHASH_DEFAULT, SIGHASH_ALL, @@ -171,9 +172,9 @@ def get(ctx, name): ctx[name] = expr return expr.value -def getter(name): +def getter(name, **kwargs): """Return a callable that evaluates name in its passed context.""" - return lambda ctx: get(ctx, name) + return lambda ctx: get({**ctx, **kwargs}, name) def override(expr, **kwargs): """Return a callable that evaluates expr in a modified context.""" @@ -217,6 +218,20 @@ def default_controlblock(ctx): """Default expression for "controlblock": combine leafversion, negflag, pubkey_internal, merklebranch.""" return bytes([get(ctx, "leafversion") + get(ctx, "negflag")]) + get(ctx, "pubkey_internal") + get(ctx, "merklebranch") +def default_scriptcode_suffix(ctx): + """Default expression for "scriptcode_suffix", the actually used portion of the scriptcode.""" + scriptcode = get(ctx, "scriptcode") + codesepnum = get(ctx, "codesepnum") + if codesepnum == -1: + return scriptcode + codeseps = 0 + for (opcode, data, sop_idx) in scriptcode.raw_iter(): + if opcode == 
OP_CODESEPARATOR: + if codeseps == codesepnum: + return CScript(scriptcode[sop_idx+1:]) + codeseps += 1 + assert False + def default_sigmsg(ctx): """Default expression for "sigmsg": depending on mode, compute BIP341, BIP143, or legacy sigmsg.""" tx = get(ctx, "tx") @@ -236,12 +251,12 @@ def default_sigmsg(ctx): return TaprootSignatureMsg(tx, utxos, hashtype, idx, scriptpath=False, annex=annex) elif mode == "witv0": # BIP143 signature hash - scriptcode = get(ctx, "scriptcode") + scriptcode = get(ctx, "scriptcode_suffix") utxos = get(ctx, "utxos") return SegwitV0SignatureMsg(scriptcode, tx, idx, hashtype, utxos[idx].nValue) else: # Pre-segwit signature hash - scriptcode = get(ctx, "scriptcode") + scriptcode = get(ctx, "scriptcode_suffix") return LegacySignatureMsg(scriptcode, tx, idx, hashtype)[0] def default_sighash(ctx): @@ -301,7 +316,12 @@ def default_hashtype_actual(ctx): def default_bytes_hashtype(ctx): """Default expression for "bytes_hashtype": bytes([hashtype_actual]) if not 0, b"" otherwise.""" - return bytes([x for x in [get(ctx, "hashtype_actual")] if x != 0]) + mode = get(ctx, "mode") + hashtype_actual = get(ctx, "hashtype_actual") + if mode != "taproot" or hashtype_actual != 0: + return bytes([hashtype_actual]) + else: + return bytes() def default_sign(ctx): """Default expression for "sign": concatenation of signature and bytes_hashtype.""" @@ -379,6 +399,8 @@ def default_scriptsig(ctx): "key_tweaked": default_key_tweaked, # The tweak to use (None for script path spends, the actual tweak for key path spends). "tweak": default_tweak, + # The part of the scriptcode after the last executed OP_CODESEPARATOR. + "scriptcode_suffix": default_scriptcode_suffix, # The sigmsg value (preimage of sighash) "sigmsg": default_sigmsg, # The sighash value (32 bytes) @@ -409,6 +431,8 @@ def default_scriptsig(ctx): "annex": None, # The codeseparator position (only when mode=="taproot"). 
"codeseppos": -1, + # Which OP_CODESEPARATOR is the last executed one in the script (in legacy/P2SH/P2WSH). + "codesepnum": -1, # The redeemscript to add to the scriptSig (if P2SH; None implies not P2SH). "script_p2sh": None, # The script to add to the witness in (if P2WSH; None implies P2WPKH) @@ -1210,6 +1234,70 @@ def predict_sigops_ratio(n, dummy_size): standard = hashtype in VALID_SIGHASHES_ECDSA and (p2sh or witv0) add_spender(spenders, "compat/nocsa", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([OP_IF, OP_11, pubkey1, OP_CHECKSIGADD, OP_12, OP_EQUAL, OP_ELSE, pubkey1, OP_CHECKSIG, OP_ENDIF]), key=eckey1, sigops_weight=4-3*witv0, inputs=[getter("sign"), b''], failure={"inputs": [getter("sign"), b'\x01']}, **ERR_UNDECODABLE) + # == sighash caching tests == + + # Sighash caching in legacy. + for p2sh in [False, True]: + for witv0 in [False, True]: + eckey1, pubkey1 = generate_keypair(compressed=compressed) + for _ in range(10): + # Construct a script with 20 checksig operations (10 sighash types, each 2 times), + # randomly ordered and interleaved with 4 OP_CODESEPARATORS. + ops = [1, 2, 3, 0x21, 0x42, 0x63, 0x81, 0x83, 0xe1, 0xc2, -1, -1] * 2 + # Make sure no OP_CODESEPARATOR appears last. + while True: + random.shuffle(ops) + if ops[-1] != -1: + break + script = [pubkey1] + inputs = [] + codeseps = -1 + for pos, op in enumerate(ops): + if op == -1: + codeseps += 1 + script.append(OP_CODESEPARATOR) + elif pos + 1 != len(ops): + script += [OP_TUCK, OP_CHECKSIGVERIFY] + inputs.append(getter("sign", codesepnum=codeseps, hashtype=op)) + else: + script += [OP_CHECKSIG] + inputs.append(getter("sign", codesepnum=codeseps, hashtype=op)) + inputs.reverse() + script = CScript(script) + add_spender(spenders, "sighashcache/legacy", p2sh=p2sh, witv0=witv0, standard=False, script=script, inputs=inputs, key=eckey1, sigops_weight=12*8*(4-3*witv0), no_fail=True) + + # Sighash caching in tapscript. 
+ for _ in range(10): + # Construct a script with 700 checksig operations (7 sighash types, each 100 times), + # randomly ordered and interleaved with 100 OP_CODESEPARATORS. + ops = [0, 1, 2, 3, 0x81, 0x82, 0x83, -1] * 100 + # Make sure no OP_CODESEPARATOR appears last. + while True: + random.shuffle(ops) + if ops[-1] != -1: + break + script = [pubs[1]] + inputs = [] + opcount = 1 + codeseppos = -1 + for pos, op in enumerate(ops): + if op == -1: + codeseppos = opcount + opcount += 1 + script.append(OP_CODESEPARATOR) + elif pos + 1 != len(ops): + opcount += 2 + script += [OP_TUCK, OP_CHECKSIGVERIFY] + inputs.append(getter("sign", codeseppos=codeseppos, hashtype=op)) + else: + opcount += 1 + script += [OP_CHECKSIG] + inputs.append(getter("sign", codeseppos=codeseppos, hashtype=op)) + inputs.reverse() + script = CScript(script) + tap = taproot_construct(pubs[0], [("leaf", script)]) + add_spender(spenders, "sighashcache/taproot", tap=tap, leaf="leaf", inputs=inputs, standard=True, key=secs[1], no_fail=True) + return spenders From 354d46bc10c61c45140be7a425c5c29fed934d32 Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Fri, 25 Apr 2025 13:11:30 -0400 Subject: [PATCH 073/115] script: (refactor) prepare for introducing sighash midstate cache Github-Pull: #32473 Rebased-From: 8f3ddb0bccebc930836b4a6745a7cf29b41eb302 --- src/script/interpreter.cpp | 44 +++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index a35306b69355..0e304973a982 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -1569,6 +1569,18 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn { assert(nIn < txTo.vin.size()); + if (sigversion != SigVersion::WITNESS_V0) { + // Check for invalid use of SIGHASH_SINGLE + if ((nHashType & 0x1f) == SIGHASH_SINGLE) { + if (nIn >= txTo.vout.size()) { + // nOut out of range + return uint256::ONE; + } + } + } + 
+ HashWriter ss{}; + if (sigversion == SigVersion::WITNESS_V0) { uint256 hashPrevouts; uint256 hashSequence; @@ -1583,16 +1595,14 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn hashSequence = cacheready ? cache->hashSequence : SHA256Uint256(GetSequencesSHA256(txTo)); } - if ((nHashType & 0x1f) != SIGHASH_SINGLE && (nHashType & 0x1f) != SIGHASH_NONE) { hashOutputs = cacheready ? cache->hashOutputs : SHA256Uint256(GetOutputsSHA256(txTo)); } else if ((nHashType & 0x1f) == SIGHASH_SINGLE && nIn < txTo.vout.size()) { - HashWriter ss{}; - ss << txTo.vout[nIn]; - hashOutputs = ss.GetHash(); + HashWriter inner_ss{}; + inner_ss << txTo.vout[nIn]; + hashOutputs = inner_ss.GetHash(); } - HashWriter ss{}; // Version ss << txTo.version; // Input prevouts/nSequence (none/all, depending on flags) @@ -1609,26 +1619,16 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn ss << hashOutputs; // Locktime ss << txTo.nLockTime; - // Sighash type - ss << nHashType; - - return ss.GetHash(); - } + } else { + // Wrapper to serialize only the necessary parts of the transaction being signed + CTransactionSignatureSerializer txTmp(txTo, scriptCode, nIn, nHashType); - // Check for invalid use of SIGHASH_SINGLE - if ((nHashType & 0x1f) == SIGHASH_SINGLE) { - if (nIn >= txTo.vout.size()) { - // nOut out of range - return uint256::ONE; - } + // Serialize + ss << txTmp; } - // Wrapper to serialize only the necessary parts of the transaction being signed - CTransactionSignatureSerializer txTmp(txTo, scriptCode, nIn, nHashType); - - // Serialize and hash - HashWriter ss{}; - ss << txTmp << nHashType; + // Add sighash type and hash. 
+ ss << nHashType; return ss.GetHash(); } From ddfb9150b80c0c692c06b91cefa988c7773b15ff Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Fri, 25 Apr 2025 13:31:18 -0400 Subject: [PATCH 074/115] script: (optimization) introduce sighash midstate caching Github-Pull: #32473 Rebased-From: 92af9f74d74e76681f7d98f293eab226972137b4 --- src/script/interpreter.cpp | 43 ++++++++++++++++++++++++++++++++++++-- src/script/interpreter.h | 22 ++++++++++++++++++- 2 files changed, 62 insertions(+), 3 deletions(-) diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index 0e304973a982..4b7bfcedc6c5 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -1564,8 +1564,35 @@ bool SignatureHashSchnorr(uint256& hash_out, ScriptExecutionData& execdata, cons return true; } +int SigHashCache::CacheIndex(int32_t hash_type) const noexcept +{ + // Note that we do not distinguish between BASE and WITNESS_V0 to determine the cache index, + // because no input can simultaneously use both. 
+ return 3 * !!(hash_type & SIGHASH_ANYONECANPAY) + + 2 * ((hash_type & 0x1f) == SIGHASH_SINGLE) + + 1 * ((hash_type & 0x1f) == SIGHASH_NONE); +} + +bool SigHashCache::Load(int32_t hash_type, const CScript& script_code, HashWriter& writer) const noexcept +{ + auto& entry = m_cache_entries[CacheIndex(hash_type)]; + if (entry.has_value()) { + if (script_code == entry->first) { + writer = HashWriter(entry->second); + return true; + } + } + return false; +} + +void SigHashCache::Store(int32_t hash_type, const CScript& script_code, const HashWriter& writer) noexcept +{ + auto& entry = m_cache_entries[CacheIndex(hash_type)]; + entry.emplace(script_code, writer); +} + template -uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache) +uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache, SigHashCache* sighash_cache) { assert(nIn < txTo.vin.size()); @@ -1581,6 +1608,13 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn HashWriter ss{}; + // Try to compute using cached SHA256 midstate. + if (sighash_cache && sighash_cache->Load(nHashType, scriptCode, ss)) { + // Add sighash type and hash. + ss << nHashType; + return ss.GetHash(); + } + if (sigversion == SigVersion::WITNESS_V0) { uint256 hashPrevouts; uint256 hashSequence; @@ -1627,6 +1661,11 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn ss << txTmp; } + // If a cache object was provided, store the midstate there. + if (sighash_cache != nullptr) { + sighash_cache->Store(nHashType, scriptCode, ss); + } + // Add sighash type and hash. 
ss << nHashType; return ss.GetHash(); @@ -1661,7 +1700,7 @@ bool GenericTransactionSignatureChecker::CheckECDSASignature(const std::vecto // Witness sighashes need the amount. if (sigversion == SigVersion::WITNESS_V0 && amount < 0) return HandleMissingData(m_mdb); - uint256 sighash = SignatureHash(scriptCode, *txTo, nIn, nHashType, amount, sigversion, this->txdata); + uint256 sighash = SignatureHash(scriptCode, *txTo, nIn, nHashType, amount, sigversion, this->txdata, &m_sighash_cache); if (!VerifyECDSASignature(vchSig, pubkey, sighash)) return false; diff --git a/src/script/interpreter.h b/src/script/interpreter.h index e2fb1998f0b2..d613becb8f6c 100644 --- a/src/script/interpreter.h +++ b/src/script/interpreter.h @@ -239,8 +239,27 @@ extern const HashWriter HASHER_TAPSIGHASH; //!< Hasher with tag "TapSighash" pre extern const HashWriter HASHER_TAPLEAF; //!< Hasher with tag "TapLeaf" pre-fed to it. extern const HashWriter HASHER_TAPBRANCH; //!< Hasher with tag "TapBranch" pre-fed to it. +/** Data structure to cache SHA256 midstates for the ECDSA sighash calculations + * (bare, P2SH, P2WPKH, P2WSH). */ +class SigHashCache +{ + /** For each sighash mode (ALL, SINGLE, NONE, ALL|ANYONE, SINGLE|ANYONE, NONE|ANYONE), + * optionally store a scriptCode which the hash is for, plus a midstate for the SHA256 + * computation just before adding the hash_type itself. */ + std::optional> m_cache_entries[6]; + + /** Given a hash_type, find which of the 6 cache entries is to be used. */ + int CacheIndex(int32_t hash_type) const noexcept; + +public: + /** Load into writer the SHA256 midstate if found in this cache. */ + [[nodiscard]] bool Load(int32_t hash_type, const CScript& script_code, HashWriter& writer) const noexcept; + /** Store into this cache object the provided SHA256 midstate. 
*/ + void Store(int32_t hash_type, const CScript& script_code, const HashWriter& writer) noexcept; +}; + template -uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache = nullptr); +uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache = nullptr, SigHashCache* sighash_cache = nullptr); class BaseSignatureChecker { @@ -289,6 +308,7 @@ class GenericTransactionSignatureChecker : public BaseSignatureChecker unsigned int nIn; const CAmount amount; const PrecomputedTransactionData* txdata; + mutable SigHashCache m_sighash_cache; protected: virtual bool VerifyECDSASignature(const std::vector& vchSig, const CPubKey& vchPubKey, const uint256& sighash) const; From 73d3ab8fc93119f14f72a6c5f3cdd9eedcb36a20 Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Tue, 22 Jul 2025 18:40:23 -0400 Subject: [PATCH 075/115] qa: simple differential fuzzing for sighash with/without caching Github-Pull: #32473 Rebased-From: b221aa80a081579b8d3b460e3403f7ac0daa7139 --- src/test/fuzz/script_interpreter.cpp | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/test/fuzz/script_interpreter.cpp b/src/test/fuzz/script_interpreter.cpp index 9e3ad02b2e53..2c2ce855d47d 100644 --- a/src/test/fuzz/script_interpreter.cpp +++ b/src/test/fuzz/script_interpreter.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -45,3 +46,27 @@ FUZZ_TARGET(script_interpreter) (void)CastToBool(ConsumeRandomLengthByteVector(fuzzed_data_provider)); } } + +/** Differential fuzzing for SignatureHash with and without cache. */ +FUZZ_TARGET(sighash_cache) +{ + FuzzedDataProvider provider(buffer.data(), buffer.size()); + + // Get inputs to the sighash function that won't change across types. 
+ const auto scriptcode{ConsumeScript(provider)}; + const auto tx{ConsumeTransaction(provider, std::nullopt)}; + if (tx.vin.empty()) return; + const auto in_index{provider.ConsumeIntegralInRange(0, tx.vin.size() - 1)}; + const auto amount{ConsumeMoney(provider)}; + const auto sigversion{(SigVersion)provider.ConsumeIntegralInRange(0, 1)}; + + // Check the sighash function will give the same result for 100 fuzzer-generated hash types whether or not a cache is + // provided. The cache is conserved across types to exercise cache hits. + SigHashCache sighash_cache{}; + for (int i{0}; i < 100; ++i) { + const auto hash_type{((i & 2) == 0) ? provider.ConsumeIntegral() : provider.ConsumeIntegral()}; + const auto nocache_res{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion)}; + const auto cache_res{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &sighash_cache)}; + Assert(nocache_res == cache_res); + } +} From f24291bd96f92ecc0fc04317fd93747eeb2d557a Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Tue, 22 Jul 2025 11:23:16 -0400 Subject: [PATCH 076/115] qa: unit test sighash caching Github-Pull: #32473 Rebased-From: 83950275eddacac56c58a7a3648ed435a5593328 --- src/test/sighash_tests.cpp | 90 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp index d3320878ec0e..6e2ec800e746 100644 --- a/src/test/sighash_tests.cpp +++ b/src/test/sighash_tests.cpp @@ -207,4 +207,94 @@ BOOST_AUTO_TEST_CASE(sighash_from_data) BOOST_CHECK_MESSAGE(sh.GetHex() == sigHashHex, strTest); } } + +BOOST_AUTO_TEST_CASE(sighash_caching) +{ + // Get a script, transaction and parameters as inputs to the sighash function. 
+ CScript scriptcode; + RandomScript(scriptcode); + CScript diff_scriptcode{scriptcode}; + diff_scriptcode << OP_1; + CMutableTransaction tx; + RandomTransaction(tx, /*fSingle=*/false); + const auto in_index{static_cast(m_rng.randrange(tx.vin.size()))}; + const auto amount{m_rng.rand()}; + + // Exercise the sighash function under both legacy and segwit v0. + for (const auto sigversion: {SigVersion::BASE, SigVersion::WITNESS_V0}) { + // For each, run it against all the 6 standard hash types and a few additional random ones. + std::vector hash_types{{SIGHASH_ALL, SIGHASH_SINGLE, SIGHASH_NONE, SIGHASH_ALL | SIGHASH_ANYONECANPAY, + SIGHASH_SINGLE | SIGHASH_ANYONECANPAY, SIGHASH_NONE | SIGHASH_ANYONECANPAY, + SIGHASH_ANYONECANPAY, 0, std::numeric_limits::max()}}; + for (int i{0}; i < 10; ++i) { + hash_types.push_back(i % 2 == 0 ? m_rng.rand() : m_rng.rand()); + } + + // Reuse the same cache across script types. This must not cause any issue as the cached value for one hash type must never + // be confused for another (instantiating the cache within the loop instead would prevent testing this). + SigHashCache cache; + for (const auto hash_type: hash_types) { + const bool expect_one{sigversion == SigVersion::BASE && ((hash_type & 0x1f) == SIGHASH_SINGLE) && in_index >= tx.vout.size()}; + + // The result of computing the sighash should be the same with or without cache. + const auto sighash_with_cache{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache)}; + const auto sighash_no_cache{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, nullptr)}; + BOOST_CHECK_EQUAL(sighash_with_cache, sighash_no_cache); + + // Calling the cached version again should return the same value again. 
+ BOOST_CHECK_EQUAL(sighash_with_cache, SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache)); + + // While here we might as well also check that the result for legacy is the same as for the old SignatureHash() function. + if (sigversion == SigVersion::BASE) { + BOOST_CHECK_EQUAL(sighash_with_cache, SignatureHashOld(scriptcode, CTransaction(tx), in_index, hash_type)); + } + + // Calling with a different scriptcode (for instance in case a CODESEP is encountered) will not return the cache value but + // overwrite it. The sighash will always be different except in case of legacy SIGHASH_SINGLE bug. + const auto sighash_with_cache2{SignatureHash(diff_scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache)}; + const auto sighash_no_cache2{SignatureHash(diff_scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, nullptr)}; + BOOST_CHECK_EQUAL(sighash_with_cache2, sighash_no_cache2); + if (!expect_one) { + BOOST_CHECK_NE(sighash_with_cache, sighash_with_cache2); + } else { + BOOST_CHECK_EQUAL(sighash_with_cache, sighash_with_cache2); + BOOST_CHECK_EQUAL(sighash_with_cache, uint256::ONE); + } + + // Calling the cached version again should return the same value again. + BOOST_CHECK_EQUAL(sighash_with_cache2, SignatureHash(diff_scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache)); + + // And if we store a different value for this scriptcode and hash type it will return that instead. + { + HashWriter h{}; + h << 42; + cache.Store(hash_type, scriptcode, h); + const auto stored_hash{h.GetHash()}; + BOOST_CHECK(cache.Load(hash_type, scriptcode, h)); + const auto loaded_hash{h.GetHash()}; + BOOST_CHECK_EQUAL(stored_hash, loaded_hash); + } + + // And using this mutated cache with the sighash function will return the new value (except in the legacy SIGHASH_SINGLE bug + // case in which it'll return 1). 
+ if (!expect_one) { + BOOST_CHECK_NE(SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache), sighash_with_cache); + HashWriter h{}; + BOOST_CHECK(cache.Load(hash_type, scriptcode, h)); + h << hash_type; + const auto new_hash{h.GetHash()}; + BOOST_CHECK_EQUAL(SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache), new_hash); + } else { + BOOST_CHECK_EQUAL(SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache), uint256::ONE); + } + + // Wipe the cache and restore the correct cached value for this scriptcode and hash_type before starting the next iteration. + HashWriter dummy{}; + cache.Store(hash_type, diff_scriptcode, dummy); + (void)SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache); + BOOST_CHECK(cache.Load(hash_type, scriptcode, dummy) || expect_one); + } + } +} + BOOST_AUTO_TEST_SUITE_END() From 65bcbbc538234957b1f7f76b2f21ad7c138efb87 Mon Sep 17 00:00:00 2001 From: Anthony Towns Date: Wed, 23 Jul 2025 10:50:33 +1000 Subject: [PATCH 077/115] net_processing: drop MaybePunishNodeForTx Do not discourage nodes even when they send us consensus invalid transactions. Because we do not discourage nodes for transactions we consider non-standard, we don't get any DoS protection from this check in adversarial scenarios, so remove the check entirely both to simplify the code and reduce the risk of splitting the network due to changes in tx relay policy. 
NOTE: Backport required additional adjustment in test/functional/p2p_invalid_tx Github-Pull: #33050 Rebased-From: 266dd0e10d08c0bfde63205db15d6c210a021b90 --- src/net_processing.cpp | 34 ----------------------- test/functional/data/invalid_txs.py | 20 ++++++------- test/functional/p2p_invalid_tx.py | 5 ++-- test/functional/p2p_opportunistic_1p1c.py | 6 ++-- 4 files changed, 16 insertions(+), 49 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 1da3ec9d211e..b25819c821b4 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -553,12 +553,6 @@ class PeerManagerImpl final : public PeerManager bool via_compact_block, const std::string& message = "") EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - /** - * Potentially disconnect and discourage a node based on the contents of a TxValidationState object - */ - void MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state) - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - /** Maybe disconnect a peer and discourage future connections from its address. * * @param[in] pnode The node to check. 
@@ -1805,32 +1799,6 @@ void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati } } -void PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state) -{ - PeerRef peer{GetPeerRef(nodeid)}; - switch (state.GetResult()) { - case TxValidationResult::TX_RESULT_UNSET: - break; - // The node is providing invalid data: - case TxValidationResult::TX_CONSENSUS: - if (peer) Misbehaving(*peer, ""); - return; - // Conflicting (but not necessarily invalid) data or different policy: - case TxValidationResult::TX_INPUTS_NOT_STANDARD: - case TxValidationResult::TX_NOT_STANDARD: - case TxValidationResult::TX_MISSING_INPUTS: - case TxValidationResult::TX_PREMATURE_SPEND: - case TxValidationResult::TX_WITNESS_MUTATED: - case TxValidationResult::TX_WITNESS_STRIPPED: - case TxValidationResult::TX_CONFLICT: - case TxValidationResult::TX_MEMPOOL_POLICY: - case TxValidationResult::TX_NO_MEMPOOL: - case TxValidationResult::TX_RECONSIDERABLE: - case TxValidationResult::TX_UNKNOWN: - break; - } -} - bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex) { AssertLockHeld(cs_main); @@ -2987,8 +2955,6 @@ std::optional PeerManagerImpl::ProcessInvalidTx(NodeId if (peer) AddKnownTx(*peer, parent_txid); } - MaybePunishNodeForTx(nodeid, state); - return package_to_validate; } diff --git a/test/functional/data/invalid_txs.py b/test/functional/data/invalid_txs.py index d2d7202d8601..48ec88fde0d9 100644 --- a/test/functional/data/invalid_txs.py +++ b/test/functional/data/invalid_txs.py @@ -89,7 +89,7 @@ def get_tx(self, *args, **kwargs): class OutputMissing(BadTxTemplate): reject_reason = "bad-txns-vout-empty" - expect_disconnect = True + expect_disconnect = False def get_tx(self): tx = CTransaction() @@ -100,7 +100,7 @@ def get_tx(self): class InputMissing(BadTxTemplate): reject_reason = "bad-txns-vin-empty" - expect_disconnect = True + expect_disconnect = False # We use a blank transaction here to make sure # it is interpreted as a 
non-witness transaction. @@ -149,7 +149,7 @@ def get_tx(self): class DuplicateInput(BadTxTemplate): reject_reason = 'bad-txns-inputs-duplicate' - expect_disconnect = True + expect_disconnect = False def get_tx(self): tx = CTransaction() @@ -162,7 +162,7 @@ def get_tx(self): class PrevoutNullInput(BadTxTemplate): reject_reason = 'bad-txns-prevout-null' - expect_disconnect = True + expect_disconnect = False def get_tx(self): tx = CTransaction() @@ -188,7 +188,7 @@ def get_tx(self): class SpendTooMuch(BadTxTemplate): reject_reason = 'bad-txns-in-belowout' - expect_disconnect = True + expect_disconnect = False def get_tx(self): return create_tx_with_script( @@ -197,7 +197,7 @@ def get_tx(self): class CreateNegative(BadTxTemplate): reject_reason = 'bad-txns-vout-negative' - expect_disconnect = True + expect_disconnect = False def get_tx(self): return create_tx_with_script(self.spend_tx, 0, amount=-1) @@ -205,7 +205,7 @@ def get_tx(self): class CreateTooLarge(BadTxTemplate): reject_reason = 'bad-txns-vout-toolarge' - expect_disconnect = True + expect_disconnect = False def get_tx(self): return create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY + 1) @@ -213,7 +213,7 @@ def get_tx(self): class CreateSumTooLarge(BadTxTemplate): reject_reason = 'bad-txns-txouttotal-toolarge' - expect_disconnect = True + expect_disconnect = False def get_tx(self): tx = create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY) @@ -224,7 +224,7 @@ def get_tx(self): class InvalidOPIFConstruction(BadTxTemplate): reject_reason = "mandatory-script-verify-flag-failed (Invalid OP_IF construction)" - expect_disconnect = True + expect_disconnect = False valid_in_block = True def get_tx(self): @@ -266,7 +266,7 @@ def get_tx(self): class NonStandardAndInvalid(BadTxTemplate): """A non-standard transaction which is also consensus-invalid should return the consensus error.""" reject_reason = "mandatory-script-verify-flag-failed (OP_RETURN was encountered)" - expect_disconnect = True + 
expect_disconnect = False valid_in_block = False def get_tx(self): diff --git a/test/functional/p2p_invalid_tx.py b/test/functional/p2p_invalid_tx.py index ee8c6c16ca37..3785f725fefd 100755 --- a/test/functional/p2p_invalid_tx.py +++ b/test/functional/p2p_invalid_tx.py @@ -73,7 +73,7 @@ def run_test(self): tx = template.get_tx() node.p2ps[0].send_txs_and_test( [tx], node, success=False, - expect_disconnect=template.expect_disconnect, + expect_disconnect=False, reject_reason=template.reject_reason, ) @@ -144,7 +144,6 @@ def run_test(self): # tx_orphan_2_no_fee, because it has too low fee (p2ps[0] is not disconnected for relaying that tx) # tx_orphan_2_invalid, because it has negative fee (p2ps[1] is disconnected for relaying that tx) - self.wait_until(lambda: 1 == len(node.getpeerinfo()), timeout=12) # p2ps[1] is no longer connected assert_equal(expected_mempool, set(node.getrawmempool())) self.log.info('Test orphan pool overflow') @@ -165,7 +164,7 @@ def run_test(self): node.p2ps[0].send_txs_and_test([rejected_parent], node, success=False) self.log.info('Test that a peer disconnection causes erase its transactions from the orphan pool') - with node.assert_debug_log(['Erased 100 orphan transaction(s) from peer=26']): + with node.assert_debug_log(['Erased 100 orphan transaction(s) from peer=']): self.reconnect_p2p(num_connections=1) self.log.info('Test that a transaction in the orphan pool is included in a new tip block causes erase this transaction from the orphan pool') diff --git a/test/functional/p2p_opportunistic_1p1c.py b/test/functional/p2p_opportunistic_1p1c.py index 5fdbf74a5730..def70b733a64 100755 --- a/test/functional/p2p_opportunistic_1p1c.py +++ b/test/functional/p2p_opportunistic_1p1c.py @@ -251,8 +251,10 @@ def test_orphan_consensus_failure(self): assert tx_orphan_bad_wit.rehash() not in node_mempool # 5. Have the other peer send the tx too, so that tx_orphan_bad_wit package is attempted. 
- bad_orphan_sender.send_message(msg_tx(low_fee_parent["tx"])) - bad_orphan_sender.wait_for_disconnect() + bad_orphan_sender.send_and_ping(msg_tx(low_fee_parent["tx"])) + + # The bad orphan sender should not be disconnected. + bad_orphan_sender.sync_with_ping() # The peer that didn't provide the orphan should not be disconnected. parent_sender.sync_with_ping() From be0857745a5a0154d89a2aa9ddaa2a84e912598a Mon Sep 17 00:00:00 2001 From: Anthony Towns Date: Wed, 23 Jul 2025 10:51:06 +1000 Subject: [PATCH 078/115] validation: only check input scripts once Previously, we would check failing input scripts twice when considering a transaction for the mempool, in order to distinguish policy failures from consensus failures. This allowed us both to provide a different error message and to discourage peers for consensus failures. Because we are no longer discouraging peers for consensus failures during tx relay, and because checking a script can be expensive, only do this once. Also renames non-mandatory-script-verify-flag error to mempool-script-verify-flag-failed. 
NOTE: Backport required additional adjustment in test/functional/feature_block Github-Pull: #33050 Rebased-From: b29ae9efdfeeff774e32ee433ce67d8ed8ecd49f --- src/validation.cpp | 35 +++++++--------------------- test/functional/data/invalid_txs.py | 7 +++--- test/functional/feature_block.py | 5 +++- test/functional/feature_cltv.py | 18 +++++++------- test/functional/feature_dersig.py | 4 ++-- test/functional/feature_nulldummy.py | 12 +++++----- test/functional/feature_segwit.py | 24 +++++++++---------- test/functional/mempool_accept.py | 2 +- test/functional/p2p_segwit.py | 14 +++++------ test/functional/rpc_packages.py | 4 ++-- 10 files changed, 57 insertions(+), 68 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index 36734bc61222..ebeb67ac78a6 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2207,34 +2207,17 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, if (pvChecks) { pvChecks->emplace_back(std::move(check)); } else if (auto result = check(); result.has_value()) { + // Tx failures never trigger disconnections/bans. + // This is so that network splits aren't triggered + // either due to non-consensus relay policies (such as + // non-standard DER encodings or non-null dummy + // arguments) or due to new consensus rules introduced in + // soft forks. if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) { - // Check whether the failure was caused by a - // non-mandatory script verification check, such as - // non-standard DER encodings or non-null dummy - // arguments; if so, ensure we return NOT_STANDARD - // instead of CONSENSUS to avoid downstream users - // splitting the network between upgraded and - // non-upgraded nodes by banning CONSENSUS-failing - // data providers. 
- CScriptCheck check2(txdata.m_spent_outputs[i], tx, validation_cache.m_signature_cache, i, - flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata); - auto mandatory_result = check2(); - if (!mandatory_result.has_value()) { - return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(result->first)), result->second); - } else { - // If the second check failed, it failed due to a mandatory script verification - // flag, but the first check might have failed on a non-mandatory script - // verification flag. - // - // Avoid reporting a mandatory script check failure with a non-mandatory error - // string by reporting the error from the second check. - result = mandatory_result; - } + return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("mempool-script-verify-flag-failed (%s)", ScriptErrorString(result->first)), result->second); + } else { + return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(result->first)), result->second); } - - // MANDATORY flag failures correspond to - // TxValidationResult::TX_CONSENSUS. 
- return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(result->first)), result->second); } } diff --git a/test/functional/data/invalid_txs.py b/test/functional/data/invalid_txs.py index 48ec88fde0d9..bb1931be2df0 100644 --- a/test/functional/data/invalid_txs.py +++ b/test/functional/data/invalid_txs.py @@ -223,7 +223,7 @@ def get_tx(self): class InvalidOPIFConstruction(BadTxTemplate): - reject_reason = "mandatory-script-verify-flag-failed (Invalid OP_IF construction)" + reject_reason = "mempool-script-verify-flag-failed (Invalid OP_IF construction)" expect_disconnect = False valid_in_block = True @@ -264,8 +264,9 @@ def get_tx(self): }) class NonStandardAndInvalid(BadTxTemplate): - """A non-standard transaction which is also consensus-invalid should return the consensus error.""" - reject_reason = "mandatory-script-verify-flag-failed (OP_RETURN was encountered)" + """A non-standard transaction which is also consensus-invalid should return the first error.""" + reject_reason = "mempool-script-verify-flag-failed (Using OP_CODESEPARATOR in non-witness script)" + block_reject_reason = "mandatory-script-verify-flag-failed (OP_RETURN was encountered)" expect_disconnect = False valid_in_block = False diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index 2dfa568c5b6c..222b23878539 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -164,9 +164,12 @@ def run_test(self): self.sign_tx(badtx, attempt_spend_tx) badtx.rehash() badblock = self.update_block(blockname, [badtx]) + reject_reason = (template.block_reject_reason or template.reject_reason) + if reject_reason and reject_reason.startswith("mempool-script-verify-flag-failed"): + reject_reason = "mandatory-script-verify-flag-failed" + reject_reason[33:] self.send_blocks( [badblock], success=False, - reject_reason=(template.block_reject_reason or template.reject_reason), + 
reject_reason=reject_reason, reconnect=True, timeout=2) self.move_tip(2) diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py index 60b3fb4e20bf..81cc10a5adfe 100755 --- a/test/functional/feature_cltv.py +++ b/test/functional/feature_cltv.py @@ -154,12 +154,14 @@ def run_test(self): coin_vout = coin.prevout.n cltv_invalidate(spendtx, i) + blk_rej = "mandatory-script-verify-flag-failed" + tx_rej = "mempool-script-verify-flag-failed" expected_cltv_reject_reason = [ - "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", - "mandatory-script-verify-flag-failed (Negative locktime)", - "mandatory-script-verify-flag-failed (Locktime requirement not satisfied)", - "mandatory-script-verify-flag-failed (Locktime requirement not satisfied)", - "mandatory-script-verify-flag-failed (Locktime requirement not satisfied)", + " (Operation not valid with the current stack size)", + " (Negative locktime)", + " (Locktime requirement not satisfied)", + " (Locktime requirement not satisfied)", + " (Locktime requirement not satisfied)", ][i] # First we show that this tx is valid except for CLTV by getting it # rejected from the mempool for exactly that reason. 
@@ -170,8 +172,8 @@ def run_test(self): 'txid': spendtx_txid, 'wtxid': spendtx_wtxid, 'allowed': False, - 'reject-reason': expected_cltv_reject_reason, - 'reject-details': expected_cltv_reject_reason + f", input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:{coin_vout}" + 'reject-reason': tx_rej + expected_cltv_reject_reason, + 'reject-details': tx_rej + expected_cltv_reject_reason + f", input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:{coin_vout}" }], self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0), ) @@ -181,7 +183,7 @@ def run_test(self): block.hashMerkleRoot = block.calc_merkle_root() block.solve() - with self.nodes[0].assert_debug_log(expected_msgs=[f'Block validation error: {expected_cltv_reject_reason}']): + with self.nodes[0].assert_debug_log(expected_msgs=[f'Block validation error: {blk_rej + expected_cltv_reject_reason}']): peer.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) peer.sync_with_ping() diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py index 0c3b0f12243a..2a7eb0d0f473 100755 --- a/test/functional/feature_dersig.py +++ b/test/functional/feature_dersig.py @@ -123,8 +123,8 @@ def run_test(self): 'txid': spendtx_txid, 'wtxid': spendtx_wtxid, 'allowed': False, - 'reject-reason': 'mandatory-script-verify-flag-failed (Non-canonical DER signature)', - 'reject-details': 'mandatory-script-verify-flag-failed (Non-canonical DER signature), ' + + 'reject-reason': 'mempool-script-verify-flag-failed (Non-canonical DER signature)', + 'reject-details': 'mempool-script-verify-flag-failed (Non-canonical DER signature), ' + f"input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:0" }], self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0), diff --git a/test/functional/feature_nulldummy.py b/test/functional/feature_nulldummy.py index 885bc4855b02..e7fe7d65e488 100755 
--- a/test/functional/feature_nulldummy.py +++ b/test/functional/feature_nulldummy.py @@ -37,8 +37,8 @@ from test_framework.wallet import getnewdestination from test_framework.wallet_util import generate_keypair -NULLDUMMY_ERROR = "mandatory-script-verify-flag-failed (Dummy CHECKMULTISIG argument must be zero)" - +NULLDUMMY_TX_ERROR = "mempool-script-verify-flag-failed (Dummy CHECKMULTISIG argument must be zero)" +NULLDUMMY_BLK_ERROR = "mandatory-script-verify-flag-failed (Dummy CHECKMULTISIG argument must be zero)" def invalidate_nulldummy_tx(tx): """Transform a NULLDUMMY compliant tx (i.e. scriptSig starts with OP_0) @@ -105,7 +105,7 @@ def run_test(self): addr=self.ms_address, amount=47, privkey=self.privkey) invalidate_nulldummy_tx(test2tx) - assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test2tx.serialize_with_witness().hex(), 0) + assert_raises_rpc_error(-26, NULLDUMMY_TX_ERROR, self.nodes[0].sendrawtransaction, test2tx.serialize_with_witness().hex(), 0) self.log.info(f"Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [{COINBASE_MATURITY + 4}]") self.block_submit(self.nodes[0], [test2tx], accept=True) @@ -116,7 +116,7 @@ def run_test(self): privkey=self.privkey) test6txs = [CTransaction(test4tx)] invalidate_nulldummy_tx(test4tx) - assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test4tx.serialize_with_witness().hex(), 0) + assert_raises_rpc_error(-26, NULLDUMMY_TX_ERROR, self.nodes[0].sendrawtransaction, test4tx.serialize_with_witness().hex(), 0) self.block_submit(self.nodes[0], [test4tx], accept=False) self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation") @@ -126,7 +126,7 @@ def run_test(self): privkey=self.privkey) test6txs.append(CTransaction(test5tx)) test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01' - assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, 
test5tx.serialize_with_witness().hex(), 0) + assert_raises_rpc_error(-26, NULLDUMMY_TX_ERROR, self.nodes[0].sendrawtransaction, test5tx.serialize_with_witness().hex(), 0) self.block_submit(self.nodes[0], [test5tx], with_witness=True, accept=False) self.log.info(f"Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [{COINBASE_MATURITY + 5}]") @@ -142,7 +142,7 @@ def block_submit(self, node, txs, *, with_witness=False, accept): if with_witness: add_witness_commitment(block) block.solve() - assert_equal(None if accept else NULLDUMMY_ERROR, node.submitblock(block.serialize().hex())) + assert_equal(None if accept else NULLDUMMY_BLK_ERROR, node.submitblock(block.serialize().hex())) if accept: assert_equal(node.getbestblockhash(), block.hash) self.lastblockhash = block.hash diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py index f98f326e8f44..cc664a83aa3e 100755 --- a/test/functional/feature_segwit.py +++ b/test/functional/feature_segwit.py @@ -193,8 +193,8 @@ def run_test(self): assert_equal(self.nodes[2].getbalance(), 20 * Decimal("49.999")) self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid") - self.fail_accept(self.nodes[2], "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_2][P2WPKH][1], sign=False) - self.fail_accept(self.nodes[2], "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_2][P2WSH][1], sign=False) + self.fail_accept(self.nodes[2], "mempool-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_2][P2WPKH][1], sign=False) + self.fail_accept(self.nodes[2], "mempool-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_2][P2WSH][1], sign=False) self.generate(self.nodes[0], 1) # block 164 @@ -213,13 +213,13 @@ def run_test(self): self.log.info("Verify default 
node can't accept txs with missing witness") # unsigned, no scriptsig - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Witness program hash mismatch)", wit_ids[NODE_0][P2WPKH][0], sign=False) - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Witness program was passed an empty witness)", wit_ids[NODE_0][P2WSH][0], sign=False) - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_0][P2WPKH][0], sign=False) - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_0][P2WSH][0], sign=False) + self.fail_accept(self.nodes[0], "mempool-script-verify-flag-failed (Witness program hash mismatch)", wit_ids[NODE_0][P2WPKH][0], sign=False) + self.fail_accept(self.nodes[0], "mempool-script-verify-flag-failed (Witness program was passed an empty witness)", wit_ids[NODE_0][P2WSH][0], sign=False) + self.fail_accept(self.nodes[0], "mempool-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_0][P2WPKH][0], sign=False) + self.fail_accept(self.nodes[0], "mempool-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_0][P2WSH][0], sign=False) # unsigned with redeem script - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Witness program hash mismatch)", p2sh_ids[NODE_0][P2WPKH][0], sign=False, redeem_script=witness_script(False, self.pubkey[0])) - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Witness program was passed an empty witness)", p2sh_ids[NODE_0][P2WSH][0], sign=False, redeem_script=witness_script(True, self.pubkey[0])) + self.fail_accept(self.nodes[0], "mempool-script-verify-flag-failed (Witness program hash mismatch)", p2sh_ids[NODE_0][P2WPKH][0], sign=False, redeem_script=witness_script(False, self.pubkey[0])) + self.fail_accept(self.nodes[0], 
"mempool-script-verify-flag-failed (Witness program was passed an empty witness)", p2sh_ids[NODE_0][P2WSH][0], sign=False, redeem_script=witness_script(True, self.pubkey[0])) # Coinbase contains the witness commitment nonce, check that RPC shows us coinbase_txid = self.nodes[2].getblock(blockhash)['tx'][0] @@ -230,10 +230,10 @@ def run_test(self): assert_equal(witnesses[0], '00' * 32) self.log.info("Verify witness txs without witness data are invalid after the fork") - self.fail_accept(self.nodes[2], 'mandatory-script-verify-flag-failed (Witness program hash mismatch)', wit_ids[NODE_2][P2WPKH][2], sign=False) - self.fail_accept(self.nodes[2], 'mandatory-script-verify-flag-failed (Witness program was passed an empty witness)', wit_ids[NODE_2][P2WSH][2], sign=False) - self.fail_accept(self.nodes[2], 'mandatory-script-verify-flag-failed (Witness program hash mismatch)', p2sh_ids[NODE_2][P2WPKH][2], sign=False, redeem_script=witness_script(False, self.pubkey[2])) - self.fail_accept(self.nodes[2], 'mandatory-script-verify-flag-failed (Witness program was passed an empty witness)', p2sh_ids[NODE_2][P2WSH][2], sign=False, redeem_script=witness_script(True, self.pubkey[2])) + self.fail_accept(self.nodes[2], 'mempool-script-verify-flag-failed (Witness program hash mismatch)', wit_ids[NODE_2][P2WPKH][2], sign=False) + self.fail_accept(self.nodes[2], 'mempool-script-verify-flag-failed (Witness program was passed an empty witness)', wit_ids[NODE_2][P2WSH][2], sign=False) + self.fail_accept(self.nodes[2], 'mempool-script-verify-flag-failed (Witness program hash mismatch)', p2sh_ids[NODE_2][P2WPKH][2], sign=False, redeem_script=witness_script(False, self.pubkey[2])) + self.fail_accept(self.nodes[2], 'mempool-script-verify-flag-failed (Witness program was passed an empty witness)', p2sh_ids[NODE_2][P2WSH][2], sign=False, redeem_script=witness_script(True, self.pubkey[2])) self.log.info("Verify default node can now use witness txs") self.success_mine(self.nodes[0], 
wit_ids[NODE_0][P2WPKH][0], True) diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py index 2155b8de6b1b..32d8f7f6eac9 100755 --- a/test/functional/mempool_accept.py +++ b/test/functional/mempool_accept.py @@ -441,7 +441,7 @@ def run_test(self): nested_anchor_spend.rehash() self.check_mempool_result( - result_expected=[{'txid': nested_anchor_spend.rehash(), 'allowed': False, 'reject-reason': 'non-mandatory-script-verify-flag (Witness version reserved for soft-fork upgrades)'}], + result_expected=[{'txid': nested_anchor_spend.rehash(), 'allowed': False, 'reject-reason': 'mempool-script-verify-flag-failed (Witness version reserved for soft-fork upgrades)'}], rawtxs=[nested_anchor_spend.serialize().hex()], maxfeerate=0, ) diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index e8f7f7e0f4e7..7815d6ea84ec 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -704,20 +704,20 @@ def test_p2sh_witness(self): # segwit activation. Note that older bitcoind's that are not # segwit-aware would also reject this for failing CLEANSTACK. with self.nodes[0].assert_debug_log( - expected_msgs=[spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Witness program was passed an empty witness)']): + expected_msgs=[spend_tx.hash, 'was not accepted: mempool-script-verify-flag-failed (Witness program was passed an empty witness)']): test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False) # The transaction was detected as witness stripped above and not added to the reject # filter. Trying again will check it again and result in the same error. 
with self.nodes[0].assert_debug_log( - expected_msgs=[spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Witness program was passed an empty witness)']): + expected_msgs=[spend_tx.hash, 'was not accepted: mempool-script-verify-flag-failed (Witness program was passed an empty witness)']): test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False) # Try to put the witness script in the scriptSig, should also fail. spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a']) spend_tx.rehash() with self.nodes[0].assert_debug_log( - expected_msgs=[spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)']): + expected_msgs=[spend_tx.hash, 'was not accepted: mempool-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)']): test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False) # Now put the witness script in the witness, should succeed after @@ -1291,7 +1291,7 @@ def test_tx_relay_after_segwit_activation(self): # Now do the opposite: strip the witness entirely. This will be detected as witness stripping and # the (w)txid won't be added to the reject filter: we can try again and get the same error. 
tx3.wit.vtxinwit[0].scriptWitness.stack = [] - reason = "was not accepted: mandatory-script-verify-flag-failed (Witness program was passed an empty witness)" + reason = "was not accepted: mempool-script-verify-flag-failed (Witness program was passed an empty witness)" test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=False, accepted=False, reason=reason) test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=False, accepted=False, reason=reason) @@ -1490,7 +1490,7 @@ def test_uncompressed_pubkey(self): sign_input_segwitv0(tx2, 0, script, tx.vout[0].nValue, key) # Should fail policy test. - test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') + test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'mempool-script-verify-flag-failed (Using non-compressed keys in segwit)') # But passes consensus. block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx2]) @@ -1509,7 +1509,7 @@ def test_uncompressed_pubkey(self): sign_p2pk_witness_input(witness_script, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key) # Should fail policy test. - test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') + test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'mempool-script-verify-flag-failed (Using non-compressed keys in segwit)') # But passes consensus. block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx3]) @@ -1526,7 +1526,7 @@ def test_uncompressed_pubkey(self): sign_p2pk_witness_input(witness_script, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key) # Should fail policy test. 
- test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') + test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'mempool-script-verify-flag-failed (Using non-compressed keys in segwit)') block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx4]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) diff --git a/test/functional/rpc_packages.py b/test/functional/rpc_packages.py index a2f9210f94d9..539e9d09add6 100755 --- a/test/functional/rpc_packages.py +++ b/test/functional/rpc_packages.py @@ -122,8 +122,8 @@ def test_independent(self, coin): assert_equal(testres_bad_sig, self.independent_txns_testres + [{ "txid": tx_bad_sig_txid, "wtxid": tx_bad_sig_wtxid, "allowed": False, - "reject-reason": "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", - "reject-details": "mandatory-script-verify-flag-failed (Operation not valid with the current stack size), " + + "reject-reason": "mempool-script-verify-flag-failed (Operation not valid with the current stack size)", + "reject-details": "mempool-script-verify-flag-failed (Operation not valid with the current stack size), " + f"input 0 of {tx_bad_sig_txid} (wtxid {tx_bad_sig_wtxid}), spending {coin['txid']}:{coin['vout']}" }]) From 6f136cd3914b001752cce02adde00fccaed0ad48 Mon Sep 17 00:00:00 2001 From: Anthony Towns Date: Fri, 8 Aug 2025 23:15:17 +1000 Subject: [PATCH 079/115] tests: drop expect_disconnect behaviour for tx relay Github-Pull: #33050 Rebased-From: 876dbdfb4702410dfd4037614dc9298a0c09c63e --- test/functional/data/invalid_txs.py | 18 ------------------ test/functional/p2p_invalid_tx.py | 5 ----- test/functional/test_framework/p2p.py | 8 ++------ 3 files changed, 2 insertions(+), 29 deletions(-) diff --git a/test/functional/data/invalid_txs.py b/test/functional/data/invalid_txs.py index 
bb1931be2df0..f96059d4ee80 100644 --- a/test/functional/data/invalid_txs.py +++ b/test/functional/data/invalid_txs.py @@ -69,9 +69,6 @@ class BadTxTemplate: # Only specified if it differs from mempool acceptance error. block_reject_reason = "" - # Do we expect to be disconnected after submitting this tx? - expect_disconnect = False - # Is this tx considered valid when included in a block, but not for acceptance into # the mempool (i.e. does it violate policy but not consensus)? valid_in_block = False @@ -89,7 +86,6 @@ def get_tx(self, *args, **kwargs): class OutputMissing(BadTxTemplate): reject_reason = "bad-txns-vout-empty" - expect_disconnect = False def get_tx(self): tx = CTransaction() @@ -100,7 +96,6 @@ def get_tx(self): class InputMissing(BadTxTemplate): reject_reason = "bad-txns-vin-empty" - expect_disconnect = False # We use a blank transaction here to make sure # it is interpreted as a non-witness transaction. @@ -117,7 +112,6 @@ def get_tx(self): # tree depth commitment (CVE-2017-12842) class SizeTooSmall(BadTxTemplate): reject_reason = "tx-size-small" - expect_disconnect = False valid_in_block = True def get_tx(self): @@ -134,7 +128,6 @@ class BadInputOutpointIndex(BadTxTemplate): # Won't be rejected - nonexistent outpoint index is treated as an orphan since the coins # database can't distinguish between spent outpoints and outpoints which never existed. reject_reason = None - expect_disconnect = False def get_tx(self): num_indices = len(self.spend_tx.vin) @@ -149,7 +142,6 @@ def get_tx(self): class DuplicateInput(BadTxTemplate): reject_reason = 'bad-txns-inputs-duplicate' - expect_disconnect = False def get_tx(self): tx = CTransaction() @@ -162,7 +154,6 @@ def get_tx(self): class PrevoutNullInput(BadTxTemplate): reject_reason = 'bad-txns-prevout-null' - expect_disconnect = False def get_tx(self): tx = CTransaction() @@ -175,7 +166,6 @@ def get_tx(self): class NonexistentInput(BadTxTemplate): reject_reason = None # Added as an orphan tx. 
- expect_disconnect = False def get_tx(self): tx = CTransaction() @@ -188,7 +178,6 @@ def get_tx(self): class SpendTooMuch(BadTxTemplate): reject_reason = 'bad-txns-in-belowout' - expect_disconnect = False def get_tx(self): return create_tx_with_script( @@ -197,7 +186,6 @@ def get_tx(self): class CreateNegative(BadTxTemplate): reject_reason = 'bad-txns-vout-negative' - expect_disconnect = False def get_tx(self): return create_tx_with_script(self.spend_tx, 0, amount=-1) @@ -205,7 +193,6 @@ def get_tx(self): class CreateTooLarge(BadTxTemplate): reject_reason = 'bad-txns-vout-toolarge' - expect_disconnect = False def get_tx(self): return create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY + 1) @@ -213,7 +200,6 @@ def get_tx(self): class CreateSumTooLarge(BadTxTemplate): reject_reason = 'bad-txns-txouttotal-toolarge' - expect_disconnect = False def get_tx(self): tx = create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY) @@ -224,7 +210,6 @@ def get_tx(self): class InvalidOPIFConstruction(BadTxTemplate): reject_reason = "mempool-script-verify-flag-failed (Invalid OP_IF construction)" - expect_disconnect = False valid_in_block = True def get_tx(self): @@ -236,7 +221,6 @@ def get_tx(self): class TooManySigops(BadTxTemplate): reject_reason = "bad-txns-too-many-sigops" block_reject_reason = "bad-blk-sigops, out-of-bounds SigOpCount" - expect_disconnect = False def get_tx(self): lotsa_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS)) @@ -258,7 +242,6 @@ def get_tx(self): return type('DisabledOpcode_' + str(opcode), (BadTxTemplate,), { 'reject_reason': "disabled opcode", - 'expect_disconnect': True, 'get_tx': get_tx, 'valid_in_block' : True }) @@ -267,7 +250,6 @@ class NonStandardAndInvalid(BadTxTemplate): """A non-standard transaction which is also consensus-invalid should return the first error.""" reject_reason = "mempool-script-verify-flag-failed (Using OP_CODESEPARATOR in non-witness script)" block_reject_reason = "mandatory-script-verify-flag-failed 
(OP_RETURN was encountered)" - expect_disconnect = False valid_in_block = False def get_tx(self): diff --git a/test/functional/p2p_invalid_tx.py b/test/functional/p2p_invalid_tx.py index 3785f725fefd..439735d178a3 100755 --- a/test/functional/p2p_invalid_tx.py +++ b/test/functional/p2p_invalid_tx.py @@ -73,14 +73,9 @@ def run_test(self): tx = template.get_tx() node.p2ps[0].send_txs_and_test( [tx], node, success=False, - expect_disconnect=False, reject_reason=template.reject_reason, ) - if template.expect_disconnect: - self.log.info("Reconnecting to peer") - self.reconnect_p2p() - # Make two p2p connections to provide the node with orphans # * p2ps[0] will send valid orphan txs (one with low fee) # * p2ps[1] will send an invalid orphan tx (and is later disconnected for that) diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py index 207d19137b18..c5e518238ce2 100755 --- a/test/functional/test_framework/p2p.py +++ b/test/functional/test_framework/p2p.py @@ -893,13 +893,12 @@ def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, else: assert node.getbestblockhash() != blocks[-1].hash - def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None): + def send_txs_and_test(self, txs, node, *, success=True, reject_reason=None): """Send txs to test node and test whether they're accepted to the mempool. 
- add all txs to our tx_store - send tx messages for all txs - if success is True/False: assert that the txs are/are not accepted to the mempool - - if expect_disconnect is True: Skip the sync with ping - if reject_reason is set: assert that the correct reject message is logged.""" with p2p_lock: @@ -911,10 +910,7 @@ def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, for tx in txs: self.send_message(msg_tx(tx)) - if expect_disconnect: - self.wait_for_disconnect() - else: - self.sync_with_ping() + self.sync_with_ping() raw_mempool = node.getrawmempool() if success: From 2cf352fd8e6a77003e38d954b6c879b20d4b960a Mon Sep 17 00:00:00 2001 From: will Date: Tue, 14 Oct 2025 13:57:20 +0100 Subject: [PATCH 080/115] doc: document capnproto and libmultiprocess deps These dependencies are both undocumented, and libmultiprocess has a relatively special requirement in that v6.0 and later are known to not work with v29.x of Bitcoin Core due to https://github.com/bitcoin-core/libmultiprocess/pull/160 --- depends/packages/native_libmultiprocess.mk | 4 ++-- doc/dependencies.md | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/depends/packages/native_libmultiprocess.mk b/depends/packages/native_libmultiprocess.mk index 4467dee76f50..a76304f9f050 100644 --- a/depends/packages/native_libmultiprocess.mk +++ b/depends/packages/native_libmultiprocess.mk @@ -1,8 +1,8 @@ package=native_libmultiprocess -$(package)_version=1954f7f65661d49e700c344eae0fc8092decf975 +$(package)_version=v5.0 $(package)_download_path=https://github.com/bitcoin-core/libmultiprocess/archive $(package)_file_name=$($(package)_version).tar.gz -$(package)_sha256_hash=fc014bd74727c1d5d30b396813685012c965d079244dd07b53bc1c75c610a2cb +$(package)_sha256_hash=401984715b271a3446e1910f21adf048ba390d31cc93cc3073742e70d56fa3ea $(package)_dependencies=native_capnp define $(package)_config_cmds diff --git a/doc/dependencies.md b/doc/dependencies.md index d3f6b74367ba..a042f8f2ea7c 
100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -36,3 +36,7 @@ Bitcoin Core requires one of the following compilers. | [SQLite](../depends/packages/sqlite.mk) (wallet) | [link](https://sqlite.org) | [3.38.5](https://github.com/bitcoin/bitcoin/pull/25378) | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | No | | Python (scripts, tests) | [link](https://www.python.org) | N/A | [3.10](https://github.com/bitcoin/bitcoin/pull/30527) | No | | [systemtap](../depends/packages/systemtap.mk) ([tracing](tracing.md)) | [link](https://sourceware.org/systemtap/) | [4.8](https://github.com/bitcoin/bitcoin/pull/26945)| N/A | No | +| [capnproto](../depends/packages/capnp.mk) ([multiprocess](multiprocess.md)) | [link](https://capnproto.org/) | [1.2.0](https://github.com/bitcoin/bitcoin/pull/32760)| [0.7.0](https://github.com/bitcoin-core/libmultiprocess/pull/88) | No | +| [libmultiprocess](../depends/packages/libmultiprocess.mk) ([multiprocess](multiprocess.md)) | [link](https://github.com/bitcoin-core/libmultiprocess) | [5.0](https://github.com/bitcoin/bitcoin/pull/31945)| [v5.0-pre1](https://github.com/bitcoin/bitcoin/pull/31740)* | No | + +\* Libmultiprocess 5.x versions should be compatible, but 6.0 and later are not due to bitcoin-core/libmultiprocess#160. 
From abaf1e37a79bdf7481cef1cd5ae5e102fdce09be Mon Sep 17 00:00:00 2001 From: furszy Date: Wed, 11 Dec 2024 13:05:21 -0500 Subject: [PATCH 081/115] refactor: remove sqlite dir path back-and-forth conversion Github-Pull: bitcoin/bitcoin#31423 Rebased-From: d04f6a97ba9a55aa9455e1a805feeed4d630f59a --- src/wallet/sqlite.cpp | 6 +++--- src/wallet/sqlite.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp index a8c9f8a8ab6e..896a2fc0f33f 100644 --- a/src/wallet/sqlite.cpp +++ b/src/wallet/sqlite.cpp @@ -112,12 +112,12 @@ Mutex SQLiteDatabase::g_sqlite_mutex; int SQLiteDatabase::g_sqlite_count = 0; SQLiteDatabase::SQLiteDatabase(const fs::path& dir_path, const fs::path& file_path, const DatabaseOptions& options, bool mock) - : WalletDatabase(), m_mock(mock), m_dir_path(fs::PathToString(dir_path)), m_file_path(fs::PathToString(file_path)), m_write_semaphore(1), m_use_unsafe_sync(options.use_unsafe_sync) + : WalletDatabase(), m_mock(mock), m_dir_path(dir_path), m_file_path(fs::PathToString(file_path)), m_write_semaphore(1), m_use_unsafe_sync(options.use_unsafe_sync) { { LOCK(g_sqlite_mutex); LogPrintf("Using SQLite Version %s\n", SQLiteDatabaseVersion()); - LogPrintf("Using wallet %s\n", m_dir_path); + LogPrintf("Using wallet %s\n", fs::PathToString(m_dir_path)); if (++g_sqlite_count == 1) { // Setup logging @@ -253,7 +253,7 @@ void SQLiteDatabase::Open() if (m_db == nullptr) { if (!m_mock) { - TryCreateDirectories(fs::PathFromString(m_dir_path)); + TryCreateDirectories(m_dir_path); } int ret = sqlite3_open_v2(m_file_path.c_str(), &m_db, flags, nullptr); if (ret != SQLITE_OK) { diff --git a/src/wallet/sqlite.h b/src/wallet/sqlite.h index 78a3accf890e..eb3c0217f55f 100644 --- a/src/wallet/sqlite.h +++ b/src/wallet/sqlite.h @@ -105,7 +105,7 @@ class SQLiteDatabase : public WalletDatabase private: const bool m_mock{false}; - const std::string m_dir_path; + const fs::path m_dir_path; const std::string m_file_path; 
From 01c04d32aa3e1c323b304b1c6a573dd933b8b598 Mon Sep 17 00:00:00 2001 From: furszy Date: Wed, 11 Dec 2024 13:10:01 -0500 Subject: [PATCH 082/115] wallet: introduce method to return all db created files Github-Pull: bitcoin/bitcoin#31423 Rebased-From: 1de423e0a08bbc63eed36c8772e9ef8b48e80fb8 --- src/wallet/bdb.h | 15 +++++++++++++++ src/wallet/db.h | 3 +++ src/wallet/migrate.h | 1 + src/wallet/salvage.cpp | 1 + src/wallet/sqlite.h | 8 ++++++++ src/wallet/test/util.h | 1 + 6 files changed, 29 insertions(+) diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h index f3fe8a19c198..ec773fd1770f 100644 --- a/src/wallet/bdb.h +++ b/src/wallet/bdb.h @@ -132,6 +132,21 @@ class BerkeleyDatabase : public WalletDatabase /** Return path to main database filename */ std::string Filename() override { return fs::PathToString(env->Directory() / m_filename); } + std::vector Files() override + { + std::vector files; + files.emplace_back(env->Directory() / m_filename); + if (env->m_databases.size() == 1) { + files.emplace_back(env->Directory() / "db.log"); + files.emplace_back(env->Directory() / ".walletlock"); + files.emplace_back(env->Directory() / "database" / "log.0000000001"); + files.emplace_back(env->Directory() / "database"); + // Note that this list is not exhaustive as BDB may create more log files, and possibly other ones too + // However it should be good enough for the only calls to Files() + } + return files; + } + std::string Format() override { return "bdb"; } /** * Pointer to shared database environment. diff --git a/src/wallet/db.h b/src/wallet/db.h index e8790006a4d8..5f13ca29ff9b 100644 --- a/src/wallet/db.h +++ b/src/wallet/db.h @@ -170,6 +170,9 @@ class WalletDatabase /** Return path to main database file for logs and error messages. 
*/ virtual std::string Filename() = 0; + /** Return paths to all database created files */ + virtual std::vector Files() = 0; + virtual std::string Format() = 0; std::atomic nUpdateCounter; diff --git a/src/wallet/migrate.h b/src/wallet/migrate.h index 16eadeb019d5..82359f9d4bb4 100644 --- a/src/wallet/migrate.h +++ b/src/wallet/migrate.h @@ -65,6 +65,7 @@ class BerkeleyRODatabase : public WalletDatabase /** Return path to main database file for logs and error messages. */ std::string Filename() override { return fs::PathToString(m_filepath); } + std::vector Files() override { return {m_filepath}; } std::string Format() override { return "bdb_ro"; } diff --git a/src/wallet/salvage.cpp b/src/wallet/salvage.cpp index b924239073c1..443f80893ff9 100644 --- a/src/wallet/salvage.cpp +++ b/src/wallet/salvage.cpp @@ -63,6 +63,7 @@ class DummyDatabase : public WalletDatabase void IncrementUpdateCounter() override { ++nUpdateCounter; } void ReloadDbEnv() override {} std::string Filename() override { return "dummy"; } + std::vector Files() override { return {}; } std::string Format() override { return "dummy"; } std::unique_ptr MakeBatch(bool flush_on_close = true) override { return std::make_unique(); } }; diff --git a/src/wallet/sqlite.h b/src/wallet/sqlite.h index eb3c0217f55f..c78cd29afc2c 100644 --- a/src/wallet/sqlite.h +++ b/src/wallet/sqlite.h @@ -166,6 +166,14 @@ class SQLiteDatabase : public WalletDatabase void IncrementUpdateCounter() override { ++nUpdateCounter; } std::string Filename() override { return m_file_path; } + /** Return paths to all database created files */ + std::vector Files() override + { + std::vector files; + files.emplace_back(m_dir_path / fs::PathFromString(m_file_path)); + files.emplace_back(m_dir_path / fs::PathFromString(m_file_path + "-journal")); + return files; + } std::string Format() override { return "sqlite"; } /** Make a SQLiteBatch connected to this database */ diff --git a/src/wallet/test/util.h b/src/wallet/test/util.h index 
b055c6c69304..59e3a9c75f74 100644 --- a/src/wallet/test/util.h +++ b/src/wallet/test/util.h @@ -123,6 +123,7 @@ class MockableDatabase : public WalletDatabase void ReloadDbEnv() override {} std::string Filename() override { return "mockable"; } + std::vector Files() override { return {}; } std::string Format() override { return "mock"; } std::unique_ptr MakeBatch(bool flush_on_close = true) override { return std::make_unique(m_records, m_pass); } }; From cc324aa2bed30afa713625dfb9cf83c438dd15c1 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Tue, 6 Jan 2026 16:09:38 -0800 Subject: [PATCH 083/115] wallettool: do not use fs::remove_all in createfromdump cleanup Github-Pull: bitcoin/bitcoin#34215 Rebased-From: f78f6f1dc8e16d5a8a23749e77bc3bf17c91ae42 --- src/wallet/dump.cpp | 8 +++++++- test/functional/tool_wallet.py | 12 ++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/src/wallet/dump.cpp b/src/wallet/dump.cpp index db2756e0ca8d..20aa5d453ed3 100644 --- a/src/wallet/dump.cpp +++ b/src/wallet/dump.cpp @@ -288,11 +288,17 @@ bool CreateFromDump(const ArgsManager& args, const std::string& name, const fs:: dump_file.close(); } + // On failure, gather the paths to remove + std::vector paths_to_remove = wallet->GetDatabase().Files(); + if (!name.empty()) paths_to_remove.push_back(wallet_path); + wallet.reset(); // The pointer deleter will close the wallet for us. 
// Remove the wallet dir if we have a failure if (!ret) { - fs::remove_all(wallet_path); + for (const auto& p : paths_to_remove) { + fs::remove(p); + } } return ret; diff --git a/test/functional/tool_wallet.py b/test/functional/tool_wallet.py index c7abc2da8d4f..979804a5fea8 100755 --- a/test/functional/tool_wallet.py +++ b/test/functional/tool_wallet.py @@ -409,6 +409,18 @@ def test_dump_createfromdump(self): self.write_dump(dump_data, bad_sum_wallet_dump) self.assert_raises_tool_error('Error: Checksum is not the correct size', '-wallet=badload', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump') assert not (self.nodes[0].wallets_path / "badload").is_dir() + if not self.options.descriptors: + os.rename(self.nodes[0].wallets_path / "wallet.dat", self.nodes[0].wallets_path / "default.wallet.dat") + self.assert_raises_tool_error('Error: Checksum is not the correct size', '-wallet=', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump') + assert self.nodes[0].wallets_path.exists() + assert not (self.nodes[0].wallets_path / "wallet.dat").exists() + + self.log.info('Checking createfromdump with an unnamed wallet') + self.do_tool_createfromdump("", "wallet.dump") + assert (self.nodes[0].wallets_path / "wallet.dat").exists() + os.unlink(self.nodes[0].wallets_path / "wallet.dat") + if not self.options.descriptors: + os.rename(self.nodes[0].wallets_path / "default.wallet.dat", self.nodes[0].wallets_path / "wallet.dat") def test_chainless_conflicts(self): self.log.info("Test wallet tool when wallet contains conflicting transactions") From d91f56e1e3f1aee99b0c09c23db70622ad6ed1b4 Mon Sep 17 00:00:00 2001 From: furszy Date: Mon, 5 Jan 2026 18:12:40 -0500 Subject: [PATCH 084/115] wallet: RestoreWallet failure, erase only what was created Track what RestoreWallet creates so only those files and directories are removed during a failure and nothing else. Preexisting paths must be left untouched. 
Note: Using fs::remove_all() instead of fs::remove() in RestoreWallet does not cause any problems currently, but the change is necessary for the next commit which extends RestoreWallet to work with existing directories, which may contain files that must not be deleted. Github-Pull: bitcoin/bitcoin#34156 Rebased-From: 4ed0693a3f2a427ef9e7ad016930ec29fa244995 --- src/wallet/wallet.cpp | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 09eda0c28e4e..32c902b7680f 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -501,6 +501,8 @@ std::shared_ptr RestoreWallet(WalletContext& context, const fs::path& b const fs::path wallet_path = fsbridge::AbsPathJoin(GetWalletDir(), fs::u8path(wallet_name)); auto wallet_file = wallet_path / "wallet.dat"; std::shared_ptr wallet; + bool wallet_file_copied = false; + bool created_parent_dir = false; try { if (!fs::exists(backup_file)) { @@ -509,13 +511,22 @@ std::shared_ptr RestoreWallet(WalletContext& context, const fs::path& b return nullptr; } - if (fs::exists(wallet_path) || !TryCreateDirectories(wallet_path)) { + if (fs::exists(wallet_path)) { error = Untranslated(strprintf("Failed to create database path '%s'. 
Database already exists.", fs::PathToString(wallet_path))); status = DatabaseStatus::FAILED_ALREADY_EXISTS; return nullptr; + } else { + // The directory doesn't exist, create it + if (!TryCreateDirectories(wallet_path)) { + error = Untranslated(strprintf("Failed to restore database path '%s'.", fs::PathToString(wallet_path))); + status = DatabaseStatus::FAILED_ALREADY_EXISTS; + return nullptr; + } + created_parent_dir = true; } fs::copy_file(backup_file, wallet_file, fs::copy_options::none); + wallet_file_copied = true; if (load_after_restore) { wallet = LoadWallet(context, wallet_name, load_on_start, options, status, error, warnings); @@ -528,7 +539,13 @@ std::shared_ptr RestoreWallet(WalletContext& context, const fs::path& b // Remove created wallet path only when loading fails if (load_after_restore && !wallet) { - fs::remove_all(wallet_path); + if (wallet_file_copied) fs::remove(wallet_file); + // Clean up the parent directory if we created it during restoration. + // As we have created it, it must be empty after deleting the wallet file. + if (created_parent_dir) { + Assume(fs::is_empty(wallet_path)); + fs::remove(wallet_path); + } } return wallet; From a074d36254ab0c666f0438fe071cd213715f82de Mon Sep 17 00:00:00 2001 From: furszy Date: Fri, 26 Dec 2025 20:22:55 -0500 Subject: [PATCH 085/115] wallet: fix unnamed wallet migration failure When migrating any legacy unnamed wallet, a failed migration would cause the cleanup logic to remove its parent directory. Since this type of legacy wallet lives directly in the main '/wallets/' folder, this resulted in unintentionally erasing all wallets, including the backup file. To be fully safe, we will no longer call `fs::remove_all`. Instead, we only erase the individual db files we have created, leaving everything else intact. The created wallets parent directories are erased only if they are empty. 
As part of this last change, `RestoreWallet` was modified to allow an existing directory as the destination, since we no longer remove the original wallet directory (we only remove the files we created inside it). This also fixes the restore of top-level default wallets during failures, which were failing due to the directory existence check that always returns true for the /wallets/ directory. This bug started after: https://github.com/bitcoin/bitcoin/commit/f6ee59b6e2995a3916fb4f0d4cbe15ece2054494 Previously, the `fs::copy_file` call was failing for top-level wallets, which prevented the `fs::remove_all` call from being reached. Github-Pull: bitcoin/bitcoin#34156 Rebased-From: f4c7e28e80bf9af50b03a770b641fd309a801589 --- src/wallet/wallet.cpp | 72 +++++++++++++++++++++++--------- test/functional/wallet_backup.py | 2 +- 2 files changed, 54 insertions(+), 20 deletions(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 32c902b7680f..119a99d601d4 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -511,10 +511,22 @@ std::shared_ptr RestoreWallet(WalletContext& context, const fs::path& b return nullptr; } + // Wallet directories are allowed to exist, but must not contain a .dat file. + // Any existing wallet database is treated as a hard failure to prevent overwriting. if (fs::exists(wallet_path)) { - error = Untranslated(strprintf("Failed to create database path '%s'. Database already exists.", fs::PathToString(wallet_path))); - status = DatabaseStatus::FAILED_ALREADY_EXISTS; - return nullptr; + // If this is a file, it is the db and we don't want to overwrite it. + if (!fs::is_directory(wallet_path)) { + error = Untranslated(strprintf("Failed to restore wallet. 
Database file exists '%s'.", fs::PathToString(wallet_path))); + status = DatabaseStatus::FAILED_ALREADY_EXISTS; + return nullptr; + } + + // Check we are not going to overwrite an existing db file + if (fs::exists(wallet_file)) { + error = Untranslated(strprintf("Failed to restore wallet. Database file exists in '%s'.", fs::PathToString(wallet_file))); + status = DatabaseStatus::FAILED_ALREADY_EXISTS; + return nullptr; + } } else { // The directory doesn't exist, create it if (!TryCreateDirectories(wallet_path)) { @@ -4559,26 +4571,43 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr } } - // In case of reloading failure, we need to remember the wallet dirs to remove - // Set is used as it may be populated with the same wallet directory paths multiple times, - // both before and after reloading. This ensures the set is complete even if one of the wallets - // fails to reload. - std::set wallet_dirs; + // In case of loading failure, we need to remember the wallet files we have created to remove. + // A `set` is used as it may be populated with the same wallet directory paths multiple times, + // both before and after loading. This ensures the set is complete even if one of the wallets + // fails to load. + std::set wallet_files_to_remove; + std::set wallet_empty_dirs_to_remove; + + // Helper to track wallet files and directories for cleanup on failure. + // Only directories of wallets created during migration (not the main wallet) are tracked. + auto track_for_cleanup = [&](const CWallet& wallet) { + const auto files = wallet.GetDatabase().Files(); + wallet_files_to_remove.insert(files.begin(), files.end()); + if (wallet.GetName() != wallet_name) { + // If this isn’t the main wallet, mark its directory for removal. + // This applies to the watch-only and solvable wallets. + // Wallets stored directly as files in the top-level directory + // (e.g. default unnamed wallets) don’t have a removable parent directory. 
+ wallet_empty_dirs_to_remove.insert(fs::PathFromString(wallet.GetDatabase().Filename()).parent_path()); + } + }; + + if (success) { // Migration successful, unload all wallets locally, then reload them. // Reload the main wallet - wallet_dirs.insert(fs::PathFromString(local_wallet->GetDatabase().Filename()).parent_path()); + track_for_cleanup(*local_wallet); success = reload_wallet(local_wallet); res.wallet = local_wallet; res.wallet_name = wallet_name; if (success && res.watchonly_wallet) { // Reload watchonly - wallet_dirs.insert(fs::PathFromString(res.watchonly_wallet->GetDatabase().Filename()).parent_path()); + track_for_cleanup(*res.watchonly_wallet); success = reload_wallet(res.watchonly_wallet); } if (success && res.solvables_wallet) { // Reload solvables - wallet_dirs.insert(fs::PathFromString(res.solvables_wallet->GetDatabase().Filename()).parent_path()); + track_for_cleanup(*res.solvables_wallet); success = reload_wallet(res.solvables_wallet); } } @@ -4586,7 +4615,7 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr // Migration failed, cleanup // Before deleting the wallet's directory, copy the backup file to the top-level wallets dir fs::path temp_backup_location = fsbridge::AbsPathJoin(GetWalletDir(), backup_filename); - fs::copy_file(backup_path, temp_backup_location, fs::copy_options::none); + fs::rename(backup_path, temp_backup_location); // Make list of wallets to cleanup std::vector> created_wallets; @@ -4595,8 +4624,8 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr if (res.solvables_wallet) created_wallets.push_back(std::move(res.solvables_wallet)); // Get the directories to remove after unloading - for (std::shared_ptr& w : created_wallets) { - wallet_dirs.emplace(fs::PathFromString(w->GetDatabase().Filename()).parent_path()); + for (std::shared_ptr& wallet : created_wallets) { + track_for_cleanup(*wallet); } // Unload the wallets @@ -4615,9 +4644,15 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr } } - // Delete the 
wallet directories - for (const fs::path& dir : wallet_dirs) { - fs::remove_all(dir); + // First, delete the db files we have created throughout this process and nothing else + for (const fs::path& file : wallet_files_to_remove) { + fs::remove(file); + } + + // Second, delete the created wallet directories and nothing else. They must be empty at this point. + for (const fs::path& dir : wallet_empty_dirs_to_remove) { + Assume(fs::is_empty(dir)); + fs::remove(dir); } // Restore the backup @@ -4631,8 +4666,7 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr } // The wallet directory has been restored, but just in case, copy the previously created backup to the wallet dir - fs::copy_file(temp_backup_location, backup_path, fs::copy_options::none); - fs::remove(temp_backup_location); + fs::rename(temp_backup_location, backup_path); // Verify that there is no dangling wallet: when the wallet wasn't loaded before, expect null. // This check is performed after restoration to avoid an early error before saving the backup. diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py index 7c88f64dcf38..7ad83bdf8704 100755 --- a/test/functional/wallet_backup.py +++ b/test/functional/wallet_backup.py @@ -136,7 +136,7 @@ def restore_wallet_existent_name(self): backup_file = self.nodes[0].datadir_path / 'wallet.bak' wallet_name = "res0" wallet_file = node.wallets_path / wallet_name - error_message = "Failed to create database path '{}'. Database already exists.".format(wallet_file) + error_message = "Failed to restore wallet. 
Database file exists in '{}'.".format(wallet_file / "wallet.dat") assert_raises_rpc_error(-36, error_message, node.restorewallet, wallet_name, backup_file) assert wallet_file.exists() From 833848e9b8eab430629da116f753f8d4433f51e2 Mon Sep 17 00:00:00 2001 From: furszy Date: Fri, 26 Dec 2025 20:23:02 -0500 Subject: [PATCH 086/115] test: add coverage for unnamed wallet migration failure Verifies that a failed migration of the unnamed (default) wallet does not erase the main /wallets/ directory, and also that the backup file exists. Github-Pull: bitcoin/bitcoin#34156 Rebased-From: 36093bde63286e19821a9e62cdff1712b6245dc7 --- test/functional/wallet_migration.py | 35 +++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index ce8dc19460df..3ca053043bb5 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -4,6 +4,7 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test Migrating a wallet from legacy to descriptor.""" +import os import random import shutil import struct @@ -548,6 +549,39 @@ def test_default_wallet(self): self.master_node.setmocktime(0) + def test_default_wallet_failure(self): + self.log.info("Test failure during unnamed (default) wallet migration") + master_wallet = self.master_node.get_wallet_rpc(self.default_wallet_name) + wallet = self.create_legacy_wallet("", blank=True) + wallet.importaddress(master_wallet.getnewaddress(address_type="legacy")) + + # Create wallet directory with the watch-only name and a wallet file. + # Because the wallet dir exists, this will cause migration to fail. 
+ watch_only_dir = self.master_node.wallets_path / "_watchonly" + os.mkdir(watch_only_dir) + shutil.copyfile(self.old_node.wallets_path / "wallet.dat", watch_only_dir / "wallet.dat") + + mocked_time = int(time.time()) + self.master_node.setmocktime(mocked_time) + assert_raises_rpc_error(-4, "Failed to create database", self.migrate_and_get_rpc, "") + self.master_node.setmocktime(0) + + # Verify the /wallets/ path exists + assert self.master_node.wallets_path.exists() + # Check backup file exists. Because the wallet has no name, the backup is prefixed with 'default_wallet' + backup_path = self.master_node.wallets_path / f"default_wallet_{mocked_time}.legacy.bak" + assert backup_path.exists() + # Verify the original unnamed wallet was restored + assert (self.master_node.wallets_path / "wallet.dat").exists() + # And verify it is still a BDB wallet + with open(self.master_node.wallets_path / "wallet.dat", "rb") as f: + data = f.read(16) + _, _, magic = struct.unpack("QII", data) + assert_equal(magic, BTREE_MAGIC) + + # Test cleanup: clear default wallet for next test + os.remove(self.old_node.wallets_path / "wallet.dat") + def test_direct_file(self): self.log.info("Test migration of a wallet that is not in a wallet directory") wallet = self.create_legacy_wallet("plainfile") @@ -1372,6 +1406,7 @@ def run_test(self): self.test_encrypted() self.test_nonexistent() self.test_unloaded_by_path() + self.test_default_wallet_failure() self.test_default_wallet() self.test_direct_file() self.test_addressbook() From 9ea84c08d7e24ffefa6f18d6bd2af28ec38cfd98 Mon Sep 17 00:00:00 2001 From: furszy Date: Mon, 5 Jan 2026 16:08:13 -0500 Subject: [PATCH 087/115] test: restorewallet, coverage for existing dirs, unnamed wallet and prune failure The first test verifies that restoring into an existing empty directory or a directory with no .dat db files succeeds, while restoring into a dir with a .dat file fails. 
The second test covers restoring into the default unnamed wallet (wallet.dat), which also implicitly exercises the recovery path used after a failed migration. The third test covers failure during restore on a prune node. When the wallet last sync was beyond the pruning height. Github-Pull: bitcoin/bitcoin#34156 Rebased-From: f011e0f0680a8c39988ae57dae57eb86e92dd449 --- test/functional/wallet_backup.py | 77 ++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py index 7ad83bdf8704..3ff798d3d37d 100755 --- a/test/functional/wallet_backup.py +++ b/test/functional/wallet_backup.py @@ -40,6 +40,7 @@ from test_framework.util import ( assert_equal, assert_raises_rpc_error, + sha256sum_file, ) @@ -140,6 +141,67 @@ def restore_wallet_existent_name(self): assert_raises_rpc_error(-36, error_message, node.restorewallet, wallet_name, backup_file) assert wallet_file.exists() + def test_restore_existent_dir(self): + self.log.info("Test restore on an existent empty directory") + node = self.nodes[3] + backup_file = self.nodes[0].datadir_path / 'wallet.bak' + wallet_name = "restored_wallet" + wallet_dir = node.wallets_path / wallet_name + os.mkdir(wallet_dir) + res = node.restorewallet(wallet_name, backup_file) + assert_equal(res['name'], wallet_name) + node.unloadwallet(wallet_name) + + self.log.info("Test restore succeeds when the target directory contains non-wallet files") + wallet_file = node.wallets_path / wallet_name / "wallet.dat" + os.remove(wallet_file) + extra_file = node.wallets_path / wallet_name / "not_a_wallet.txt" + extra_file.touch() + res = node.restorewallet(wallet_name, backup_file) + assert_equal(res['name'], wallet_name) + assert extra_file.exists() # extra file was not removed by mistake + node.unloadwallet(wallet_name) + + self.log.info("Test restore failure due to existing db file in the destination directory") + original_shasum = sha256sum_file(wallet_file) + 
error_message = "Failed to restore wallet. Database file exists in '{}'.".format(wallet_dir / "wallet.dat") + assert_raises_rpc_error(-36, error_message, node.restorewallet, wallet_name, backup_file) + # Ensure the wallet file remains untouched + assert wallet_dir.exists() + assert_equal(original_shasum, sha256sum_file(wallet_file)) + + self.log.info("Test restore succeeds when the .dat file in the destination has a different name") + second_wallet = wallet_dir / "hidden_storage.dat" + os.rename(wallet_dir / "wallet.dat", second_wallet) + original_shasum = sha256sum_file(second_wallet) + res = node.restorewallet(wallet_name, backup_file) + assert_equal(res['name'], wallet_name) + assert (wallet_dir / "hidden_storage.dat").exists() + assert_equal(original_shasum, sha256sum_file(second_wallet)) + node.unloadwallet(wallet_name) + + # Clean for follow-up tests + os.remove(wallet_file) + + def test_restore_into_unnamed_wallet(self): + self.log.info("Test restore into a default unnamed wallet") + # This is also useful to test the migration recovery after failure logic + node = self.nodes[3] + if not self.options.descriptors: + node.unloadwallet("") + os.rename(node.wallets_path / "wallet.dat", node.wallets_path / "default.wallet.dat") + backup_file = self.nodes[0].datadir_path / 'wallet.bak' + wallet_name = "" + res = node.restorewallet(wallet_name, backup_file) + assert_equal(res['name'], "") + assert (node.wallets_path / "wallet.dat").exists() + # Clean for follow-up tests + node.unloadwallet("") + os.remove(node.wallets_path / "wallet.dat") + if not self.options.descriptors: + os.rename(node.wallets_path / "default.wallet.dat", node.wallets_path / "wallet.dat") + node.loadwallet("") + def test_pruned_wallet_backup(self): self.log.info("Test loading backup on a pruned node when the backup was created close to the prune height of the restoring node") node = self.nodes[3] @@ -159,6 +221,19 @@ def test_pruned_wallet_backup(self): # the backup to load successfully this 
close to the prune height node.restorewallet('pruned', node.datadir_path / 'wallet_pruned.bak') + self.log.info("Test restore on a pruned node when the backup was beyond the pruning point") + if not self.options.descriptors: + node.unloadwallet("") + os.rename(node.wallets_path / "wallet.dat", node.wallets_path / "default.wallet.dat") + backup_file = self.nodes[0].datadir_path / 'wallet.bak' + wallet_name = "" + error_message = "Wallet loading failed. Prune: last wallet synchronisation goes beyond pruned data. You need to -reindex (download the whole blockchain again in case of pruned node)" + assert_raises_rpc_error(-4, error_message, node.restorewallet, wallet_name, backup_file) + assert node.wallets_path.exists() # ensure the wallets dir exists + if not self.options.descriptors: + os.rename(node.wallets_path / "default.wallet.dat", node.wallets_path / "wallet.dat") + node.loadwallet("") + def run_test(self): self.log.info("Generating initial blockchain") self.generate(self.nodes[0], 1) @@ -227,6 +302,8 @@ def run_test(self): assert_equal(res2_rpc.getbalance(), balance2) self.restore_wallet_existent_name() + self.test_restore_existent_dir() + self.test_restore_into_unnamed_wallet() if not self.options.descriptors: self.log.info("Restoring using dumped wallet") From a7e2d106db8f193259420bacbccec80ba3beebf1 Mon Sep 17 00:00:00 2001 From: furszy Date: Sat, 27 Dec 2025 13:54:59 -0500 Subject: [PATCH 088/115] wallet: improve post-migration logging Right now, after migration the last message users see is "migration completed", but the migration isn't actually finished yet. We still need to load the new wallets to ensure consistency, and if that fails, the migration will be rolled back. This can be confusing for users. This change logs the post-migration loading step and if a wallet fails to load and the migration will be rolled back. 
Github-Pull: bitcoin/bitcoin#34156 Rebased-From: d70b159c42008ac3b63d1c43d99d4f1316d2f1ef --- src/wallet/wallet.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 119a99d601d4..b54cf0be0f77 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4515,7 +4515,12 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr std::string name = to_reload->GetName(); to_reload.reset(); to_reload = LoadWallet(context, name, /*load_on_start=*/std::nullopt, options, status, error, warnings); - return to_reload != nullptr; + if (!to_reload) { + LogError("Failed to load wallet '%s' after migration. Rolling back migration to preserve consistency. " + "Error cause: %s\n", wallet_name, error.original); + return false; + } + return true; }; // Before anything else, check if there is something to migrate. @@ -4596,6 +4601,7 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr if (success) { // Migration successful, unload all wallets locally, then reload them. // Reload the main wallet + LogInfo("Loading new wallets after migration...\n"); track_for_cleanup(*local_wallet); success = reload_wallet(local_wallet); res.wallet = local_wallet; From 5e8ad98163af9749e7a3c44a9107cc241c5bd7ab Mon Sep 17 00:00:00 2001 From: furszy Date: Sat, 27 Dec 2025 14:32:11 -0500 Subject: [PATCH 089/115] wallet: migration, fix watch-only and solvables wallets names Because the default wallet has no name, the watch-only and solvables wallets created during migration end up having no name either. This fixes it by applying the same prefix name we use for the backup file for an unnamed default wallet. 
Before: watch-only wallet named "_watchonly" After: watch-only wallet named "default_wallet_watchonly" Github-Pull: bitcoin/bitcoin#34156 Rebased-From: 82caa8193a3e36f248dcc949e0cd41def191efac --- src/wallet/wallet.cpp | 15 ++++++++-- test/functional/wallet_migration.py | 45 +++++++++++++++++++++++++++-- 2 files changed, 55 insertions(+), 5 deletions(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index b54cf0be0f77..2397d84a6f50 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4326,6 +4326,15 @@ bool CWallet::CanGrindR() const return !IsWalletFlagSet(WALLET_FLAG_EXTERNAL_SIGNER); } +// Returns wallet prefix for migration. +// Used to name the backup file and newly created wallets. +// E.g. a watch-only wallet is named "_watchonly". +static std::string MigrationPrefixName(CWallet& wallet) +{ + const std::string& name{wallet.GetName()}; + return name.empty() ? "default_wallet" : name; +} + bool DoMigration(CWallet& wallet, WalletContext& context, bilingual_str& error, MigrationResult& res) EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet) { AssertLockHeld(wallet.cs_wallet); @@ -4357,7 +4366,7 @@ bool DoMigration(CWallet& wallet, WalletContext& context, bilingual_str& error, DatabaseStatus status; std::vector warnings; - std::string wallet_name = wallet.GetName() + "_watchonly"; + std::string wallet_name = MigrationPrefixName(wallet) + "_watchonly"; std::unique_ptr database = MakeWalletDatabase(wallet_name, options, status, error); if (!database) { error = strprintf(_("Wallet file creation failed: %s"), error); @@ -4394,7 +4403,7 @@ bool DoMigration(CWallet& wallet, WalletContext& context, bilingual_str& error, DatabaseStatus status; std::vector warnings; - std::string wallet_name = wallet.GetName() + "_solvables"; + std::string wallet_name = MigrationPrefixName(wallet) + "_solvables"; std::unique_ptr database = MakeWalletDatabase(wallet_name, options, status, error); if (!database) { error = strprintf(_("Wallet file creation failed: %s"), 
error); @@ -4533,7 +4542,7 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr // Make a backup of the DB fs::path this_wallet_dir = fs::absolute(fs::PathFromString(local_wallet->GetDatabase().Filename())).parent_path(); - fs::path backup_filename = fs::PathFromString(strprintf("%s_%d.legacy.bak", (wallet_name.empty() ? "default_wallet" : wallet_name), GetTime())); + fs::path backup_filename = fs::PathFromString(strprintf("%s_%d.legacy.bak", MigrationPrefixName(*local_wallet), GetTime())); fs::path backup_path = this_wallet_dir / backup_filename; if (!local_wallet->BackupWallet(fs::PathToString(backup_path))) { if (was_loaded) { diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 3ca053043bb5..c11986226dff 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -5,6 +5,7 @@ """Test Migrating a wallet from legacy to descriptor.""" import os +from pathlib import Path import random import shutil import struct @@ -25,6 +26,7 @@ from test_framework.script_util import key_to_p2pkh_script, key_to_p2pk_script, script_to_p2sh_script, script_to_p2wsh_script from test_framework.util import ( assert_equal, + assert_greater_than, assert_raises_rpc_error, find_vout_for_address, sha256sum_file, @@ -523,6 +525,14 @@ def test_unloaded_by_path(self): assert_equal(bals, wallet.getbalances()) + def clear_default_wallet(self, backup_file): + # Test cleanup: Clear unnamed default wallet for subsequent tests + (self.old_node.wallets_path / "wallet.dat").unlink() + (self.master_node.wallets_path / "wallet.dat").unlink(missing_ok=True) + shutil.rmtree(self.master_node.wallets_path / "default_wallet_watchonly", ignore_errors=True) + shutil.rmtree(self.master_node.wallets_path / "default_wallet_solvables", ignore_errors=True) + backup_file.unlink() + def test_default_wallet(self): self.log.info("Test migration of the wallet named as the empty string") wallet = self.create_legacy_wallet("") @@ -549,6 +559,36 
@@ def test_default_wallet(self): self.master_node.setmocktime(0) + wallet.unloadwallet() + self.clear_default_wallet(backup_file=Path(res["backup_path"])) + + def test_default_wallet_watch_only(self): + self.log.info("Test unnamed (default) watch-only wallet migration") + master_wallet = self.master_node.get_wallet_rpc(self.default_wallet_name) + wallet = self.create_legacy_wallet("", blank=True) + wallet.importaddress(master_wallet.getnewaddress(address_type="legacy")) + + res, def_wallet = self.migrate_and_get_rpc("") + wallet = self.master_node.get_wallet_rpc("default_wallet_watchonly") + + info = wallet.getwalletinfo() + assert_equal(info["descriptors"], True) + assert_equal(info["format"], "sqlite") + assert_equal(info["private_keys_enabled"], False) + assert_equal(info["walletname"], "default_wallet_watchonly") + + # The default wallet will still exist and have newly generated descriptors + assert (self.master_node.wallets_path / "wallet.dat").exists() + def_wallet_info = def_wallet.getwalletinfo() + assert_equal(def_wallet_info["descriptors"], True) + assert_equal(def_wallet_info["format"], "sqlite") + assert_equal(def_wallet_info["private_keys_enabled"], True) + assert_equal(def_wallet_info["walletname"], "") + assert_greater_than(def_wallet_info["keypoolsize"], 0) + + wallet.unloadwallet() + self.clear_default_wallet(backup_file=Path(res["backup_path"])) + def test_default_wallet_failure(self): self.log.info("Test failure during unnamed (default) wallet migration") master_wallet = self.master_node.get_wallet_rpc(self.default_wallet_name) @@ -557,7 +597,7 @@ def test_default_wallet_failure(self): # Create wallet directory with the watch-only name and a wallet file. # Because the wallet dir exists, this will cause migration to fail. 
- watch_only_dir = self.master_node.wallets_path / "_watchonly" + watch_only_dir = self.master_node.wallets_path / "default_wallet_watchonly" os.mkdir(watch_only_dir) shutil.copyfile(self.old_node.wallets_path / "wallet.dat", watch_only_dir / "wallet.dat") @@ -580,7 +620,7 @@ def test_default_wallet_failure(self): assert_equal(magic, BTREE_MAGIC) # Test cleanup: clear default wallet for next test - os.remove(self.old_node.wallets_path / "wallet.dat") + self.clear_default_wallet(backup_path) def test_direct_file(self): self.log.info("Test migration of a wallet that is not in a wallet directory") @@ -1408,6 +1448,7 @@ def run_test(self): self.test_unloaded_by_path() self.test_default_wallet_failure() self.test_default_wallet() + self.test_default_wallet_watch_only() self.test_direct_file() self.test_addressbook() self.test_migrate_raw_p2sh() From 9405e915e79d86d262779ea38104624d37add2a3 Mon Sep 17 00:00:00 2001 From: furszy Date: Sun, 4 Jan 2026 12:25:21 -0500 Subject: [PATCH 090/115] test: coverage for migration failure when last sync is beyond prune height Github-Pull: bitcoin/bitcoin#34156 Rebased-From: b7c34d08dd9549a95cffc6ec1ffa4bb4f81e35eb --- test/functional/wallet_migration.py | 39 +++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index c11986226dff..5424fda2ab2e 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -1431,6 +1431,42 @@ def test_solvable_no_privs(self): assert_equal(addr_info["solvable"], True) assert "hex" in addr_info + def unsynced_wallet_on_pruned_node_fails(self): + self.log.info("Test migration of an unsynced wallet on a pruned node fails gracefully") + wallet = self.create_legacy_wallet("", load_on_startup=False) + last_wallet_synced_block = wallet.getwalletinfo()['lastprocessedblock']['height'] + wallet.unloadwallet() + + shutil.copyfile(self.old_node.wallets_path / "wallet.dat", 
self.master_node.wallets_path / "wallet.dat") + + # Generate blocks just so the wallet best block is pruned + self.restart_node(0, ["-fastprune", "-prune=1", "-nowallet"]) + self.connect_nodes(0, 1) + self.generate(self.master_node, 450, sync_fun=self.no_op) + self.master_node.pruneblockchain(250) + # Ensure next block to sync is unavailable + assert_raises_rpc_error(-1, "Block not available (pruned data)", self.master_node.getblock, self.master_node.getblockhash(last_wallet_synced_block + 1)) + + # Check migration failure + mocked_time = int(time.time()) + self.master_node.setmocktime(mocked_time) + assert_raises_rpc_error(-4, "last wallet synchronisation goes beyond pruned data. You need to -reindex (download the whole blockchain again in case of pruned node)", self.master_node.migratewallet, wallet_name="") + self.master_node.setmocktime(0) + + # Verify the /wallets/ path exists, the wallet is still BDB and the backup file is there. + assert self.master_node.wallets_path.exists() + + with open(self.master_node.wallets_path / "wallet.dat", "rb") as f: + data = f.read(16) + _, _, magic = struct.unpack("QII", data) + assert_equal(magic, BTREE_MAGIC) + + backup_path = self.master_node.wallets_path / f"default_wallet_{mocked_time}.legacy.bak" + assert backup_path.exists() + + self.clear_default_wallet(backup_path) + + def run_test(self): self.master_node = self.nodes[0] self.old_node = self.nodes[1] @@ -1466,5 +1502,8 @@ def run_test(self): self.test_taproot() self.test_solvable_no_privs() + # Note: After this test the first 250 blocks of 'master_node' are pruned + self.unsynced_wallet_on_pruned_node_fails() + if __name__ == '__main__': WalletMigrationTest(__file__).main() From 76cdeb7b06232050c7d20ffa1395697cc4e53295 Mon Sep 17 00:00:00 2001 From: David Gumberg Date: Wed, 7 Jan 2026 16:02:58 -0800 Subject: [PATCH 091/115] wallet: test: Failed migration cleanup Refactor a common way to perform the failed migration test that exists for default wallets, and add 
relative-path wallets and absolute-path wallets. Github-Pull: 34226 Rebased-From: eeaf28dbe0e09819ab0e95bb7762b29536bdeef6 --- test/functional/wallet_migration.py | 79 ++++++++++++++++++++++------- 1 file changed, 61 insertions(+), 18 deletions(-) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 5424fda2ab2e..8129baf4388e 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -589,38 +589,72 @@ def test_default_wallet_watch_only(self): wallet.unloadwallet() self.clear_default_wallet(backup_file=Path(res["backup_path"])) - def test_default_wallet_failure(self): - self.log.info("Test failure during unnamed (default) wallet migration") + def test_migration_failure(self, wallet_name): + is_default = wallet_name == "" + wallet_pretty_name = "unnamed (default)" if is_default else f'"{wallet_name}"' + self.log.info(f"Test failure during migration of wallet named: {wallet_pretty_name}") + # Preface, set up legacy wallet and unload it master_wallet = self.master_node.get_wallet_rpc(self.default_wallet_name) - wallet = self.create_legacy_wallet("", blank=True) + wallet = self.create_legacy_wallet(wallet_name, blank=True) wallet.importaddress(master_wallet.getnewaddress(address_type="legacy")) + wallet.unloadwallet() - # Create wallet directory with the watch-only name and a wallet file. - # Because the wallet dir exists, this will cause migration to fail. - watch_only_dir = self.master_node.wallets_path / "default_wallet_watchonly" + if os.path.isabs(wallet_name): + old_path = master_path = Path(wallet_name) + else: + old_path = self.old_node.wallets_path / wallet_name + master_path = self.master_node.wallets_path / wallet_name + os.makedirs(master_path, exist_ok=True) + shutil.copyfile(old_path / "wallet.dat", master_path / "wallet.dat") + + # This will be the watch-only directory the migration tries to create, + # we make migration fail by placing a wallet.dat file there. 
+ wo_prefix = wallet_name or "default_wallet" + # wo_prefix might have path characters in it, this corresponds with + # DoMigration(). + wo_dirname = f"{wo_prefix}_watchonly" + watch_only_dir = self.master_node.wallets_path / wo_dirname os.mkdir(watch_only_dir) - shutil.copyfile(self.old_node.wallets_path / "wallet.dat", watch_only_dir / "wallet.dat") + shutil.copyfile(old_path / "wallet.dat", watch_only_dir / "wallet.dat") + + # Make a file in the wallets dir that must still exist after migration + survive_path = self.master_node.wallets_path / "survive" + open(survive_path, "wb").close() + assert survive_path.exists() mocked_time = int(time.time()) self.master_node.setmocktime(mocked_time) - assert_raises_rpc_error(-4, "Failed to create database", self.migrate_and_get_rpc, "") + assert_raises_rpc_error(-4, "Failed to create database", self.master_node.migratewallet, wallet_name) self.master_node.setmocktime(0) - # Verify the /wallets/ path exists + # Verify the /wallets/ path exists. assert self.master_node.wallets_path.exists() - # Check backup file exists. Because the wallet has no name, the backup is prefixed with 'default_wallet' - backup_path = self.master_node.wallets_path / f"default_wallet_{mocked_time}.legacy.bak" + + # Verify survive is still there + assert survive_path.exists() + # Verify both wallet paths exist. 
+ assert Path(old_path / "wallet.dat").exists() + assert Path(master_path / "wallet.dat").exists() + + backup_prefix = "default_wallet" if is_default else wallet_name + backup_path = master_path / f"{backup_prefix}_{mocked_time}.legacy.bak" assert backup_path.exists() - # Verify the original unnamed wallet was restored - assert (self.master_node.wallets_path / "wallet.dat").exists() - # And verify it is still a BDB wallet - with open(self.master_node.wallets_path / "wallet.dat", "rb") as f: + + with open(self.master_node.wallets_path / wallet_name / self.wallet_data_filename, "rb") as f: data = f.read(16) _, _, magic = struct.unpack("QII", data) assert_equal(magic, BTREE_MAGIC) - # Test cleanup: clear default wallet for next test - self.clear_default_wallet(backup_path) + + # Cleanup + if is_default: + self.clear_default_wallet(backup_path) + else: + backup_path.unlink() + Path(watch_only_dir / "wallet.dat").unlink() + Path(watch_only_dir).rmdir() + Path(master_path / "wallet.dat").unlink() + Path(old_path / "wallet.dat").unlink(missing_ok=True) def test_direct_file(self): self.log.info("Test migration of a wallet that is not in a wallet directory") @@ -1482,7 +1516,16 @@ def run_test(self): self.test_encrypted() self.test_nonexistent() self.test_unloaded_by_path() - self.test_default_wallet_failure() + + migration_failure_cases = [ + "", + "../", + os.path.abspath(self.master_node.datadir_path / "absolute_path"), + "normallynamedwallet" + ] + for wallet_name in migration_failure_cases: + self.test_migration_failure(wallet_name=wallet_name) + self.test_default_wallet() self.test_default_wallet_watch_only() self.test_direct_file() From 2e4688618ba6a68df0936df0cc86b657ec35b4ef Mon Sep 17 00:00:00 2001 From: ismaelsadeeq Date: Wed, 24 Sep 2025 16:31:38 +0200 Subject: [PATCH 092/115] miner: fix `addPackageTxs` unsigned integer overflow Github-Pull: #33475 Rebased-From: b807dfcdc5929c314d43b790c9e705d5bf0a86e8 --- src/node/miner.cpp | 4 ++-- 1 file changed, 2 
insertions(+), 2 deletions(-) diff --git a/src/node/miner.cpp b/src/node/miner.cpp index 33eeaf91fb2f..b9ecd855f0a6 100644 --- a/src/node/miner.cpp +++ b/src/node/miner.cpp @@ -394,8 +394,8 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda ++nConsecutiveFailed; - if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight > - m_options.nBlockMaxWeight - m_options.block_reserved_weight) { + if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight + + m_options.block_reserved_weight > m_options.nBlockMaxWeight) { // Give up if we're close to full and haven't succeeded in a while break; } From 7a71850a6d1d2eaf09e19d9d0af574a90487ec2b Mon Sep 17 00:00:00 2001 From: SatsAndSports Date: Tue, 28 Oct 2025 16:52:35 +0100 Subject: [PATCH 093/115] Remove unreliable seed from chainparams.cpp, and the associated README Github-Pull: #33723 Rebased-From: b0c706795ce6a3a00bf068a81ee99fef2ee9bf7e --- contrib/seeds/README.md | 3 +-- src/kernel/chainparams.cpp | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/contrib/seeds/README.md b/contrib/seeds/README.md index a1a2e34b5de3..58d7f41130ff 100644 --- a/contrib/seeds/README.md +++ b/contrib/seeds/README.md @@ -10,14 +10,13 @@ to addrman with). Update `MIN_BLOCKS` in `makeseeds.py` and the `-m`/`--minblocks` arguments below, as needed. -The seeds compiled into the release are created from sipa's, achow101's and luke-jr's +The seeds compiled into the release are created from sipa's and achow101's DNS seed, virtu's crawler, and asmap community AS map data. 
Run the following commands from the `/contrib/seeds` directory: ``` curl https://bitcoin.sipa.be/seeds.txt.gz | gzip -dc > seeds_main.txt curl https://21.ninja/seeds.txt.gz | gzip -dc >> seeds_main.txt -curl https://luke.dashjr.org/programs/bitcoin/files/charts/seeds.txt >> seeds_main.txt curl https://mainnet.achownodes.xyz/seeds.txt.gz | gzip -dc >> seeds_main.txt curl https://signet.achownodes.xyz/seeds.txt.gz | gzip -dc > seeds_signet.txt curl https://testnet.achownodes.xyz/seeds.txt.gz | gzip -dc > seeds_test.txt diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index ac3fc9eadad6..0f193eff74d9 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -146,7 +146,6 @@ class CMainParams : public CChainParams { // release ASAP to avoid it where possible. vSeeds.emplace_back("seed.bitcoin.sipa.be."); // Pieter Wuille, only supports x1, x5, x9, and xd vSeeds.emplace_back("dnsseed.bluematt.me."); // Matt Corallo, only supports x9 - vSeeds.emplace_back("dnsseed.bitcoin.dashjr-list-of-p2p-nodes.us."); // Luke Dashjr vSeeds.emplace_back("seed.bitcoin.jonasschnelli.ch."); // Jonas Schnelli, only supports x1, x5, x9, and xd vSeeds.emplace_back("seed.btc.petertodd.net."); // Peter Todd, only supports x1, x5, x9, and xd vSeeds.emplace_back("seed.bitcoin.sprovoost.nl."); // Sjors Provoost From daef5852f02513521654e15d62748648765acf92 Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Thu, 8 Jan 2026 12:45:14 +0000 Subject: [PATCH 094/115] guix: Fix `osslsigncode` tests Github-Pull: #34227 Rebased-From: 194114daf385a5db50e1507fda79a1a93240d494 --- contrib/guix/manifest.scm | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm index 4e7e95521828..176fcee0464a 100644 --- a/contrib/guix/manifest.scm +++ b/contrib/guix/manifest.scm @@ -2,6 +2,7 @@ ((gnu packages bash) #:select (bash-minimal)) (gnu packages bison) 
((gnu packages certs) #:select (nss-certs)) + ((gnu packages check) #:select (libfaketime)) ((gnu packages cmake) #:select (cmake-minimal)) (gnu packages commencement) (gnu packages compression) @@ -209,7 +210,17 @@ and abstract ELF, PE and MachO formats.") (base32 "1j47vwq4caxfv0xw68kw5yh00qcpbd56d7rq6c483ma3y7s96yyz")))) (build-system cmake-build-system) - (inputs (list openssl)) + (arguments + (list + #:phases + #~(modify-phases %standard-phases + (replace 'check + (lambda* (#:key tests? #:allow-other-keys) + (if tests? + (invoke "faketime" "-f" "@2025-01-01 00:00:00" ;; Tests fail after 2025. + "ctest" "--output-on-failure" "--no-tests=error") + (format #t "test suite not run~%"))))))) + (inputs (list libfaketime openssl)) (home-page "https://github.com/mtrojnar/osslsigncode") (synopsis "Authenticode signing and timestamping tool") (description "osslsigncode is a small tool that implements part of the From 71633a9b5c10f0d6a1a1e31bcbf51de2e27649d9 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Mon, 1 Sep 2025 13:38:28 -0700 Subject: [PATCH 095/115] test: Test wallet 'from me' status change If something is imported into the wallet, it can change the 'from me' status of a transaction. This status is only visible through gettransaction's "fee" field which is only shown for transactions that are 'from me'. 
Github-Pull: #33268 Rebased-From: e76c2f7a4111f87080e31539f83c21390fcd8f3b --- test/functional/wallet_listtransactions.py | 47 ++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/test/functional/wallet_listtransactions.py b/test/functional/wallet_listtransactions.py index 6263278a6c25..ce528657278f 100755 --- a/test/functional/wallet_listtransactions.py +++ b/test/functional/wallet_listtransactions.py @@ -5,9 +5,11 @@ """Test the listtransactions API.""" from decimal import Decimal +import time import os import shutil +from test_framework.blocktools import MAX_FUTURE_BLOCK_TIME from test_framework.messages import ( COIN, tx_from_hex, @@ -17,7 +19,9 @@ assert_array_result, assert_equal, assert_raises_rpc_error, + find_vout_for_address, ) +from test_framework.wallet_util import get_generate_key class ListTransactionsTest(BitcoinTestFramework): @@ -114,6 +118,8 @@ def run_test(self): self.run_invalid_parameters_test() self.test_op_return() + self.test_from_me_status_change() + def run_rbf_opt_in_test(self): """Test the opt-in-rbf flag for sent and received transactions.""" @@ -327,6 +333,47 @@ def test_op_return(self): assert 'address' not in op_ret_tx + def test_from_me_status_change(self): + self.log.info("Test gettransaction after changing a transaction's 'from me' status") + self.nodes[0].createwallet("fromme") + default_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name) + wallet = self.nodes[0].get_wallet_rpc("fromme") + + # The 'fee' field of gettransaction is only added when the transaction is 'from me' + # Run twice, once for a transaction in the mempool, again when it confirms + for confirm in [False, True]: + key = get_generate_key() + default_wallet.importprivkey(key.privkey) + + send_res = default_wallet.send(outputs=[{key.p2wpkh_addr: 1}, {wallet.getnewaddress(): 1}]) + assert_equal(send_res["complete"], True) + vout = find_vout_for_address(self.nodes[0], send_res["txid"], key.p2wpkh_addr) + utxos = [{"txid": 
send_res["txid"], "vout": vout}] + self.generate(self.nodes[0], 1, sync_fun=self.no_op) + + # Send to the test wallet, ensuring that one input is for the descriptor we will import, + # and that there are other inputs belonging to only the sending wallet + send_res = default_wallet.send(outputs=[{wallet.getnewaddress(): 1.5}], inputs=utxos, add_inputs=True) + assert_equal(send_res["complete"], True) + txid = send_res["txid"] + self.nodes[0].syncwithvalidationinterfacequeue() + tx_info = wallet.gettransaction(txid) + assert "fee" not in tx_info + assert_equal(any(detail["category"] == "send" for detail in tx_info["details"]), False) + + if confirm: + self.generate(self.nodes[0], 1, sync_fun=self.no_op) + # Mock time forward and generate blocks so that the import does not rescan the transaction + self.nodes[0].setmocktime(int(time.time()) + MAX_FUTURE_BLOCK_TIME + 1) + self.generate(self.nodes[0], 10, sync_fun=self.no_op) + + wallet.importprivkey(key.privkey) + # TODO: We should check that the fee matches, but since the transaction spends inputs + # not known to the wallet, it is incorrectly calculating the fee. + # assert_equal(wallet.gettransaction(txid)["fee"], fee) + tx_info = wallet.gettransaction(txid) + assert "fee" in tx_info + assert_equal(any(detail["category"] == "send" for detail in tx_info["details"]), True) if __name__ == '__main__': ListTransactionsTest(__file__).main() From bab1ac827b4fdd4984661f32f6b899d56261da5d Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Thu, 28 Aug 2025 13:39:46 -0700 Subject: [PATCH 096/115] wallet: Determine IsFromMe by checking for TXOs of inputs Instead of checking whether the total amount of inputs known by the wallet is greater than 0, we should be checking for whether the input is known by the wallet. This enables us to determine whether a transaction spends an of output with an amount of 0, which is necessary for marking 0-value dust outputs as spent. 
Github-Pull: #33268 Rebased-From: 39a7dbdd277d1dea9a70314d8cc5ae057999ee88 --- src/wallet/wallet.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 2397d84a6f50..913c74532058 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -1702,7 +1702,13 @@ isminetype CWallet::IsMine(const COutPoint& outpoint) const bool CWallet::IsFromMe(const CTransaction& tx) const { - return (GetDebit(tx, ISMINE_ALL) > 0); + LOCK(cs_wallet); + for (const CTxIn& txin : tx.vin) { + if (IsMine(txin.prevout)) { + return true; + } + } + return false; } CAmount CWallet::GetDebit(const CTransaction& tx, const isminefilter& filter) const From c6e7765c0a03c124fcc86b452d6870b6d2797130 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Thu, 28 Aug 2025 15:13:36 -0700 Subject: [PATCH 097/115] wallet: Throw an error in sendall if the tx size cannot be calculated Github-Pull: #33268 Rebased-From: c40dc822d74aea46e4a21774ca282e008f609c2a --- src/wallet/rpc/spend.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/wallet/rpc/spend.cpp b/src/wallet/rpc/spend.cpp index 64aae701ebc4..5c0b1db23e65 100644 --- a/src/wallet/rpc/spend.cpp +++ b/src/wallet/rpc/spend.cpp @@ -1486,7 +1486,6 @@ RPCHelpMan sendall() CoinFilterParams coins_params; coins_params.min_amount = 0; for (const COutput& output : AvailableCoins(*pwallet, &coin_control, fee_rate, coins_params).All()) { - CHECK_NONFATAL(output.input_bytes > 0); if (send_max && fee_rate.GetFee(output.input_bytes) > output.txout.nValue) { continue; } @@ -1505,6 +1504,9 @@ RPCHelpMan sendall() // estimate final size of tx const TxSize tx_size{CalculateMaximumSignedTxSize(CTransaction(rawTx), pwallet.get())}; + if (tx_size.vsize == -1) { + throw JSONRPCError(RPC_WALLET_ERROR, "Unable to determine the size of the transaction, the wallet contains unsolvable descriptors"); + } const CAmount fee_from_size{fee_rate.GetFee(tx_size.vsize)}; const std::optional 
total_bump_fees{pwallet->chain().calculateCombinedBumpFee(outpoints_spent, fee_rate)}; CAmount effective_value = total_input_value - fee_from_size - total_bump_fees.value_or(0); From f4b78c42e557aec29f5ed5e570fb55bf70d2b3b4 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Thu, 28 Aug 2025 15:13:23 -0700 Subject: [PATCH 098/115] test: Add a test for anchor outputs in the wallet Github-Pull: #33268 Rebased-From: 609d265ebc51abfe9a9ce570da647b6839dc1214 --- test/functional/test_framework/script_util.py | 1 + test/functional/test_runner.py | 2 + test/functional/wallet_anchor.py | 128 ++++++++++++++++++ 3 files changed, 131 insertions(+) create mode 100755 test/functional/wallet_anchor.py diff --git a/test/functional/test_framework/script_util.py b/test/functional/test_framework/script_util.py index fce32e138eed..d97120fd730a 100755 --- a/test/functional/test_framework/script_util.py +++ b/test/functional/test_framework/script_util.py @@ -50,6 +50,7 @@ assert len(DUMMY_MIN_OP_RETURN_SCRIPT) == MIN_PADDING PAY_TO_ANCHOR = CScript([OP_1, bytes.fromhex("4e73")]) +ANCHOR_ADDRESS = "bcrt1pfeesnyr2tx" def key_to_p2pk_script(key): key = check_key(key) diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 1fa22b1cc619..000407b118f1 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -170,6 +170,8 @@ 'wallet_listreceivedby.py --descriptors', 'wallet_abandonconflict.py --legacy-wallet', 'wallet_abandonconflict.py --descriptors', + 'wallet_anchor.py --legacy-wallet', + 'wallet_anchor.py --descriptors', 'feature_reindex.py', 'feature_reindex_readonly.py', 'wallet_labels.py --legacy-wallet', diff --git a/test/functional/wallet_anchor.py b/test/functional/wallet_anchor.py new file mode 100755 index 000000000000..f641f3f9ee1e --- /dev/null +++ b/test/functional/wallet_anchor.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python3 +# Copyright (c) 2025-present The Bitcoin Core developers +# Distributed under the MIT software license, see 
the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php. + +import time + +from test_framework.blocktools import MAX_FUTURE_BLOCK_TIME +from test_framework.descriptors import descsum_create +from test_framework.messages import ( + COutPoint, + CTxIn, + CTxInWitness, + CTxOut, +) +from test_framework.script_util import ( + ANCHOR_ADDRESS, + PAY_TO_ANCHOR, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + assert_raises_rpc_error, +) +from test_framework.wallet import MiniWallet + +class WalletAnchorTest(BitcoinTestFramework): + def add_options(self, parser): + self.add_wallet_options(parser) + + def set_test_params(self): + self.num_nodes = 1 + + def skip_test_if_missing_module(self): + self.skip_if_no_wallet() + + def test_0_value_anchor_listunspent(self): + self.log.info("Test that 0-value anchor outputs are detected as UTXOs") + + # Create an anchor output, and spend it + sender = MiniWallet(self.nodes[0]) + anchor_tx = sender.create_self_transfer(fee_rate=0, version=3)["tx"] + anchor_tx.vout.append(CTxOut(0, PAY_TO_ANCHOR)) + anchor_tx.rehash() # Rehash after modifying anchor_tx + anchor_spend = sender.create_self_transfer(version=3)["tx"] + anchor_spend.vin.append(CTxIn(COutPoint(anchor_tx.sha256, 1), b"")) + anchor_spend.wit.vtxinwit.append(CTxInWitness()) + anchor_spend.rehash() # Rehash after modifying anchor_spend + submit_res = self.nodes[0].submitpackage([anchor_tx.serialize().hex(), anchor_spend.serialize().hex()]) + assert_equal(submit_res["package_msg"], "success") + anchor_txid = anchor_tx.hash + anchor_spend_txid = anchor_spend.hash + + # Mine each tx in separate blocks + self.generateblock(self.nodes[0], sender.get_address(), [anchor_tx.serialize().hex()]) + anchor_tx_height = self.nodes[0].getblockcount() + self.generateblock(self.nodes[0], sender.get_address(), [anchor_spend.serialize().hex()]) + + # Mock time forward and generate some blocks to 
avoid rescanning of latest blocks + self.nodes[0].setmocktime(int(time.time()) + MAX_FUTURE_BLOCK_TIME + 1) + self.generate(self.nodes[0], 10) + + self.nodes[0].createwallet(wallet_name="anchor", disable_private_keys=True) + wallet = self.nodes[0].get_wallet_rpc("anchor") + + wallet.importaddress(ANCHOR_ADDRESS, rescan=False) + + # The wallet should have no UTXOs, and not know of the anchor tx or its spend + assert_equal(wallet.listunspent(), []) + assert_raises_rpc_error(-5, "Invalid or non-wallet transaction id", wallet.gettransaction, anchor_txid) + assert_raises_rpc_error(-5, "Invalid or non-wallet transaction id", wallet.gettransaction, anchor_spend_txid) + + # Rescanning the block containing the anchor so that listunspent will list the output + wallet.rescanblockchain(0, anchor_tx_height) + utxos = wallet.listunspent() + assert_equal(len(utxos), 1) + assert_equal(utxos[0]["txid"], anchor_txid) + assert_equal(utxos[0]["address"], ANCHOR_ADDRESS) + assert_equal(utxos[0]["amount"], 0) + wallet.gettransaction(anchor_txid) + assert_raises_rpc_error(-5, "Invalid or non-wallet transaction id", wallet.gettransaction, anchor_spend_txid) + + # Rescan the rest of the blockchain to see the anchor was spent + wallet.rescanblockchain() + assert_equal(wallet.listunspent(), []) + wallet.gettransaction(anchor_spend_txid) + + def test_cannot_sign_anchors(self): + self.log.info("Test that the wallet cannot spend anchor outputs") + for disable_privkeys in [False, True]: + self.nodes[0].createwallet(wallet_name=f"anchor_spend_{disable_privkeys}", disable_private_keys=disable_privkeys) + wallet = self.nodes[0].get_wallet_rpc(f"anchor_spend_{disable_privkeys}") + if self.options.descriptors: + import_res = wallet.importdescriptors([ + {"desc": descsum_create(f"addr({ANCHOR_ADDRESS})"), "timestamp": "now"}, + {"desc": descsum_create(f"raw({PAY_TO_ANCHOR.hex()})"), "timestamp": "now"} + ]) + assert_equal(import_res[0]["success"], disable_privkeys) + 
assert_equal(import_res[1]["success"], disable_privkeys) + else: + wallet.importaddress(ANCHOR_ADDRESS) + + anchor_txid = self.default_wallet.sendtoaddress(ANCHOR_ADDRESS, 1) + self.generate(self.nodes[0], 1) + + wallet = self.nodes[0].get_wallet_rpc("anchor_spend_True") + utxos = wallet.listunspent() + assert_equal(len(utxos), 1) + assert_equal(utxos[0]["txid"], anchor_txid) + assert_equal(utxos[0]["address"], ANCHOR_ADDRESS) + assert_equal(utxos[0]["amount"], 1) + + if self.options.descriptors: + assert_raises_rpc_error(-4, "Missing solving data for estimating transaction size", wallet.send, [{self.default_wallet.getnewaddress(): 0.9999}]) + assert_raises_rpc_error(-4, "Unable to determine the size of the transaction, the wallet contains unsolvable descriptors", wallet.sendall, recipients=[self.default_wallet.getnewaddress()]) + else: + assert_raises_rpc_error(-4, "Insufficient funds", wallet.send, [{self.default_wallet.getnewaddress(): 0.9999}]) + assert_raises_rpc_error(-6, "Total value of UTXO pool too low to pay for transaction. 
Try using lower feerate or excluding uneconomic UTXOs with 'send_max' option.", wallet.sendall, recipients=[self.default_wallet.getnewaddress()]) + assert_raises_rpc_error(-4, "Error: Private keys are disabled for this wallet", wallet.sendtoaddress, self.default_wallet.getnewaddress(), 0.9999) + assert_raises_rpc_error(-4, "Unable to determine the size of the transaction, the wallet contains unsolvable descriptors", wallet.sendall, recipients=[self.default_wallet.getnewaddress()], inputs=utxos) + + def run_test(self): + self.default_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name) + self.test_0_value_anchor_listunspent() + self.test_cannot_sign_anchors() + +if __name__ == '__main__': + WalletAnchorTest(__file__).main() From e973b61dbb431141e23846d95a86221b01587900 Mon Sep 17 00:00:00 2001 From: glozow Date: Mon, 12 Jan 2026 14:29:54 -0800 Subject: [PATCH 099/115] [doc] update release notes for 29.3rc1 --- doc/release-notes.md | 43 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 41 insertions(+), 2 deletions(-) diff --git a/doc/release-notes.md b/doc/release-notes.md index 4e2071dfa675..263ee553d1eb 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -1,6 +1,6 @@ -Bitcoin Core version 29.x is now available from: +Bitcoin Core version 29.3rc1 is now available from: - + This release includes various bug fixes and performance improvements, as well as updated translations. @@ -37,6 +37,36 @@ unsupported systems. 
Notable changes =============== +### P2P + +- #33050 net, validation: don't punish peers for consensus-invalid txs +- #33723 chainparams: remove dnsseed.bitcoin.dashjr-list-of-p2p-nodes.us + +### Validation + +- #32473 Introduce per-txin sighash midstate cache for legacy/p2sh/segwitv0 scripts +- #33105 validation: detect witness stripping without re-running Script checks + +### Wallet + +- #33268 wallet: Identify transactions spending 0-value outputs, and add tests for anchor outputs in a wallet +- #34156 wallet: fix unnamed legacy wallet migration failure +- #34226 wallet: test: Relative wallet failed migration cleanup +- #34123 wallet: migration, avoid creating spendable wallet from a watch-only legacy wallet +- #34215 wallettool: fix unnamed createfromdump failure walletsdir deletion + +### Mining + +- #33475 bugfix: miner: fix `addPackageTxs` unsigned integer overflow + +### Build + +- #34227 guix: Fix `osslsigncode` tests + +### Documentation + +- #33623 doc: document capnproto and libmultiprocess deps in 29.x + ### Test - #33612 test: change log rate limit version gate @@ -51,8 +81,17 @@ Credits Thanks to everyone who directly contributed to this release: +- Anthony Towns +- Antoine Poinsot - Ava Chow +- David Gumberg - Eugene Siegel +- fanquake +- furszy +- Hennadii Stepanov +- ismaelsadeeq +- Pieter Wuille +- SatsAndSports - willcl-ark As well as to everyone that helped with translations on From e9c978391ff74e99724fbda9fb50f0c45fb13008 Mon Sep 17 00:00:00 2001 From: glozow Date: Mon, 12 Jan 2026 14:36:12 -0800 Subject: [PATCH 100/115] [build] bump version to 29.3rc1 --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 70f672132b41..8337d69535b1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -28,9 +28,9 @@ get_directory_property(precious_variables CACHE_VARIABLES) #============================= set(CLIENT_NAME "Bitcoin Core") set(CLIENT_VERSION_MAJOR 29) -set(CLIENT_VERSION_MINOR 
2) +set(CLIENT_VERSION_MINOR 3) set(CLIENT_VERSION_BUILD 0) -set(CLIENT_VERSION_RC 0) +set(CLIENT_VERSION_RC 1) set(CLIENT_VERSION_IS_RELEASE "true") set(COPYRIGHT_YEAR "2025") From b834447fb2f2073e25164a80ba197a3120610b92 Mon Sep 17 00:00:00 2001 From: glozow Date: Mon, 12 Jan 2026 14:37:04 -0800 Subject: [PATCH 101/115] [doc] generate manpages 29.3rc1 --- doc/man/bitcoin-cli.1 | 6 +++--- doc/man/bitcoin-qt.1 | 6 +++--- doc/man/bitcoin-tx.1 | 6 +++--- doc/man/bitcoin-util.1 | 6 +++--- doc/man/bitcoin-wallet.1 | 6 +++--- doc/man/bitcoind.1 | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/doc/man/bitcoin-cli.1 b/doc/man/bitcoin-cli.1 index ce6f35c198e6..aad8ffd52bd0 100644 --- a/doc/man/bitcoin-cli.1 +++ b/doc/man/bitcoin-cli.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-CLI "1" "October 2025" "bitcoin-cli v29.2.0" "User Commands" +.TH BITCOIN-CLI "1" "January 2026" "bitcoin-cli v29.3.0rc1" "User Commands" .SH NAME -bitcoin-cli \- manual page for bitcoin-cli v29.2.0 +bitcoin-cli \- manual page for bitcoin-cli v29.3.0rc1 .SH SYNOPSIS .B bitcoin-cli [\fI\,options\/\fR] \fI\, \/\fR[\fI\,params\/\fR] @@ -15,7 +15,7 @@ bitcoin-cli \- manual page for bitcoin-cli v29.2.0 .B bitcoin-cli [\fI\,options\/\fR] \fI\,help \/\fR .SH DESCRIPTION -Bitcoin Core RPC client version v29.2.0 +Bitcoin Core RPC client version v29.3.0rc1 .PP The bitcoin\-cli utility provides a command line interface to interact with a Bitcoin Core RPC server. .PP diff --git a/doc/man/bitcoin-qt.1 b/doc/man/bitcoin-qt.1 index 5efc9e96172e..ba38159542f5 100644 --- a/doc/man/bitcoin-qt.1 +++ b/doc/man/bitcoin-qt.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-QT "1" "October 2025" "bitcoin-qt v29.2.0" "User Commands" +.TH BITCOIN-QT "1" "January 2026" "bitcoin-qt v29.3.0rc1" "User Commands" .SH NAME -bitcoin-qt \- manual page for bitcoin-qt v29.2.0 +bitcoin-qt \- manual page for bitcoin-qt v29.3.0rc1 .SH SYNOPSIS .B bitcoin-qt [\fI\,options\/\fR] [\fI\,URI\/\fR] .SH DESCRIPTION -Bitcoin Core version v29.2.0 +Bitcoin Core version v29.3.0rc1 .PP The bitcoin\-qt application provides a graphical interface for interacting with Bitcoin Core. .PP diff --git a/doc/man/bitcoin-tx.1 b/doc/man/bitcoin-tx.1 index 90a233619fa7..4be1d5829118 100644 --- a/doc/man/bitcoin-tx.1 +++ b/doc/man/bitcoin-tx.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-TX "1" "October 2025" "bitcoin-tx v29.2.0" "User Commands" +.TH BITCOIN-TX "1" "January 2026" "bitcoin-tx v29.3.0rc1" "User Commands" .SH NAME -bitcoin-tx \- manual page for bitcoin-tx v29.2.0 +bitcoin-tx \- manual page for bitcoin-tx v29.3.0rc1 .SH SYNOPSIS .B bitcoin-tx [\fI\,options\/\fR] \fI\, \/\fR[\fI\,commands\/\fR] @@ -9,7 +9,7 @@ bitcoin-tx \- manual page for bitcoin-tx v29.2.0 .B bitcoin-tx [\fI\,options\/\fR] \fI\,-create \/\fR[\fI\,commands\/\fR] .SH DESCRIPTION -Bitcoin Core bitcoin\-tx utility version v29.2.0 +Bitcoin Core bitcoin\-tx utility version v29.3.0rc1 .PP The bitcoin\-tx tool is used for creating and modifying bitcoin transactions. .PP diff --git a/doc/man/bitcoin-util.1 b/doc/man/bitcoin-util.1 index 4186bd3f5a79..f4bc33f4da37 100644 --- a/doc/man/bitcoin-util.1 +++ b/doc/man/bitcoin-util.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-UTIL "1" "October 2025" "bitcoin-util v29.2.0" "User Commands" +.TH BITCOIN-UTIL "1" "January 2026" "bitcoin-util v29.3.0rc1" "User Commands" .SH NAME -bitcoin-util \- manual page for bitcoin-util v29.2.0 +bitcoin-util \- manual page for bitcoin-util v29.3.0rc1 .SH SYNOPSIS .B bitcoin-util [\fI\,options\/\fR] [\fI\,command\/\fR] @@ -9,7 +9,7 @@ bitcoin-util \- manual page for bitcoin-util v29.2.0 .B bitcoin-util [\fI\,options\/\fR] \fI\,grind \/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-util utility version v29.2.0 +Bitcoin Core bitcoin\-util utility version v29.3.0rc1 .PP The bitcoin\-util tool provides bitcoin related functionality that does not rely on the ability to access a running node. Available [commands] are listed below. .SH OPTIONS diff --git a/doc/man/bitcoin-wallet.1 b/doc/man/bitcoin-wallet.1 index 97c6144f81a0..000fb2a814d8 100644 --- a/doc/man/bitcoin-wallet.1 +++ b/doc/man/bitcoin-wallet.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-WALLET "1" "October 2025" "bitcoin-wallet v29.2.0" "User Commands" +.TH BITCOIN-WALLET "1" "January 2026" "bitcoin-wallet v29.3.0rc1" "User Commands" .SH NAME -bitcoin-wallet \- manual page for bitcoin-wallet v29.2.0 +bitcoin-wallet \- manual page for bitcoin-wallet v29.3.0rc1 .SH SYNOPSIS .B bitcoin-wallet [\fI\,options\/\fR] \fI\,\/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-wallet utility version v29.2.0 +Bitcoin Core bitcoin\-wallet utility version v29.3.0rc1 .PP bitcoin\-wallet is an offline tool for creating and interacting with Bitcoin Core wallet files. .PP diff --git a/doc/man/bitcoind.1 b/doc/man/bitcoind.1 index 82804a50c83e..f0005de61c85 100644 --- a/doc/man/bitcoind.1 +++ b/doc/man/bitcoind.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIND "1" "October 2025" "bitcoind v29.2.0" "User Commands" +.TH BITCOIND "1" "January 2026" "bitcoind v29.3.0rc1" "User Commands" .SH NAME -bitcoind \- manual page for bitcoind v29.2.0 +bitcoind \- manual page for bitcoind v29.3.0rc1 .SH SYNOPSIS .B bitcoind [\fI\,options\/\fR] .SH DESCRIPTION -Bitcoin Core daemon version v29.2.0 +Bitcoin Core daemon version v29.3.0rc1 .PP The Bitcoin Core daemon (bitcoind) is a headless program that connects to the Bitcoin network to validate and relay transactions and blocks, as well as relaying addresses. .PP From 7475d134f6a3a6039ab6b9d39706ade47c764aa8 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 15 Jan 2026 19:01:19 +0000 Subject: [PATCH 102/115] Wallet/bdb: Safely and correctly list files only used by the single wallet If any other files exist in the directory, we cannot assume the sharable files are exclusively for this wallet. But if they are, this also cleans up other log.* files --- src/wallet/bdb.cpp | 48 ++++++++++++++++++++++++++++++++++++++++++++++ src/wallet/bdb.h | 15 +-------------- 2 files changed, 49 insertions(+), 14 deletions(-) diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index 79851dff33fa..f5a18266edb5 100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -340,6 +341,53 @@ bool BerkeleyDatabase::Verify(bilingual_str& errorStr) return true; } +std::vector BerkeleyDatabase::Files() +{ + std::vector files; + // If the wallet is the *only* file, clean up the entire BDB environment + constexpr auto build_files_list = [](std::vector& files, const std::shared_ptr& env, const fs::path& filename) { + if (env->m_databases.size() != 1) return false; + + const auto env_dir = env->Directory(); + const auto db_subdir = env_dir / "database"; + if (fs::exists(db_subdir)) { + if (!fs::is_directory(db_subdir)) return false; + for (const auto& entry : fs::directory_iterator(db_subdir)) { + const auto& path = 
entry.path().filename(); + if (!fs::PathToString(path).starts_with("log.")) { + return false; + } + files.emplace_back(entry.path()); + } + } + const std::set allowed_paths = { + filename, + "db.log", + ".walletlock", + "database" + }; + for (const auto& entry : fs::directory_iterator(env_dir)) { + const auto& path = entry.path().filename(); + if (allowed_paths.contains(path)) { + files.emplace_back(entry.path()); + } else if (fs::is_directory(entry.path())) { + // Subdirectories can't possibly be using this db env, and is expected if this is a non-directory wallet + // Do not include them in Files, but still allow the env cleanup + } else { + return false; + } + } + return true; + }; + try { + if (build_files_list(files, env, m_filename)) return files; + } catch (...) { + // Give up building the comprehensive file list if any error occurs + } + // Otherwise, it's only really safe to delete the one wallet file + return {env->Directory() / m_filename}; +} + void BerkeleyEnvironment::CheckpointLSN(const std::string& strFile) { dbenv->txn_checkpoint(0, 0, 0); diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h index ec773fd1770f..a7cf953ed218 100644 --- a/src/wallet/bdb.h +++ b/src/wallet/bdb.h @@ -132,20 +132,7 @@ class BerkeleyDatabase : public WalletDatabase /** Return path to main database filename */ std::string Filename() override { return fs::PathToString(env->Directory() / m_filename); } - std::vector Files() override - { - std::vector files; - files.emplace_back(env->Directory() / m_filename); - if (env->m_databases.size() == 1) { - files.emplace_back(env->Directory() / "db.log"); - files.emplace_back(env->Directory() / ".walletlock"); - files.emplace_back(env->Directory() / "database" / "log.0000000001"); - files.emplace_back(env->Directory() / "database"); - // Note that this list is not exhaustive as BDB may create more log files, and possibly other ones too - // However it should be good enough for the only calls to Files() - } - return files; - } + 
std::vector Files() override; std::string Format() override { return "bdb"; } /** From 60f529027c6eacbdc298fab50192f8c60d7082a1 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 15 Jan 2026 19:03:23 +0000 Subject: [PATCH 103/115] Wallet/Migration: If loading the new watchonly or solvables wallet fails, log the correct wallet name in error message --- src/wallet/wallet.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 913c74532058..992b02a98954 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4532,7 +4532,7 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr to_reload = LoadWallet(context, name, /*load_on_start=*/std::nullopt, options, status, error, warnings); if (!to_reload) { LogError("Failed to load wallet '%s' after migration. Rolling back migration to preserve consistency. " - "Error cause: %s\n", wallet_name, error.original); + "Error cause: %s\n", name, error.original); return false; } return true; From cef01d0be5223e9d33efc897d7fbe5d0a08692c0 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 15 Jan 2026 19:27:23 +0000 Subject: [PATCH 104/115] Wallet/Migration: Skip moving the backup file back and forth for no reason Since we no longer delete the wallet directory, there's no need to vacate it The moving only served to risk errors by crossing filesystem boundaries (which fs::rename can't handle) --- src/wallet/wallet.cpp | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 992b02a98954..df53f904cc2a 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4634,9 +4634,6 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr } if (!success) { // Migration failed, cleanup - // Before deleting the wallet's directory, copy the backup file to the top-level wallets dir - fs::path temp_backup_location = fsbridge::AbsPathJoin(GetWalletDir(), backup_filename); - 
fs::rename(backup_path, temp_backup_location); // Make list of wallets to cleanup std::vector> created_wallets; @@ -4680,15 +4677,12 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr // Convert the backup file to the wallet db file by renaming it and moving it into the wallet's directory. // Reload it into memory if the wallet was previously loaded. bilingual_str restore_error; - const auto& ptr_wallet = RestoreWallet(context, temp_backup_location, wallet_name, /*load_on_start=*/std::nullopt, status, restore_error, warnings, /*load_after_restore=*/was_loaded); + const auto& ptr_wallet = RestoreWallet(context, backup_path, wallet_name, /*load_on_start=*/std::nullopt, status, restore_error, warnings, /*load_after_restore=*/was_loaded); if (!restore_error.empty()) { error += restore_error + _("\nUnable to restore backup of wallet."); return util::Error{error}; } - // The wallet directory has been restored, but just in case, copy the previously created backup to the wallet dir - fs::rename(temp_backup_location, backup_path); - // Verify that there is no dangling wallet: when the wallet wasn't loaded before, expect null. // This check is performed after restoration to avoid an early error before saving the backup. 
bool wallet_reloaded = ptr_wallet != nullptr; From 69a6b9b1152ba0bb3edab6d2a54509fd416b24c8 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 20 Jan 2026 18:20:14 +0000 Subject: [PATCH 105/115] Bugfix: Wallet/Migration: Move backup into wallet directory when migrating from non-directory While 30.x+ keep backup files in walletdir, 29.x places them in the migrated wallet directory --- src/wallet/wallet.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index df53f904cc2a..45adda65ead9 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4581,6 +4581,12 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr // First change to using SQLite if (!local_wallet->MigrateToSQLite(error)) return util::Error{error}; + // In case we're migrating from file to directory, move the backup into it + this_wallet_dir = fs::absolute(fs::PathFromString(local_wallet->GetDatabase().Filename())).parent_path(); + backup_path = this_wallet_dir / backup_filename; + fs::rename(res.backup_path, backup_path); + res.backup_path = backup_path; + // Do the migration of keys and scripts for non-blank wallets, and cleanup if it fails success = local_wallet->IsWalletFlagSet(WALLET_FLAG_BLANK_WALLET); if (!success) { From 65173944ed60df3b9cffca95932aed8720921478 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 21 Jan 2026 21:32:40 +0000 Subject: [PATCH 106/115] QA: tool_wallet: Check that db.log is deleted with a lone legacy wallet, but not with a shared db environment --- test/functional/tool_wallet.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/test/functional/tool_wallet.py b/test/functional/tool_wallet.py index 979804a5fea8..788d9b0ee8df 100755 --- a/test/functional/tool_wallet.py +++ b/test/functional/tool_wallet.py @@ -410,17 +410,30 @@ def test_dump_createfromdump(self): self.assert_raises_tool_error('Error: Checksum is not the correct size', '-wallet=badload', 
'-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump') assert not (self.nodes[0].wallets_path / "badload").is_dir() if not self.options.descriptors: - os.rename(self.nodes[0].wallets_path / "wallet.dat", self.nodes[0].wallets_path / "default.wallet.dat") + os.rename(self.nodes[0].wallets_path / "wallet.dat", self.nodes[0].wallets_path / "../default.wallet.dat") + (self.nodes[0].wallets_path / "db.log").unlink(missing_ok=True) self.assert_raises_tool_error('Error: Checksum is not the correct size', '-wallet=', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump') assert self.nodes[0].wallets_path.exists() assert not (self.nodes[0].wallets_path / "wallet.dat").exists() + if not self.options.descriptors: + assert not (self.nodes[0].wallets_path / "db.log").exists() self.log.info('Checking createfromdump with an unnamed wallet') self.do_tool_createfromdump("", "wallet.dump") assert (self.nodes[0].wallets_path / "wallet.dat").exists() os.unlink(self.nodes[0].wallets_path / "wallet.dat") if not self.options.descriptors: - os.rename(self.nodes[0].wallets_path / "default.wallet.dat", self.nodes[0].wallets_path / "wallet.dat") + os.rename(self.nodes[0].wallets_path / "../default.wallet.dat", self.nodes[0].wallets_path / "wallet.dat") + + self.log.info('Checking createfromdump with multiple non-directory wallets') + assert not (self.nodes[0].wallets_path / "wallet.dat").is_dir() + assert (self.nodes[0].wallets_path / "db.log").exists() + os.rename(self.nodes[0].wallets_path / "wallet.dat", self.nodes[0].wallets_path / "test.dat") + self.assert_raises_tool_error('Error: Checksum is not the correct size', '-wallet=', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump') + assert not (self.nodes[0].wallets_path / "wallet.dat").exists() + assert (self.nodes[0].wallets_path / "test.dat").exists() + assert (self.nodes[0].wallets_path / "db.log").exists() + os.rename(self.nodes[0].wallets_path / "test.dat", self.nodes[0].wallets_path / "wallet.dat") def 
test_chainless_conflicts(self): self.log.info("Test wallet tool when wallet contains conflicting transactions") From c57009eefcf30091d86fccaa07f0722f6f235cb9 Mon Sep 17 00:00:00 2001 From: Padraic Slattery Date: Mon, 19 Jan 2026 17:45:37 +0100 Subject: [PATCH 107/115] chore: Update outdated GitHub Actions versions Github-Pull: #34344 Rebased-From: 9482f00df0b05e8ef710a7f0fac3262855ce335f --- .github/actions/configure-docker/action.yml | 2 +- .github/workflows/ci.yml | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/actions/configure-docker/action.yml b/.github/actions/configure-docker/action.yml index 131fdb1ccc37..9bf970ee78f3 100644 --- a/.github/actions/configure-docker/action.yml +++ b/.github/actions/configure-docker/action.yml @@ -16,7 +16,7 @@ runs: # This is required to allow buildkit to access the actions cache - name: Expose actions cache variables - uses: actions/github-script@v6 + uses: actions/github-script@v8 with: script: | Object.keys(process.env).forEach(function (key) { diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ae614d5bb299..88e52a627eec 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -49,7 +49,7 @@ jobs: steps: - name: Determine fetch depth run: echo "FETCH_DEPTH=$((${{ github.event.pull_request.commits }} + 2))" >> "$GITHUB_ENV" - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: ${{ env.FETCH_DEPTH }} @@ -125,7 +125,7 @@ jobs: steps: - &CHECKOUT name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: # Ensure the latest merged pull request state is used, even on re-runs. 
ref: &CHECKOUT_REF_TMPL ${{ github.event_name == 'pull_request' && github.ref || '' }} @@ -164,7 +164,7 @@ jobs: FILE_ENV: ${{ matrix.file-env }} - name: Save Ccache cache - uses: actions/cache/save@v4 + uses: actions/cache/save@v5 if: github.event_name != 'pull_request' && steps.ccache-cache.outputs.cache-hit != 'true' with: path: ${{ env.CCACHE_DIR }} @@ -222,13 +222,13 @@ jobs: sed -i '1s/^/set(ENV{CMAKE_POLICY_VERSION_MINIMUM} 3.5)\n/' "${VCPKG_INSTALLATION_ROOT}/scripts/ports.cmake" - name: vcpkg tools cache - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: C:/vcpkg/downloads/tools key: ${{ github.job }}-vcpkg-tools - name: Restore vcpkg binary cache - uses: actions/cache/restore@v4 + uses: actions/cache/restore@v5 id: vcpkg-binary-cache with: path: ~/AppData/Local/vcpkg/archives @@ -239,7 +239,7 @@ jobs: cmake -B build --preset vs2022-static -DCMAKE_TOOLCHAIN_FILE="$env:VCPKG_INSTALLATION_ROOT\scripts\buildsystems\vcpkg.cmake" ${{ matrix.generate-options }} - name: Save vcpkg binary cache - uses: actions/cache/save@v4 + uses: actions/cache/save@v5 if: github.event_name != 'pull_request' && steps.vcpkg-binary-cache.outputs.cache-hit != 'true' && matrix.job-type == 'standard' with: path: ~/AppData/Local/vcpkg/archives @@ -414,7 +414,7 @@ jobs: CONTAINER_NAME: "bitcoin-linter" steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: ref: *CHECKOUT_REF_TMPL fetch-depth: 0 From 6aec0958f12a65567a354a1d08d4bfed126cf34b Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Thu, 25 Sep 2025 18:11:37 +0100 Subject: [PATCH 108/115] ci: remove 3rd party js from windows dll gha job We can use vswhere.exe directly to create a vs developer prompt and so can remove this third party dependency. 
Co-authored-by: David Gumberg Github-Pull: #32513 Rebased-From: 7ae0497eef8f5b37fc1184897a5bbc9f023dfa67 --- .github/workflows/ci.yml | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 88e52a627eec..b0af3eb95fe1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -198,11 +198,15 @@ jobs: steps: - *CHECKOUT - - name: Configure Developer Command Prompt for Microsoft Visual C++ - # Using microsoft/setup-msbuild is not enough. - uses: ilammy/msvc-dev-cmd@v1 - with: - arch: x64 + - name: Set up VS Developer Prompt + shell: pwsh -Command "$PSVersionTable; $PSNativeCommandUseErrorActionPreference = $true; $ErrorActionPreference = 'Stop'; & '{0}'" + run: | + $vswherePath = "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" + $installationPath = & $vswherePath -latest -property installationPath + & "${env:COMSPEC}" /s /c "`"$installationPath\Common7\Tools\vsdevcmd.bat`" -arch=x64 -no_logo && set" | foreach-object { + $name, $value = $_ -split '=', 2 + echo "$name=$value" >> $env:GITHUB_ENV + } - name: Get tool information run: | From 3835e16e5fe9d77d10fe1ce819157980dcea65f8 Mon Sep 17 00:00:00 2001 From: fanquake Date: Thu, 29 Jan 2026 14:21:13 +0000 Subject: [PATCH 109/115] doc: update release notes for v29.x --- doc/release-notes.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/release-notes.md b/doc/release-notes.md index 263ee553d1eb..4970d195e855 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -73,8 +73,10 @@ Notable changes ### Misc +- #32513 ci: remove 3rd party js from windows dll gha job - #33508 ci: fix buildx gha cache authentication on forks - #33581 ci: Properly include $FILE_ENV in DEPENDS_HASH +- #34344 ci: update GitHub Actions versions Credits ======= @@ -90,6 +92,8 @@ Thanks to everyone who directly contributed to this release: - furszy - Hennadii Stepanov - ismaelsadeeq +- m3dwards +- Padraic 
Slattery - Pieter Wuille - SatsAndSports - willcl-ark From 340b58a8cee1ee9d108af500129896f2928c8681 Mon Sep 17 00:00:00 2001 From: sedited Date: Thu, 8 Jan 2026 19:59:15 +0100 Subject: [PATCH 110/115] Add sedited to trusted-keys Github-Pull: bitcoin/bitcoin#34236 Rebased-From: d1b227f3ad19e1364c74fcb3b34717bb2b9b9243 --- contrib/verify-commits/trusted-keys | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/verify-commits/trusted-keys b/contrib/verify-commits/trusted-keys index f25486776f9f..0121f290b047 100644 --- a/contrib/verify-commits/trusted-keys +++ b/contrib/verify-commits/trusted-keys @@ -3,3 +3,4 @@ D1DBF2C4B96F2DEBF4C16654410108112E7EA81F 152812300785C96444D3334D17565732E08E5E41 6B002C6EA3F91B1B0DF0C9BC8F617F1200A6D25C 4D1B3D5ECBA1A7E05371EEBE46800E30FC748A66 +A8FC55F3B04BA3146F3492E79303B33A305224CB From 16493e35cd3dff081cc24285543ea65fc881b6ae Mon Sep 17 00:00:00 2001 From: sedited Date: Thu, 29 Jan 2026 18:01:16 +0100 Subject: [PATCH 111/115] Bump version to 29.3rc2 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8337d69535b1..ed59b307cd7b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -30,7 +30,7 @@ set(CLIENT_NAME "Bitcoin Core") set(CLIENT_VERSION_MAJOR 29) set(CLIENT_VERSION_MINOR 3) set(CLIENT_VERSION_BUILD 0) -set(CLIENT_VERSION_RC 1) +set(CLIENT_VERSION_RC 2) set(CLIENT_VERSION_IS_RELEASE "true") set(COPYRIGHT_YEAR "2025") From 75c2108a684d395dc59a224968bed9801628a35d Mon Sep 17 00:00:00 2001 From: sedited Date: Thu, 29 Jan 2026 18:03:29 +0100 Subject: [PATCH 112/115] [doc] update release notes for 29.3rc2 --- doc/release-notes.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/doc/release-notes.md b/doc/release-notes.md index 4970d195e855..4aebc143d7dc 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -1,6 +1,6 @@ -Bitcoin Core version 29.3rc1 is now available from: +Bitcoin Core version 29.3rc2 is now available from: - 
+ This release includes various bug fixes and performance improvements, as well as updated translations. @@ -54,6 +54,7 @@ Notable changes - #34226 wallet: test: Relative wallet failed migration cleanup - #34123 wallet: migration, avoid creating spendable wallet from a watch-only legacy wallet - #34215 wallettool: fix unnamed createfromdump failure walletsdir deletion +- #34370 wallet: Additional cleanups for migration, and fixes for createfromdump with BDB ### Mining @@ -92,10 +93,12 @@ Thanks to everyone who directly contributed to this release: - furszy - Hennadii Stepanov - ismaelsadeeq +- luke-jr - m3dwards - Padraic Slattery - Pieter Wuille - SatsAndSports +- sedited - willcl-ark As well as to everyone that helped with translations on From 2b2c4daa5357d4d6abefe2c87eb74caf12c09342 Mon Sep 17 00:00:00 2001 From: sedited Date: Thu, 29 Jan 2026 18:08:10 +0100 Subject: [PATCH 113/115] [doc] generate manpages 29.3rc2 --- doc/man/bitcoin-cli.1 | 6 +++--- doc/man/bitcoin-qt.1 | 6 +++--- doc/man/bitcoin-tx.1 | 6 +++--- doc/man/bitcoin-util.1 | 6 +++--- doc/man/bitcoin-wallet.1 | 6 +++--- doc/man/bitcoind.1 | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/doc/man/bitcoin-cli.1 b/doc/man/bitcoin-cli.1 index aad8ffd52bd0..e762c0fa39db 100644 --- a/doc/man/bitcoin-cli.1 +++ b/doc/man/bitcoin-cli.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-CLI "1" "January 2026" "bitcoin-cli v29.3.0rc1" "User Commands" +.TH BITCOIN-CLI "1" "January 2026" "bitcoin-cli v29.3.0rc2" "User Commands" .SH NAME -bitcoin-cli \- manual page for bitcoin-cli v29.3.0rc1 +bitcoin-cli \- manual page for bitcoin-cli v29.3.0rc2 .SH SYNOPSIS .B bitcoin-cli [\fI\,options\/\fR] \fI\, \/\fR[\fI\,params\/\fR] @@ -15,7 +15,7 @@ bitcoin-cli \- manual page for bitcoin-cli v29.3.0rc1 .B bitcoin-cli [\fI\,options\/\fR] \fI\,help \/\fR .SH DESCRIPTION -Bitcoin Core RPC client version v29.3.0rc1 +Bitcoin Core RPC client version v29.3.0rc2 .PP The bitcoin\-cli utility provides a command line interface to interact with a Bitcoin Core RPC server. .PP diff --git a/doc/man/bitcoin-qt.1 b/doc/man/bitcoin-qt.1 index ba38159542f5..4a990e8082b8 100644 --- a/doc/man/bitcoin-qt.1 +++ b/doc/man/bitcoin-qt.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-QT "1" "January 2026" "bitcoin-qt v29.3.0rc1" "User Commands" +.TH BITCOIN-QT "1" "January 2026" "bitcoin-qt v29.3.0rc2" "User Commands" .SH NAME -bitcoin-qt \- manual page for bitcoin-qt v29.3.0rc1 +bitcoin-qt \- manual page for bitcoin-qt v29.3.0rc2 .SH SYNOPSIS .B bitcoin-qt [\fI\,options\/\fR] [\fI\,URI\/\fR] .SH DESCRIPTION -Bitcoin Core version v29.3.0rc1 +Bitcoin Core version v29.3.0rc2 .PP The bitcoin\-qt application provides a graphical interface for interacting with Bitcoin Core. .PP diff --git a/doc/man/bitcoin-tx.1 b/doc/man/bitcoin-tx.1 index 4be1d5829118..7b3b996afd89 100644 --- a/doc/man/bitcoin-tx.1 +++ b/doc/man/bitcoin-tx.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-TX "1" "January 2026" "bitcoin-tx v29.3.0rc1" "User Commands" +.TH BITCOIN-TX "1" "January 2026" "bitcoin-tx v29.3.0rc2" "User Commands" .SH NAME -bitcoin-tx \- manual page for bitcoin-tx v29.3.0rc1 +bitcoin-tx \- manual page for bitcoin-tx v29.3.0rc2 .SH SYNOPSIS .B bitcoin-tx [\fI\,options\/\fR] \fI\, \/\fR[\fI\,commands\/\fR] @@ -9,7 +9,7 @@ bitcoin-tx \- manual page for bitcoin-tx v29.3.0rc1 .B bitcoin-tx [\fI\,options\/\fR] \fI\,-create \/\fR[\fI\,commands\/\fR] .SH DESCRIPTION -Bitcoin Core bitcoin\-tx utility version v29.3.0rc1 +Bitcoin Core bitcoin\-tx utility version v29.3.0rc2 .PP The bitcoin\-tx tool is used for creating and modifying bitcoin transactions. .PP diff --git a/doc/man/bitcoin-util.1 b/doc/man/bitcoin-util.1 index f4bc33f4da37..d10f4b48b35b 100644 --- a/doc/man/bitcoin-util.1 +++ b/doc/man/bitcoin-util.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-UTIL "1" "January 2026" "bitcoin-util v29.3.0rc1" "User Commands" +.TH BITCOIN-UTIL "1" "January 2026" "bitcoin-util v29.3.0rc2" "User Commands" .SH NAME -bitcoin-util \- manual page for bitcoin-util v29.3.0rc1 +bitcoin-util \- manual page for bitcoin-util v29.3.0rc2 .SH SYNOPSIS .B bitcoin-util [\fI\,options\/\fR] [\fI\,command\/\fR] @@ -9,7 +9,7 @@ bitcoin-util \- manual page for bitcoin-util v29.3.0rc1 .B bitcoin-util [\fI\,options\/\fR] \fI\,grind \/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-util utility version v29.3.0rc1 +Bitcoin Core bitcoin\-util utility version v29.3.0rc2 .PP The bitcoin\-util tool provides bitcoin related functionality that does not rely on the ability to access a running node. Available [commands] are listed below. .SH OPTIONS diff --git a/doc/man/bitcoin-wallet.1 b/doc/man/bitcoin-wallet.1 index 000fb2a814d8..c8737d7989cd 100644 --- a/doc/man/bitcoin-wallet.1 +++ b/doc/man/bitcoin-wallet.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-WALLET "1" "January 2026" "bitcoin-wallet v29.3.0rc1" "User Commands" +.TH BITCOIN-WALLET "1" "January 2026" "bitcoin-wallet v29.3.0rc2" "User Commands" .SH NAME -bitcoin-wallet \- manual page for bitcoin-wallet v29.3.0rc1 +bitcoin-wallet \- manual page for bitcoin-wallet v29.3.0rc2 .SH SYNOPSIS .B bitcoin-wallet [\fI\,options\/\fR] \fI\,\/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-wallet utility version v29.3.0rc1 +Bitcoin Core bitcoin\-wallet utility version v29.3.0rc2 .PP bitcoin\-wallet is an offline tool for creating and interacting with Bitcoin Core wallet files. .PP diff --git a/doc/man/bitcoind.1 b/doc/man/bitcoind.1 index f0005de61c85..bcf235445212 100644 --- a/doc/man/bitcoind.1 +++ b/doc/man/bitcoind.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIND "1" "January 2026" "bitcoind v29.3.0rc1" "User Commands" +.TH BITCOIND "1" "January 2026" "bitcoind v29.3.0rc2" "User Commands" .SH NAME -bitcoind \- manual page for bitcoind v29.3.0rc1 +bitcoind \- manual page for bitcoind v29.3.0rc2 .SH SYNOPSIS .B bitcoind [\fI\,options\/\fR] .SH DESCRIPTION -Bitcoin Core daemon version v29.3.0rc1 +Bitcoin Core daemon version v29.3.0rc2 .PP The Bitcoin Core daemon (bitcoind) is a headless program that connects to the Bitcoin network to validate and relay transactions and blocks, as well as relaying addresses. .PP From cc0b54458c2b9c3c8cfe4df60f94d45afaa92603 Mon Sep 17 00:00:00 2001 From: Anthony Towns Date: Sat, 7 Feb 2026 02:37:42 +1000 Subject: [PATCH 114/115] validation: Report script-failure errors more consistently. Previously we returned either `mempool-script-verify-flag-failed` or `mandatory-script-verify-flag-failed` depending on whether any policy flags were set (that is, flags not included in MANDATORY_SCRIPT_VERIFY_FLAGS). 
This causes problems when we add new flags for future soft-forks, as we don't add them to the MANDATORY set immediately, so after they activate, block verification returns `mempool-` errors. --- src/test/txvalidationcache_tests.cpp | 3 ++- src/validation.cpp | 20 ++++++++++++-------- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp index 46cb84824d78..581d98db9d70 100644 --- a/src/test/txvalidationcache_tests.cpp +++ b/src/test/txvalidationcache_tests.cpp @@ -24,7 +24,8 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, script_verify_flags flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, ValidationCache& validation_cache, - std::vector* pvChecks) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + std::vector* pvChecks, + bool is_consensus = false) EXCLUSIVE_LOCKS_REQUIRED(cs_main); BOOST_AUTO_TEST_SUITE(txvalidationcache_tests) diff --git a/src/validation.cpp b/src/validation.cpp index 46428f30333a..523208d80f4e 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -141,7 +141,8 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, script_verify_flags flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, ValidationCache& validation_cache, - std::vector* pvChecks = nullptr) + std::vector* pvChecks = nullptr, + bool is_consensus = false) EXCLUSIVE_LOCKS_REQUIRED(cs_main); bool CheckFinalTxAtTip(const CBlockIndex& active_chain_tip, const CTransaction& tx) @@ -427,7 +428,7 @@ static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationS } // Call CheckInputScripts() to cache signature and script validity against current tip consensus rules. 
- return CheckInputScripts(tx, state, view, flags, /* cacheSigStore= */ true, /* cacheFullScriptStore= */ true, txdata, validation_cache); + return CheckInputScripts(tx, state, view, flags, /*cacheSigStore=*/ true, /*cacheFullScriptStore=*/ true, txdata, validation_cache, /*pvChecks=*/nullptr, /*is_consensus=*/true); } namespace { @@ -1249,7 +1250,7 @@ bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws) // Check input scripts and signatures. // This is done last to help prevent CPU exhaustion denial-of-service attacks. - if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, ws.m_precomputed_txdata, GetValidationCache())) { + if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, ws.m_precomputed_txdata, GetValidationCache(), /*pvChecks=*/nullptr, /*is_consensus=*/false)) { // Detect a failure due to a missing witness so that p2p code can handle rejection caching appropriately. if (!tx.HasWitness() && SpendsNonAnchorWitnessProg(tx, m_view)) { state.Invalid(TxValidationResult::TX_WITNESS_STRIPPED, @@ -2173,8 +2174,11 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, script_verify_flags flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, ValidationCache& validation_cache, - std::vector* pvChecks) + std::vector* pvChecks, + bool is_consensus) { + if ((flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) == 0) is_consensus = true; + if (tx.IsCoinBase()) return true; if (pvChecks) { @@ -2227,10 +2231,10 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, // non-standard DER encodings or non-null dummy // arguments) or due to new consensus rules introduced in // soft forks. 
- if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) { - return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("mempool-script-verify-flag-failed (%s)", ScriptErrorString(result->first)), result->second); - } else { + if (is_consensus) { return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(result->first)), result->second); + } else { + return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("mempool-script-verify-flag-failed (%s)", ScriptErrorString(result->first)), result->second); } } } @@ -2665,7 +2669,7 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, std::vector vChecks; bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */ TxValidationState tx_state; - if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], m_chainman.m_validation_cache, parallel_script_checks ? &vChecks : nullptr)) { + if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], m_chainman.m_validation_cache, parallel_script_checks ? 
&vChecks : nullptr, /*is_consensus=*/true)) { // Any transaction validation failure in ConnectBlock is a block consensus failure state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(), tx_state.GetDebugMessage()); From dda8da647888578185abbd2509186dcea95bddf5 Mon Sep 17 00:00:00 2001 From: Anthony Towns Date: Sat, 7 Feb 2026 04:45:02 +1000 Subject: [PATCH 115/115] tests: Update CAT and CTV tests for updated script-verify failure messages --- .../functional/feature_checktemplateverify.py | 26 +++++++++++-------- test/functional/feature_opcat.py | 8 +++--- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/test/functional/feature_checktemplateverify.py b/test/functional/feature_checktemplateverify.py index 65fec6b5a8f6..7d535a9ea29d 100755 --- a/test/functional/feature_checktemplateverify.py +++ b/test/functional/feature_checktemplateverify.py @@ -41,12 +41,16 @@ from io import BytesIO from test_framework.address import script_to_p2sh -CHECKTEMPLATEVERIFY_ERROR = "non-mandatory-script-verify-flag (Script failed an OP_CHECKTEMPLATEVERIFY operation)" -DISCOURAGED_ERROR = ( - "non-mandatory-script-verify-flag (NOPx reserved for soft-fork upgrades)" +CHECKTEMPLATEVERIFY_BLOCK_ERROR = "mandatory-script-verify-flag-failed (Script failed an OP_CHECKTEMPLATEVERIFY operation)" +CHECKTEMPLATEVERIFY_MEMPOOL_ERROR = "mempool-script-verify-flag-failed (Script failed an OP_CHECKTEMPLATEVERIFY operation)" +DISCOURAGED_MEMPOOL_ERROR = ( + "mempool-script-verify-flag-failed (NOPx reserved for soft-fork upgrades)" ) -STACK_TOO_SHORT_ERROR = ( - "non-mandatory-script-verify-flag (Operation not valid with the current stack size)" +STACK_TOO_SHORT_BLOCK_ERROR = ( + "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)" +) +STACK_TOO_SHORT_MEMPOOL_ERROR = ( + "mempool-script-verify-flag-failed (Operation not valid with the current stack size)" ) @@ -152,7 +156,7 @@ def add_block(self, txs): 
assert_equal(self.nodes[0].getbestblockhash(), h) return h - def fail_block(self, txs, cause=CHECKTEMPLATEVERIFY_ERROR): + def fail_block(self, txs, cause=CHECKTEMPLATEVERIFY_BLOCK_ERROR): block, h = self.get_block(txs) assert_equal(self.nodes[0].submitblock(block), cause) assert_equal(self.nodes[0].getbestblockhash(), self.tip) @@ -501,7 +505,7 @@ def run_test(self): assert_raises_rpc_error( -26, - DISCOURAGED_ERROR, + DISCOURAGED_MEMPOOL_ERROR, self.nodes[0].sendrawtransaction, check_template_verify_tx_wrongsize_stack.serialize().hex(), ) @@ -533,7 +537,7 @@ def run_test(self): assert_raises_rpc_error( -26, - STACK_TOO_SHORT_ERROR, + STACK_TOO_SHORT_MEMPOOL_ERROR, self.nodes[0].sendrawtransaction, check_template_verify_tx_empty_stack.serialize().hex(), ) @@ -543,7 +547,7 @@ def run_test(self): ) # Now we verify that a block with this transaction is invalid - self.fail_block([check_template_verify_tx_empty_stack], STACK_TOO_SHORT_ERROR) + self.fail_block([check_template_verify_tx_empty_stack], STACK_TOO_SHORT_BLOCK_ERROR) self.log.info( "Segwit OP_CHECKTEMPLATEVERIFY with wrong size stack spend rejected from block" ) @@ -558,7 +562,7 @@ def run_test(self): ] assert_raises_rpc_error( -26, - DISCOURAGED_ERROR, + DISCOURAGED_MEMPOOL_ERROR, self.nodes[0].sendrawtransaction, check_template_verify_tx_empty_stack.serialize().hex(), ) @@ -631,7 +635,7 @@ def run_test(self): assert_raises_rpc_error( -26, - CHECKTEMPLATEVERIFY_ERROR, + CHECKTEMPLATEVERIFY_MEMPOOL_ERROR, self.nodes[0].sendrawtransaction, p2sh_check_template_verify_tx.serialize().hex(), ) diff --git a/test/functional/feature_opcat.py b/test/functional/feature_opcat.py index c61da51b0a59..8e5dfb81db3a 100755 --- a/test/functional/feature_opcat.py +++ b/test/functional/feature_opcat.py @@ -39,16 +39,16 @@ from test_framework.address import script_to_p2sh DISCOURAGED_ERROR = ( - "non-mandatory-script-verify-flag (NOPx reserved for soft-fork upgrades)" + "mempool-script-verify-flag-failed (NOPx reserved for 
soft-fork upgrades)" ) STACK_TOO_SHORT_ERROR = ( - "non-mandatory-script-verify-flag (Operation not valid with the current stack size)" + "mempool-script-verify-flag-failed (Operation not valid with the current stack size)" ) DISABLED_OP_CODE = ( - "mandatory-script-verify-flag-failed (Attempted to use a disabled opcode)" + "mempool-script-verify-flag-failed (Attempted to use a disabled opcode)" ) MAX_PUSH_ERROR = ( - "non-mandatory-script-verify-flag (Push value size limit exceeded)" + "mempool-script-verify-flag-failed (Push value size limit exceeded)" ) def random_bytes(n):