Mirror of https://github.com/bitcoin/bitcoin.git (synced 2025-01-09 11:27:28 -03:00)

Compare commits: 95 commits, 7de205d2c1 ... 7c123c08dd
Commits (SHA1):
7c123c08dd, 433412fd84, c506f2cee7, 41a2ce9b7d, 6475849c40, 49fc2258cf, ac918c7cc0, a0f0c48ae2,
558783625c, 3e936789b1, 5af642bf48, 29bca9713d, 4036ee3f2b, 6aa0e70ccb, 3e0a992a3f, 604bf2ea37,
04249682e3, fa0411ee30, 2bdaf52ed1, 34e8ee23b8, 228aba2c4d, 9b9752217f, 87c9ebd889, df5c643f92,
fa3de038f7, ba0cb7d5a5, 69e35f5c60, 17db84dbb8, e6f14241f6, a137b0bd6b, 67bfe28995, ad174c2817,
b29d68f942, 9355578a77, f95fb79372, bc43ecaf6d, 226d03dd61, fa63b8232f, fa62c8b1f0, 366ae00b77,
e366408590, 5709718b83, b0b8d96d93, fc7b214847, 273440d5c9, 4cdf50c4ba, faf7eac364, fafa9cc7a5,
fa044857ca, 63b6b638aa, ecaa786cc1, e196190a28, bb57017b29, 5bbbc0d0ee, d9d5bc2e74, fa494a1d53,
faaf4800aa, faa5391f77, fa86223475, faae6fa5f6, facc4f120b, fac3a782ea, c1252b14d7, be1a2e5dfb,
fa0c473d4c, ea53568a06, b8710201fb, 23b8a424fb, 0a76c292ac, fadd568931, fa83bec78e, 4f06ae05ed,
366fbf152c, c991cea1a0, 9a47852d88, bfc4e029d4, 81cea5d4ee, e058544d0e, b9766c9977, e8f0e6efaf,
facb4d010c, fa0998f0a0, fa9aacf614, fa397177ac, b6f0593f43, f9cac63523, fa6e599cf9, f9650e18ea,
221c789e91, 06443b8f28, 1d01ad4d73, 937ef9eb40, ad224429f8, a2c45ae548, e56fc7ce6a
85 changed files with 1021 additions and 632 deletions
@@ -71,7 +71,6 @@
"BUILD_GUI_TESTS": "ON",
"BUILD_KERNEL_LIB": "ON",
"BUILD_SHARED_LIBS": "ON",
"BUILD_TESTING": "ON",
"BUILD_TESTS": "ON",
"BUILD_TX": "ON",
"BUILD_UTIL": "ON",

@@ -27,4 +27,3 @@ export BITCOIN_CONFIG="\
-DAPPEND_CPPFLAGS='-U_FORTIFY_SOURCE' \
"
export USE_MEMORY_SANITIZER="true"
export RUN_FUNCTIONAL_TESTS="false"

@@ -49,7 +49,7 @@ if [ -n "$PIP_PACKAGES" ]; then
fi

if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then
${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-19.1.0" /msan/llvm-project
${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-19.1.6" /msan/llvm-project

cmake -G Ninja -B /msan/clang_build/ \
-DLLVM_ENABLE_PROJECTS="clang" \

@@ -6,7 +6,7 @@ cmake_path(GET JSON_SOURCE_PATH STEM json_source_basename)

file(READ ${JSON_SOURCE_PATH} hex_content HEX)
string(REGEX REPLACE "................" "\\0\n" formatted_bytes "${hex_content}")
string(REGEX REPLACE "[^\n][^\n]" "0x\\0, " formatted_bytes "${formatted_bytes}")
string(REGEX REPLACE "[^\n][^\n]" "'\\\\x\\0'," formatted_bytes "${formatted_bytes}")

set(header_content
"#include <string_view>

@@ -6,7 +6,7 @@ cmake_path(GET RAW_SOURCE_PATH STEM raw_source_basename)

file(READ ${RAW_SOURCE_PATH} hex_content HEX)
string(REGEX REPLACE "................" "\\0\n" formatted_bytes "${hex_content}")
string(REGEX REPLACE "[^\n][^\n]" "std::byte{0x\\0}, " formatted_bytes "${formatted_bytes}")
string(REGEX REPLACE "[^\n][^\n]" "std::byte{0x\\0}," formatted_bytes "${formatted_bytes}")

set(header_content
"#include <cstddef>

@@ -451,7 +451,7 @@ inspecting signatures in Mach-O binaries.")
#t))))))))

(define-public glibc-2.31
(let ((commit "8e30f03744837a85e33d84ccd34ed3abe30d37c3"))
(let ((commit "7b27c450c34563a28e634cccb399cd415e71ebfe"))
(package
(inherit glibc) ;; 2.35
(version "2.31")
@@ -463,7 +463,7 @@ inspecting signatures in Mach-O binaries.")
(file-name (git-file-name "glibc" commit))
(sha256
(base32
"1zi0s9yy5zkisw823vivn7zlj8w6g9p3mm7lmlqiixcxdkz4dbn6"))
"017qdpr5id7ddb4lpkzj2li1abvw916m3fc6n7nw28z4h5qbv2n0"))
(patches (search-our-patches "glibc-guix-prefix.patch"))))
(arguments
(substitute-keyword-arguments (package-arguments glibc)
@@ -474,6 +474,8 @@ inspecting signatures in Mach-O binaries.")
"--enable-cet",
"--enable-bind-now",
"--disable-werror",
"--disable-timezone-tools",
"--disable-profile",
building-on)))
((#:phases phases)
`(modify-phases ,phases

@@ -1,9 +1,9 @@
package=native_capnp
$(package)_version=1.0.2
$(package)_version=1.1.0
$(package)_download_path=https://capnproto.org/
$(package)_download_file=capnproto-c++-$($(package)_version).tar.gz
$(package)_file_name=capnproto-cxx-$($(package)_version).tar.gz
$(package)_sha256_hash=9057dbc0223366b74bbeca33a05de164a229b0377927f1b7ef3828cdd8cb1d7e
$(package)_sha256_hash=07167580e563f5e821e3b2af1c238c16ec7181612650c5901330fa9a0da50939

define $(package)_set_vars
$(package)_config_opts := -DBUILD_TESTING=OFF

@@ -96,7 +96,7 @@ There is an included test suite that is useful for testing code changes when dev
To run the test suite (recommended), you will need to have Python 3 installed:

```bash
pkg install python3 databases/py-sqlite3
pkg install python3 databases/py-sqlite3 net/py-pyzmq
```
---

@@ -1,6 +1,6 @@
# NetBSD Build Guide

**Updated for NetBSD [10.0](https://netbsd.org/releases/formal-10/NetBSD-10.0.html)**
**Updated for NetBSD [10.1](https://netbsd.org/releases/formal-10/NetBSD-10.1.html)**

This guide describes how to build bitcoind, command-line utilities, and GUI on NetBSD.

@@ -83,6 +83,13 @@ pkgin install qrencode

Otherwise, if you don't need QR encoding support, use the `-DWITH_QRENCODE=OFF` option to disable this feature in order to compile the GUI.

#### Notifications
###### ZeroMQ

Bitcoin Core can provide notifications via ZeroMQ. If the package is installed, support will be compiled in.
```bash
pkgin zeromq
```

#### Test Suite Dependencies

@@ -90,10 +97,10 @@ There is an included test suite that is useful for testing code changes when dev
To run the test suite (recommended), you will need to have Python 3 installed:

```bash
pkgin install python39
pkgin install python310 py310-zmq
```

### Building Bitcoin Core
## Building Bitcoin Core

### 1. Configuration

@@ -1,6 +1,6 @@
# OpenBSD Build Guide

**Updated for OpenBSD [7.5](https://www.openbsd.org/75.html)**
**Updated for OpenBSD [7.6](https://www.openbsd.org/76.html)**

This guide describes how to build bitcoind, command-line utilities, and GUI on OpenBSD.

@@ -90,7 +90,7 @@ There is an included test suite that is useful for testing code changes when dev
To run the test suite (recommended), you will need to have Python 3 installed:

```bash
pkg_add python # Select the newest version of the package.
pkg_add python py3-zmq # Select the newest version of the python package if necessary.
```

## Building Bitcoin Core

doc/release-notes-28121.md (new file, 2 lines)
@@ -0,0 +1,2 @@
The RPC `testmempoolaccept` response now includes a "reject-details" field in some cases,
similar to the complete error messages returned by `sendrawtransaction` (#28121)

@@ -26,6 +26,7 @@ class base_uint
protected:
static_assert(BITS / 32 > 0 && BITS % 32 == 0, "Template parameter BITS must be a positive multiple of 32.");
static constexpr int WIDTH = BITS / 32;
/** Big integer represented with 32-bit digits, least-significant first. */
uint32_t pn[WIDTH];
public:

@@ -139,7 +139,7 @@ std::string EncodeBase58Check(Span<const unsigned char> input)
// add 4-byte hash check to the end
std::vector<unsigned char> vch(input.begin(), input.end());
uint256 hash = Hash(vch);
vch.insert(vch.end(), (unsigned char*)&hash, (unsigned char*)&hash + 4);
vch.insert(vch.end(), hash.data(), hash.data() + 4);
return EncodeBase58(vch);
}

@@ -1,4 +1,4 @@
// Copyright (c) 2017-2022 The Bitcoin Core developers
// Copyright (c) 2017-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

@@ -25,14 +25,14 @@
void ChaCha20Aligned::SetKey(Span<const std::byte> key) noexcept
{
assert(key.size() == KEYLEN);
input[0] = ReadLE32(UCharCast(key.data() + 0));
input[1] = ReadLE32(UCharCast(key.data() + 4));
input[2] = ReadLE32(UCharCast(key.data() + 8));
input[3] = ReadLE32(UCharCast(key.data() + 12));
input[4] = ReadLE32(UCharCast(key.data() + 16));
input[5] = ReadLE32(UCharCast(key.data() + 20));
input[6] = ReadLE32(UCharCast(key.data() + 24));
input[7] = ReadLE32(UCharCast(key.data() + 28));
input[0] = ReadLE32(key.data() + 0);
input[1] = ReadLE32(key.data() + 4);
input[2] = ReadLE32(key.data() + 8);
input[3] = ReadLE32(key.data() + 12);
input[4] = ReadLE32(key.data() + 16);
input[5] = ReadLE32(key.data() + 20);
input[6] = ReadLE32(key.data() + 24);
input[7] = ReadLE32(key.data() + 28);
input[8] = 0;
input[9] = 0;
input[10] = 0;

@@ -1,4 +1,4 @@
// Copyright (c) 2014-2020 The Bitcoin Core developers
// Copyright (c) 2014-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

@@ -7,82 +7,99 @@
#include <compat/endian.h>

#include <concepts>
#include <cstddef>
#include <cstdint>
#include <cstring>

uint16_t static inline ReadLE16(const unsigned char* ptr)
template <typename B>
concept ByteType = std::same_as<B, unsigned char> || std::same_as<B, std::byte>;

template <ByteType B>
inline uint16_t ReadLE16(const B* ptr)
{
uint16_t x;
memcpy(&x, ptr, 2);
return le16toh_internal(x);
}

uint32_t static inline ReadLE32(const unsigned char* ptr)
template <ByteType B>
inline uint32_t ReadLE32(const B* ptr)
{
uint32_t x;
memcpy(&x, ptr, 4);
return le32toh_internal(x);
}

uint64_t static inline ReadLE64(const unsigned char* ptr)
template <ByteType B>
inline uint64_t ReadLE64(const B* ptr)
{
uint64_t x;
memcpy(&x, ptr, 8);
return le64toh_internal(x);
}

void static inline WriteLE16(unsigned char* ptr, uint16_t x)
template <ByteType B>
inline void WriteLE16(B* ptr, uint16_t x)
{
uint16_t v = htole16_internal(x);
memcpy(ptr, &v, 2);
}

void static inline WriteLE32(unsigned char* ptr, uint32_t x)
template <ByteType B>
inline void WriteLE32(B* ptr, uint32_t x)
{
uint32_t v = htole32_internal(x);
memcpy(ptr, &v, 4);
}

void static inline WriteLE64(unsigned char* ptr, uint64_t x)
template <ByteType B>
inline void WriteLE64(B* ptr, uint64_t x)
{
uint64_t v = htole64_internal(x);
memcpy(ptr, &v, 8);
}

uint16_t static inline ReadBE16(const unsigned char* ptr)
template <ByteType B>
inline uint16_t ReadBE16(const B* ptr)
{
uint16_t x;
memcpy(&x, ptr, 2);
return be16toh_internal(x);
}

uint32_t static inline ReadBE32(const unsigned char* ptr)
template <ByteType B>
inline uint32_t ReadBE32(const B* ptr)
{
uint32_t x;
memcpy(&x, ptr, 4);
return be32toh_internal(x);
}

uint64_t static inline ReadBE64(const unsigned char* ptr)
template <ByteType B>
inline uint64_t ReadBE64(const B* ptr)
{
uint64_t x;
memcpy(&x, ptr, 8);
return be64toh_internal(x);
}

void static inline WriteBE16(unsigned char* ptr, uint16_t x)
template <ByteType B>
inline void WriteBE16(B* ptr, uint16_t x)
{
uint16_t v = htobe16_internal(x);
memcpy(ptr, &v, 2);
}

void static inline WriteBE32(unsigned char* ptr, uint32_t x)
template <ByteType B>
inline void WriteBE32(B* ptr, uint32_t x)
{
uint32_t v = htobe32_internal(x);
memcpy(ptr, &v, 4);
}

void static inline WriteBE64(unsigned char* ptr, uint64_t x)
template <ByteType B>
inline void WriteBE64(B* ptr, uint64_t x)
{
uint64_t v = htobe64_internal(x);
memcpy(ptr, &v, 8);

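For readers skimming the hunk above, the sketch below is a minimal, self-contained illustration (not Bitcoin Core code) of what the `ByteType` concept buys: one templated helper now accepts both `unsigned char*` and `std::byte*` buffers, which is why the `UCharCast` calls could be dropped in `chacha20.cpp` earlier in this diff. The byte-by-byte assembly stands in for the `le32toh_internal`/`htole32_internal` helpers of `compat/endian.h` so the example compiles on its own with any C++20 compiler.

```cpp
#include <concepts>
#include <cstddef>
#include <cstdint>

// Same constraint shape as the diff: only unsigned char or std::byte pointers are accepted.
template <typename B>
concept ByteType = std::same_as<B, unsigned char> || std::same_as<B, std::byte>;

// Read a 32-bit little-endian value from either buffer type.
template <ByteType B>
inline uint32_t ReadLE32(const B* ptr)
{
    const auto b = [&](int i) { return static_cast<uint32_t>(static_cast<unsigned char>(ptr[i])); };
    return b(0) | (b(1) << 8) | (b(2) << 16) | (b(3) << 24);
}

// Write a 32-bit value in little-endian byte order into either buffer type.
template <ByteType B>
inline void WriteLE32(B* ptr, uint32_t x)
{
    for (int i = 0; i < 4; ++i) ptr[i] = static_cast<B>(static_cast<unsigned char>(x >> (8 * i)));
}

int main()
{
    unsigned char raw[4];
    std::byte typed[4];
    WriteLE32(raw, 0x01020304u);
    WriteLE32(typed, 0x01020304u);
    // Both buffer types work with the one templated helper; no cast needed.
    return (ReadLE32(raw) == 0x01020304u && ReadLE32(typed) == 0x01020304u) ? 0 : 1;
}
```
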
@@ -14,8 +14,17 @@ namespace util {
class SignalInterrupt;
} // namespace util

static const int DEFAULT_HTTP_THREADS=4;
static const int DEFAULT_HTTP_WORKQUEUE=16;
/**
* The default value for `-rpcthreads`. This number of threads will be created at startup.
*/
static const int DEFAULT_HTTP_THREADS=16;

/**
* The default value for `-rpcworkqueue`. This is the maximum depth of the work queue,
* we don't allocate this number of work queue items upfront.
*/
static const int DEFAULT_HTTP_WORKQUEUE=64;

static const int DEFAULT_HTTP_SERVER_TIMEOUT=30;

struct evhttp_request;

@@ -665,7 +665,7 @@ void SetupServerArgs(ArgsManager& argsman, bool can_listen_ipc)
argsman.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
argsman.AddArg("-rpcwhitelist=<whitelist>", "Set a whitelist to filter incoming RPC calls for a specific user. The field <whitelist> comes in the format: <USERNAME>:<rpc 1>,<rpc 2>,...,<rpc n>. If multiple whitelists are set for a given user, they are set-intersected. See -rpcwhitelistdefault documentation for information on default whitelist behavior.", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
argsman.AddArg("-rpcwhitelistdefault", "Sets default behavior for rpc whitelisting. Unless rpcwhitelistdefault is set to 0, if any -rpcwhitelist is set, the rpc server acts as if all rpc users are subject to empty-unless-otherwise-specified whitelists. If rpcwhitelistdefault is set to 1 and no -rpcwhitelist is set, rpc server acts as if all rpc users are subject to empty whitelists.", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
argsman.AddArg("-rpcworkqueue=<n>", strprintf("Set the depth of the work queue to service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC);
argsman.AddArg("-rpcworkqueue=<n>", strprintf("Set the maximum depth of the work queue to service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC);
argsman.AddArg("-server", "Accept command line and JSON-RPC commands", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
if (can_listen_ipc) {
argsman.AddArg("-ipcbind=<address>", "Bind to Unix socket address and listen for incoming connections. Valid address values are \"unix\" to listen on the default path, <datadir>/node.sock, or \"unix:/custom/path\" to specify a custom path. Can be specified multiple times to listen on multiple paths. Default behavior is not to listen on any path. If relative paths are specified, they are interpreted relative to the network data directory. If paths include any parent directory components and the parent directories do not exist, they will be created.", ArgsManager::ALLOW_ANY, OptionsCategory::IPC);
@@ -1807,7 +1807,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
{
WAIT_LOCK(kernel_notifications.m_tip_block_mutex, lock);
kernel_notifications.m_tip_block_cv.wait(lock, [&]() EXCLUSIVE_LOCKS_REQUIRED(kernel_notifications.m_tip_block_mutex) {
return !kernel_notifications.m_tip_block.IsNull() || ShutdownRequested(node);
return kernel_notifications.TipBlock() || ShutdownRequested(node);
});
}

@@ -1827,7 +1827,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
if (tip_info) {
tip_info->block_height = chain_active_height;
tip_info->block_time = best_block_time;
tip_info->verification_progress = GuessVerificationProgress(chainman.GetParams().TxData(), &tip);
tip_info->verification_progress = chainman.GuessVerificationProgress(&tip);
}
if (tip_info && chainman.m_best_header) {
tip_info->header_height = chainman.m_best_header->nHeight;

@@ -93,31 +93,6 @@ public:
*/
virtual std::unique_ptr<BlockTemplate> createNewBlock(const node::BlockCreateOptions& options = {}) = 0;

/**
* Processes new block. A valid new block is automatically relayed to peers.
*
* @param[in] block The block we want to process.
* @param[out] new_block A boolean which is set to indicate if the block was first received via this call
* @returns If the block was processed, independently of block validity
*/
virtual bool processNewBlock(const std::shared_ptr<const CBlock>& block, bool* new_block) = 0;

//! Return the number of transaction updates in the mempool,
//! used to decide whether to make a new block template.
virtual unsigned int getTransactionsUpdated() = 0;

/**
* Check a block is completely valid from start to finish.
* Only works on top of our current best block.
* Does not check proof-of-work.
*
* @param[in] block the block to validate
* @param[in] check_merkle_root call CheckMerkleRoot()
* @param[out] state details of why a block failed to validate
* @returns false if it does not build on the current tip, or any of the checks fail
*/
virtual bool testBlockValidity(const CBlock& block, bool check_merkle_root, BlockValidationState& state) = 0;

//! Get internal node context. Useful for RPC and testing,
//! but not accessible across processes.
virtual node::NodeContext* context() { return nullptr; }

@@ -18,9 +18,6 @@ interface Mining $Proxy.wrap("interfaces::Mining") {
getTip @2 (context :Proxy.Context) -> (result: Common.BlockRef, hasResult: Bool);
waitTipChanged @3 (context :Proxy.Context, currentTip: Data, timeout: Float64) -> (result: Common.BlockRef);
createNewBlock @4 (options: BlockCreateOptions) -> (result: BlockTemplate);
processNewBlock @5 (context :Proxy.Context, block: Data) -> (newBlock: Bool, result: Bool);
getTransactionsUpdated @6 (context :Proxy.Context) -> (result: UInt32);
testBlockValidity @7 (context :Proxy.Context, block: Data, checkMerkleRoot: Bool) -> (state: BlockValidationState, result: Bool);
}

interface BlockTemplate $Proxy.wrap("interfaces::BlockTemplate") {

src/net.cpp (55)
@@ -558,7 +558,6 @@ void CNode::CloseSocketDisconnect()
fDisconnect = true;
LOCK(m_sock_mutex);
if (m_sock) {
LogDebug(BCLog::NET, "disconnecting peer=%d\n", id);
m_sock.reset();
}
m_i2p_sam_session.reset();
@@ -696,6 +695,18 @@ bool CNode::ReceiveMsgBytes(Span<const uint8_t> msg_bytes, bool& complete)
return true;
}

std::string CNode::LogIP(bool log_ip) const
{
return log_ip ? strprintf(" peeraddr=%s", addr.ToStringAddrPort()) : "";
}

std::string CNode::DisconnectMsg(bool log_ip) const
{
return strprintf("disconnecting peer=%d%s",
GetId(),
LogIP(log_ip));
}

V1Transport::V1Transport(const NodeId node_id) noexcept
: m_magic_bytes{Params().MessageStart()}, m_node_id{node_id}
{
@@ -1635,7 +1646,7 @@ std::pair<size_t, bool> CConnman::SocketSendData(CNode& node) const
// error
int nErr = WSAGetLastError();
if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) {
LogDebug(BCLog::NET, "socket send error for peer=%d: %s\n", node.GetId(), NetworkErrorString(nErr));
LogDebug(BCLog::NET, "socket send error, %s: %s\n", node.DisconnectMsg(fLogIPs), NetworkErrorString(nErr));
node.CloseSocketDisconnect();
}
}
@@ -1879,7 +1890,7 @@ void CConnman::DisconnectNodes()
// Disconnect any connected nodes
for (CNode* pnode : m_nodes) {
if (!pnode->fDisconnect) {
LogDebug(BCLog::NET, "Network not active, dropping peer=%d\n", pnode->GetId());
LogDebug(BCLog::NET, "Network not active, %s\n", pnode->DisconnectMsg(fLogIPs));
pnode->fDisconnect = true;
}
}
@@ -1971,26 +1982,43 @@ bool CConnman::InactivityCheck(const CNode& node) const

if (!ShouldRunInactivityChecks(node, now)) return false;

if (last_recv.count() == 0 || last_send.count() == 0) {
LogDebug(BCLog::NET, "socket no message in first %i seconds, %d %d peer=%d\n", count_seconds(m_peer_connect_timeout), last_recv.count() != 0, last_send.count() != 0, node.GetId());
bool has_received{last_recv.count() != 0};
bool has_sent{last_send.count() != 0};

if (!has_received || !has_sent) {
std::string has_never;
if (!has_received) has_never += ", never received from peer";
if (!has_sent) has_never += ", never sent to peer";
LogDebug(BCLog::NET,
"socket no message in first %i seconds%s, %s\n",
count_seconds(m_peer_connect_timeout),
has_never,
node.DisconnectMsg(fLogIPs)
);
return true;
}

if (now > last_send + TIMEOUT_INTERVAL) {
LogDebug(BCLog::NET, "socket sending timeout: %is peer=%d\n", count_seconds(now - last_send), node.GetId());
LogDebug(BCLog::NET,
"socket sending timeout: %is, %s\n", count_seconds(now - last_send),
node.DisconnectMsg(fLogIPs)
);
return true;
}

if (now > last_recv + TIMEOUT_INTERVAL) {
LogDebug(BCLog::NET, "socket receive timeout: %is peer=%d\n", count_seconds(now - last_recv), node.GetId());
LogDebug(BCLog::NET,
"socket receive timeout: %is, %s\n", count_seconds(now - last_recv),
node.DisconnectMsg(fLogIPs)
);
return true;
}

if (!node.fSuccessfullyConnected) {
if (node.m_transport->GetInfo().transport_type == TransportProtocolType::DETECTING) {
LogDebug(BCLog::NET, "V2 handshake timeout peer=%d\n", node.GetId());
LogDebug(BCLog::NET, "V2 handshake timeout, %s\n", node.DisconnectMsg(fLogIPs));
} else {
LogDebug(BCLog::NET, "version handshake timeout peer=%d\n", node.GetId());
LogDebug(BCLog::NET, "version handshake timeout, %s\n", node.DisconnectMsg(fLogIPs));
}
return true;
}
@@ -2118,6 +2146,10 @@ void CConnman::SocketHandlerConnected(const std::vector<CNode*>& nodes,
{
bool notify = false;
if (!pnode->ReceiveMsgBytes({pchBuf, (size_t)nBytes}, notify)) {
LogDebug(BCLog::NET,
"receiving message bytes failed, %s\n",
pnode->DisconnectMsg(fLogIPs)
);
pnode->CloseSocketDisconnect();
}
RecordBytesRecv(nBytes);
@@ -2130,7 +2162,7 @@ void CConnman::SocketHandlerConnected(const std::vector<CNode*>& nodes,
{
// socket closed gracefully
if (!pnode->fDisconnect) {
LogDebug(BCLog::NET, "socket closed for peer=%d\n", pnode->GetId());
LogDebug(BCLog::NET, "socket closed, %s\n", pnode->DisconnectMsg(fLogIPs));
}
pnode->CloseSocketDisconnect();
}
@@ -2141,7 +2173,7 @@ void CConnman::SocketHandlerConnected(const std::vector<CNode*>& nodes,
if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS)
{
if (!pnode->fDisconnect) {
LogDebug(BCLog::NET, "socket recv error for peer=%d: %s\n", pnode->GetId(), NetworkErrorString(nErr));
LogDebug(BCLog::NET, "socket recv error, %s: %s\n", pnode->DisconnectMsg(fLogIPs), NetworkErrorString(nErr));
}
pnode->CloseSocketDisconnect();
}
@@ -3411,6 +3443,7 @@ void CConnman::StopNodes()
std::vector<CNode*> nodes;
WITH_LOCK(m_nodes_mutex, nodes.swap(m_nodes));
for (CNode* pnode : nodes) {
LogDebug(BCLog::NET, "%s\n", pnode->DisconnectMsg(fLogIPs));
pnode->CloseSocketDisconnect();
DeleteNode(pnode);
}

src/net.h (16)
@@ -947,6 +947,22 @@ public:

std::string ConnectionTypeAsString() const { return ::ConnectionTypeAsString(m_conn_type); }

/**
* Helper function to optionally log the IP address.
*
* @param[in] log_ip whether to include the IP address
* @return " peeraddr=..." or ""
*/
std::string LogIP(bool log_ip) const;

/**
* Helper function to log disconnects.
*
* @param[in] log_ip whether to include the IP address
* @return "disconnecting peer=..." and optionally "peeraddr=..."
*/
std::string DisconnectMsg(bool log_ip) const;

/** A ping-pong round trip has completed successfully. Update latest and minimum ping times. */
void PongReceived(std::chrono::microseconds ping_time) {
m_last_ping_time = ping_time;

@@ -2238,7 +2238,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv&
(((m_chainman.m_best_header != nullptr) && (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) &&
!pfrom.HasPermission(NetPermissionFlags::Download) // nodes with the download permission may exceed target
) {
LogDebug(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId());
LogDebug(BCLog::NET, "historical block serving limit reached, %s\n", pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
return;
}
@@ -2247,7 +2247,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv&
if (!pfrom.HasPermission(NetPermissionFlags::NoBan) && (
(((peer.m_our_services & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) && (tip->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
)) {
LogDebug(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold, disconnect peer=%d\n", pfrom.GetId());
LogDebug(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold, %s\n", pfrom.DisconnectMsg(fLogIPs));
//disconnect node and prevent it from stalling (would otherwise wait for the missing block)
pfrom.fDisconnect = true;
return;
@@ -2270,9 +2270,9 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv&
std::vector<uint8_t> block_data;
if (!m_chainman.m_blockman.ReadRawBlockFromDisk(block_data, block_pos)) {
if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) {
LogDebug(BCLog::NET, "Block was pruned before it could be read, disconnect peer=%s\n", pfrom.GetId());
LogDebug(BCLog::NET, "Block was pruned before it could be read, %s\n", pfrom.DisconnectMsg(fLogIPs));
} else {
LogError("Cannot load block from disk, disconnect peer=%d\n", pfrom.GetId());
LogError("Cannot load block from disk, %s\n", pfrom.DisconnectMsg(fLogIPs));
}
pfrom.fDisconnect = true;
return;
@@ -2284,9 +2284,9 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv&
std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
if (!m_chainman.m_blockman.ReadBlockFromDisk(*pblockRead, block_pos)) {
if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) {
LogDebug(BCLog::NET, "Block was pruned before it could be read, disconnect peer=%s\n", pfrom.GetId());
LogDebug(BCLog::NET, "Block was pruned before it could be read, %s\n", pfrom.DisconnectMsg(fLogIPs));
} else {
LogError("Cannot load block from disk, disconnect peer=%d\n", pfrom.GetId());
LogError("Cannot load block from disk, %s\n", pfrom.DisconnectMsg(fLogIPs));
}
pfrom.fDisconnect = true;
return;
@@ -2788,7 +2788,7 @@ void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer
// the minimum chain work, even if a peer has a chain past our tip,
// as an anti-DoS measure.
if (pfrom.IsOutboundOrBlockRelayConn()) {
LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
LogInfo("outbound peer headers chain has insufficient work, %s\n", pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
}
}
@@ -3111,8 +3111,8 @@ bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer,
(filter_type == BlockFilterType::BASIC &&
(peer.m_our_services & NODE_COMPACT_FILTERS));
if (!supported_filter_type) {
LogDebug(BCLog::NET, "peer %d requested unsupported block filter type: %d\n",
node.GetId(), static_cast<uint8_t>(filter_type));
LogDebug(BCLog::NET, "peer requested unsupported block filter type: %d, %s\n",
static_cast<uint8_t>(filter_type), node.DisconnectMsg(fLogIPs));
node.fDisconnect = true;
return false;
}
@@ -3123,8 +3123,8 @@ bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer,

// Check that the stop block exists and the peer would be allowed to fetch it.
if (!stop_index || !BlockRequestAllowed(stop_index)) {
LogDebug(BCLog::NET, "peer %d requested invalid block hash: %s\n",
node.GetId(), stop_hash.ToString());
LogDebug(BCLog::NET, "peer requested invalid block hash: %s, %s\n",
stop_hash.ToString(), node.DisconnectMsg(fLogIPs));
node.fDisconnect = true;
return false;
}
@@ -3132,15 +3132,15 @@ bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer,

uint32_t stop_height = stop_index->nHeight;
if (start_height > stop_height) {
LogDebug(BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with "
"start height %d and stop height %d\n",
node.GetId(), start_height, stop_height);
LogDebug(BCLog::NET, "peer sent invalid getcfilters/getcfheaders with "
"start height %d and stop height %d, %s\n",
start_height, stop_height, node.DisconnectMsg(fLogIPs));
node.fDisconnect = true;
return false;
}
if (stop_height - start_height >= max_height_diff) {
LogDebug(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n",
node.GetId(), stop_height - start_height + 1, max_height_diff);
LogDebug(BCLog::NET, "peer requested too many cfilters/cfheaders: %d / %d, %s\n",
stop_height - start_height + 1, max_height_diff, node.DisconnectMsg(fLogIPs));
node.fDisconnect = true;
return false;
}
@@ -3407,14 +3407,17 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
}
if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices))
{
LogDebug(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom.GetId(), nServices, GetDesirableServiceFlags(nServices));
LogDebug(BCLog::NET, "peer does not offer the expected services (%08x offered, %08x expected), %s\n",
nServices,
GetDesirableServiceFlags(nServices),
pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
return;
}

if (nVersion < MIN_PEER_PROTO_VERSION) {
// disconnect from peers older than this proto version
LogDebug(BCLog::NET, "peer=%d using obsolete version %i; disconnecting\n", pfrom.GetId(), nVersion);
LogDebug(BCLog::NET, "peer using obsolete version %i, %s\n", nVersion, pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
return;
}
@@ -3565,15 +3568,11 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
m_addrman.Good(pfrom.addr);
}

std::string remoteAddr;
if (fLogIPs)
remoteAddr = ", peeraddr=" + pfrom.addr.ToStringAddrPort();

const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)};
LogDebug(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s%s\n",
cleanSubVer, pfrom.nVersion,
peer->m_starting_height, addrMe.ToStringAddrPort(), fRelay, pfrom.GetId(),
remoteAddr, (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));
pfrom.LogIP(fLogIPs), (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));

peer->m_time_offset = NodeSeconds{std::chrono::seconds{nTime}} - Now<NodeSeconds>();
if (!pfrom.IsInboundConn()) {
@@ -3591,7 +3590,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,

// Feeler connections exist only to verify if address is online.
if (pfrom.IsFeelerConn()) {
LogDebug(BCLog::NET, "feeler connection completed peer=%d; disconnecting\n", pfrom.GetId());
LogDebug(BCLog::NET, "feeler connection completed, %s\n", pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
}
return;
@@ -3617,7 +3616,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
pfrom.ConnectionTypeAsString(),
TransportTypeAsString(pfrom.m_transport->GetInfo().transport_type),
pfrom.nVersion.load(), peer->m_starting_height,
pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToStringAddrPort()) : ""),
pfrom.GetId(), pfrom.LogIP(fLogIPs),
(mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));
}

@@ -3695,7 +3694,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (msg_type == NetMsgType::WTXIDRELAY) {
if (pfrom.fSuccessfullyConnected) {
// Disconnect peers that send a wtxidrelay message after VERACK.
LogDebug(BCLog::NET, "wtxidrelay received after verack from peer=%d; disconnecting\n", pfrom.GetId());
LogDebug(BCLog::NET, "wtxidrelay received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
return;
}
@@ -3717,7 +3716,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (msg_type == NetMsgType::SENDADDRV2) {
if (pfrom.fSuccessfullyConnected) {
// Disconnect peers that send a SENDADDRV2 message after VERACK.
LogDebug(BCLog::NET, "sendaddrv2 received after verack from peer=%d; disconnecting\n", pfrom.GetId());
LogDebug(BCLog::NET, "sendaddrv2 received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
return;
}
@@ -3730,19 +3729,19 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// from switching announcement protocols after the connection is up.
if (msg_type == NetMsgType::SENDTXRCNCL) {
if (!m_txreconciliation) {
LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl from peer=%d ignored, as our node does not have txreconciliation enabled\n", pfrom.GetId());
LogDebug(BCLog::NET, "sendtxrcncl from peer=%d ignored, as our node does not have txreconciliation enabled\n", pfrom.GetId());
return;
}

if (pfrom.fSuccessfullyConnected) {
LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received after verack from peer=%d; disconnecting\n", pfrom.GetId());
LogDebug(BCLog::NET, "sendtxrcncl received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
return;
}

// Peer must not offer us reconciliations if we specified no tx relay support in VERSION.
if (RejectIncomingTxs(pfrom)) {
LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received from peer=%d to which we indicated no tx relay; disconnecting\n", pfrom.GetId());
LogDebug(BCLog::NET, "sendtxrcncl received to which we indicated no tx relay, %s\n", pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
return;
}
@@ -3752,7 +3751,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// eliminates them, so that this flag fully represents what we are looking for.
const auto* tx_relay = peer->GetTxRelay();
if (!tx_relay || !WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs)) {
LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received from peer=%d which indicated no tx relay to us; disconnecting\n", pfrom.GetId());
LogDebug(BCLog::NET, "sendtxrcncl received which indicated no tx relay to us, %s\n", pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
return;
}
@@ -3765,16 +3764,16 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
peer_txreconcl_version, remote_salt);
switch (result) {
case ReconciliationRegisterResult::NOT_FOUND:
LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "Ignore unexpected txreconciliation signal from peer=%d\n", pfrom.GetId());
LogDebug(BCLog::NET, "Ignore unexpected txreconciliation signal from peer=%d\n", pfrom.GetId());
break;
case ReconciliationRegisterResult::SUCCESS:
break;
case ReconciliationRegisterResult::ALREADY_REGISTERED:
LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "txreconciliation protocol violation from peer=%d (sendtxrcncl received from already registered peer); disconnecting\n", pfrom.GetId());
LogDebug(BCLog::NET, "txreconciliation protocol violation (sendtxrcncl received from already registered peer), %s\n", pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
return;
case ReconciliationRegisterResult::PROTOCOL_VIOLATION:
LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "txreconciliation protocol violation from peer=%d; disconnecting\n", pfrom.GetId());
LogDebug(BCLog::NET, "txreconciliation protocol violation, %s\n", pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
return;
}
@@ -3877,7 +3876,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,

// AddrFetch: Require multiple addresses to avoid disconnecting on self-announcements
if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) {
LogDebug(BCLog::NET, "addrfetch connection completed peer=%d; disconnecting\n", pfrom.GetId());
LogDebug(BCLog::NET, "addrfetch connection completed, %s\n", pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
}
return;
@@ -3927,7 +3926,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
}
} else if (inv.IsGenTxMsg()) {
if (reject_tx_invs) {
LogDebug(BCLog::NET, "transaction (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.hash.ToString(), pfrom.GetId());
LogDebug(BCLog::NET, "transaction (%s) inv sent in violation of protocol, %s\n", inv.hash.ToString(), pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
return;
}
@@ -4004,7 +4003,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
vRecv >> locator >> hashStop;

if (locator.vHave.size() > MAX_LOCATOR_SZ) {
LogDebug(BCLog::NET, "getblocks locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
LogDebug(BCLog::NET, "getblocks locator size %lld > %d, %s\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
return;
}
@@ -4126,7 +4125,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
vRecv >> locator >> hashStop;

if (locator.vHave.size() > MAX_LOCATOR_SZ) {
LogDebug(BCLog::NET, "getheaders locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
LogDebug(BCLog::NET, "getheaders locator size %lld > %d, %s\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
return;
}
@@ -4667,7 +4666,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
{
if (!pfrom.HasPermission(NetPermissionFlags::NoBan))
{
LogDebug(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom.GetId());
LogDebug(BCLog::NET, "mempool request with bloom filters disabled, %s\n", pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
}
return;
@@ -4677,7 +4676,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
{
if (!pfrom.HasPermission(NetPermissionFlags::NoBan))
{
LogDebug(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom.GetId());
LogDebug(BCLog::NET, "mempool request with bandwidth limit reached, %s\n", pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
}
return;
@@ -4767,7 +4766,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,

if (msg_type == NetMsgType::FILTERLOAD) {
if (!(peer->m_our_services & NODE_BLOOM)) {
LogDebug(BCLog::NET, "filterload received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
LogDebug(BCLog::NET, "filterload received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
return;
}
@@ -4792,7 +4791,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,

if (msg_type == NetMsgType::FILTERADD) {
if (!(peer->m_our_services & NODE_BLOOM)) {
LogDebug(BCLog::NET, "filteradd received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
LogDebug(BCLog::NET, "filteradd received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
return;
}
@@ -4820,7 +4819,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,

if (msg_type == NetMsgType::FILTERCLEAR) {
if (!(peer->m_our_services & NODE_BLOOM)) {
LogDebug(BCLog::NET, "filterclear received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
LogDebug(BCLog::NET, "filterclear received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs));
pfrom.fDisconnect = true;
return;
}
@@ -5041,7 +5040,7 @@ void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seco
// message to give the peer a chance to update us.
if (state.m_chain_sync.m_sent_getheaders) {
// They've run out of time to catch up!
LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>");
LogInfo("Outbound peer has old chain, best known block = %s, %s\n", state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", pto.DisconnectMsg(fLogIPs));
pto.fDisconnect = true;
} else {
assert(state.m_chain_sync.m_work_header);
@@ -5442,7 +5441,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
const auto current_time{GetTime<std::chrono::microseconds>()};

if (pto->IsAddrFetchConn() && current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
LogDebug(BCLog::NET, "addrfetch connection timeout; disconnecting peer=%d\n", pto->GetId());
LogDebug(BCLog::NET, "addrfetch connection timeout, %s\n", pto->DisconnectMsg(fLogIPs));
pto->fDisconnect = true;
return true;
}
@@ -5786,7 +5785,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// Stalling only triggers when the block download window cannot move. During normal steady state,
// the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
// should only happen during initial block download.
LogPrintf("Peer=%d%s is stalling block download, disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
LogInfo("Peer is stalling block download, %s\n", pto->DisconnectMsg(fLogIPs));
pto->fDisconnect = true;
// Increase timeout for the next peer so that we don't disconnect multiple peers if our own
// bandwidth is insufficient.
@@ -5805,7 +5804,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1;
if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
LogPrintf("Timeout downloading block %s from peer=%d%s, disconnecting\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
LogInfo("Timeout downloading block %s, %s\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->DisconnectMsg(fLogIPs));
pto->fDisconnect = true;
return true;
}
@@ -5821,11 +5820,11 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// disconnect our sync peer for stalling; we have bigger
// problems if we can't get any outbound peers.
if (!pto->HasPermission(NetPermissionFlags::NoBan)) {
LogPrintf("Timeout downloading headers from peer=%d%s, disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
LogInfo("Timeout downloading headers, %s\n", pto->DisconnectMsg(fLogIPs));
pto->fDisconnect = true;
return true;
} else {
LogPrintf("Timeout downloading headers from noban peer=%d%s, not disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
LogInfo("Timeout downloading headers from noban peer, not %s\n", pto->DisconnectMsg(fLogIPs));
// Reset the headers sync state so that we have a
// chance to try downloading from a different peer.
// Note: this will also result in at least one more

@@ -324,7 +324,7 @@ public:
}
double getVerificationProgress() override
{
return GuessVerificationProgress(chainman().GetParams().TxData(), WITH_LOCK(::cs_main, return chainman().ActiveChain().Tip()));
return chainman().GuessVerificationProgress(WITH_LOCK(chainman().GetMutex(), return chainman().ActiveChain().Tip()));
}
bool isInitialBlockDownload() override
{
@@ -406,9 +406,9 @@ public:
}
std::unique_ptr<Handler> handleNotifyBlockTip(NotifyBlockTipFn fn) override
{
return MakeSignalHandler(::uiInterface.NotifyBlockTip_connect([fn](SynchronizationState sync_state, const CBlockIndex* block) {
return MakeSignalHandler(::uiInterface.NotifyBlockTip_connect([fn, this](SynchronizationState sync_state, const CBlockIndex* block) {
fn(sync_state, BlockTip{block->nHeight, block->GetBlockTime(), block->GetBlockHash()},
GuessVerificationProgress(Params().TxData(), block));
chainman().GuessVerificationProgress(block));
}));
}
std::unique_ptr<Handler> handleNotifyHeaderTip(NotifyHeaderTipFn fn) override
@@ -639,8 +639,8 @@ public:
void findCoins(std::map<COutPoint, Coin>& coins) override { return FindCoins(m_node, coins); }
double guessVerificationProgress(const uint256& block_hash) override
{
LOCK(::cs_main);
return GuessVerificationProgress(chainman().GetParams().TxData(), chainman().m_blockman.LookupBlockIndex(block_hash));
LOCK(chainman().GetMutex());
return chainman().GuessVerificationProgress(chainman().m_blockman.LookupBlockIndex(block_hash));
}
bool hasBlocks(const uint256& block_hash, int min_height, std::optional<int> max_height) override
{
@@ -971,7 +971,9 @@ public:
{
WAIT_LOCK(notifications().m_tip_block_mutex, lock);
notifications().m_tip_block_cv.wait_for(lock, timeout, [&]() EXCLUSIVE_LOCKS_REQUIRED(notifications().m_tip_block_mutex) {
return (notifications().m_tip_block != current_tip && notifications().m_tip_block != uint256::ZERO) || chainman().m_interrupt;
// We need to wait for m_tip_block to be set AND for the value
// to differ from the current_tip value.
return (notifications().TipBlock() && notifications().TipBlock() != current_tip) || chainman().m_interrupt;
});
}
// Must release m_tip_block_mutex before locking cs_main, to avoid deadlocks.
@@ -979,29 +981,6 @@ public:
return BlockRef{chainman().ActiveChain().Tip()->GetBlockHash(), chainman().ActiveChain().Tip()->nHeight};
}

bool processNewBlock(const std::shared_ptr<const CBlock>& block, bool* new_block) override
{
return chainman().ProcessNewBlock(block, /*force_processing=*/true, /*min_pow_checked=*/true, /*new_block=*/new_block);
}

unsigned int getTransactionsUpdated() override
{
return context()->mempool->GetTransactionsUpdated();
}

bool testBlockValidity(const CBlock& block, bool check_merkle_root, BlockValidationState& state) override
{
LOCK(cs_main);
CBlockIndex* tip{chainman().ActiveChain().Tip()};
// Fail if the tip updated before the lock was taken
if (block.hashPrevBlock != tip->GetBlockHash()) {
state.Error("Block does not connect to current chain tip.");
return false;
}

return TestBlockValidity(state, chainman().GetParams(), chainman().ActiveChainstate(), block, tip, /*fCheckPOW=*/false, check_merkle_root);
}

std::unique_ptr<BlockTemplate> createNewBlock(const BlockCreateOptions& options) override
{
BlockAssembler::Options assemble_options{options};

@@ -52,6 +52,7 @@ kernel::InterruptResult KernelNotifications::blockTip(SynchronizationState state
{
{
LOCK(m_tip_block_mutex);
Assume(index.GetBlockHash() != uint256::ZERO);
m_tip_block = index.GetBlockHash();
m_tip_block_cv.notify_all();
}
@@ -99,6 +100,13 @@ void KernelNotifications::fatalError(const bilingual_str& message)
m_exit_status, message, &m_warnings);
}

std::optional<uint256> KernelNotifications::TipBlock()
{
AssertLockHeld(m_tip_block_mutex);
return m_tip_block;
};


void ReadNotificationArgs(const ArgsManager& args, KernelNotifications& notifications)
{
if (auto value{args.GetIntArg("-stopatheight")}) notifications.m_stop_at_height = *value;

@@ -59,12 +59,14 @@ public:
//! The block for which the last blockTip notification was received.
//! It's first set when the tip is connected during node initialization.
//! Might be unset during an early shutdown.
uint256 m_tip_block GUARDED_BY(m_tip_block_mutex){uint256::ZERO};
std::optional<uint256> TipBlock() EXCLUSIVE_LOCKS_REQUIRED(m_tip_block_mutex);

private:
const std::function<bool()>& m_shutdown_request;
std::atomic<int>& m_exit_status;
node::Warnings& m_warnings;

std::optional<uint256> m_tip_block GUARDED_BY(m_tip_block_mutex);
};

void ReadNotificationArgs(const ArgsManager& args, KernelNotifications& notifications);

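The sketch below is a minimal, self-contained illustration of the pattern used in the two hunks above and in the waiters shown earlier in `init.cpp` and `node/interfaces.cpp` (it is not Bitcoin Core code, and a plain uint64_t stands in for uint256): the tip hash becomes an std::optional guarded by a mutex, so "no tip yet" is std::nullopt instead of a sentinel all-zero hash, and waiters block on a condition variable until a value appears or a timeout expires.

```cpp
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <optional>

class TipNotifications
{
public:
    // Called when a new tip is connected (compare KernelNotifications::blockTip above).
    void BlockTip(uint64_t block_hash)
    {
        {
            std::lock_guard lock{m_mutex};
            m_tip_block = block_hash; // nullopt becomes engaged on the first tip
        }
        m_cv.notify_all();
    }

    // Wait until any tip is known or the timeout expires; nullopt means "still no tip".
    std::optional<uint64_t> WaitForTip(std::chrono::milliseconds timeout)
    {
        std::unique_lock lock{m_mutex};
        m_cv.wait_for(lock, timeout, [&] { return m_tip_block.has_value(); });
        return m_tip_block;
    }

private:
    std::mutex m_mutex;
    std::condition_variable m_cv;
    std::optional<uint64_t> m_tip_block; // stand-in for std::optional<uint256>
};
```
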
@@ -421,7 +421,7 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda
}

++nPackagesSelected;
pblocktemplate->vFeerateHistogram.emplace_back(packageFees, static_cast<int32_t>(packageSize));
pblocktemplate->m_package_feerates.emplace_back(packageFees, static_cast<int32_t>(packageSize));

// Update transactions that depend on each of these
nDescendantsUpdated += UpdatePackagesForAdded(mempool, ancestors, mapModifiedTx);

@@ -40,7 +40,9 @@ struct CBlockTemplate
std::vector<CAmount> vTxFees;
std::vector<int64_t> vTxSigOpsCost;
std::vector<unsigned char> vchCoinbaseCommitment;
std::vector<FeeFrac> vFeerateHistogram;
/* A vector of package fee rates, ordered by the sequence in which
* packages are selected for inclusion in the block template.*/
std::vector<FeeFrac> m_package_feerates;
};

// Container for tracking updates to ancestor feerate as we include (parent)

@@ -3,7 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

//! @file node/types.h is a home for public enum and struct type definitions
//! that are used by internally by node code, but also used externally by wallet,
//! that are used internally by node code, but also used externally by wallet,
//! mining or GUI code.
//!
//! This file is intended to define only simple types that do not have external

@@ -89,8 +89,9 @@ bool IsChildWithParents(const Package& package);
*/
bool IsChildWithParentsTree(const Package& package);

/** Get the hash of these transactions' wtxids, concatenated in lexicographical order (treating the
* wtxids as little endian encoded uint256, smallest to largest). */
/** Get the hash of the concatenated wtxids of transactions, with wtxids
* treated as a little-endian numbers and sorted in ascending numeric order.
*/
uint256 GetPackageHash(const std::vector<CTransactionRef>& transactions);

#endif // BITCOIN_POLICY_PACKAGES_H

@@ -71,7 +71,7 @@ std::optional<std::string> GetEntriesForConflicts(const CTransaction& tx,
// descendants (i.e. if multiple conflicts share a descendant, it will be counted multiple
// times), but we just want to be conservative to avoid doing too much work.
if (nConflictingCount > MAX_REPLACEMENT_CANDIDATES) {
return strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
return strprintf("rejecting replacement %s; too many potential replacements (%d > %d)",
txid.ToString(),
nConflictingCount,
MAX_REPLACEMENT_CANDIDATES);

@ -1307,6 +1307,7 @@ RPCHelpMan getblockchaininfo()
{RPCResult::Type::NUM, "pruneheight", /*optional=*/true, "height of the last block pruned, plus one (only present if pruning is enabled)"},
{RPCResult::Type::BOOL, "automatic_pruning", /*optional=*/true, "whether automatic pruning is enabled (only present if pruning is enabled)"},
{RPCResult::Type::NUM, "prune_target_size", /*optional=*/true, "the target size used by pruning (only present if automatic pruning is enabled)"},
{RPCResult::Type::STR_HEX, "signet_challenge", /*optional=*/true, "the block challenge (aka. block script), in hexadecimal (only present if the current network is a signet)"},
(IsDeprecatedRPCEnabled("warnings") ?
RPCResult{RPCResult::Type::STR, "warnings", "any network and blockchain warnings (DEPRECATED)"} :
RPCResult{RPCResult::Type::ARR, "warnings", "any network and blockchain warnings (run with `-deprecatedrpc=warnings` to return the latest warning as a single string)",

@ -1336,7 +1337,7 @@ RPCHelpMan getblockchaininfo()
obj.pushKV("difficulty", GetDifficulty(tip));
obj.pushKV("time", tip.GetBlockTime());
obj.pushKV("mediantime", tip.GetMedianTimePast());
obj.pushKV("verificationprogress", GuessVerificationProgress(chainman.GetParams().TxData(), &tip));
obj.pushKV("verificationprogress", chainman.GuessVerificationProgress(&tip));
obj.pushKV("initialblockdownload", chainman.IsInitialBlockDownload());
obj.pushKV("chainwork", tip.nChainWork.GetHex());
obj.pushKV("size_on_disk", chainman.m_blockman.CalculateCurrentUsage());

@ -1351,6 +1352,11 @@ RPCHelpMan getblockchaininfo()
obj.pushKV("prune_target_size", chainman.m_blockman.GetPruneTarget());
}
}
if (chainman.GetParams().GetChainType() == ChainType::SIGNET) {
const std::vector<uint8_t>& signet_challenge =
chainman.GetParams().GetConsensus().signet_challenge;
obj.pushKV("signet_challenge", HexStr(signet_challenge));
}

NodeContext& node = EnsureAnyNodeContext(request.context);
obj.pushKV("warnings", node::GetWarningsForRpc(*CHECK_NONFATAL(node.warnings), IsDeprecatedRPCEnabled("warnings")));

@ -3338,7 +3344,7 @@ return RPCHelpMan{
data.pushKV("blocks", (int)chain.Height());
data.pushKV("bestblockhash", tip->GetBlockHash().GetHex());
data.pushKV("difficulty", GetDifficulty(*tip));
data.pushKV("verificationprogress", GuessVerificationProgress(Params().TxData(), tip));
data.pushKV("verificationprogress", chainman.GuessVerificationProgress(tip));
data.pushKV("coins_db_cache_bytes", cs.m_coinsdb_cache_size_bytes);
data.pushKV("coins_tip_cache_bytes", cs.m_coinstip_cache_size_bytes);
if (cs.m_from_snapshot_blockhash) {
@ -146,7 +146,8 @@ static RPCHelpMan testmempoolaccept()
{RPCResult{RPCResult::Type::STR_HEX, "", "transaction wtxid in hex"},
}},
}},
{RPCResult::Type::STR, "reject-reason", /*optional=*/true, "Rejection string (only present when 'allowed' is false)"},
{RPCResult::Type::STR, "reject-reason", /*optional=*/true, "Rejection reason (only present when 'allowed' is false)"},
{RPCResult::Type::STR, "reject-details", /*optional=*/true, "Rejection details (only present when 'allowed' is false and rejection details exist)"},
}},
}
},

@ -245,6 +246,7 @@ static RPCHelpMan testmempoolaccept()
result_inner.pushKV("reject-reason", "missing-inputs");
} else {
result_inner.pushKV("reject-reason", state.GetRejectReason());
result_inner.pushKV("reject-details", state.ToString());
}
}
rpc_result.push_back(std::move(result_inner));
@ -1,5 +1,5 @@
// Copyright (c) 2010 Satoshi Nakamoto
// Copyright (c) 2009-2022 The Bitcoin Core developers
// Copyright (c) 2009-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

@ -131,7 +131,7 @@ static RPCHelpMan getnetworkhashps()
};
}

static bool GenerateBlock(ChainstateManager& chainman, Mining& miner, CBlock&& block, uint64_t& max_tries, std::shared_ptr<const CBlock>& block_out, bool process_new_block)
static bool GenerateBlock(ChainstateManager& chainman, CBlock&& block, uint64_t& max_tries, std::shared_ptr<const CBlock>& block_out, bool process_new_block)
{
block_out.reset();
block.hashMerkleRoot = BlockMerkleRoot(block);

@ -151,7 +151,7 @@ static bool GenerateBlock(ChainstateManager& chainman, Mining& miner, CBlock&& b

if (!process_new_block) return true;

if (!miner.processNewBlock(block_out, nullptr)) {
if (!chainman.ProcessNewBlock(block_out, /*force_processing=*/true, /*min_pow_checked=*/true, nullptr)) {
throw JSONRPCError(RPC_INTERNAL_ERROR, "ProcessNewBlock, block not accepted");
}

@ -166,7 +166,7 @@ static UniValue generateBlocks(ChainstateManager& chainman, Mining& miner, const
CHECK_NONFATAL(block_template);

std::shared_ptr<const CBlock> block_out;
if (!GenerateBlock(chainman, miner, block_template->getBlock(), nMaxTries, block_out, /*process_new_block=*/true)) {
if (!GenerateBlock(chainman, block_template->getBlock(), nMaxTries, block_out, /*process_new_block=*/true)) {
break;
}

@ -371,29 +371,30 @@ static RPCHelpMan generateblock()

ChainstateManager& chainman = EnsureChainman(node);
{
std::unique_ptr<BlockTemplate> block_template{miner.createNewBlock({.use_mempool = false, .coinbase_output_script = coinbase_output_script})};
CHECK_NONFATAL(block_template);
LOCK(chainman.GetMutex());
{
std::unique_ptr<BlockTemplate> block_template{miner.createNewBlock({.use_mempool = false, .coinbase_output_script = coinbase_output_script})};
CHECK_NONFATAL(block_template);

block = block_template->getBlock();
}
block = block_template->getBlock();
}

CHECK_NONFATAL(block.vtx.size() == 1);
CHECK_NONFATAL(block.vtx.size() == 1);

// Add transactions
block.vtx.insert(block.vtx.end(), txs.begin(), txs.end());
RegenerateCommitments(block, chainman);
// Add transactions
block.vtx.insert(block.vtx.end(), txs.begin(), txs.end());
RegenerateCommitments(block, chainman);

{
BlockValidationState state;
if (!miner.testBlockValidity(block, /*check_merkle_root=*/false, state)) {
throw JSONRPCError(RPC_VERIFY_ERROR, strprintf("testBlockValidity failed: %s", state.ToString()));
if (!TestBlockValidity(state, chainman.GetParams(), chainman.ActiveChainstate(), block, chainman.m_blockman.LookupBlockIndex(block.hashPrevBlock), /*fCheckPOW=*/false, /*fCheckMerkleRoot=*/false)) {
throw JSONRPCError(RPC_VERIFY_ERROR, strprintf("TestBlockValidity failed: %s", state.ToString()));
}
}

std::shared_ptr<const CBlock> block_out;
uint64_t max_tries{DEFAULT_MAX_TRIES};

if (!GenerateBlock(chainman, miner, std::move(block), max_tries, block_out, process_new_block) || !block_out) {
if (!GenerateBlock(chainman, std::move(block), max_tries, block_out, process_new_block) || !block_out) {
throw JSONRPCError(RPC_MISC_ERROR, "Failed to make block.");
}

@ -424,6 +425,7 @@ static RPCHelpMan getmininginfo()
{RPCResult::Type::NUM, "networkhashps", "The network hashes per second"},
{RPCResult::Type::NUM, "pooledtx", "The size of the mempool"},
{RPCResult::Type::STR, "chain", "current network name (" LIST_CHAIN_NAMES ")"},
{RPCResult::Type::STR_HEX, "signet_challenge", /*optional=*/true, "The block challenge (aka. block script), in hexadecimal (only present if the current network is a signet)"},
(IsDeprecatedRPCEnabled("warnings") ?
RPCResult{RPCResult::Type::STR, "warnings", "any network and blockchain warnings (DEPRECATED)"} :
RPCResult{RPCResult::Type::ARR, "warnings", "any network and blockchain warnings (run with `-deprecatedrpc=warnings` to return the latest warning as a single string)",

@ -453,6 +455,11 @@ static RPCHelpMan getmininginfo()
obj.pushKV("networkhashps", getnetworkhashps().HandleRequest(request));
obj.pushKV("pooledtx", (uint64_t)mempool.size());
obj.pushKV("chain", chainman.GetParams().GetChainTypeString());
if (chainman.GetParams().GetChainType() == ChainType::SIGNET) {
const std::vector<uint8_t>& signet_challenge =
chainman.GetParams().GetConsensus().signet_challenge;
obj.pushKV("signet_challenge", HexStr(signet_challenge));
}
obj.pushKV("warnings", node::GetWarningsForRpc(*CHECK_NONFATAL(node.warnings), IsDeprecatedRPCEnabled("warnings")));
return obj;
},

@ -626,8 +633,8 @@ static RPCHelpMan getblocktemplate()
{RPCResult::Type::OBJ, "", "",
{
{RPCResult::Type::STR_HEX, "data", "transaction data encoded in hexadecimal (byte-for-byte)"},
{RPCResult::Type::STR_HEX, "txid", "transaction id encoded in little-endian hexadecimal"},
{RPCResult::Type::STR_HEX, "hash", "hash encoded in little-endian hexadecimal (including witness data)"},
{RPCResult::Type::STR_HEX, "txid", "transaction hash excluding witness data, shown in byte-reversed hex"},
{RPCResult::Type::STR_HEX, "hash", "transaction hash including witness data, shown in byte-reversed hex"},
{RPCResult::Type::ARR, "depends", "array of numbers",
{
{RPCResult::Type::NUM, "", "transactions before this one (by 1-based index in 'transactions' list) that must be present in the final block if this one is"},

@ -709,12 +716,12 @@ static RPCHelpMan getblocktemplate()
return "duplicate-inconclusive";
}

// testBlockValidity only supports blocks built on the current Tip
// TestBlockValidity only supports blocks built on the current Tip
if (block.hashPrevBlock != tip) {
return "inconclusive-not-best-prevblk";
}
BlockValidationState state;
miner.testBlockValidity(block, /*check_merkle_root=*/true, state);
TestBlockValidity(state, chainman.GetParams(), chainman.ActiveChainstate(), block, chainman.m_blockman.LookupBlockIndex(block.hashPrevBlock), /*fCheckPOW=*/false, /*fCheckMerkleRoot=*/true);
return BIP22ValidationResult(state);
}

@ -742,6 +749,7 @@ static RPCHelpMan getblocktemplate()
}

static unsigned int nTransactionsUpdatedLast;
const CTxMemPool& mempool = EnsureMemPool(node);

if (!lpval.isNull())
{

@ -772,7 +780,7 @@ static RPCHelpMan getblocktemplate()
tip = miner.waitTipChanged(hashWatchedChain, checktxtime).hash;
// Timeout: Check transactions for update
// without holding the mempool lock to avoid deadlocks
if (miner.getTransactionsUpdated() != nTransactionsUpdatedLastLP)
if (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLastLP)
break;
checktxtime = std::chrono::seconds(10);
}

@ -803,13 +811,13 @@ static RPCHelpMan getblocktemplate()
static int64_t time_start;
static std::unique_ptr<BlockTemplate> block_template;
if (!pindexPrev || pindexPrev->GetBlockHash() != tip ||
(miner.getTransactionsUpdated() != nTransactionsUpdatedLast && GetTime() - time_start > 5))
(mempool.GetTransactionsUpdated() != nTransactionsUpdatedLast && GetTime() - time_start > 5))
{
// Clear pindexPrev so future calls make a new block, despite any failures from here on
pindexPrev = nullptr;

// Store the pindexBest used before createNewBlock, to avoid races
nTransactionsUpdatedLast = miner.getTransactionsUpdated();
nTransactionsUpdatedLast = mempool.GetTransactionsUpdated();
CBlockIndex* pindexPrevNew = chainman.m_blockman.LookupBlockIndex(tip);
time_start = GetTime();

@ -1032,13 +1040,10 @@ static RPCHelpMan submitblock()
}
}

NodeContext& node = EnsureAnyNodeContext(request.context);
Mining& miner = EnsureMining(node);

bool new_block;
auto sc = std::make_shared<submitblock_StateCatcher>(block.GetHash());
CHECK_NONFATAL(chainman.m_options.signals)->RegisterSharedValidationInterface(sc);
bool accepted = miner.processNewBlock(blockptr, /*new_block=*/&new_block);
bool accepted = chainman.ProcessNewBlock(blockptr, /*force_processing=*/true, /*min_pow_checked=*/true, /*new_block=*/&new_block);
CHECK_NONFATAL(chainman.m_options.signals)->UnregisterSharedValidationInterface(sc);
if (!new_block && accepted) {
return "duplicate";
@ -1761,7 +1761,7 @@ struct KeyParser {
std::vector<std::unique_ptr<DescriptorImpl>> ParseScript(uint32_t& key_exp_index, Span<const char>& sp, ParseScriptContext ctx, FlatSigningProvider& out, std::string& error)
{
using namespace script;

Assume(ctx == ParseScriptContext::TOP || ctx == ParseScriptContext::P2SH || ctx == ParseScriptContext::P2WSH || ctx == ParseScriptContext::P2TR);
std::vector<std::unique_ptr<DescriptorImpl>> ret;
auto expr = Expr(sp);
if (Func("pk", expr)) {

@ -1787,10 +1787,6 @@ std::vector<std::unique_ptr<DescriptorImpl>> ParseScript(uint32_t& key_exp_index
ret.emplace_back(std::make_unique<PKHDescriptor>(std::move(pubkey)));
}
return ret;
} else if (ctx != ParseScriptContext::P2TR && Func("pkh", expr)) {
// Under Taproot, always the Miniscript parser deal with it.
error = "Can only have pkh at top level, in sh(), wsh(), or in tr()";
return {};
}
if (ctx == ParseScriptContext::TOP && Func("combo", expr)) {
auto pubkeys = ParsePubkey(key_exp_index, expr, ctx, out, error);
@ -1798,7 +1798,7 @@ inline NodeRef<Key> Parse(Span<const char> in, const Ctx& ctx)
// Get threshold
int next_comma = FindNextChar(in, ',');
if (next_comma < 1) return false;
const auto k_to_integral{ToIntegral<int64_t>(std::string_view(in.begin(), next_comma))};
const auto k_to_integral{ToIntegral<int64_t>(std::string_view(in.data(), next_comma))};
if (!k_to_integral.has_value()) return false;
const int64_t k{k_to_integral.value()};
in = in.subspan(next_comma + 1);

@ -1954,7 +1954,7 @@ inline NodeRef<Key> Parse(Span<const char> in, const Ctx& ctx)
} else if (Const("after(", in)) {
int arg_size = FindNextChar(in, ')');
if (arg_size < 1) return {};
const auto num{ToIntegral<int64_t>(std::string_view(in.begin(), arg_size))};
const auto num{ToIntegral<int64_t>(std::string_view(in.data(), arg_size))};
if (!num.has_value() || *num < 1 || *num >= 0x80000000L) return {};
constructed.push_back(MakeNodeRef<Key>(internal::NoDupCheck{}, ctx.MsContext(), Fragment::AFTER, *num));
in = in.subspan(arg_size + 1);

@ -1962,7 +1962,7 @@ inline NodeRef<Key> Parse(Span<const char> in, const Ctx& ctx)
} else if (Const("older(", in)) {
int arg_size = FindNextChar(in, ')');
if (arg_size < 1) return {};
const auto num{ToIntegral<int64_t>(std::string_view(in.begin(), arg_size))};
const auto num{ToIntegral<int64_t>(std::string_view(in.data(), arg_size))};
if (!num.has_value() || *num < 1 || *num >= 0x80000000L) return {};
constructed.push_back(MakeNodeRef<Key>(internal::NoDupCheck{}, ctx.MsContext(), Fragment::OLDER, *num));
in = in.subspan(arg_size + 1);

@ -1974,7 +1974,7 @@ inline NodeRef<Key> Parse(Span<const char> in, const Ctx& ctx)
} else if (Const("thresh(", in)) {
int next_comma = FindNextChar(in, ',');
if (next_comma < 1) return {};
const auto k{ToIntegral<int64_t>(std::string_view(in.begin(), next_comma))};
const auto k{ToIntegral<int64_t>(std::string_view(in.data(), next_comma))};
if (!k.has_value() || *k < 1) return {};
in = in.subspan(next_comma + 1);
// n = 1 here because we read the first WRAPPED_EXPR before reaching THRESH
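The repeated change from in.begin() to in.data() above keeps each std::string_view construction on the (const char*, count) constructor. Bitcoin Core's own Span::begin() happens to return a raw pointer, but a std::span iterator is not guaranteed to, so taking data() is the form that stays valid as Span code moves toward std::span; that motivation is an inference from the surrounding std::span work in this compare, not something the diff states. A tiny stand-alone illustration:

    // Illustrative only: build a std::string_view from a span's data pointer
    // plus a length, rather than from its begin() iterator.
    #include <iostream>
    #include <span>
    #include <string_view>

    int main()
    {
        const char buf[] = {'1', '2', '3', ',', 'x'};
        std::span<const char> in{buf};

        int next_comma = 3; // position of ',' as a scan like FindNextChar() would report it

        // std::string_view has a (const char*, count) constructor; span::data()
        // always yields const char*, whereas span::begin() is an iterator type
        // that need not convert to a pointer.
        std::string_view digits{in.data(), static_cast<std::size_t>(next_comma)};
        std::cout << digits << '\n'; // prints "123"
    }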
@ -10,6 +10,7 @@
#include <attributes.h>
#include <script/script.h>
#include <span.h>

#include <string>
#include <optional>

@ -17,7 +18,6 @@
#include <vector>

class CPubKey;
template <typename C> class Span;

enum class TxoutType {
NONSTANDARD,
@ -264,6 +264,7 @@ template<typename Stream> inline void Serialize(Stream& s, int64_t a ) { ser_wri
template<typename Stream> inline void Serialize(Stream& s, uint64_t a) { ser_writedata64(s, a); }
template <typename Stream, BasicByte B, int N> void Serialize(Stream& s, const B (&a)[N]) { s.write(MakeByteSpan(a)); }
template <typename Stream, BasicByte B, std::size_t N> void Serialize(Stream& s, const std::array<B, N>& a) { s.write(MakeByteSpan(a)); }
template <typename Stream, BasicByte B, std::size_t N> void Serialize(Stream& s, std::span<B, N> span) { s.write(std::as_bytes(span)); }
template <typename Stream, BasicByte B> void Serialize(Stream& s, Span<B> span) { s.write(AsBytes(span)); }

template <typename Stream, CharNotInt8 V> void Unserialize(Stream&, V) = delete; // char serialization forbidden. Use uint8_t or int8_t

@ -278,6 +279,7 @@ template<typename Stream> inline void Unserialize(Stream& s, int64_t& a ) { a =
template<typename Stream> inline void Unserialize(Stream& s, uint64_t& a) { a = ser_readdata64(s); }
template <typename Stream, BasicByte B, int N> void Unserialize(Stream& s, B (&a)[N]) { s.read(MakeWritableByteSpan(a)); }
template <typename Stream, BasicByte B, std::size_t N> void Unserialize(Stream& s, std::array<B, N>& a) { s.read(MakeWritableByteSpan(a)); }
template <typename Stream, BasicByte B, std::size_t N> void Unserialize(Stream& s, std::span<B, N> span) { s.read(std::as_writable_bytes(span)); }
template <typename Stream, BasicByte B> void Unserialize(Stream& s, Span<B> span) { s.read(AsWritableBytes(span)); }

template <typename Stream> inline void Serialize(Stream& s, bool a) { uint8_t f = a; ser_writedata8(s, f); }
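The new overloads above forward std::span arguments through std::as_bytes and std::as_writable_bytes, mirroring what the existing Span overloads do with AsBytes/AsWritableBytes. A small stand-alone illustration of those standard helpers (no Bitcoin Core stream types involved):

    // Illustrative only: view a typed span as a span of (const) std::byte,
    // which is what a byte-oriented stream ultimately writes or reads.
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <span>

    int main()
    {
        std::uint32_t values[2] = {0x01020304u, 0xa0b0c0d0u};

        std::span<std::uint32_t> typed{values};
        std::span<const std::byte> readable = std::as_bytes(typed);    // what Serialize/write sees
        std::span<std::byte> writable = std::as_writable_bytes(typed); // what Unserialize/read fills

        std::cout << "bytes visible to the stream: " << readable.size() << '\n'; // 8
        writable[0] = std::byte{0xff}; // a stream read would overwrite bytes like this
        std::cout << std::hex << values[0] << '\n'; // lowest-addressed byte changed
    }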
@ -248,9 +248,8 @@ template <typename T>
T& SpanPopBack(Span<T>& span)
{
size_t size = span.size();
ASSERT_IF_DEBUG(size > 0);
T& back = span[size - 1];
span = Span<T>(span.data(), size - 1);
T& back = span.back();
span = span.first(size - 1);
return back;
}
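The SpanPopBack() rewrite above swaps raw indexing and reconstruction for the span's own back() and first() members, an idiom that works identically with std::span. A minimal stand-alone version of the same idea (plain std::span, without the ASSERT_IF_DEBUG macro):

    // Illustrative only: return the last element and shrink the span view by one.
    #include <cassert>
    #include <iostream>
    #include <span>

    template <typename T>
    T& PopBack(std::span<T>& s)
    {
        assert(!s.empty());
        T& back = s.back();        // reference into the underlying storage
        s = s.first(s.size() - 1); // drop the last element from the view
        return back;
    }

    int main()
    {
        int data[] = {10, 20, 30};
        std::span<int> view{data};
        std::cout << PopBack(view) << ' ' << view.size() << '\n'; // "30 2"
    }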
@ -79,7 +79,7 @@ public:
memcpy(vchData.data() + nPos, src.data(), nOverwrite);
}
if (nOverwrite < src.size()) {
vchData.insert(vchData.end(), UCharCast(src.data()) + nOverwrite, UCharCast(src.end()));
vchData.insert(vchData.end(), UCharCast(src.data()) + nOverwrite, UCharCast(src.data() + src.size()));
}
nPos += src.size();
}
@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2021 The Bitcoin Core developers
// Copyright (c) 2009-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

@ -74,7 +74,7 @@ secure_unique_ptr<T> make_secure_unique(Args&&... as)

// initialize in place, and return as secure_unique_ptr
try {
return secure_unique_ptr<T>(new (p) T(std::forward(as)...));
return secure_unique_ptr<T>(new (p) T(std::forward<Args>(as)...));
} catch (...) {
secure_allocator<T>().deallocate(p, 1);
throw;
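The make_secure_unique() fix above matters because std::forward cannot deduce its template argument from the call site: its parameter is a non-deduced context, so std::forward(as)... only compiles when the pack is empty (the expansion vanishes), which is presumably why the mistake went unnoticed. A stand-alone sketch of the corrected pattern; make_in_place and Widget are illustrative names, not project code:

    // Illustrative only: forwarding a parameter pack needs std::forward<Args>(as)...
    #include <iostream>
    #include <string>
    #include <utility>

    struct Widget {
        std::string name;
        explicit Widget(std::string n) : name(std::move(n)) {}
    };

    template <typename T, typename... Args>
    T make_in_place(Args&&... as)
    {
        // std::forward's parameter is a non-deduced context, so the template
        // argument must be spelled out; std::forward(as)... would not compile
        // here for a non-empty pack.
        return T(std::forward<Args>(as)...);
    }

    int main()
    {
        std::string s{"secure"};
        Widget w = make_in_place<Widget>(std::move(s)); // rvalue-ness is preserved
        std::cout << w.name << '\n';
    }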
@ -161,7 +161,8 @@ void DoCheck(std::string prv, std::string pub, const std::string& norm_pub, int
// We must be able to estimate the max satisfaction size for any solvable descriptor top descriptor (but combo).
const bool is_nontop_or_nonsolvable{!parse_priv->IsSolvable() || !parse_priv->GetOutputType()};
const auto max_sat_maxsig{parse_priv->MaxSatisfactionWeight(true)};
const auto max_sat_nonmaxsig{parse_priv->MaxSatisfactionWeight(true)};
const auto max_sat_nonmaxsig{parse_priv->MaxSatisfactionWeight(false)};
BOOST_CHECK(max_sat_nonmaxsig <= max_sat_maxsig);
const auto max_elems{parse_priv->MaxSatisfactionElems()};
const bool is_input_size_info_set{max_sat_maxsig && max_sat_nonmaxsig && max_elems};
BOOST_CHECK_MESSAGE(is_input_size_info_set || is_nontop_or_nonsolvable, prv);
@ -24,12 +24,12 @@ struct CheckGlobalsImpl {
"The current fuzz target used the global random state.\n\n"

"This is acceptable, but requires the fuzz target to call \n"
"SeedRandomStateForTest(SeedRand::ZEROS) at the beginning \n"
"of processing the fuzz input.\n\n"
"SeedRandomStateForTest(SeedRand::ZEROS) in the first line \n"
"of the FUZZ_TARGET function.\n\n"

"An alternative solution would be to avoid any use of globals.\n\n"

"Without a solution, fuzz stability and determinism can lead \n"
"Without a solution, fuzz instability and non-determinism can lead \n"
"to non-reproducible bugs or inefficient fuzzing.\n\n"
<< std::endl;
std::abort(); // Abort, because AFL may try to recover from a std::exit
@ -21,6 +21,7 @@ using node::BlockAssembler;

FUZZ_TARGET(utxo_total_supply)
{
SeedRandomStateForTest(SeedRand::ZEROS);
/** The testing setup that creates a chainman only (no chainstate) */
ChainTestingSetup test_setup{
ChainType::REGTEST,

@ -28,7 +29,6 @@ FUZZ_TARGET(utxo_total_supply)
.extra_args = {"-testactivationheight=bip34@2"},
},
};
SeedRandomStateForTest(SeedRand::ZEROS); // Can not be done before test_setup
// Create chainstate
test_setup.LoadVerifyActivateChainstate();
auto& node{test_setup.m_node};
@ -8,6 +8,7 @@
|
|||
#include <consensus/consensus.h>
|
||||
#include <consensus/merkle.h>
|
||||
#include <consensus/tx_verify.h>
|
||||
#include <interfaces/mining.h>
|
||||
#include <node/miner.h>
|
||||
#include <policy/policy.h>
|
||||
#include <test/util/random.h>
|
||||
|
@ -15,6 +16,7 @@
|
|||
#include <txmempool.h>
|
||||
#include <uint256.h>
|
||||
#include <util/check.h>
|
||||
#include <util/feefrac.h>
|
||||
#include <util/strencodings.h>
|
||||
#include <util/time.h>
|
||||
#include <util/translation.h>
|
||||
|
@ -29,8 +31,9 @@
|
|||
#include <boost/test/unit_test.hpp>
|
||||
|
||||
using namespace util::hex_literals;
|
||||
using interfaces::BlockTemplate;
|
||||
using interfaces::Mining;
|
||||
using node::BlockAssembler;
|
||||
using node::CBlockTemplate;
|
||||
|
||||
namespace miner_tests {
|
||||
struct MinerTestingSetup : public TestingSetup {
|
||||
|
@ -55,7 +58,10 @@ struct MinerTestingSetup : public TestingSetup {
|
|||
Assert(error.empty());
|
||||
return *m_node.mempool;
|
||||
}
|
||||
BlockAssembler AssemblerForTest(CTxMemPool& tx_mempool, BlockAssembler::Options options);
|
||||
std::unique_ptr<Mining> MakeMining()
|
||||
{
|
||||
return interfaces::MakeMining(m_node);
|
||||
}
|
||||
};
|
||||
} // namespace miner_tests
|
||||
|
||||
|
@ -63,13 +69,6 @@ BOOST_FIXTURE_TEST_SUITE(miner_tests, MinerTestingSetup)
|
|||
|
||||
static CFeeRate blockMinFeeRate = CFeeRate(DEFAULT_BLOCK_MIN_TX_FEE);
|
||||
|
||||
BlockAssembler MinerTestingSetup::AssemblerForTest(CTxMemPool& tx_mempool, BlockAssembler::Options options)
|
||||
{
|
||||
options.nBlockMaxWeight = MAX_BLOCK_WEIGHT;
|
||||
options.blockMinFeeRate = blockMinFeeRate;
|
||||
return BlockAssembler{m_node.chainman->ActiveChainstate(), &tx_mempool, options};
|
||||
}
|
||||
|
||||
constexpr static struct {
|
||||
unsigned char extranonce;
|
||||
unsigned int nonce;
|
||||
|
@ -107,6 +106,10 @@ static std::unique_ptr<CBlockIndex> CreateBlockIndex(int nHeight, CBlockIndex* a
|
|||
void MinerTestingSetup::TestPackageSelection(const CScript& scriptPubKey, const std::vector<CTransactionRef>& txFirst)
|
||||
{
|
||||
CTxMemPool& tx_mempool{MakeMempool()};
|
||||
auto mining{MakeMining()};
|
||||
BlockAssembler::Options options;
|
||||
options.coinbase_output_script = scriptPubKey;
|
||||
|
||||
LOCK(tx_mempool.cs);
|
||||
// Test the ancestor feerate transaction selection.
|
||||
TestMemPoolEntryHelper entry;
|
||||
|
@ -122,42 +125,45 @@ void MinerTestingSetup::TestPackageSelection(const CScript& scriptPubKey, const
|
|||
tx.vout[0].nValue = 5000000000LL - 1000;
|
||||
// This tx has a low fee: 1000 satoshis
|
||||
Txid hashParentTx = tx.GetHash(); // save this txid for later use
|
||||
const auto lowFeeTx{entry.Fee(1000).Time(Now<NodeSeconds>()).SpendsCoinbase(true).FromTx(tx)};
|
||||
AddToMempool(tx_mempool, lowFeeTx);
|
||||
const auto parent_tx{entry.Fee(1000).Time(Now<NodeSeconds>()).SpendsCoinbase(true).FromTx(tx)};
|
||||
AddToMempool(tx_mempool, parent_tx);
|
||||
|
||||
// This tx has a medium fee: 10000 satoshis
|
||||
tx.vin[0].prevout.hash = txFirst[1]->GetHash();
|
||||
tx.vout[0].nValue = 5000000000LL - 10000;
|
||||
Txid hashMediumFeeTx = tx.GetHash();
|
||||
const auto mediumFeeTx{entry.Fee(10000).Time(Now<NodeSeconds>()).SpendsCoinbase(true).FromTx(tx)};
|
||||
AddToMempool(tx_mempool, mediumFeeTx);
|
||||
const auto medium_fee_tx{entry.Fee(10000).Time(Now<NodeSeconds>()).SpendsCoinbase(true).FromTx(tx)};
|
||||
AddToMempool(tx_mempool, medium_fee_tx);
|
||||
|
||||
// This tx has a high fee, but depends on the first transaction
|
||||
tx.vin[0].prevout.hash = hashParentTx;
|
||||
tx.vout[0].nValue = 5000000000LL - 1000 - 50000; // 50k satoshi fee
|
||||
Txid hashHighFeeTx = tx.GetHash();
|
||||
const auto highFeeChildTx{entry.Fee(50000).Time(Now<NodeSeconds>()).SpendsCoinbase(false).FromTx(tx)};
|
||||
AddToMempool(tx_mempool, highFeeChildTx);
|
||||
const auto high_fee_tx{entry.Fee(50000).Time(Now<NodeSeconds>()).SpendsCoinbase(false).FromTx(tx)};
|
||||
AddToMempool(tx_mempool, high_fee_tx);
|
||||
|
||||
BlockAssembler::Options options;
|
||||
options.coinbase_output_script = scriptPubKey;
|
||||
auto assembler{AssemblerForTest(tx_mempool, options)};
|
||||
auto pblocktemplate{assembler.CreateNewBlock()};
|
||||
const auto blockFeerateHistogram{pblocktemplate->vFeerateHistogram};
|
||||
BOOST_REQUIRE_EQUAL(pblocktemplate->block.vtx.size(), 4U);
|
||||
BOOST_CHECK(pblocktemplate->block.vtx[1]->GetHash() == hashParentTx);
|
||||
BOOST_CHECK(pblocktemplate->block.vtx[2]->GetHash() == hashHighFeeTx);
|
||||
BOOST_CHECK(pblocktemplate->block.vtx[3]->GetHash() == hashMediumFeeTx);
|
||||
std::unique_ptr<BlockTemplate> block_template = mining->createNewBlock(options);
|
||||
BOOST_REQUIRE(block_template);
|
||||
CBlock block{block_template->getBlock()};
|
||||
BOOST_REQUIRE_EQUAL(block.vtx.size(), 4U);
|
||||
BOOST_CHECK(block.vtx[1]->GetHash() == hashParentTx);
|
||||
BOOST_CHECK(block.vtx[2]->GetHash() == hashHighFeeTx);
|
||||
BOOST_CHECK(block.vtx[3]->GetHash() == hashMediumFeeTx);
|
||||
|
||||
BOOST_CHECK(blockFeerateHistogram.size() == 2);
|
||||
// lowFeeTx and highFeeChildTx are added to the block as a package.
|
||||
const auto packageFee{lowFeeTx.GetFee() + highFeeChildTx.GetFee()};
|
||||
const auto packageSize{lowFeeTx.GetTxSize() + highFeeChildTx.GetTxSize()};
|
||||
FeeFrac packageFeeFrac{packageFee, packageSize};
|
||||
BOOST_CHECK(blockFeerateHistogram[0] == packageFeeFrac);
|
||||
// Test the inclusion of package feerates in the block template and ensure they are sequential.
|
||||
const auto block_package_feerates = BlockAssembler{m_node.chainman->ActiveChainstate(), &tx_mempool, options}.CreateNewBlock()->m_package_feerates;
|
||||
BOOST_CHECK(block_package_feerates.size() == 2);
|
||||
|
||||
FeeFrac mediumTxFeeFrac{mediumFeeTx.GetFee(), mediumFeeTx.GetTxSize()};
|
||||
BOOST_CHECK(blockFeerateHistogram[1] == mediumTxFeeFrac);
|
||||
// parent_tx and high_fee_tx are added to the block as a package.
|
||||
const auto combined_txs_fee = parent_tx.GetFee() + high_fee_tx.GetFee();
|
||||
const auto combined_txs_size = parent_tx.GetTxSize() + high_fee_tx.GetTxSize();
|
||||
FeeFrac package_feefrac{combined_txs_fee, combined_txs_size};
|
||||
// The package should be added first.
|
||||
BOOST_CHECK(block_package_feerates[0] == package_feefrac);
|
||||
|
||||
// The medium_fee_tx should be added next.
|
||||
FeeFrac medium_tx_feefrac{medium_fee_tx.GetFee(), medium_fee_tx.GetTxSize()};
|
||||
BOOST_CHECK(block_package_feerates[1] == medium_tx_feefrac);
|
||||
|
||||
// Test that a package below the block min tx fee doesn't get included
|
||||
tx.vin[0].prevout.hash = hashHighFeeTx;
|
||||
|
@ -174,11 +180,13 @@ void MinerTestingSetup::TestPackageSelection(const CScript& scriptPubKey, const
|
|||
tx.vout[0].nValue = 5000000000LL - 1000 - 50000 - feeToUse;
|
||||
Txid hashLowFeeTx = tx.GetHash();
|
||||
AddToMempool(tx_mempool, entry.Fee(feeToUse).FromTx(tx));
|
||||
pblocktemplate = AssemblerForTest(tx_mempool, options).CreateNewBlock();
|
||||
block_template = mining->createNewBlock(options);
|
||||
BOOST_REQUIRE(block_template);
|
||||
block = block_template->getBlock();
|
||||
// Verify that the free tx and the low fee tx didn't get selected
|
||||
for (size_t i=0; i<pblocktemplate->block.vtx.size(); ++i) {
|
||||
BOOST_CHECK(pblocktemplate->block.vtx[i]->GetHash() != hashFreeTx);
|
||||
BOOST_CHECK(pblocktemplate->block.vtx[i]->GetHash() != hashLowFeeTx);
|
||||
for (size_t i=0; i<block.vtx.size(); ++i) {
|
||||
BOOST_CHECK(block.vtx[i]->GetHash() != hashFreeTx);
|
||||
BOOST_CHECK(block.vtx[i]->GetHash() != hashLowFeeTx);
|
||||
}
|
||||
|
||||
// Test that packages above the min relay fee do get included, even if one
|
||||
|
@ -188,10 +196,12 @@ void MinerTestingSetup::TestPackageSelection(const CScript& scriptPubKey, const
|
|||
tx.vout[0].nValue -= 2; // Now we should be just over the min relay fee
|
||||
hashLowFeeTx = tx.GetHash();
|
||||
AddToMempool(tx_mempool, entry.Fee(feeToUse + 2).FromTx(tx));
|
||||
pblocktemplate = AssemblerForTest(tx_mempool, options).CreateNewBlock();
|
||||
BOOST_REQUIRE_EQUAL(pblocktemplate->block.vtx.size(), 6U);
|
||||
BOOST_CHECK(pblocktemplate->block.vtx[4]->GetHash() == hashFreeTx);
|
||||
BOOST_CHECK(pblocktemplate->block.vtx[5]->GetHash() == hashLowFeeTx);
|
||||
block_template = mining->createNewBlock(options);
|
||||
BOOST_REQUIRE(block_template);
|
||||
block = block_template->getBlock();
|
||||
BOOST_REQUIRE_EQUAL(block.vtx.size(), 6U);
|
||||
BOOST_CHECK(block.vtx[4]->GetHash() == hashFreeTx);
|
||||
BOOST_CHECK(block.vtx[5]->GetHash() == hashLowFeeTx);
|
||||
|
||||
// Test that transaction selection properly updates ancestor fee
|
||||
// calculations as ancestor transactions get included in a block.
|
||||
|
@ -210,12 +220,14 @@ void MinerTestingSetup::TestPackageSelection(const CScript& scriptPubKey, const
|
|||
tx.vout[0].nValue = 5000000000LL - 100000000 - feeToUse;
|
||||
Txid hashLowFeeTx2 = tx.GetHash();
|
||||
AddToMempool(tx_mempool, entry.Fee(feeToUse).SpendsCoinbase(false).FromTx(tx));
|
||||
pblocktemplate = AssemblerForTest(tx_mempool, options).CreateNewBlock();
|
||||
block_template = mining->createNewBlock(options);
|
||||
BOOST_REQUIRE(block_template);
|
||||
block = block_template->getBlock();
|
||||
|
||||
// Verify that this tx isn't selected.
|
||||
for (size_t i=0; i<pblocktemplate->block.vtx.size(); ++i) {
|
||||
BOOST_CHECK(pblocktemplate->block.vtx[i]->GetHash() != hashFreeTx2);
|
||||
BOOST_CHECK(pblocktemplate->block.vtx[i]->GetHash() != hashLowFeeTx2);
|
||||
for (size_t i=0; i<block.vtx.size(); ++i) {
|
||||
BOOST_CHECK(block.vtx[i]->GetHash() != hashFreeTx2);
|
||||
BOOST_CHECK(block.vtx[i]->GetHash() != hashLowFeeTx2);
|
||||
}
|
||||
|
||||
// This tx will be mineable, and should cause hashLowFeeTx2 to be selected
|
||||
|
@ -223,9 +235,11 @@ void MinerTestingSetup::TestPackageSelection(const CScript& scriptPubKey, const
|
|||
tx.vin[0].prevout.n = 1;
|
||||
tx.vout[0].nValue = 100000000 - 10000; // 10k satoshi fee
|
||||
AddToMempool(tx_mempool, entry.Fee(10000).FromTx(tx));
|
||||
pblocktemplate = AssemblerForTest(tx_mempool, options).CreateNewBlock();
|
||||
BOOST_REQUIRE_EQUAL(pblocktemplate->block.vtx.size(), 9U);
|
||||
BOOST_CHECK(pblocktemplate->block.vtx[8]->GetHash() == hashLowFeeTx2);
|
||||
block_template = mining->createNewBlock(options);
|
||||
BOOST_REQUIRE(block_template);
|
||||
block = block_template->getBlock();
|
||||
BOOST_REQUIRE_EQUAL(block.vtx.size(), 9U);
|
||||
BOOST_CHECK(block.vtx[8]->GetHash() == hashLowFeeTx2);
|
||||
}
|
||||
|
||||
void MinerTestingSetup::TestBasicMining(const CScript& scriptPubKey, const std::vector<CTransactionRef>& txFirst, int baseheight)
|
||||
|
@ -241,6 +255,9 @@ void MinerTestingSetup::TestBasicMining(const CScript& scriptPubKey, const std::
|
|||
const CAmount HIGHFEE = COIN;
|
||||
const CAmount HIGHERFEE = 4 * COIN;
|
||||
|
||||
auto mining{MakeMining()};
|
||||
BOOST_REQUIRE(mining);
|
||||
|
||||
BlockAssembler::Options options;
|
||||
options.coinbase_output_script = scriptPubKey;
|
||||
|
||||
|
@ -249,8 +266,9 @@ void MinerTestingSetup::TestBasicMining(const CScript& scriptPubKey, const std::
|
|||
LOCK(tx_mempool.cs);
|
||||
|
||||
// Just to make sure we can still make simple blocks
|
||||
auto pblocktemplate = AssemblerForTest(tx_mempool, options).CreateNewBlock();
|
||||
BOOST_CHECK(pblocktemplate);
|
||||
auto block_template{mining->createNewBlock(options)};
|
||||
BOOST_REQUIRE(block_template);
|
||||
CBlock block{block_template->getBlock()};
|
||||
|
||||
// block sigops > limit: 1000 CHECKMULTISIG + 1
|
||||
tx.vin.resize(1);
|
||||
|
@ -269,7 +287,7 @@ void MinerTestingSetup::TestBasicMining(const CScript& scriptPubKey, const std::
|
|||
tx.vin[0].prevout.hash = hash;
|
||||
}
|
||||
|
||||
BOOST_CHECK_EXCEPTION(AssemblerForTest(tx_mempool, options).CreateNewBlock(), std::runtime_error, HasReason("bad-blk-sigops"));
|
||||
BOOST_CHECK_EXCEPTION(mining->createNewBlock(options), std::runtime_error, HasReason("bad-blk-sigops"));
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -286,7 +304,7 @@ void MinerTestingSetup::TestBasicMining(const CScript& scriptPubKey, const std::
|
|||
AddToMempool(tx_mempool, entry.Fee(LOWFEE).Time(Now<NodeSeconds>()).SpendsCoinbase(spendsCoinbase).SigOpsCost(80).FromTx(tx));
|
||||
tx.vin[0].prevout.hash = hash;
|
||||
}
|
||||
BOOST_CHECK(AssemblerForTest(tx_mempool, options).CreateNewBlock());
|
||||
BOOST_REQUIRE(mining->createNewBlock(options));
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -310,7 +328,7 @@ void MinerTestingSetup::TestBasicMining(const CScript& scriptPubKey, const std::
|
|||
AddToMempool(tx_mempool, entry.Fee(LOWFEE).Time(Now<NodeSeconds>()).SpendsCoinbase(spendsCoinbase).FromTx(tx));
|
||||
tx.vin[0].prevout.hash = hash;
|
||||
}
|
||||
BOOST_CHECK(AssemblerForTest(tx_mempool, options).CreateNewBlock());
|
||||
BOOST_REQUIRE(mining->createNewBlock(options));
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -320,7 +338,7 @@ void MinerTestingSetup::TestBasicMining(const CScript& scriptPubKey, const std::
|
|||
// orphan in tx_mempool, template creation fails
|
||||
hash = tx.GetHash();
|
||||
AddToMempool(tx_mempool, entry.Fee(LOWFEE).Time(Now<NodeSeconds>()).FromTx(tx));
|
||||
BOOST_CHECK_EXCEPTION(AssemblerForTest(tx_mempool, options).CreateNewBlock(), std::runtime_error, HasReason("bad-txns-inputs-missingorspent"));
|
||||
BOOST_CHECK_EXCEPTION(mining->createNewBlock(options), std::runtime_error, HasReason("bad-txns-inputs-missingorspent"));
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -341,7 +359,7 @@ void MinerTestingSetup::TestBasicMining(const CScript& scriptPubKey, const std::
|
|||
tx.vout[0].nValue = tx.vout[0].nValue + BLOCKSUBSIDY - HIGHERFEE; // First txn output + fresh coinbase - new txn fee
|
||||
hash = tx.GetHash();
|
||||
AddToMempool(tx_mempool, entry.Fee(HIGHERFEE).Time(Now<NodeSeconds>()).SpendsCoinbase(true).FromTx(tx));
|
||||
BOOST_CHECK(AssemblerForTest(tx_mempool, options).CreateNewBlock());
|
||||
BOOST_REQUIRE(mining->createNewBlock(options));
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -357,7 +375,7 @@ void MinerTestingSetup::TestBasicMining(const CScript& scriptPubKey, const std::
|
|||
// give it a fee so it'll get mined
|
||||
AddToMempool(tx_mempool, entry.Fee(LOWFEE).Time(Now<NodeSeconds>()).SpendsCoinbase(false).FromTx(tx));
|
||||
// Should throw bad-cb-multiple
|
||||
BOOST_CHECK_EXCEPTION(AssemblerForTest(tx_mempool, options).CreateNewBlock(), std::runtime_error, HasReason("bad-cb-multiple"));
|
||||
BOOST_CHECK_EXCEPTION(mining->createNewBlock(options), std::runtime_error, HasReason("bad-cb-multiple"));
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -374,7 +392,7 @@ void MinerTestingSetup::TestBasicMining(const CScript& scriptPubKey, const std::
|
|||
tx.vout[0].scriptPubKey = CScript() << OP_2;
|
||||
hash = tx.GetHash();
|
||||
AddToMempool(tx_mempool, entry.Fee(HIGHFEE).Time(Now<NodeSeconds>()).SpendsCoinbase(true).FromTx(tx));
|
||||
BOOST_CHECK_EXCEPTION(AssemblerForTest(tx_mempool, options).CreateNewBlock(), std::runtime_error, HasReason("bad-txns-inputs-missingorspent"));
|
||||
BOOST_CHECK_EXCEPTION(mining->createNewBlock(options), std::runtime_error, HasReason("bad-txns-inputs-missingorspent"));
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -394,7 +412,7 @@ void MinerTestingSetup::TestBasicMining(const CScript& scriptPubKey, const std::
|
|||
next->BuildSkip();
|
||||
m_node.chainman->ActiveChain().SetTip(*next);
|
||||
}
|
||||
BOOST_CHECK(AssemblerForTest(tx_mempool, options).CreateNewBlock());
|
||||
BOOST_REQUIRE(mining->createNewBlock(options));
|
||||
// Extend to a 210000-long block chain.
|
||||
while (m_node.chainman->ActiveChain().Tip()->nHeight < 210000) {
|
||||
CBlockIndex* prev = m_node.chainman->ActiveChain().Tip();
|
||||
|
@ -406,7 +424,7 @@ void MinerTestingSetup::TestBasicMining(const CScript& scriptPubKey, const std::
|
|||
next->BuildSkip();
|
||||
m_node.chainman->ActiveChain().SetTip(*next);
|
||||
}
|
||||
BOOST_CHECK(AssemblerForTest(tx_mempool, options).CreateNewBlock());
|
||||
BOOST_REQUIRE(mining->createNewBlock(options));
|
||||
|
||||
// invalid p2sh txn in tx_mempool, template creation fails
|
||||
tx.vin[0].prevout.hash = txFirst[0]->GetHash();
|
||||
|
@ -422,7 +440,7 @@ void MinerTestingSetup::TestBasicMining(const CScript& scriptPubKey, const std::
|
|||
tx.vout[0].nValue -= LOWFEE;
|
||||
hash = tx.GetHash();
|
||||
AddToMempool(tx_mempool, entry.Fee(LOWFEE).Time(Now<NodeSeconds>()).SpendsCoinbase(false).FromTx(tx));
|
||||
BOOST_CHECK_EXCEPTION(AssemblerForTest(tx_mempool, options).CreateNewBlock(), std::runtime_error, HasReason("mandatory-script-verify-flag-failed"));
|
||||
BOOST_CHECK_EXCEPTION(mining->createNewBlock(options), std::runtime_error, HasReason("mandatory-script-verify-flag-failed"));
|
||||
|
||||
// Delete the dummy blocks again.
|
||||
while (m_node.chainman->ActiveChain().Tip()->nHeight > nHeight) {
|
||||
|
@ -524,14 +542,15 @@ void MinerTestingSetup::TestBasicMining(const CScript& scriptPubKey, const std::
|
|||
tx.vin[0].nSequence = CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG | 1;
|
||||
BOOST_CHECK(!TestSequenceLocks(CTransaction{tx}, tx_mempool)); // Sequence locks fail
|
||||
|
||||
auto pblocktemplate = AssemblerForTest(tx_mempool, options).CreateNewBlock();
|
||||
BOOST_CHECK(pblocktemplate);
|
||||
auto block_template = mining->createNewBlock(options);
|
||||
BOOST_REQUIRE(block_template);
|
||||
|
||||
// None of the of the absolute height/time locked tx should have made
|
||||
// it into the template because we still check IsFinalTx in CreateNewBlock,
|
||||
// but relative locked txs will if inconsistently added to mempool.
|
||||
// For now these will still generate a valid template until BIP68 soft fork
|
||||
BOOST_CHECK_EQUAL(pblocktemplate->block.vtx.size(), 3U);
|
||||
CBlock block{block_template->getBlock()};
|
||||
BOOST_CHECK_EQUAL(block.vtx.size(), 3U);
|
||||
// However if we advance height by 1 and time by SEQUENCE_LOCK_TIME, all of them should be mined
|
||||
for (int i = 0; i < CBlockIndex::nMedianTimeSpan; ++i) {
|
||||
CBlockIndex* ancestor{Assert(m_node.chainman->ActiveChain().Tip()->GetAncestor(m_node.chainman->ActiveChain().Tip()->nHeight - i))};
|
||||
|
@ -540,12 +559,17 @@ void MinerTestingSetup::TestBasicMining(const CScript& scriptPubKey, const std::
|
|||
m_node.chainman->ActiveChain().Tip()->nHeight++;
|
||||
SetMockTime(m_node.chainman->ActiveChain().Tip()->GetMedianTimePast() + 1);
|
||||
|
||||
BOOST_CHECK(pblocktemplate = AssemblerForTest(tx_mempool, options).CreateNewBlock());
|
||||
BOOST_CHECK_EQUAL(pblocktemplate->block.vtx.size(), 5U);
|
||||
block_template = mining->createNewBlock(options);
|
||||
BOOST_REQUIRE(block_template);
|
||||
block = block_template->getBlock();
|
||||
BOOST_CHECK_EQUAL(block.vtx.size(), 5U);
|
||||
}
|
||||
|
||||
void MinerTestingSetup::TestPrioritisedMining(const CScript& scriptPubKey, const std::vector<CTransactionRef>& txFirst)
|
||||
{
|
||||
auto mining{MakeMining()};
|
||||
BOOST_REQUIRE(mining);
|
||||
|
||||
BlockAssembler::Options options;
|
||||
options.coinbase_output_script = scriptPubKey;
|
||||
|
||||
|
@ -610,34 +634,34 @@ void MinerTestingSetup::TestPrioritisedMining(const CScript& scriptPubKey, const
|
|||
Txid hashFreeGrandchild = tx.GetHash();
|
||||
AddToMempool(tx_mempool, entry.Fee(0).SpendsCoinbase(false).FromTx(tx));
|
||||
|
||||
auto pblocktemplate = AssemblerForTest(tx_mempool, options).CreateNewBlock();
|
||||
BOOST_REQUIRE_EQUAL(pblocktemplate->block.vtx.size(), 6U);
|
||||
BOOST_CHECK(pblocktemplate->block.vtx[1]->GetHash() == hashFreeParent);
|
||||
BOOST_CHECK(pblocktemplate->block.vtx[2]->GetHash() == hashFreePrioritisedTx);
|
||||
BOOST_CHECK(pblocktemplate->block.vtx[3]->GetHash() == hashParentTx);
|
||||
BOOST_CHECK(pblocktemplate->block.vtx[4]->GetHash() == hashPrioritsedChild);
|
||||
BOOST_CHECK(pblocktemplate->block.vtx[5]->GetHash() == hashFreeChild);
|
||||
for (size_t i=0; i<pblocktemplate->block.vtx.size(); ++i) {
|
||||
auto block_template = mining->createNewBlock(options);
|
||||
BOOST_REQUIRE(block_template);
|
||||
CBlock block{block_template->getBlock()};
|
||||
BOOST_REQUIRE_EQUAL(block.vtx.size(), 6U);
|
||||
BOOST_CHECK(block.vtx[1]->GetHash() == hashFreeParent);
|
||||
BOOST_CHECK(block.vtx[2]->GetHash() == hashFreePrioritisedTx);
|
||||
BOOST_CHECK(block.vtx[3]->GetHash() == hashParentTx);
|
||||
BOOST_CHECK(block.vtx[4]->GetHash() == hashPrioritsedChild);
|
||||
BOOST_CHECK(block.vtx[5]->GetHash() == hashFreeChild);
|
||||
for (size_t i=0; i<block.vtx.size(); ++i) {
|
||||
// The FreeParent and FreeChild's prioritisations should not impact the child.
|
||||
BOOST_CHECK(pblocktemplate->block.vtx[i]->GetHash() != hashFreeGrandchild);
|
||||
BOOST_CHECK(block.vtx[i]->GetHash() != hashFreeGrandchild);
|
||||
// De-prioritised transaction should not be included.
|
||||
BOOST_CHECK(pblocktemplate->block.vtx[i]->GetHash() != hashMediumFeeTx);
|
||||
BOOST_CHECK(block.vtx[i]->GetHash() != hashMediumFeeTx);
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: These tests rely on CreateNewBlock doing its own self-validation!
|
||||
BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
|
||||
{
|
||||
auto mining{MakeMining()};
|
||||
BOOST_REQUIRE(mining);
|
||||
|
||||
// Note that by default, these tests run with size accounting enabled.
|
||||
CScript scriptPubKey = CScript() << "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f"_hex << OP_CHECKSIG;
|
||||
std::unique_ptr<CBlockTemplate> pblocktemplate;
|
||||
|
||||
BlockAssembler::Options options;
|
||||
options.coinbase_output_script = scriptPubKey;
|
||||
|
||||
CTxMemPool& tx_mempool{*m_node.mempool};
|
||||
// Simple block creation, nothing special yet:
|
||||
BOOST_CHECK(pblocktemplate = AssemblerForTest(tx_mempool, options).CreateNewBlock());
|
||||
std::unique_ptr<BlockTemplate> block_template;
|
||||
|
||||
// We can't make transactions until we have inputs
|
||||
// Therefore, load 110 blocks :)
|
||||
|
@ -645,27 +669,48 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
|
|||
int baseheight = 0;
|
||||
std::vector<CTransactionRef> txFirst;
|
||||
for (const auto& bi : BLOCKINFO) {
|
||||
CBlock *pblock = &pblocktemplate->block; // pointer for convenience
|
||||
const int current_height{mining->getTip()->height};
|
||||
|
||||
// Simple block creation, nothing special yet:
|
||||
block_template = mining->createNewBlock(options);
|
||||
BOOST_REQUIRE(block_template);
|
||||
|
||||
CBlock block{block_template->getBlock()};
|
||||
CMutableTransaction txCoinbase(*block.vtx[0]);
|
||||
{
|
||||
LOCK(cs_main);
|
||||
pblock->nVersion = VERSIONBITS_TOP_BITS;
|
||||
pblock->nTime = m_node.chainman->ActiveChain().Tip()->GetMedianTimePast()+1;
|
||||
CMutableTransaction txCoinbase(*pblock->vtx[0]);
|
||||
block.nVersion = VERSIONBITS_TOP_BITS;
|
||||
block.nTime = Assert(m_node.chainman)->ActiveChain().Tip()->GetMedianTimePast()+1;
|
||||
txCoinbase.version = 1;
|
||||
txCoinbase.vin[0].scriptSig = CScript{} << (m_node.chainman->ActiveChain().Height() + 1) << bi.extranonce;
|
||||
txCoinbase.vin[0].scriptSig = CScript{} << (current_height + 1) << bi.extranonce;
|
||||
txCoinbase.vout.resize(1); // Ignore the (optional) segwit commitment added by CreateNewBlock (as the hardcoded nonces don't account for this)
|
||||
txCoinbase.vout[0].scriptPubKey = CScript();
|
||||
pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase));
|
||||
block.vtx[0] = MakeTransactionRef(txCoinbase);
|
||||
if (txFirst.size() == 0)
|
||||
baseheight = m_node.chainman->ActiveChain().Height();
|
||||
baseheight = current_height;
|
||||
if (txFirst.size() < 4)
|
||||
txFirst.push_back(pblock->vtx[0]);
|
||||
pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
|
||||
pblock->nNonce = bi.nonce;
|
||||
txFirst.push_back(block.vtx[0]);
|
||||
block.hashMerkleRoot = BlockMerkleRoot(block);
|
||||
block.nNonce = bi.nonce;
|
||||
}
|
||||
std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(*pblock);
|
||||
BOOST_CHECK(Assert(m_node.chainman)->ProcessNewBlock(shared_pblock, true, true, nullptr));
|
||||
pblock->hashPrevBlock = pblock->GetHash();
|
||||
std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(block);
|
||||
// Alternate calls between Chainman's ProcessNewBlock and submitSolution
|
||||
// via the Mining interface. The former is used by net_processing as well
|
||||
// as the submitblock RPC.
|
||||
if (current_height % 2 == 0) {
|
||||
BOOST_REQUIRE(Assert(m_node.chainman)->ProcessNewBlock(shared_pblock, /*force_processing=*/true, /*min_pow_checked=*/true, nullptr));
|
||||
} else {
|
||||
BOOST_REQUIRE(block_template->submitSolution(block.nVersion, block.nTime, block.nNonce, MakeTransactionRef(txCoinbase)));
|
||||
}
|
||||
{
|
||||
LOCK(cs_main);
|
||||
// The above calls don't guarantee the tip is actually updated, so
|
||||
// we explictly check this.
|
||||
auto maybe_new_tip{Assert(m_node.chainman)->ActiveChain().Tip()};
|
||||
BOOST_REQUIRE_EQUAL(maybe_new_tip->GetBlockHash(), block.GetHash());
|
||||
}
|
||||
// This just adds coverage
|
||||
mining->waitTipChanged(block.hashPrevBlock);
|
||||
}
|
||||
|
||||
LOCK(cs_main);
|
||||
|
|
|
@ -1700,9 +1700,8 @@ BOOST_AUTO_TEST_CASE(bip341_keypath_test_vectors)
BOOST_CHECK_EQUAL(HexStr(sighash), input["intermediary"]["sigHash"].get_str());

// To verify the sigmsg, hash the expected sigmsg, and compare it with the (expected) sighash.
BOOST_CHECK_EQUAL(HexStr((HashWriter{HASHER_TAPSIGHASH} << Span{ParseHex(input["intermediary"]["sigMsg"].get_str())}).GetSHA256()), input["intermediary"]["sigHash"].get_str());
BOOST_CHECK_EQUAL(HexStr((HashWriter{HASHER_TAPSIGHASH} << std::span<const uint8_t>{ParseHex(input["intermediary"]["sigMsg"].get_str())}).GetSHA256()), input["intermediary"]["sigHash"].get_str());
}

}
}
@ -304,7 +304,7 @@ public:
if (s.template GetParams<BaseFormat>().m_base_format == BaseFormat::RAW) {
s << m_base_data;
} else {
s << Span{HexStr(Span{&m_base_data, 1})};
s << std::span<const char>{HexStr(Span{&m_base_data, 1})};
}
}
@ -25,7 +25,7 @@ COutPoint generatetoaddress(const NodeContext& node, const std::string& address)
const auto dest = DecodeDestination(address);
assert(IsValidDestination(dest));
BlockAssembler::Options assembler_options;
assembler_options.coinbase_output_script = {GetScriptForDestination(dest)};
assembler_options.coinbase_output_script = GetScriptForDestination(dest);

return MineBlock(node, assembler_options);
}
@ -12,6 +12,7 @@
#include <netaddress.h>
#include <node/connection_types.h>
#include <node/eviction.h>
#include <span.h>
#include <sync.h>
#include <util/sock.h>

@ -28,9 +29,6 @@

class FastRandomContext;

template <typename C>
class Span;

struct ConnmanTestMsg : public CConnman {
using CConnman::CConnman;
@ -72,7 +72,8 @@ BOOST_FIXTURE_TEST_CASE(chainstate_update_tip, TestChain100Setup)
ChainstateManager& chainman = *Assert(m_node.chainman);
const auto get_notify_tip{[&]() {
LOCK(m_node.notifications->m_tip_block_mutex);
return m_node.notifications->m_tip_block;
BOOST_REQUIRE(m_node.notifications->TipBlock());
return *m_node.notifications->TipBlock();
}};
uint256 curr_tip = get_notify_tip();
@ -442,7 +442,7 @@ void CTxMemPool::Apply(ChangeSet* changeset)
std::optional<CTxMemPool::setEntries> ancestors;
if (i == 0) {
// Note: ChangeSet::CalculateMemPoolAncestors() will return a
// cached value if mempool ancestors for this tranaction were
// cached value if mempool ancestors for this transaction were
// previously calculated.
// We can only use a cached ancestor calculation for the first
// transaction in a package, because in-package parents won't be
@ -810,7 +810,7 @@ public:
 * mempool.
 *
 * CalculateMemPoolAncestors() calculates the in-mempool (not including
 * what is in the change set itself) ancestors of a given transacion.
 * what is in the change set itself) ancestors of a given transaction.
 *
 * Apply() will apply the removals and additions that are staged into the
 * mempool.
@ -69,16 +69,27 @@ public:

/** @name Hex representation
 *
 * The reverse-byte hex representation is a convenient way to view the blob
 * as a number, because it is consistent with the way the base_uint class
 * converts blobs to numbers.
 * The hex representation used by GetHex(), ToString(), FromHex() and
 * SetHexDeprecated() is unusual, since it shows bytes of the base_blob in
 * reverse order. For example, a 4-byte blob {0x12, 0x34, 0x56, 0x78} is
 * represented as "78563412" instead of the more typical "12345678"
 * representation that would be shown in a hex editor or used by typical
 * byte-array / hex conversion functions like python's bytes.hex() and
 * bytes.fromhex().
 *
 * The nice thing about the reverse-byte representation, even though it is
 * unusual, is that if a blob contains an arithmetic number in little endian
 * format (with least significant bytes first, and most significant bytes
 * last), the GetHex() output will match the way the number would normally
 * be written in base-16 (with most significant digits first and least
 * significant digits last).
 *
 * This means, for example, that ArithToUint256(num).GetHex() can be used to
 * display an arith_uint256 num value as a number, because
 * ArithToUint256() converts the number to a blob in little-endian format,
 * so the arith_uint256 class doesn't need to have its own number parsing
 * and formatting functions.
 *
 * @note base_uint treats the blob as an array of bytes with the numerically
 * least significant byte first and the most significant byte last. Because
 * numbers are typically written with the most significant digit first and
 * the least significant digit last, the reverse hex display of the blob
 * corresponds to the same numeric value that base_uint interprets from the
 * blob.
 * @{*/
std::string GetHex() const;
/** Unlike FromHex this accepts any invalid input, thus it is fragile and deprecated!
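The expanded base_blob comment above is easy to verify by hand: printing a blob's bytes from last to first reproduces the GetHex()-style string. A tiny stand-alone demonstration using the comment's own {0x12, 0x34, 0x56, 0x78} example (no Bitcoin Core types involved):

    // Illustrative only: the 4-byte blob {0x12, 0x34, 0x56, 0x78} rendered
    // back-to-front gives "78563412", the reverse-byte hex form described above.
    #include <array>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const std::array<std::uint8_t, 4> blob{0x12, 0x34, 0x56, 0x78};

        // Forward order: what a hex editor or python's bytes.hex() would show.
        for (auto b : blob) std::printf("%02x", b);
        std::printf("\n"); // 12345678

        // Reverse order: the GetHex()-style rendering, i.e. the blob read as a
        // little-endian number and written most-significant-digit first.
        for (auto it = blob.rbegin(); it != blob.rend(); ++it) std::printf("%02x", *it);
        std::printf("\n"); // 78563412
    }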
@ -15,10 +15,119 @@ target_include_directories(univalue
|
|||
target_link_libraries(univalue PRIVATE core_interface)
|
||||
|
||||
if(BUILD_TESTS)
|
||||
add_executable(unitester test/unitester.cpp)
|
||||
target_compile_definitions(unitester
|
||||
PRIVATE
|
||||
JSON_TEST_SRC=\"${CMAKE_CURRENT_SOURCE_DIR}/test\"
|
||||
include(GenerateHeaders)
|
||||
generate_header_from_json(test/fail1.json)
|
||||
generate_header_from_json(test/fail10.json)
|
||||
generate_header_from_json(test/fail11.json)
|
||||
generate_header_from_json(test/fail12.json)
|
||||
generate_header_from_json(test/fail13.json)
|
||||
generate_header_from_json(test/fail14.json)
|
||||
generate_header_from_json(test/fail15.json)
|
||||
generate_header_from_json(test/fail16.json)
|
||||
generate_header_from_json(test/fail17.json)
|
||||
generate_header_from_json(test/fail18.json)
|
||||
generate_header_from_json(test/fail19.json)
|
||||
generate_header_from_json(test/fail2.json)
|
||||
generate_header_from_json(test/fail20.json)
|
||||
generate_header_from_json(test/fail21.json)
|
||||
generate_header_from_json(test/fail22.json)
|
||||
generate_header_from_json(test/fail23.json)
|
||||
generate_header_from_json(test/fail24.json)
|
||||
generate_header_from_json(test/fail25.json)
|
||||
generate_header_from_json(test/fail26.json)
|
||||
generate_header_from_json(test/fail27.json)
|
||||
generate_header_from_json(test/fail28.json)
|
||||
generate_header_from_json(test/fail29.json)
|
||||
generate_header_from_json(test/fail3.json)
|
||||
generate_header_from_json(test/fail30.json)
|
||||
generate_header_from_json(test/fail31.json)
|
||||
generate_header_from_json(test/fail32.json)
|
||||
generate_header_from_json(test/fail33.json)
|
||||
generate_header_from_json(test/fail34.json)
|
||||
generate_header_from_json(test/fail35.json)
|
||||
generate_header_from_json(test/fail36.json)
|
||||
generate_header_from_json(test/fail37.json)
|
||||
generate_header_from_json(test/fail38.json)
|
||||
generate_header_from_json(test/fail39.json)
|
||||
generate_header_from_json(test/fail4.json)
|
||||
generate_header_from_json(test/fail40.json)
|
||||
generate_header_from_json(test/fail41.json)
|
||||
generate_header_from_json(test/fail42.json)
|
||||
generate_header_from_json(test/fail44.json)
|
||||
generate_header_from_json(test/fail45.json)
|
||||
generate_header_from_json(test/fail5.json)
|
||||
generate_header_from_json(test/fail6.json)
|
||||
generate_header_from_json(test/fail7.json)
|
||||
generate_header_from_json(test/fail8.json)
|
||||
generate_header_from_json(test/fail9.json)
|
||||
generate_header_from_json(test/pass1.json)
|
||||
generate_header_from_json(test/pass2.json)
|
||||
generate_header_from_json(test/pass3.json)
|
||||
generate_header_from_json(test/pass4.json)
|
||||
generate_header_from_json(test/round1.json)
|
||||
generate_header_from_json(test/round2.json)
|
||||
generate_header_from_json(test/round3.json)
|
||||
generate_header_from_json(test/round4.json)
|
||||
generate_header_from_json(test/round5.json)
|
||||
generate_header_from_json(test/round6.json)
|
||||
generate_header_from_json(test/round7.json)
|
||||
add_executable(unitester
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail1.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail10.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail11.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail12.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail13.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail14.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail15.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail16.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail17.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail18.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail19.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail2.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail20.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail21.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail22.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail23.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail24.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail25.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail26.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail27.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail28.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail29.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail3.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail30.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail31.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail32.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail33.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail34.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail35.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail36.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail37.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail38.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail39.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail4.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail40.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail41.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail42.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail44.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail45.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail5.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail6.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail7.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail8.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/fail9.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/pass1.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/pass2.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/pass3.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/pass4.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/round1.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/round2.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/round3.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/round4.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/round5.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/round6.json.h
|
||||
${CMAKE_CURRENT_BINARY_DIR}/test/round7.json.h
|
||||
test/unitester.cpp
|
||||
)
|
||||
target_link_libraries(unitester
|
||||
PRIVATE
@ -1 +1 @@
|
|||
[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]
|
||||
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
@ -1 +1 @@
|
|||
[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]]
|
||||
[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]

@@ -1,130 +1,159 @@
// Copyright 2014 BitPay Inc.
// Distributed under the MIT/X11 software license, see the accompanying
// Copyright (c) 2015-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://opensource.org/licenses/mit-license.php.

#include <univalue.h>
|
||||
|
||||
#include <univalue/test/fail1.json.h>
|
||||
#include <univalue/test/fail10.json.h>
|
||||
#include <univalue/test/fail11.json.h>
|
||||
#include <univalue/test/fail12.json.h>
|
||||
#include <univalue/test/fail13.json.h>
|
||||
#include <univalue/test/fail14.json.h>
|
||||
#include <univalue/test/fail15.json.h>
|
||||
#include <univalue/test/fail16.json.h>
|
||||
#include <univalue/test/fail17.json.h>
|
||||
#include <univalue/test/fail18.json.h>
|
||||
#include <univalue/test/fail19.json.h>
|
||||
#include <univalue/test/fail2.json.h>
|
||||
#include <univalue/test/fail20.json.h>
|
||||
#include <univalue/test/fail21.json.h>
|
||||
#include <univalue/test/fail22.json.h>
|
||||
#include <univalue/test/fail23.json.h>
|
||||
#include <univalue/test/fail24.json.h>
|
||||
#include <univalue/test/fail25.json.h>
|
||||
#include <univalue/test/fail26.json.h>
|
||||
#include <univalue/test/fail27.json.h>
|
||||
#include <univalue/test/fail28.json.h>
|
||||
#include <univalue/test/fail29.json.h>
|
||||
#include <univalue/test/fail3.json.h>
|
||||
#include <univalue/test/fail30.json.h>
|
||||
#include <univalue/test/fail31.json.h>
|
||||
#include <univalue/test/fail32.json.h>
|
||||
#include <univalue/test/fail33.json.h>
|
||||
#include <univalue/test/fail34.json.h>
|
||||
#include <univalue/test/fail35.json.h>
|
||||
#include <univalue/test/fail36.json.h>
|
||||
#include <univalue/test/fail37.json.h>
|
||||
#include <univalue/test/fail38.json.h>
|
||||
#include <univalue/test/fail39.json.h>
|
||||
#include <univalue/test/fail4.json.h>
|
||||
#include <univalue/test/fail40.json.h>
|
||||
#include <univalue/test/fail41.json.h>
|
||||
#include <univalue/test/fail42.json.h>
|
||||
#include <univalue/test/fail44.json.h>
|
||||
#include <univalue/test/fail45.json.h>
|
||||
#include <univalue/test/fail5.json.h>
|
||||
#include <univalue/test/fail6.json.h>
|
||||
#include <univalue/test/fail7.json.h>
|
||||
#include <univalue/test/fail8.json.h>
|
||||
#include <univalue/test/fail9.json.h>
|
||||
#include <univalue/test/pass1.json.h>
|
||||
#include <univalue/test/pass2.json.h>
|
||||
#include <univalue/test/pass3.json.h>
|
||||
#include <univalue/test/pass4.json.h>
|
||||
#include <univalue/test/round1.json.h>
|
||||
#include <univalue/test/round2.json.h>
|
||||
#include <univalue/test/round3.json.h>
|
||||
#include <univalue/test/round4.json.h>
|
||||
#include <univalue/test/round5.json.h>
|
||||
#include <univalue/test/round6.json.h>
|
||||
#include <univalue/test/round7.json.h>
|
||||
|
||||
#include <array>
|
||||
#include <cassert>
|
||||
#include <cstdio>
|
||||
#include <string>
|
||||
|
||||
#ifndef JSON_TEST_SRC
|
||||
#error JSON_TEST_SRC must point to test source directory
|
||||
#endif
|
||||
|
||||
std::string srcdir(JSON_TEST_SRC);
|
||||
|
||||
static std::string rtrim(std::string s)
|
||||
{
|
||||
s.erase(s.find_last_not_of(" \n\r\t")+1);
|
||||
s.erase(s.find_last_not_of(" \n\r\t") + 1);
|
||||
return s;
|
||||
}
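For reference, rtrim is what lets the round-trip comparison tolerate trailing whitespace (typically the final newline of the .json file): find_last_not_of returns npos for an all-whitespace string, and npos + 1 wraps to 0, erasing everything. A standalone sketch of that behaviour, not part of the patch:

#include <cassert>
#include <string>

static std::string rtrim(std::string s)
{
    // Erase from one past the last non-whitespace character to the end.
    s.erase(s.find_last_not_of(" \n\r\t") + 1);
    return s;
}

int main()
{
    assert(rtrim("[1, 2]\n") == "[1, 2]");  // trailing newline stripped
    assert(rtrim(" \t\r\n").empty());       // npos + 1 == 0: whole string erased
    return 0;
}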
|
||||
|
||||
static void runtest(std::string filename, const std::string& jdata)
|
||||
{
|
||||
std::string prefix = filename.substr(0, 4);
|
||||
std::string prefix = filename.substr(0, 4);
|
||||
|
||||
bool wantPass = (prefix == "pass") || (prefix == "roun");
|
||||
bool wantFail = (prefix == "fail");
|
||||
bool wantRoundTrip = (prefix == "roun");
|
||||
assert(wantPass || wantFail);
|
||||
bool wantPass = (prefix == "pass") || (prefix == "roun");
|
||||
bool wantFail = (prefix == "fail");
|
||||
bool wantRoundTrip = (prefix == "roun");
|
||||
assert(wantPass || wantFail);
|
||||
|
||||
UniValue val;
|
||||
bool testResult = val.read(jdata);
|
||||
UniValue val;
|
||||
bool testResult = val.read(jdata);
|
||||
|
||||
if (wantPass) {
|
||||
assert(testResult == true);
|
||||
} else {
|
||||
assert(testResult == false);
|
||||
}
|
||||
if (wantPass) {
|
||||
assert(testResult == true);
|
||||
} else {
|
||||
assert(testResult == false);
|
||||
}
|
||||
|
||||
if (wantRoundTrip) {
|
||||
std::string odata = val.write(0, 0);
|
||||
assert(odata == rtrim(jdata));
|
||||
}
|
||||
if (wantRoundTrip) {
|
||||
std::string odata = val.write(0, 0);
|
||||
assert(odata == rtrim(jdata));
|
||||
}
|
||||
}
|
||||
|
||||
static void runtest_file(const char *filename_)
|
||||
{
|
||||
std::string basename(filename_);
|
||||
std::string filename = srcdir + "/" + basename;
|
||||
FILE *f = fopen(filename.c_str(), "r");
|
||||
assert(f != nullptr);
|
||||
|
||||
std::string jdata;
|
||||
|
||||
char buf[4096];
|
||||
while (!feof(f)) {
|
||||
int bread = fread(buf, 1, sizeof(buf), f);
|
||||
assert(!ferror(f));
|
||||
|
||||
std::string s(buf, bread);
|
||||
jdata += s;
|
||||
}
|
||||
|
||||
assert(!ferror(f));
|
||||
fclose(f);
|
||||
|
||||
runtest(basename, jdata);
|
||||
}
|
||||
|
||||
static const char *filenames[] = {
|
||||
"fail10.json",
|
||||
"fail11.json",
|
||||
"fail12.json",
|
||||
"fail13.json",
|
||||
"fail14.json",
|
||||
"fail15.json",
|
||||
"fail16.json",
|
||||
"fail17.json",
|
||||
//"fail18.json", // investigate
|
||||
"fail19.json",
|
||||
"fail1.json",
|
||||
"fail20.json",
|
||||
"fail21.json",
|
||||
"fail22.json",
|
||||
"fail23.json",
|
||||
"fail24.json",
|
||||
"fail25.json",
|
||||
"fail26.json",
|
||||
"fail27.json",
|
||||
"fail28.json",
|
||||
"fail29.json",
|
||||
"fail2.json",
|
||||
"fail30.json",
|
||||
"fail31.json",
|
||||
"fail32.json",
|
||||
"fail33.json",
|
||||
"fail34.json",
|
||||
"fail35.json",
|
||||
"fail36.json",
|
||||
"fail37.json",
|
||||
"fail38.json", // invalid unicode: only first half of surrogate pair
|
||||
"fail39.json", // invalid unicode: only second half of surrogate pair
|
||||
"fail40.json", // invalid unicode: broken UTF-8
|
||||
"fail41.json", // invalid unicode: unfinished UTF-8
|
||||
"fail42.json", // valid json with garbage following a nul byte
|
||||
"fail44.json", // unterminated string
|
||||
"fail45.json", // nested beyond max depth
|
||||
"fail3.json",
|
||||
"fail4.json", // extra comma
|
||||
"fail5.json",
|
||||
"fail6.json",
|
||||
"fail7.json",
|
||||
"fail8.json",
|
||||
"fail9.json", // extra comma
|
||||
"pass1.json",
|
||||
"pass2.json",
|
||||
"pass3.json",
|
||||
"pass4.json",
|
||||
"round1.json", // round-trip test
|
||||
"round2.json", // unicode
|
||||
"round3.json", // bare string
|
||||
"round4.json", // bare number
|
||||
"round5.json", // bare true
|
||||
"round6.json", // bare false
|
||||
"round7.json", // bare null
|
||||
};
|
||||
#define TEST_FILE(name) {#name, json_tests::name}
|
||||
inline constexpr std::array tests{std::to_array<std::tuple<std::string_view, std::string_view>>({
|
||||
TEST_FILE(fail1),
|
||||
TEST_FILE(fail10),
|
||||
TEST_FILE(fail11),
|
||||
TEST_FILE(fail12),
|
||||
TEST_FILE(fail13),
|
||||
TEST_FILE(fail14),
|
||||
TEST_FILE(fail15),
|
||||
TEST_FILE(fail16),
|
||||
TEST_FILE(fail17),
|
||||
TEST_FILE(fail18),
|
||||
TEST_FILE(fail19),
|
||||
TEST_FILE(fail2),
|
||||
TEST_FILE(fail20),
|
||||
TEST_FILE(fail21),
|
||||
TEST_FILE(fail22),
|
||||
TEST_FILE(fail23),
|
||||
TEST_FILE(fail24),
|
||||
TEST_FILE(fail25),
|
||||
TEST_FILE(fail26),
|
||||
TEST_FILE(fail27),
|
||||
TEST_FILE(fail28),
|
||||
TEST_FILE(fail29),
|
||||
TEST_FILE(fail3),
|
||||
TEST_FILE(fail30),
|
||||
TEST_FILE(fail31),
|
||||
TEST_FILE(fail32),
|
||||
TEST_FILE(fail33),
|
||||
TEST_FILE(fail34),
|
||||
TEST_FILE(fail35),
|
||||
TEST_FILE(fail36),
|
||||
TEST_FILE(fail37),
|
||||
TEST_FILE(fail38), // invalid unicode: only first half of surrogate pair
|
||||
TEST_FILE(fail39), // invalid unicode: only second half of surrogate pair
|
||||
TEST_FILE(fail4), // extra comma
|
||||
TEST_FILE(fail40), // invalid unicode: broken UTF-8
|
||||
TEST_FILE(fail41), // invalid unicode: unfinished UTF-8
|
||||
TEST_FILE(fail42), // valid json with garbage following a nul byte
|
||||
TEST_FILE(fail44), // unterminated string
|
||||
TEST_FILE(fail45), // nested beyond max depth
|
||||
TEST_FILE(fail5),
|
||||
TEST_FILE(fail6),
|
||||
TEST_FILE(fail7),
|
||||
TEST_FILE(fail8),
|
||||
TEST_FILE(fail9), // extra comma
|
||||
TEST_FILE(pass1),
|
||||
TEST_FILE(pass2),
|
||||
TEST_FILE(pass3),
|
||||
TEST_FILE(pass4),
|
||||
TEST_FILE(round1), // round-trip test
|
||||
TEST_FILE(round2), // unicode
|
||||
TEST_FILE(round3), // bare string
|
||||
TEST_FILE(round4), // bare number
|
||||
TEST_FILE(round5), // bare true
|
||||
TEST_FILE(round6), // bare false
|
||||
TEST_FILE(round7), // bare null
|
||||
})};
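For context, each generated <name>.json.h is expected to expose the raw file contents as a std::string_view named json_tests::<name>, which the TEST_FILE macro above pairs with its stringified name. A hand-written sketch of such a header follows; the variable names and byte values are illustrative only, not the actual generator output:

#include <string_view>

namespace json_tests {
// Bytes of fail1.json emitted as character literals by the build script (truncated here).
inline constexpr char detail_fail1_bytes[]{
    '\x22', '\x41', '\x20', '\x4a', '\x53', '\x4f', '\x4e', '\x22',
};
inline constexpr std::string_view fail1{detail_fail1_bytes, sizeof(detail_fail1_bytes)};
} // namespace json_tests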
|
||||
|
||||
// Test \u handling
|
||||
void unescape_unicode_test()
|
||||
|
@ -156,10 +185,10 @@ void no_nul_test()
|
|||
assert(val.read({buf + 3, 7}));
|
||||
}
|
||||
|
||||
int main (int argc, char *argv[])
|
||||
int main(int argc, char* argv[])
|
||||
{
|
||||
for (const auto& f: filenames) {
|
||||
runtest_file(f);
|
||||
for (const auto& [file, json] : tests) {
|
||||
runtest(std::string{file}, std::string{json});
|
||||
}
|
||||
|
||||
unescape_unicode_test();
|
||||
|
@ -167,4 +196,3 @@ int main (int argc, char *argv[])
|
|||
|
||||
return 0;
|
||||
}
@ -8,13 +8,12 @@
|
|||
#include <crypto/common.h>
|
||||
#include <crypto/siphash.h>
|
||||
#include <primitives/transaction.h>
|
||||
#include <span.h>
|
||||
#include <uint256.h>
|
||||
|
||||
#include <cstdint>
|
||||
#include <cstring>
|
||||
|
||||
template <typename C> class Span;
|
||||
|
||||
class SaltedTxidHasher
|
||||
{
|
||||
private:
@ -88,6 +88,8 @@ using node::CBlockIndexHeightOnlyComparator;
|
|||
using node::CBlockIndexWorkComparator;
|
||||
using node::SnapshotMetadata;
|
||||
|
||||
/** Size threshold for warning about slow UTXO set flush to disk. */
|
||||
static constexpr size_t WARN_FLUSH_COINS_SIZE = 1 << 30; // 1 GiB
|
||||
/** Time to wait between writing blocks/block index to disk. */
|
||||
static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
|
||||
/** Time to wait between flushing chainstate to disk. */
@ -2929,8 +2931,9 @@ bool Chainstate::FlushStateToDisk(
|
|||
}
|
||||
// Flush best chain related state. This can only be done if the blocks / block index write was also done.
|
||||
if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
|
||||
LOG_TIME_MILLIS_WITH_CATEGORY(strprintf("write coins cache to disk (%d coins, %.2fkB)",
|
||||
coins_count, coins_mem_usage / 1000), BCLog::BENCH);
|
||||
if (coins_mem_usage >= WARN_FLUSH_COINS_SIZE) LogWarning("Flushing large (%d GiB) UTXO set to disk, it may take several minutes", coins_mem_usage >> 30);
|
||||
LOG_TIME_MILLIS_WITH_CATEGORY(strprintf("write coins cache to disk (%d coins, %.2fKiB)",
|
||||
coins_count, coins_mem_usage >> 10), BCLog::BENCH);
|
||||
|
||||
// Typical Coin structures on disk are around 48 bytes in size.
|
||||
// Pushing a new one to the database can cause it to be written
@ -2983,9 +2986,9 @@ void Chainstate::PruneAndFlush()
|
|||
}
|
||||
|
||||
static void UpdateTipLog(
|
||||
const ChainstateManager& chainman,
|
||||
const CCoinsViewCache& coins_tip,
|
||||
const CBlockIndex* tip,
|
||||
const CChainParams& params,
|
||||
const std::string& func_name,
|
||||
const std::string& prefix,
|
||||
const std::string& warning_messages) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
|
||||
|
@ -2997,7 +3000,7 @@ static void UpdateTipLog(
|
|||
tip->GetBlockHash().ToString(), tip->nHeight, tip->nVersion,
|
||||
log(tip->nChainWork.getdouble()) / log(2.0), tip->m_chain_tx_count,
|
||||
FormatISO8601DateTime(tip->GetBlockTime()),
|
||||
GuessVerificationProgress(params.TxData(), tip),
|
||||
chainman.GuessVerificationProgress(tip),
|
||||
coins_tip.DynamicMemoryUsage() * (1.0 / (1 << 20)),
|
||||
coins_tip.GetCacheSize(),
|
||||
!warning_messages.empty() ? strprintf(" warning='%s'", warning_messages) : "");
|
||||
|
@ -3008,15 +3011,13 @@ void Chainstate::UpdateTip(const CBlockIndex* pindexNew)
|
|||
AssertLockHeld(::cs_main);
|
||||
const auto& coins_tip = this->CoinsTip();
|
||||
|
||||
const CChainParams& params{m_chainman.GetParams()};
|
||||
|
||||
// The remainder of the function isn't relevant if we are not acting on
|
||||
// the active chainstate, so return if need be.
|
||||
if (this != &m_chainman.ActiveChainstate()) {
|
||||
// Only log every so often so that we don't bury log messages at the tip.
|
||||
constexpr int BACKGROUND_LOG_INTERVAL = 2000;
|
||||
if (pindexNew->nHeight % BACKGROUND_LOG_INTERVAL == 0) {
|
||||
UpdateTipLog(coins_tip, pindexNew, params, __func__, "[background validation] ", "");
|
||||
UpdateTipLog(m_chainman, coins_tip, pindexNew, __func__, "[background validation] ", "");
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
@ -3031,7 +3032,7 @@ void Chainstate::UpdateTip(const CBlockIndex* pindexNew)
|
|||
const CBlockIndex* pindex = pindexNew;
|
||||
for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
|
||||
WarningBitsConditionChecker checker(m_chainman, bit);
|
||||
ThresholdState state = checker.GetStateFor(pindex, params.GetConsensus(), m_chainman.m_warningcache.at(bit));
|
||||
ThresholdState state = checker.GetStateFor(pindex, m_chainman.GetConsensus(), m_chainman.m_warningcache.at(bit));
|
||||
if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
|
||||
const bilingual_str warning = strprintf(_("Unknown new rules activated (versionbit %i)"), bit);
|
||||
if (state == ThresholdState::ACTIVE) {
|
||||
|
@ -3042,7 +3043,7 @@ void Chainstate::UpdateTip(const CBlockIndex* pindexNew)
|
|||
}
|
||||
}
|
||||
}
|
||||
UpdateTipLog(coins_tip, pindexNew, params, __func__, "",
|
||||
UpdateTipLog(m_chainman, coins_tip, pindexNew, __func__, "",
|
||||
util::Join(warning_messages, Untranslated(", ")).original);
|
||||
}
|
||||
|
||||
|
@ -3528,6 +3529,10 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr<
|
|||
|
||||
bool fInvalidFound = false;
|
||||
std::shared_ptr<const CBlock> nullBlockPtr;
|
||||
// BlockConnected signals must be sent for the original role;
|
||||
// in case snapshot validation is completed during ActivateBestChainStep, the
|
||||
// result of GetRole() changes from BACKGROUND to NORMAL.
|
||||
const ChainstateRole chainstate_role{this->GetRole()};
|
||||
if (!ActivateBestChainStep(state, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) {
|
||||
// A system error occurred
|
||||
return false;
|
||||
|
@ -3543,7 +3548,7 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr<
|
|||
for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
|
||||
assert(trace.pblock && trace.pindex);
|
||||
if (m_chainman.m_options.signals) {
|
||||
m_chainman.m_options.signals->BlockConnected(this->GetRole(), trace.pblock, trace.pindex);
|
||||
m_chainman.m_options.signals->BlockConnected(chainstate_role, trace.pblock, trace.pindex);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4720,7 +4725,7 @@ bool Chainstate::LoadChainTip()
|
|||
tip->GetBlockHash().ToString(),
|
||||
m_chain.Height(),
|
||||
FormatISO8601DateTime(tip->GetBlockTime()),
|
||||
GuessVerificationProgress(m_chainman.GetParams().TxData(), tip));
|
||||
m_chainman.GuessVerificationProgress(tip));
|
||||
|
||||
// Ensure KernelNotifications m_tip_block is set even if no new block arrives.
|
||||
if (this->GetRole() != ChainstateRole::BACKGROUND) {
|
||||
|
@ -5611,9 +5616,12 @@ bool Chainstate::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
|
|||
|
||||
//! Guess how far we are in the verification process at the given block index
|
||||
//! require cs_main if pindex has not been validated yet (because m_chain_tx_count might be unset)
|
||||
double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex *pindex) {
|
||||
if (pindex == nullptr)
|
||||
double ChainstateManager::GuessVerificationProgress(const CBlockIndex* pindex) const
|
||||
{
|
||||
const ChainTxData& data{GetParams().TxData()};
|
||||
if (pindex == nullptr) {
|
||||
return 0.0;
|
||||
}
|
||||
|
||||
if (!Assume(pindex->m_chain_tx_count > 0)) {
|
||||
LogWarning("Internal bug detected: block %d has unset m_chain_tx_count (%s %s). Please report this issue here: %s\n",
@ -95,9 +95,6 @@ CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams);
|
|||
|
||||
bool FatalError(kernel::Notifications& notifications, BlockValidationState& state, const bilingual_str& message);
|
||||
|
||||
/** Guess verification progress (as a fraction between 0.0=genesis and 1.0=current tip). */
|
||||
double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex* pindex);
|
||||
|
||||
/** Prune block files up to a given height */
|
||||
void PruneBlockFilesManual(Chainstate& active_chainstate, int nManualPruneHeight);
|
||||
|
||||
|
@ -1151,6 +1148,9 @@ public:
|
|||
/** Check whether we are doing an initial block download (synchronizing from disk or network) */
|
||||
bool IsInitialBlockDownload() const;
|
||||
|
||||
/** Guess verification progress (as a fraction between 0.0=genesis and 1.0=current tip). */
|
||||
double GuessVerificationProgress(const CBlockIndex* pindex) const;
|
||||
|
||||
/**
|
||||
* Import blocks from an external file
|
||||
*
@ -478,7 +478,6 @@ util::Result<SelectionResult> CoinGrinder(std::vector<OutputGroup>& utxo_pool, c
|
|||
// Neither adding to the current selection nor exploring the omission branch of the last selected UTXO can
|
||||
// find any solutions. Redirect to exploring the Omission branch of the penultimate selected UTXO (i.e.
|
||||
// set `next_utxo` to one after the penultimate selected, then deselect the last two selected UTXOs)
|
||||
should_cut = false;
|
||||
deselect_last();
|
||||
should_shift = true;
|
||||
}
@ -34,9 +34,9 @@ inline std::ostream& operator<<(std::ostream& os, const std::pair<const Serializ
|
|||
|
||||
namespace wallet {
|
||||
|
||||
static Span<const std::byte> StringBytes(std::string_view str)
|
||||
inline std::span<const std::byte> StringBytes(std::string_view str)
|
||||
{
|
||||
return AsBytes<const char>({str.data(), str.size()});
|
||||
return std::as_bytes(std::span{str});
|
||||
}
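The replacement drops the project-local Span/AsBytes helpers in favour of the standard library: std::span deduces span<const char> from a string_view, and std::as_bytes reinterprets it as a read-only byte span of the same length. A minimal standalone example of the new form, not part of the patch:

#include <cstddef>
#include <span>
#include <string_view>

inline std::span<const std::byte> StringBytes(std::string_view str)
{
    return std::as_bytes(std::span{str}); // span<const char> -> span<const std::byte>
}

int main()
{
    constexpr std::string_view key{"walletdb_key"}; // arbitrary example input
    const auto bytes = StringBytes(key);
    return bytes.size() == key.size() ? 0 : 1;      // same length, now typed as bytes
}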
|
||||
|
||||
static SerializeData StringData(std::string_view str)
@ -148,6 +148,10 @@ class BIP65Test(BitcoinTestFramework):
|
|||
# create and test one invalid tx per CLTV failure reason (5 in total)
|
||||
for i in range(5):
|
||||
spendtx = wallet.create_self_transfer()['tx']
|
||||
assert_equal(len(spendtx.vin), 1)
|
||||
coin = spendtx.vin[0]
|
||||
coin_txid = format(coin.prevout.hash, '064x')
|
||||
coin_vout = coin.prevout.n
|
||||
cltv_invalidate(spendtx, i)
|
||||
|
||||
expected_cltv_reject_reason = [
|
||||
|
@ -159,12 +163,15 @@ class BIP65Test(BitcoinTestFramework):
|
|||
][i]
|
||||
# First we show that this tx is valid except for CLTV by getting it
|
||||
# rejected from the mempool for exactly that reason.
|
||||
spendtx_txid = spendtx.hash
|
||||
spendtx_wtxid = spendtx.getwtxid()
|
||||
assert_equal(
|
||||
[{
|
||||
'txid': spendtx.hash,
|
||||
'wtxid': spendtx.getwtxid(),
|
||||
'txid': spendtx_txid,
|
||||
'wtxid': spendtx_wtxid,
|
||||
'allowed': False,
|
||||
'reject-reason': expected_cltv_reject_reason,
|
||||
'reject-details': expected_cltv_reject_reason + f", input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:{coin_vout}"
|
||||
}],
|
||||
self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0),
|
||||
)
@ -109,18 +109,23 @@ class BIP66Test(BitcoinTestFramework):
|
|||
self.log.info("Test that transactions with non-DER signatures cannot appear in a block")
|
||||
block.nVersion = 4
|
||||
|
||||
spendtx = self.create_tx(self.coinbase_txids[1])
|
||||
coin_txid = self.coinbase_txids[1]
|
||||
spendtx = self.create_tx(coin_txid)
|
||||
unDERify(spendtx)
|
||||
spendtx.rehash()
|
||||
|
||||
# First we show that this tx is valid except for DERSIG by getting it
|
||||
# rejected from the mempool for exactly that reason.
|
||||
spendtx_txid = spendtx.hash
|
||||
spendtx_wtxid = spendtx.getwtxid()
|
||||
assert_equal(
|
||||
[{
|
||||
'txid': spendtx.hash,
|
||||
'wtxid': spendtx.getwtxid(),
|
||||
'txid': spendtx_txid,
|
||||
'wtxid': spendtx_wtxid,
|
||||
'allowed': False,
|
||||
'reject-reason': 'mandatory-script-verify-flag-failed (Non-canonical DER signature)',
|
||||
'reject-details': 'mandatory-script-verify-flag-failed (Non-canonical DER signature), ' +
|
||||
f"input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:0"
|
||||
}],
|
||||
self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0),
|
||||
)
@ -1,8 +1,9 @@
|
|||
#!/usr/bin/env python3
|
||||
# Copyright (c) 2020-2022 The Bitcoin Core developers
|
||||
# Copyright (c) 2020-present The Bitcoin Core developers
|
||||
# Distributed under the MIT software license, see the accompanying
|
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
"""Test indices in conjunction with prune."""
|
||||
import concurrent.futures
|
||||
import os
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.util import (
|
||||
|
@ -19,9 +20,25 @@ class FeatureIndexPruneTest(BitcoinTestFramework):
|
|||
["-fastprune", "-prune=1", "-blockfilterindex=1"],
|
||||
["-fastprune", "-prune=1", "-coinstatsindex=1"],
|
||||
["-fastprune", "-prune=1", "-blockfilterindex=1", "-coinstatsindex=1"],
|
||||
[]
|
||||
[],
|
||||
]
|
||||
|
||||
def setup_network(self):
|
||||
self.setup_nodes() # No P2P connection, so that linear_sync works
|
||||
|
||||
def linear_sync(self, node_from, *, height_from=None):
|
||||
# Linear sync over RPC, because P2P sync may not be linear
|
||||
to_height = node_from.getblockcount()
|
||||
if height_from is None:
|
||||
height_from = min([n.getblockcount() for n in self.nodes]) + 1
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=self.num_nodes) as rpc_threads:
|
||||
for i in range(height_from, to_height + 1):
|
||||
b = node_from.getblock(blockhash=node_from.getblockhash(i), verbosity=0)
|
||||
list(rpc_threads.map(lambda n: n.submitblock(b), self.nodes))
|
||||
|
||||
def generate(self, node, num_blocks, sync_fun=None):
|
||||
return super().generate(node, num_blocks, sync_fun=sync_fun or (lambda: self.linear_sync(node)))
|
||||
|
||||
def sync_index(self, height):
|
||||
expected_filter = {
|
||||
'basic block filter index': {'synced': True, 'best_block_height': height},
|
||||
|
@ -36,22 +53,9 @@ class FeatureIndexPruneTest(BitcoinTestFramework):
|
|||
expected = {**expected_filter, **expected_stats}
|
||||
self.wait_until(lambda: self.nodes[2].getindexinfo() == expected)
|
||||
|
||||
def reconnect_nodes(self):
|
||||
self.connect_nodes(0,1)
|
||||
self.connect_nodes(0,2)
|
||||
self.connect_nodes(0,3)
|
||||
|
||||
def mine_batches(self, blocks):
|
||||
n = blocks // 250
|
||||
for _ in range(n):
|
||||
self.generate(self.nodes[0], 250)
|
||||
self.generate(self.nodes[0], blocks % 250)
|
||||
self.sync_blocks()
|
||||
|
||||
def restart_without_indices(self):
|
||||
for i in range(3):
|
||||
self.restart_node(i, extra_args=["-fastprune", "-prune=1"])
|
||||
self.reconnect_nodes()
|
||||
|
||||
def run_test(self):
|
||||
filter_nodes = [self.nodes[0], self.nodes[2]]
|
||||
|
@ -65,7 +69,7 @@ class FeatureIndexPruneTest(BitcoinTestFramework):
|
|||
for node in stats_nodes:
|
||||
assert node.gettxoutsetinfo(hash_type="muhash", hash_or_height=tip)['muhash']
|
||||
|
||||
self.mine_batches(500)
|
||||
self.generate(self.nodes[0], 500)
|
||||
self.sync_index(height=700)
|
||||
|
||||
self.log.info("prune some blocks")
|
||||
|
@ -104,7 +108,7 @@ class FeatureIndexPruneTest(BitcoinTestFramework):
|
|||
msg = "Querying specific block heights requires coinstatsindex"
|
||||
assert_raises_rpc_error(-8, msg, node.gettxoutsetinfo, "muhash", height_hash)
|
||||
|
||||
self.mine_batches(749)
|
||||
self.generate(self.nodes[0], 749)
|
||||
|
||||
self.log.info("prune exactly up to the indices best blocks while the indices are disabled")
|
||||
for i in range(3):
|
||||
|
@ -118,7 +122,7 @@ class FeatureIndexPruneTest(BitcoinTestFramework):
|
|||
|
||||
self.log.info("prune further than the indices best blocks while the indices are disabled")
|
||||
self.restart_without_indices()
|
||||
self.mine_batches(1000)
|
||||
self.generate(self.nodes[0], 1000)
|
||||
|
||||
for i in range(3):
|
||||
pruneheight_3 = self.nodes[i].pruneblockchain(2000)
|
||||
|
@ -134,12 +138,10 @@ class FeatureIndexPruneTest(BitcoinTestFramework):
|
|||
|
||||
self.log.info("make sure the nodes start again with the indices and an additional -reindex arg")
|
||||
for i in range(3):
|
||||
restart_args = self.extra_args[i]+["-reindex"]
|
||||
restart_args = self.extra_args[i] + ["-reindex"]
|
||||
self.restart_node(i, extra_args=restart_args)
|
||||
# The nodes need to be reconnected to the non-pruning node upon restart, otherwise they will be stuck
|
||||
self.connect_nodes(i, 3)
|
||||
|
||||
self.sync_blocks(timeout=300)
|
||||
self.linear_sync(self.nodes[3])
|
||||
self.sync_index(height=2500)
|
||||
|
||||
for node in self.nodes[:2]:
|
||||
|
@ -150,8 +152,7 @@ class FeatureIndexPruneTest(BitcoinTestFramework):
|
|||
self.log.info("ensure that prune locks don't prevent indices from failing in a reorg scenario")
|
||||
with self.nodes[0].assert_debug_log(['basic block filter index prune lock moved back to 2480']):
|
||||
self.nodes[3].invalidateblock(self.nodes[0].getblockhash(2480))
|
||||
self.generate(self.nodes[3], 30)
|
||||
self.sync_blocks()
|
||||
self.generate(self.nodes[3], 30, sync_fun=lambda: self.linear_sync(self.nodes[3], height_from=2480))
|
||||
|
||||
|
||||
if __name__ == '__main__':
@ -122,7 +122,7 @@ class MaxUploadTest(BitcoinTestFramework):
|
|||
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
|
||||
# At most a couple more tries should succeed (depending on how long
|
||||
# the test has been running so far).
|
||||
with self.nodes[0].assert_debug_log(expected_msgs=["historical block serving limit reached, disconnect peer"]):
|
||||
with self.nodes[0].assert_debug_log(expected_msgs=["historical block serving limit reached, disconnecting peer=0"]):
|
||||
for _ in range(3):
|
||||
p2p_conns[0].send_message(getdata_request)
|
||||
p2p_conns[0].wait_for_disconnect()
|
||||
|
@ -147,7 +147,7 @@ class MaxUploadTest(BitcoinTestFramework):
|
|||
|
||||
# But if p2p_conns[1] tries for an old block, it gets disconnected too.
|
||||
getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
|
||||
with self.nodes[0].assert_debug_log(expected_msgs=["historical block serving limit reached, disconnect peer"]):
|
||||
with self.nodes[0].assert_debug_log(expected_msgs=["historical block serving limit reached, disconnecting peer=1"]):
|
||||
p2p_conns[1].send_message(getdata_request)
|
||||
p2p_conns[1].wait_for_disconnect()
|
||||
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
|
||||
|
@ -197,7 +197,7 @@ class MaxUploadTest(BitcoinTestFramework):
|
|||
assert_equal(peer_info[0]['permissions'], ['download'])
|
||||
|
||||
self.log.info("Peer gets disconnected for a mempool request after limit is reached")
|
||||
with self.nodes[0].assert_debug_log(expected_msgs=["mempool request with bandwidth limit reached, disconnect peer"]):
|
||||
with self.nodes[0].assert_debug_log(expected_msgs=["mempool request with bandwidth limit reached, disconnecting peer=0"]):
|
||||
peer.send_message(msg_mempool())
|
||||
peer.wait_for_disconnect()
|
@ -103,14 +103,22 @@ class ReplaceByFeeTest(BitcoinTestFramework):
|
|||
"""Simple doublespend"""
|
||||
# we use MiniWallet to create a transaction template with inputs correctly set,
|
||||
# and modify the output (amount, scriptPubKey) according to our needs
|
||||
tx = self.wallet.create_self_transfer()["tx"]
|
||||
tx = self.wallet.create_self_transfer(fee_rate=Decimal("0.003"))["tx"]
|
||||
tx1a_txid = self.nodes[0].sendrawtransaction(tx.serialize().hex())
|
||||
|
||||
# Should fail because we haven't changed the fee
|
||||
tx.vout[0].scriptPubKey[-1] ^= 1
|
||||
tx.rehash()
|
||||
tx_hex = tx.serialize().hex()
|
||||
|
||||
# This will raise an exception due to insufficient fee
|
||||
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx.serialize().hex(), 0)
|
||||
reject_reason = "insufficient fee"
|
||||
reject_details = f"{reject_reason}, rejecting replacement {tx.hash}; new feerate 0.00300000 BTC/kvB <= old feerate 0.00300000 BTC/kvB"
|
||||
res = self.nodes[0].testmempoolaccept(rawtxs=[tx_hex])[0]
|
||||
assert_equal(res["reject-reason"], reject_reason)
|
||||
assert_equal(res["reject-details"], reject_details)
|
||||
assert_raises_rpc_error(-26, f"{reject_details}", self.nodes[0].sendrawtransaction, tx_hex, 0)
|
||||
|
||||
|
||||
# Extra 0.1 BTC fee
|
||||
tx.vout[0].nValue -= int(0.1 * COIN)
|
||||
|
@ -154,7 +162,14 @@ class ReplaceByFeeTest(BitcoinTestFramework):
|
|||
dbl_tx_hex = dbl_tx.serialize().hex()
|
||||
|
||||
# This will raise an exception due to insufficient fee
|
||||
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0)
|
||||
reject_reason = "insufficient fee"
|
||||
reject_details = f"{reject_reason}, rejecting replacement {dbl_tx.hash}, less fees than conflicting txs; 3.00 < 4.00"
|
||||
res = self.nodes[0].testmempoolaccept(rawtxs=[dbl_tx_hex])[0]
|
||||
assert_equal(res["reject-reason"], reject_reason)
|
||||
assert_equal(res["reject-details"], reject_details)
|
||||
assert_raises_rpc_error(-26, f"{reject_details}", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0)
|
||||
|
||||
|
||||
|
||||
# Accepted with sufficient fee
|
||||
dbl_tx.vout[0].nValue = int(0.1 * COIN)
|
||||
|
@ -273,22 +288,30 @@ class ReplaceByFeeTest(BitcoinTestFramework):
|
|||
utxo1 = self.make_utxo(self.nodes[0], int(1.2 * COIN))
|
||||
utxo2 = self.make_utxo(self.nodes[0], 3 * COIN)
|
||||
|
||||
tx1a_utxo = self.wallet.send_self_transfer(
|
||||
tx1a = self.wallet.send_self_transfer(
|
||||
from_node=self.nodes[0],
|
||||
utxo_to_spend=utxo1,
|
||||
sequence=0,
|
||||
fee=Decimal("0.1"),
|
||||
)["new_utxo"]
|
||||
)
|
||||
tx1a_utxo = tx1a["new_utxo"]
|
||||
|
||||
# Direct spend an output of the transaction we're replacing.
|
||||
tx2_hex = self.wallet.create_self_transfer_multi(
|
||||
tx2 = self.wallet.create_self_transfer_multi(
|
||||
utxos_to_spend=[utxo1, utxo2, tx1a_utxo],
|
||||
sequence=0,
|
||||
amount_per_output=int(COIN * tx1a_utxo["value"]),
|
||||
)["hex"]
|
||||
)["tx"]
|
||||
tx2_hex = tx2.serialize().hex()
|
||||
|
||||
# This will raise an exception
|
||||
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, 0)
|
||||
reject_reason = "bad-txns-spends-conflicting-tx"
|
||||
reject_details = f"{reject_reason}, {tx2.hash} spends conflicting transaction {tx1a['tx'].hash}"
|
||||
res = self.nodes[0].testmempoolaccept(rawtxs=[tx2_hex])[0]
|
||||
assert_equal(res["reject-reason"], reject_reason)
|
||||
assert_equal(res["reject-details"], reject_details)
|
||||
assert_raises_rpc_error(-26, f"{reject_details}", self.nodes[0].sendrawtransaction, tx2_hex, 0)
|
||||
|
||||
|
||||
# Spend tx1a's output to test the indirect case.
|
||||
tx1b_utxo = self.wallet.send_self_transfer(
|
||||
|
@ -319,14 +342,21 @@ class ReplaceByFeeTest(BitcoinTestFramework):
|
|||
fee=Decimal("0.1"),
|
||||
)
|
||||
|
||||
tx2_hex = self.wallet.create_self_transfer_multi(
|
||||
tx2 = self.wallet.create_self_transfer_multi(
|
||||
utxos_to_spend=[confirmed_utxo, unconfirmed_utxo],
|
||||
sequence=0,
|
||||
amount_per_output=1 * COIN,
|
||||
)["hex"]
|
||||
)["tx"]
|
||||
tx2_hex = tx2.serialize().hex()
|
||||
|
||||
# This will raise an exception
|
||||
assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, 0)
|
||||
reject_reason = "replacement-adds-unconfirmed"
|
||||
reject_details = f"{reject_reason}, replacement {tx2.hash} adds unconfirmed input, idx 1"
|
||||
res = self.nodes[0].testmempoolaccept(rawtxs=[tx2_hex])[0]
|
||||
assert_equal(res["reject-reason"], reject_reason)
|
||||
assert_equal(res["reject-details"], reject_details)
|
||||
assert_raises_rpc_error(-26, f"{reject_details}", self.nodes[0].sendrawtransaction, tx2_hex, 0)
|
||||
|
||||
|
||||
def test_too_many_replacements(self):
|
||||
"""Replacements that evict too many transactions are rejected"""
|
||||
|
@ -368,7 +398,13 @@ class ReplaceByFeeTest(BitcoinTestFramework):
|
|||
double_tx_hex = double_tx.serialize().hex()
|
||||
|
||||
# This will raise an exception
|
||||
assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, 0)
|
||||
reject_reason = "too many potential replacements"
|
||||
reject_details = f"{reject_reason}, rejecting replacement {double_tx.hash}; too many potential replacements ({MAX_REPLACEMENT_LIMIT + 1} > {MAX_REPLACEMENT_LIMIT})"
|
||||
res = self.nodes[0].testmempoolaccept(rawtxs=[double_tx_hex])[0]
|
||||
assert_equal(res["reject-reason"], reject_reason)
|
||||
assert_equal(res["reject-details"], reject_details)
|
||||
assert_raises_rpc_error(-26, f"{reject_details}", self.nodes[0].sendrawtransaction, double_tx_hex, 0)
|
||||
|
||||
|
||||
# If we remove an input, it should pass
|
||||
double_tx.vin.pop()
@ -9,6 +9,8 @@ from decimal import Decimal
|
|||
from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.util import assert_equal
|
||||
|
||||
SIGNET_DEFAULT_CHALLENGE = '512103ad5e0edad18cb1f0fc0d28a3d4f1f3e445640337489abb10404f2d1e086be430210359ef5021964fe22d6f8e05b2463c9540ce96883fe3b278760f048f5189f2e6c452ae'
|
||||
|
||||
signet_blocks = [
|
||||
'00000020f61eee3b63a380a477a063af32b2bbc97c9ff9f01f2c4225e973988108000000f575c83235984e7dc4afc1f30944c170462e84437ab6f2d52e16878a79e4678bd1914d5fae77031eccf4070001010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff025151feffffff0200f2052a010000001600149243f727dd5343293eb83174324019ec16c2630f0000000000000000776a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf94c4fecc7daa2490047304402205e423a8754336ca99dbe16509b877ef1bf98d008836c725005b3c787c41ebe46022047246e4467ad7cc7f1ad98662afcaf14c115e0095a227c7b05c5182591c23e7e01000120000000000000000000000000000000000000000000000000000000000000000000000000',
|
||||
'00000020533b53ded9bff4adc94101d32400a144c54edc5ed492a3b26c63b2d686000000b38fef50592017cfafbcab88eb3d9cf50b2c801711cad8299495d26df5e54812e7914d5fae77031ecfdd0b0001010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff025251feffffff0200f2052a01000000160014fd09839740f0e0b4fc6d5e2527e4022aa9b89dfa0000000000000000776a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf94c4fecc7daa24900473044022031d64a1692cdad1fc0ced69838169fe19ae01be524d831b95fcf5ea4e6541c3c02204f9dea0801df8b4d0cd0857c62ab35c6c25cc47c930630dc7fe723531daa3e9b01000120000000000000000000000000000000000000000000000000000000000000000000000000',
|
||||
|
@ -22,21 +24,31 @@ signet_blocks = [
|
|||
'00000020a868e8514be5e46dabd6a122132f423f36a43b716a40c394e2a8d063e1010000f4c6c717e99d800c699c25a2006a75a0c5c09f432a936f385e6fce139cdbd1a5e9964d5fae77031e7d026e0001010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff025a51feffffff0200f2052a01000000160014aaa671c82b138e3b8f510cd801e5f2bd0aa305940000000000000000776a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf94c4fecc7daa24900473044022042309f4c3c7a1a2ac8c24f890f962df1c0086cec10be0868087cfc427520cb2702201dafee8911c269b7e786e242045bb57cef3f5b0f177010c6159abae42f646cc501000120000000000000000000000000000000000000000000000000000000000000000000000000',
|
||||
]
|
||||
|
||||
class SignetParams:
|
||||
def __init__(self, challenge=None):
|
||||
if challenge is None:
|
||||
self.challenge = SIGNET_DEFAULT_CHALLENGE
|
||||
self.shared_args = []
|
||||
else:
|
||||
self.challenge = challenge
|
||||
self.shared_args = [f"-signetchallenge={challenge}"]
|
||||
|
||||
class SignetBasicTest(BitcoinTestFramework):
|
||||
def set_test_params(self):
|
||||
self.chain = "signet"
|
||||
self.num_nodes = 6
|
||||
self.setup_clean_chain = True
|
||||
shared_args1 = ["-signetchallenge=51"] # OP_TRUE
|
||||
shared_args2 = [] # default challenge
|
||||
# we use the exact same challenge except we do it as a 2-of-2, which means it should fail
|
||||
shared_args3 = ["-signetchallenge=522103ad5e0edad18cb1f0fc0d28a3d4f1f3e445640337489abb10404f2d1e086be430210359ef5021964fe22d6f8e05b2463c9540ce96883fe3b278760f048f5189f2e6c452ae"]
|
||||
self.signets = [
|
||||
SignetParams(challenge='51'), # OP_TRUE
|
||||
SignetParams(), # default challenge
|
||||
# default challenge as a 2-of-2, which means it should fail
|
||||
SignetParams(challenge='522103ad5e0edad18cb1f0fc0d28a3d4f1f3e445640337489abb10404f2d1e086be430210359ef5021964fe22d6f8e05b2463c9540ce96883fe3b278760f048f5189f2e6c452ae')
|
||||
]
|
||||
|
||||
self.extra_args = [
|
||||
shared_args1, shared_args1,
|
||||
shared_args2, shared_args2,
|
||||
shared_args3, shared_args3,
|
||||
self.signets[0].shared_args, self.signets[0].shared_args,
|
||||
self.signets[1].shared_args, self.signets[1].shared_args,
|
||||
self.signets[2].shared_args, self.signets[2].shared_args,
|
||||
]
|
||||
|
||||
def setup_network(self):
|
||||
|
@ -50,14 +62,28 @@ class SignetBasicTest(BitcoinTestFramework):
|
|||
def run_test(self):
|
||||
self.log.info("basic tests using OP_TRUE challenge")
|
||||
|
||||
self.log.info('getblockchaininfo')
|
||||
def check_getblockchaininfo(node_idx, signet_idx):
|
||||
blockchain_info = self.nodes[node_idx].getblockchaininfo()
|
||||
assert_equal(blockchain_info['chain'], 'signet')
|
||||
assert_equal(blockchain_info['signet_challenge'], self.signets[signet_idx].challenge)
|
||||
check_getblockchaininfo(node_idx=1, signet_idx=0)
|
||||
check_getblockchaininfo(node_idx=2, signet_idx=1)
|
||||
check_getblockchaininfo(node_idx=5, signet_idx=2)
|
||||
|
||||
self.log.info('getmininginfo')
|
||||
mining_info = self.nodes[0].getmininginfo()
|
||||
assert_equal(mining_info['blocks'], 0)
|
||||
assert_equal(mining_info['chain'], 'signet')
|
||||
assert 'currentblocktx' not in mining_info
|
||||
assert 'currentblockweight' not in mining_info
|
||||
assert_equal(mining_info['networkhashps'], Decimal('0'))
|
||||
assert_equal(mining_info['pooledtx'], 0)
|
||||
def check_getmininginfo(node_idx, signet_idx):
|
||||
mining_info = self.nodes[node_idx].getmininginfo()
|
||||
assert_equal(mining_info['blocks'], 0)
|
||||
assert_equal(mining_info['chain'], 'signet')
|
||||
assert 'currentblocktx' not in mining_info
|
||||
assert 'currentblockweight' not in mining_info
|
||||
assert_equal(mining_info['networkhashps'], Decimal('0'))
|
||||
assert_equal(mining_info['pooledtx'], 0)
|
||||
assert_equal(mining_info['signet_challenge'], self.signets[signet_idx].challenge)
|
||||
check_getmininginfo(node_idx=0, signet_idx=0)
|
||||
check_getmininginfo(node_idx=3, signet_idx=1)
|
||||
check_getmininginfo(node_idx=4, signet_idx=2)
|
||||
|
||||
self.generate(self.nodes[0], 1, sync_fun=self.no_op)
@ -67,6 +67,8 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
|
|||
if "fees" in r:
|
||||
r["fees"].pop("effective-feerate")
|
||||
r["fees"].pop("effective-includes")
|
||||
if "reject-details" in r:
|
||||
r.pop("reject-details")
|
||||
assert_equal(result_expected, result_test)
|
||||
assert_equal(self.nodes[0].getmempoolinfo()['size'], self.mempool_size) # Must not change mempool state
|
||||
|
||||
|
|
|
@ -100,13 +100,15 @@ class MempoolWtxidTest(BitcoinTestFramework):
|
|||
"txid": child_one_txid,
|
||||
"wtxid": child_one_wtxid,
|
||||
"allowed": False,
|
||||
"reject-reason": "txn-already-in-mempool"
|
||||
"reject-reason": "txn-already-in-mempool",
|
||||
"reject-details": "txn-already-in-mempool"
|
||||
}])
|
||||
assert_equal(node.testmempoolaccept([child_two.serialize().hex()])[0], {
|
||||
"txid": child_two_txid,
|
||||
"wtxid": child_two_wtxid,
|
||||
"allowed": False,
|
||||
"reject-reason": "txn-same-nonwitness-data-in-mempool"
|
||||
"reject-reason": "txn-same-nonwitness-data-in-mempool",
|
||||
"reject-details": "txn-same-nonwitness-data-in-mempool"
|
||||
})
|
||||
|
||||
# sendrawtransaction will not throw but quits early when the exact same transaction is already in mempool
@ -126,7 +126,7 @@ class EphemeralDustTest(BitcoinTestFramework):
|
|||
assert_equal(len(self.nodes[0].getrawmempool()), 2)
|
||||
assert_mempool_contents(self, self.nodes[0], expected=[dusty_tx["tx"], sweep_tx["tx"]])
|
||||
|
||||
# Node restart; doesn't allow allow ephemeral transaction back in due to individual submission
|
||||
# Node restart; doesn't allow ephemeral transaction back in due to individual submission
|
||||
# resulting in 0-fee. Supporting re-submission of CPFP packages on restart is desired but not
|
||||
# yet implemented.
|
||||
self.restart_node(0)
|
||||
|
@ -325,7 +325,7 @@ class EphemeralDustTest(BitcoinTestFramework):
|
|||
dusty_tx, _ = self.create_ephemeral_dust_package(tx_version=3)
|
||||
assert_raises_rpc_error(-26, "min relay fee not met", self.nodes[0].sendrawtransaction, dusty_tx["hex"])
|
||||
|
||||
block_res = self.nodes[0].rpc.generateblock(self.wallet.get_address(), [dusty_tx["hex"]])
|
||||
block_res = self.generateblock(self.nodes[0], self.wallet.get_address(), [dusty_tx["hex"]], sync_fun=self.no_op)
|
||||
self.nodes[0].invalidateblock(block_res["hash"])
|
||||
assert_mempool_contents(self, self.nodes[0], expected=[dusty_tx["tx"]], sync=False)
|
||||
|
||||
|
@ -335,7 +335,7 @@ class EphemeralDustTest(BitcoinTestFramework):
|
|||
assert_raises_rpc_error(-26, "min relay fee not met", self.nodes[0].sendrawtransaction, sweep_tx["hex"])
|
||||
|
||||
# Mine the sweep then re-org, the sweep will not make it back in due to spend checks
|
||||
block_res = self.nodes[0].rpc.generateblock(self.wallet.get_address(), [dusty_tx["hex"], sweep_tx["hex"]])
|
||||
block_res = self.generateblock(self.nodes[0], self.wallet.get_address(), [dusty_tx["hex"], sweep_tx["hex"]], sync_fun=self.no_op)
|
||||
self.nodes[0].invalidateblock(block_res["hash"])
|
||||
assert_mempool_contents(self, self.nodes[0], expected=[dusty_tx["tx"]], sync=False)
|
||||
|
||||
|
@ -344,7 +344,7 @@ class EphemeralDustTest(BitcoinTestFramework):
|
|||
self.add_output_to_create_multi_result(sweep_tx_2)
|
||||
assert_raises_rpc_error(-26, "min relay fee not met", self.nodes[0].sendrawtransaction, sweep_tx_2["hex"])
|
||||
|
||||
reconsider_block_res = self.nodes[0].rpc.generateblock(self.wallet.get_address(), [dusty_tx["hex"], sweep_tx_2["hex"]])
|
||||
reconsider_block_res = self.generateblock(self.nodes[0], self.wallet.get_address(), [dusty_tx["hex"], sweep_tx_2["hex"]], sync_fun=self.no_op)
|
||||
self.nodes[0].invalidateblock(reconsider_block_res["hash"])
|
||||
assert_mempool_contents(self, self.nodes[0], expected=[dusty_tx["tx"], sweep_tx_2["tx"]], sync=False)
|
||||
|
||||
|
@ -357,13 +357,13 @@ class EphemeralDustTest(BitcoinTestFramework):
|
|||
|
||||
self.log.info("Test that ephemeral dust tx with fees or multi dust don't enter mempool via reorg")
|
||||
multi_dusty_tx, _ = self.create_ephemeral_dust_package(tx_version=3, num_dust_outputs=2)
|
||||
block_res = self.nodes[0].rpc.generateblock(self.wallet.get_address(), [multi_dusty_tx["hex"]])
|
||||
block_res = self.generateblock(self.nodes[0], self.wallet.get_address(), [multi_dusty_tx["hex"]], sync_fun=self.no_op)
|
||||
self.nodes[0].invalidateblock(block_res["hash"])
|
||||
assert_equal(self.nodes[0].getrawmempool(), [])
|
||||
|
||||
# With fee and one dust
|
||||
dusty_fee_tx, _ = self.create_ephemeral_dust_package(tx_version=3, dust_tx_fee=1)
|
||||
block_res = self.nodes[0].rpc.generateblock(self.wallet.get_address(), [dusty_fee_tx["hex"]])
|
||||
block_res = self.generateblock(self.nodes[0], self.wallet.get_address(), [dusty_fee_tx["hex"]], sync_fun=self.no_op)
|
||||
self.nodes[0].invalidateblock(block_res["hash"])
|
||||
assert_equal(self.nodes[0].getrawmempool(), [])
@ -219,7 +219,7 @@ class PackageRBFTest(BitcoinTestFramework):
|
|||
package_child = self.wallet.create_self_transfer(fee_rate=child_feerate, utxo_to_spend=package_parent["new_utxos"][0])
|
||||
|
||||
pkg_results = node.submitpackage([package_parent["hex"], package_child["hex"]], maxfeerate=0)
|
||||
assert_equal(f"package RBF failed: too many potential replacements, rejecting replacement {package_child['tx'].rehash()}; too many potential replacements (102 > 100)\n", pkg_results["package_msg"])
|
||||
assert_equal(f"package RBF failed: too many potential replacements, rejecting replacement {package_child['tx'].rehash()}; too many potential replacements (102 > 100)", pkg_results["package_msg"])
|
||||
self.assert_mempool_contents(expected=expected_txns)
|
||||
|
||||
# Make singleton tx to conflict with in next batch
|
||||
|
@ -234,7 +234,7 @@ class PackageRBFTest(BitcoinTestFramework):
|
|||
package_parent = self.wallet.create_self_transfer_multi(utxos_to_spend=double_spending_coins, fee_per_output=parent_fee_per_conflict)
|
||||
package_child = self.wallet.create_self_transfer(fee_rate=child_feerate, utxo_to_spend=package_parent["new_utxos"][0])
|
||||
pkg_results = node.submitpackage([package_parent["hex"], package_child["hex"]], maxfeerate=0)
|
||||
assert_equal(f"package RBF failed: too many potential replacements, rejecting replacement {package_child['tx'].rehash()}; too many potential replacements (101 > 100)\n", pkg_results["package_msg"])
|
||||
assert_equal(f"package RBF failed: too many potential replacements, rejecting replacement {package_child['tx'].rehash()}; too many potential replacements (101 > 100)", pkg_results["package_msg"])
|
||||
self.assert_mempool_contents(expected=expected_txns)
|
||||
|
||||
# Finally, evict MAX_REPLACEMENT_CANDIDATES
@ -78,7 +78,7 @@ class AddrTest(BitcoinTestFramework):
|
|||
def run_test(self):
|
||||
self.log.info('Check disconnection when sending sendaddrv2 after verack')
|
||||
conn = self.nodes[0].add_p2p_connection(P2PInterface())
|
||||
with self.nodes[0].assert_debug_log(['sendaddrv2 received after verack from peer=0; disconnecting']):
|
||||
with self.nodes[0].assert_debug_log(['sendaddrv2 received after verack, disconnecting peer=0']):
|
||||
conn.send_message(msg_sendaddrv2())
|
||||
conn.wait_for_disconnect()
@ -172,7 +172,7 @@ class P2PLeakTest(BitcoinTestFramework):
|
|||
|
||||
self.log.info('Check that old peers are disconnected')
|
||||
p2p_old_peer = self.nodes[0].add_p2p_connection(P2PInterface(), send_version=False, wait_for_verack=False)
|
||||
with self.nodes[0].assert_debug_log(["using obsolete version 31799; disconnecting"]):
|
||||
with self.nodes[0].assert_debug_log(["using obsolete version 31799, disconnecting peer=5"]):
|
||||
p2p_old_peer.send_message(self.create_old_version(31799))
|
||||
p2p_old_peer.wait_for_disconnect()
@ -168,7 +168,7 @@ class SendTxRcnclTest(BitcoinTestFramework):
|
|||
with self.nodes[0].assert_debug_log(["received: sendtxrcncl"]):
|
||||
peer.send_message(create_sendtxrcncl_msg())
|
||||
self.log.info('second SENDTXRCNCL triggers a disconnect')
|
||||
with self.nodes[0].assert_debug_log(["(sendtxrcncl received from already registered peer); disconnecting"]):
|
||||
with self.nodes[0].assert_debug_log(["(sendtxrcncl received from already registered peer), disconnecting peer=0"]):
|
||||
peer.send_message(create_sendtxrcncl_msg())
|
||||
peer.wait_for_disconnect()
|
||||
|
||||
|
@ -226,7 +226,7 @@ class SendTxRcnclTest(BitcoinTestFramework):
|
|||
self.log.info('SENDTXRCNCL if block-relay-only triggers a disconnect')
|
||||
peer = self.nodes[0].add_outbound_p2p_connection(
|
||||
PeerNoVerack(), wait_for_verack=False, p2p_idx=0, connection_type="block-relay-only")
|
||||
with self.nodes[0].assert_debug_log(["we indicated no tx relay; disconnecting"]):
|
||||
with self.nodes[0].assert_debug_log(["we indicated no tx relay, disconnecting peer=5"]):
|
||||
peer.send_message(create_sendtxrcncl_msg())
|
||||
peer.wait_for_disconnect()
|
@ -84,15 +84,15 @@ class TimeoutsTest(BitcoinTestFramework):
|
|||
|
||||
if self.options.v2transport:
|
||||
expected_timeout_logs = [
|
||||
"version handshake timeout peer=0",
|
||||
"version handshake timeout peer=1",
|
||||
"version handshake timeout peer=2",
|
||||
"version handshake timeout, disconnecting peer=0",
|
||||
"version handshake timeout, disconnecting peer=1",
|
||||
"version handshake timeout, disconnecting peer=2",
|
||||
]
|
||||
else:
|
||||
expected_timeout_logs = [
|
||||
"version handshake timeout peer=0",
|
||||
"socket no message in first 3 seconds, 1 0 peer=1",
|
||||
"socket no message in first 3 seconds, 0 0 peer=2",
|
||||
"version handshake timeout, disconnecting peer=0",
|
||||
"socket no message in first 3 seconds, never sent to peer, disconnecting peer=1",
|
||||
"socket no message in first 3 seconds, never received from peer, never sent to peer, disconnecting peer=2",
|
||||
]
|
||||
|
||||
with self.nodes[0].assert_debug_log(expected_msgs=expected_timeout_logs):
@ -151,7 +151,7 @@ class EncryptedP2PMisbehaving(BitcoinTestFramework):
|
|||
# Ensure that the bytes sent after 4 bytes network magic are actually received.
|
||||
self.wait_until(lambda: node0.getpeerinfo()[-1]["bytesrecv"] > 4)
|
||||
self.wait_until(lambda: node0.getpeerinfo()[-1]["bytessent"] > 0)
|
||||
with node0.assert_debug_log(['V2 handshake timeout peer=0']):
|
||||
with node0.assert_debug_log(['V2 handshake timeout, disconnecting peer=0']):
|
||||
node0.bumpmocktime(4) # `InactivityCheck()` triggers now
|
||||
peer1.wait_for_disconnect(timeout=1)
|
||||
self.log.info('successful disconnection since modified ellswift was sent as response')
|
||||
|
@ -162,7 +162,7 @@ class EncryptedP2PMisbehaving(BitcoinTestFramework):
|
|||
expected_debug_message = [
|
||||
[], # EARLY_KEY_RESPONSE
|
||||
["V2 transport error: missing garbage terminator, peer=1"], # EXCESS_GARBAGE
|
||||
["V2 handshake timeout peer=3"], # WRONG_GARBAGE_TERMINATOR
|
||||
["V2 handshake timeout, disconnecting peer=3"], # WRONG_GARBAGE_TERMINATOR
|
||||
["V2 transport error: packet decryption failure"], # WRONG_GARBAGE
|
||||
["V2 transport error: packet decryption failure"], # SEND_NO_AAD
|
||||
[], # SEND_NON_EMPTY_VERSION_PACKET
|
@@ -1,9 +1,11 @@
#!/usr/bin/env python3
# Copyright (c) 2020-2022 The Bitcoin Core developers
# Copyright (c) 2020-present The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test generate* RPCs."""

from concurrent.futures import ThreadPoolExecutor

from test_framework.test_framework import BitcoinTestFramework
from test_framework.wallet import MiniWallet
from test_framework.util import (

@@ -83,11 +85,18 @@ class RPCGenerateTest(BitcoinTestFramework):
txid = block['tx'][1]
assert_equal(node.getrawtransaction(txid=txid, verbose=False, blockhash=hash), rawtx)

# Ensure that generateblock can be called concurrently by many threads.
self.log.info('Generate blocks in parallel')
generate_50_blocks = lambda n: [n.generateblock(output=address, transactions=[]) for _ in range(50)]
rpcs = [node.cli for _ in range(6)]
with ThreadPoolExecutor(max_workers=len(rpcs)) as threads:
list(threads.map(generate_50_blocks, rpcs))

self.log.info('Fail to generate block with out of order txs')
txid1 = miniwallet.send_self_transfer(from_node=node)['txid']
utxo1 = miniwallet.get_utxo(txid=txid1)
rawtx2 = miniwallet.create_self_transfer(utxo_to_spend=utxo1)['hex']
assert_raises_rpc_error(-25, 'testBlockValidity failed: bad-txns-inputs-missingorspent', self.generateblock, node, address, [rawtx2, txid1])
assert_raises_rpc_error(-25, 'TestBlockValidity failed: bad-txns-inputs-missingorspent', self.generateblock, node, address, [rawtx2, txid1])

self.log.info('Fail to generate block with txid not in mempool')
missing_txid = '0000000000000000000000000000000000000000000000000000000000000000'
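As an aside, the parallel-generation check above simply issues the same RPC from several worker threads at once. If only the concurrency pattern is of interest, the following is a minimal, framework-independent sketch of the same ThreadPoolExecutor shape, with dummy work standing in for real generateblock calls (all names below are illustrative):

    from concurrent.futures import ThreadPoolExecutor

    def do_batch(worker_id: int, n_calls: int = 50) -> list:
        # Stand-in for one client handle issuing a batch of RPC calls.
        return [f"worker-{worker_id}-call-{i}" for i in range(n_calls)]

    with ThreadPoolExecutor(max_workers=6) as pool:
        results = list(pool.map(do_batch, range(6)))
    assert len(results) == 6 and all(len(batch) == 50 for batch in results)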
@@ -20,7 +20,7 @@ class GetBlocksActivityTest(BitcoinTestFramework):
node = self.nodes[0]
wallet = MiniWallet(node)
node.setmocktime(node.getblockheader(node.getbestblockhash())['time'])
wallet.generate(200, invalid_call=False)
self.generate(wallet, 200)

self.test_no_activity(node)
self.test_activity_in_block(node, wallet)

@@ -195,7 +195,7 @@ class GetBlocksActivityTest(BitcoinTestFramework):
def test_no_address(self, node, wallet):
raw_wallet = MiniWallet(self.nodes[0], mode=MiniWalletMode.RAW_P2PK)
raw_wallet.generate(100, invalid_call=False)
self.generate(raw_wallet, 100)

no_addr_tx = raw_wallet.send_self_transfer(from_node=node)
raw_desc = raw_wallet.get_descriptor()
@@ -110,17 +110,21 @@ class RPCPackagesTest(BitcoinTestFramework):
self.assert_testres_equal(package_bad, testres_bad)

self.log.info("Check testmempoolaccept tells us when some transactions completed validation successfully")
tx_bad_sig_hex = node.createrawtransaction([{"txid": coin["txid"], "vout": 0}],
tx_bad_sig_hex = node.createrawtransaction([{"txid": coin["txid"], "vout": coin["vout"]}],
{address : coin["amount"] - Decimal("0.0001")})
tx_bad_sig = tx_from_hex(tx_bad_sig_hex)
testres_bad_sig = node.testmempoolaccept(self.independent_txns_hex + [tx_bad_sig_hex])
# By the time the signature for the last transaction is checked, all the other transactions
# have been fully validated, which is why the node returns full validation results for all
# transactions here but empty results in other cases.
tx_bad_sig_txid = tx_bad_sig.rehash()
tx_bad_sig_wtxid = tx_bad_sig.getwtxid()
assert_equal(testres_bad_sig, self.independent_txns_testres + [{
"txid": tx_bad_sig.rehash(),
"wtxid": tx_bad_sig.getwtxid(), "allowed": False,
"reject-reason": "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)"
"txid": tx_bad_sig_txid,
"wtxid": tx_bad_sig_wtxid, "allowed": False,
"reject-reason": "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)",
"reject-details": "mandatory-script-verify-flag-failed (Operation not valid with the current stack size), " +
f"input 0 of {tx_bad_sig_txid} (wtxid {tx_bad_sig_wtxid}), spending {coin['txid']}:{coin['vout']}"
}])

self.log.info("Check testmempoolaccept reports txns in packages that exceed max feerate")

@@ -304,7 +308,8 @@ class RPCPackagesTest(BitcoinTestFramework):
assert testres_rbf_single[0]["allowed"]
testres_rbf_package = self.independent_txns_testres_blank + [{
"txid": replacement_tx["txid"], "wtxid": replacement_tx["wtxid"], "allowed": False,
"reject-reason": "bip125-replacement-disallowed"
"reject-reason": "bip125-replacement-disallowed",
"reject-details": "bip125-replacement-disallowed"
}]
self.assert_testres_equal(self.independent_txns_hex + [replacement_tx["hex"]], testres_rbf_package)
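As an aside on consuming the new "reject-details" field: callers can fall back to "reject-reason" whenever the richer string is absent. A small, framework-independent sketch (the helper name and sample data are illustrative, not part of the test):

    def summarize_rejections(results: list) -> list:
        # One line per rejected transaction, preferring "reject-details"
        # (added in this change set) over the shorter "reject-reason".
        out = []
        for res in results:
            if not res.get("allowed", False):
                detail = res.get("reject-details") or res.get("reject-reason", "unknown")
                out.append(f"{res.get('txid', '?')}: {detail}")
        return out

    print(summarize_rejections([{"txid": "aa" * 32, "allowed": False,
                                 "reject-reason": "bip125-replacement-disallowed",
                                 "reject-details": "bip125-replacement-disallowed"}]))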
@@ -188,7 +188,12 @@ class AuthServiceProxy():
{'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)},
http_response.status)

responsedata = http_response.read().decode('utf8')
data = http_response.read()
try:
responsedata = data.decode('utf8')
except UnicodeDecodeError as e:
raise JSONRPCException({
'code': -342, 'message': f'Cannot decode response in utf8 format, content: {data}, exception: {e}'})
response = json.loads(responsedata, parse_float=decimal.Decimal)
elapsed = time.time() - req_start_time
if "error" in response and response["error"] is None:
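The change above reads the raw body first and only then attempts UTF-8 decoding, so a malformed response is reported together with its offending bytes instead of surfacing as an unexplained UnicodeDecodeError. A standalone sketch of the same defensive pattern (names are illustrative):

    def decode_http_body(data: bytes) -> str:
        # Decode a raw HTTP body, annotating any failure with the raw bytes
        # instead of letting UnicodeDecodeError propagate unexplained.
        try:
            return data.decode("utf8")
        except UnicodeDecodeError as e:
            raise ValueError(f"Cannot decode response in utf8 format, content: {data!r}, exception: {e}")

    assert decode_http_body(b'{"result": 1}') == '{"result": 1}'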
@@ -453,6 +453,17 @@ def write_config(config_path, *, n, chain, extra_config="", disable_autoconnect=
f.write("unsafesqlitesync=1\n")
if disable_autoconnect:
f.write("connect=0\n")
# Limit max connections to mitigate test failures on some systems caused by the warning:
# "Warning: Reducing -maxconnections from <...> to <...> due to system limitations".
# The value is calculated as follows:
# available_fds = 256 // Same as FD_SETSIZE on NetBSD.
# MIN_CORE_FDS = 151 // Number of file descriptors required for core functionality.
# MAX_ADDNODE_CONNECTIONS = 8 // Maximum number of -addnode outgoing nodes.
# nBind == 3 // Maximum number of bound interfaces used in a test.
#
# min_required_fds = MIN_CORE_FDS + MAX_ADDNODE_CONNECTIONS + nBind = 151 + 8 + 3 = 162;
# nMaxConnections = available_fds - min_required_fds = 256 - 162 = 94;
f.write("maxconnections=94\n")
f.write(extra_config)
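The maxconnections=94 value follows directly from the file-descriptor budget in the comment above. A self-contained sketch of the arithmetic (constant names mirror the comment and are used here for illustration only):

    AVAILABLE_FDS = 256          # same as FD_SETSIZE on NetBSD
    MIN_CORE_FDS = 151           # descriptors needed for core functionality
    MAX_ADDNODE_CONNECTIONS = 8  # maximum -addnode outgoing connections
    N_BIND = 3                   # maximum bound interfaces used in a test

    min_required_fds = MIN_CORE_FDS + MAX_ADDNODE_CONNECTIONS + N_BIND  # 162
    max_connections = AVAILABLE_FDS - min_required_fds                  # 94
    assert max_connections == 94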
@@ -17,6 +17,7 @@ from test_framework.messages import COIN
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
ensure_for,
)
from test_framework.wallet import MiniWallet

@@ -34,17 +35,18 @@ class AssumeutxoTest(BitcoinTestFramework):

def set_test_params(self):
"""Use the pregenerated, deterministic chain up to height 199."""
self.num_nodes = 2
self.num_nodes = 3
self.rpc_timeout = 120
self.extra_args = [
[],
[],
[],
]

def setup_network(self):
"""Start with the nodes disconnected so that one can generate a snapshot
including blocks the other hasn't yet seen."""
self.add_nodes(2)
self.add_nodes(3)
self.start_nodes(extra_args=self.extra_args)

def run_test(self):

@@ -57,6 +59,7 @@ class AssumeutxoTest(BitcoinTestFramework):
"""
n0 = self.nodes[0]
n1 = self.nodes[1]
n2 = self.nodes[2]

self.mini_wallet = MiniWallet(n0)

@@ -88,6 +91,7 @@ class AssumeutxoTest(BitcoinTestFramework):

# make n1 aware of the new header, but don't give it the block.
n1.submitheader(newblock)
n2.submitheader(newblock)

# Ensure everyone is seeing the same headers.
for n in self.nodes:

@@ -125,6 +129,7 @@ class AssumeutxoTest(BitcoinTestFramework):

assert_equal(n0.getblockcount(), FINAL_HEIGHT)
assert_equal(n1.getblockcount(), START_HEIGHT)
assert_equal(n2.getblockcount(), START_HEIGHT)

assert_equal(n0.getblockchaininfo()["blocks"], FINAL_HEIGHT)

@@ -192,6 +197,13 @@ class AssumeutxoTest(BitcoinTestFramework):
w = n1.get_wallet_rpc("w")
assert_equal(w.getbalance(), 34)

self.log.info("Check balance of a wallet that is active during snapshot completion")
n2.restorewallet("w", "backup_w.dat")
loaded = n2.loadtxoutset(dump_output['path'])
self.connect_nodes(0, 2)
self.wait_until(lambda: len(n2.getchainstates()['chainstates']) == 1)
ensure_for(duration=1, f=lambda: (n2.getbalance() == 34))


if __name__ == '__main__':
AssumeutxoTest(__file__).main()
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
# Copyright (c) 2020-2022 The Bitcoin Core developers
# Copyright (c) 2020-present The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Migrating a wallet from legacy to descriptor."""

@@ -463,7 +463,7 @@ class WalletMigrationTest(BitcoinTestFramework):
addr_info = wallet.getaddressinfo(addr)
desc = descsum_create("pk(" + addr_info["pubkey"] + ")")

self.master_node.generatetodescriptor(1, desc, invalid_call=False)
self.generatetodescriptor(self.master_node, 1, desc)

bals = wallet.getbalances()
@@ -1,54 +0,0 @@
#!/usr/bin/env python3
#
# Copyright (c) 2018-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Check for assertions with obvious side effects.

import sys
import subprocess


def git_grep(params: [], error_msg: ""):
try:
output = subprocess.check_output(["git", "grep", *params], text=True, encoding="utf8")
print(error_msg)
print(output)
return 1
except subprocess.CalledProcessError as ex1:
if ex1.returncode > 1:
raise ex1
return 0


def main():
# Aborting the whole process is undesirable for RPC code. So nonfatal
# checks should be used over assert. See: src/util/check.h
# src/rpc/server.cpp is excluded from this check since it's mostly meta-code.
exit_code = git_grep([
"--line-number",
"--extended-regexp",
r"\<(A|a)ss(ume|ert)\(",
"--",
"src/rpc/",
"src/wallet/rpc*",
":(exclude)src/rpc/server.cpp",
], "CHECK_NONFATAL(condition) or NONFATAL_UNREACHABLE should be used instead of assert for RPC code.")

# The `BOOST_ASSERT` macro requires to `#include boost/assert.hpp`,
# which is an unnecessary Boost dependency.
exit_code |= git_grep([
"--line-number",
"--extended-regexp",
r"BOOST_ASSERT\(",
"--",
"*.cpp",
"*.h",
], "BOOST_ASSERT must be replaced with Assert, BOOST_REQUIRE, or BOOST_CHECK.")

sys.exit(exit_code)


if __name__ == "__main__":
main()
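Both the removed Python helper above and the Rust linters that replace it below lean on git grep's exit-code convention: 0 means matches were found (so the lint fails), 1 means no matches (the lint passes), and anything above 1 signals a real error. A minimal, framework-independent sketch of that convention (the function name is illustrative):

    import subprocess

    def git_grep_found(pattern: str, pathspecs: list) -> bool:
        # True when git grep finds at least one match; exit code 1 (no
        # matches) is not treated as an error, anything above 1 is.
        proc = subprocess.run(["git", "grep", "--line-number", "--extended-regexp",
                               pattern, "--", *pathspecs])
        if proc.returncode > 1:
            raise RuntimeError(f"git grep failed with exit code {proc.returncode}")
        return proc.returncode == 0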
@@ -48,6 +48,16 @@ fn get_linter_list() -> Vec<&'static Linter> {
name: "std_filesystem",
lint_fn: lint_std_filesystem
},
&Linter {
description: "Check that fatal assertions are not used in RPC code",
name: "rpc_assert",
lint_fn: lint_rpc_assert
},
&Linter {
description: "Check that boost assertions are not used",
name: "boost_assert",
lint_fn: lint_boost_assert
},
&Linter {
description: "Check that release note snippets are in the right folder",
name: "doc_release_note_snippets",
@@ -237,7 +247,7 @@ fn lint_py_lint() -> LintResult {
"F822", // undefined name name in __all__
"F823", // local variable name … referenced before assignment
"F841", // local variable 'foo' is assigned to but never used
"PLE", // Pylint errors
"PLE", // Pylint errors
"W191", // indentation contains tabs
"W291", // trailing whitespace
"W292", // no newline at end of file
@@ -273,6 +283,7 @@ fn lint_std_filesystem() -> LintResult {
let found = git()
.args([
"grep",
"--line-number",
"std::filesystem",
"--",
"./src/",
@@ -283,10 +294,66 @@ fn lint_std_filesystem() -> LintResult {
.success();
if found {
Err(r#"
^^^
Direct use of std::filesystem may be dangerous and buggy. Please include <util/fs.h> and use the
fs:: namespace, which has unsafe filesystem functions marked as deleted.
"#
.trim()
.to_string())
} else {
Ok(())
}
}

fn lint_rpc_assert() -> LintResult {
let found = git()
.args([
"grep",
"--line-number",
"--extended-regexp",
r"\<(A|a)ss(ume|ert)\(",
"--",
"src/rpc/",
"src/wallet/rpc*",
":(exclude)src/rpc/server.cpp",
// src/rpc/server.cpp is excluded from this check since it's mostly meta-code.
])
.status()
.expect("command error")
.success();
if found {
Err(r#"
CHECK_NONFATAL(condition) or NONFATAL_UNREACHABLE should be used instead of assert for RPC code.

Aborting the whole process is undesirable for RPC code. So nonfatal
checks should be used over assert. See: src/util/check.h
"#
.trim()
.to_string())
} else {
Ok(())
}
}

fn lint_boost_assert() -> LintResult {
let found = git()
.args([
"grep",
"--line-number",
"--extended-regexp",
r"BOOST_ASSERT\(",
"--",
"*.cpp",
"*.h",
])
.status()
.expect("command error")
.success();
if found {
Err(r#"
BOOST_ASSERT must be replaced with Assert, BOOST_REQUIRE, or BOOST_CHECK to avoid an unnecessary
include of the boost/assert.hpp dependency.
"#
.trim()
.to_string())
} else {
Ok(())
@@ -303,17 +370,15 @@ fn lint_doc_release_note_snippets() -> LintResult {
if non_release_notes.is_empty() {
Ok(())
} else {
Err(format!(
r#"
{}
^^^
println!("{non_release_notes}");
Err(r#"
Release note snippets and other docs must be put into the doc/ folder directly.

The doc/release-notes/ folder is for archived release notes of previous releases only. Snippets are
expected to follow the naming "/doc/release-notes-<PR number>.md".
"#,
non_release_notes
))
"#
.trim()
.to_string())
}
}
@@ -356,7 +421,6 @@ fn lint_trailing_whitespace() -> LintResult {
.success();
if trailing_space {
Err(r#"
^^^
Trailing whitespace (including Windows line endings [CR LF]) is problematic, because git may warn
about it, or editors may remove it by default, forcing developers in the future to either undo the
changes manually or spend time on review.

@@ -366,6 +430,7 @@ Thus, it is best to remove the trailing space now.
Please add any false positives, such as subtrees, Windows-related files, patch files, or externally
sourced files to the exclude list.
"#
.trim()
.to_string())
} else {
Ok(())
@@ -382,7 +447,6 @@ fn lint_tabs_whitespace() -> LintResult {
.success();
if tabs {
Err(r#"
^^^
Use of tabs in this codebase is problematic, because existing code uses spaces and tabs will cause
display issues and conflict with editor settings.

@@ -390,6 +454,7 @@ Please remove the tabs.

Please add any false positives, such as subtrees, or externally sourced files to the exclude list.
"#
.trim()
.to_string())
} else {
Ok(())
@@ -464,7 +529,6 @@ fn lint_includes_build_config() -> LintResult {
if missing {
return Err(format!(
r#"
^^^
One or more files use a symbol declared in the bitcoin-build-config.h header. However, they are not
including the header. This is problematic, because the header may or may not be indirectly
included. If the indirect include were to be intentionally or accidentally removed, the build could

@@ -480,12 +544,13 @@ include again.
#include <bitcoin-build-config.h> // IWYU pragma: keep
"#,
defines_regex
));
)
.trim()
.to_string());
}
let redundant = print_affected_files(false);
if redundant {
return Err(r#"
^^^
None of the files use a symbol declared in the bitcoin-build-config.h header. However, they are including
the header. Consider removing the unused include.
"#
@@ -538,7 +603,9 @@ Markdown link errors found:
{}
"#,
stderr
))
)
.trim()
.to_string())
}
Err(e) if e.kind() == ErrorKind::NotFound => {
println!("`mlc` was not found in $PATH, skipping markdown lint check.");
@@ -590,10 +657,9 @@ fn main() -> ExitCode {
env::set_current_dir(&git_root).unwrap();
if let Err(err) = (linter.lint_fn)() {
println!(
"{err}\n^---- ⚠️ Failure generated from lint check '{}'!",
linter.name
"^^^\n{err}\n^---- ⚠️ Failure generated from lint check '{}' ({})!\n\n",
linter.name, linter.description,
);
println!("{}", linter.description);
test_failed = true;
}
}