#!/usr/bin/env python3
# Copyright (c) 2017-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for bitcoind node under test"""

import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import platform
import re
import subprocess
import tempfile
import time
import urllib.parse
import collections
import shlex
from pathlib import Path

from .authproxy import (
    JSONRPCException,
    serialization_fallback,
)
from .descriptors import descsum_create
from .messages import NODE_P2P_V2
from .p2p import P2P_SERVICES, P2P_SUBVERSION
from .util import (
    MAX_NODES,
    assert_equal,
    append_config,
    delete_cookie_file,
    get_auth_cookie,
    get_rpc_proxy,
    rpc_url,
    wait_until_helper_internal,
    p2p_port,
    tor_port,
)

BITCOIND_PROC_WAIT_TIMEOUT = 60

# The size of the blocks xor key
# from InitBlocksdirXorKey::xor_key.size()
NUM_XOR_BYTES = 8
# The null blocks key (all 0s)
NULL_BLK_XOR_KEY = bytes([0] * NUM_XOR_BYTES)

BITCOIN_PID_FILENAME_DEFAULT = "bitcoind.pid"

class FailedToStartError(Exception):
    """Raised when a node fails to start correctly."""


class ErrorMatch(Enum):
    FULL_TEXT = 1
    FULL_REGEX = 2
    PARTIAL_REGEX = 3

class TestNode():
    """A class for representing a bitcoind node under test.

    This class contains:

    - state about the node (whether it's running, etc)
    - a Python subprocess.Popen object representing the running process
    - an RPC connection to the node
    - one or more P2P connections to the node

    To make things easier for the test writer, any unrecognised messages will
    be dispatched to the RPC connection."""

    def __init__(self, i, datadir_path, *, chain, rpchost, timewait, timeout_factor, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None, descriptors=False, v2transport=False):
        """
        Kwargs:
            start_perf (bool): If True, begin profiling the node with `perf` as soon as
                the node starts.
        """
        self.index = i
        self.p2p_conn_index = 1
        self.datadir_path = datadir_path
        self.bitcoinconf = self.datadir_path / "bitcoin.conf"
        self.stdout_dir = self.datadir_path / "stdout"
        self.stderr_dir = self.datadir_path / "stderr"
        self.chain = chain
        self.rpchost = rpchost
        self.rpc_timeout = timewait
        self.binary = bitcoind
        self.coverage_dir = coverage_dir
        self.cwd = cwd
        self.descriptors = descriptors
        self.has_explicit_bind = False
        if extra_conf is not None:
            append_config(self.datadir_path, extra_conf)
            # Remember if there is bind=... in the config file.
            self.has_explicit_bind = any(e.startswith("bind=") for e in extra_conf)
        # Most callers will just need to add extra args to the standard list below.
        # For those callers that need more flexibility, they can just set the args property directly.
        # Note that common args are set in the config file (see initialize_datadir)
        self.extra_args = extra_args
        self.version = version
        # Configuration for logging is set as command-line args rather than in the bitcoin.conf file.
        # This means that starting a bitcoind using the temp dir to debug a failed test won't
        # spam debug.log.
        self.args = [
            self.binary,
            f"-datadir={self.datadir_path}",
            "-logtimemicros",
            "-debug",
            "-debugexclude=libevent",
            "-debugexclude=leveldb",
            "-debugexclude=rand",
            "-uacomment=testnode%d" % i,  # required for subversion uniqueness across peers
        ]
        if self.descriptors is None:
            self.args.append("-disablewallet")

        # Use valgrind, except for previous release binaries
        if use_valgrind and version is None:
            default_suppressions_file = Path(__file__).parents[3] / "contrib" / "valgrind.supp"
            suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE",
                                          default_suppressions_file)
            self.args = ["valgrind", "--suppressions={}".format(suppressions_file),
                         "--gen-suppressions=all", "--exit-on-first-error=yes",
                         "--error-exitcode=1", "--quiet"] + self.args

        if self.version_is_at_least(190000):
            self.args.append("-logthreadnames")
        if self.version_is_at_least(219900):
            self.args.append("-logsourcelocations")
        if self.version_is_at_least(239000):
            self.args.append("-loglevel=trace")

        # Default behavior from global -v2transport flag is added to args to persist it over restarts.
        # May be overwritten in individual tests, using extra_args.
        self.default_to_v2 = v2transport
        if self.version_is_at_least(260000):
            # 26.0 and later support v2transport
            if v2transport:
                self.args.append("-v2transport=1")
            else:
                self.args.append("-v2transport=0")
        # if v2transport is requested via global flag but not supported for node version, ignore it

        self.cli = TestNodeCLI(bitcoin_cli, self.datadir_path)
        self.use_cli = use_cli
        self.start_perf = start_perf

        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.url = None
        self.log = logging.getLogger('TestFramework.node%d' % i)
        self.cleanup_on_exit = True  # Whether to kill the node when this object goes away
        # Cache perf subprocesses here by their data output filename.
        self.perf_subprocesses = {}

        self.p2ps = []
        self.timeout_factor = timeout_factor

        self.mocktime = None

    AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
    PRIV_KEYS = [
        # address, privkey
        AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
        AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
        AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
        AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
        AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
        AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
        AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
        AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
        AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
        AddressKeyPair('mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'),
        AddressKeyPair('mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'),
        AddressKeyPair('mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'),
    ]

    def get_deterministic_priv_key(self):
        """Return a deterministic priv key in base58, that only depends on the node's index"""
        assert len(self.PRIV_KEYS) == MAX_NODES
        return self.PRIV_KEYS[self.index]

    def _node_msg(self, msg: str) -> str:
        """Return a modified msg that identifies this node by its index as a debugging aid."""
        return "[node %d] %s" % (self.index, msg)

    def _raise_assertion_error(self, msg: str):
        """Raise an AssertionError with msg modified to identify this node."""
        raise AssertionError(self._node_msg(msg))

    def __del__(self):
        # Ensure that we don't leave any bitcoind processes lying around after
        # the test ends
        if self.process and self.cleanup_on_exit:
            # Should only happen on test failure
            # Avoid using logger, as that may have already been shutdown when
            # this destructor is called.
            print(self._node_msg("Cleaning up leftover process"))
            self.process.kill()

    def __getattr__(self, name):
        """Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
        if self.use_cli:
            return getattr(RPCOverloadWrapper(self.cli, True, self.descriptors), name)
        else:
            assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
            return getattr(RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name)

    def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, env=None, **kwargs):
        """Start the node."""
        if extra_args is None:
            extra_args = self.extra_args

        # If listening and no -bind is given, then bitcoind would bind P2P ports on
        # 0.0.0.0:P and 127.0.0.1:P+1 (for incoming Tor connections), where P is
        # a unique port chosen by the test framework and configured as port=P in
        # bitcoin.conf. To avoid collisions, change it to 127.0.0.1:tor_port().
        will_listen = all(e != "-nolisten" and e != "-listen=0" for e in extra_args)
        has_explicit_bind = self.has_explicit_bind or any(e.startswith("-bind=") for e in extra_args)
        if will_listen and not has_explicit_bind:
            extra_args.append(f"-bind=0.0.0.0:{p2p_port(self.index)}")
            extra_args.append(f"-bind=127.0.0.1:{tor_port(self.index)}=onion")

        self.use_v2transport = "-v2transport=1" in extra_args or (self.default_to_v2 and "-v2transport=0" not in extra_args)

        # Add a new stdout and stderr file each time bitcoind is started
        if stderr is None:
            stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
        if stdout is None:
            stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
        self.stderr = stderr
        self.stdout = stdout

        if cwd is None:
            cwd = self.cwd

        # Delete any existing cookie file -- if such a file exists (eg due to
        # unclean shutdown), it will get overwritten anyway by bitcoind, and
        # potentially interfere with our attempt to authenticate
        delete_cookie_file(self.datadir_path, self.chain)

        # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
        subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
        if env is not None:
            subp_env.update(env)

        self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)

        self.running = True
        self.log.debug("bitcoind started, waiting for RPC to come up")

        if self.start_perf:
            self._start_perf()

    def wait_for_rpc_connection(self, *, wait_for_import=True):
        """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect."""
        # Poll at a rate of four times per second
        poll_per_s = 4
        for _ in range(poll_per_s * self.rpc_timeout):
            if self.process.poll() is not None:
                # Attach abrupt shutdown error/s to the exception message
                self.stderr.seek(0)
                str_error = ''.join(line.decode('utf-8') for line in self.stderr)
                str_error += "************************\n" if str_error else ''

                raise FailedToStartError(self._node_msg(
                    f'bitcoind exited with status {self.process.returncode} during initialization. {str_error}'))
            try:
                rpc = get_rpc_proxy(
                    rpc_url(self.datadir_path, self.index, self.chain, self.rpchost),
                    self.index,
                    timeout=self.rpc_timeout // 2,  # Shorter timeout to allow for one retry in case of ETIMEDOUT
                    coveragedir=self.coverage_dir,
                )
                rpc.getblockcount()
                # If the call to getblockcount() succeeds then the RPC connection is up
                if self.version_is_at_least(190000) and wait_for_import:
                    # getmempoolinfo.loaded is available since commit
                    # bb8ae2c (version 0.19.0)
                    self.wait_until(lambda: rpc.getmempoolinfo()['loaded'])
                    # Wait for the node to finish reindex, block import, and
                    # loading the mempool. Usually importing happens fast or
                    # even "immediate" when the node is started. However, there
                    # is no guarantee and sometimes ImportBlocks might finish
                    # later. This is going to cause intermittent test failures,
                    # because generally the tests assume the node is fully
                    # ready after being started.
                    #
                    # For example, the node will reject block messages from p2p
                    # when it is still importing with the error "Unexpected
                    # block message received"
                    #
                    # The wait is done here to make tests as robust as possible
                    # and prevent racy tests and intermittent failures as much
                    # as possible. Some tests might not need this, but the
                    # overhead is trivial, and the added guarantees are worth
                    # the minimal performance cost.
                self.log.debug("RPC successfully started")
                if self.use_cli:
                    return
                self.rpc = rpc
                self.rpc_connected = True
                self.url = self.rpc.rpc_url
                return
            except JSONRPCException as e:  # Initialization phase
                # -28 RPC in warmup
                # -342 Service unavailable, RPC server started but is shutting down due to error
                if e.error['code'] != -28 and e.error['code'] != -342:
                    raise  # unknown JSON RPC exception
            except ConnectionResetError:
                # This might happen when the RPC server is in warmup, but shut down before the call to getblockcount
                # succeeds. Try again to properly raise the FailedToStartError
                pass
            except OSError as e:
                if e.errno == errno.ETIMEDOUT:
                    pass  # Treat identical to ConnectionResetError
                elif e.errno == errno.ECONNREFUSED:
                    pass  # Port not yet open?
                else:
                    raise  # unknown OS error
            except ValueError as e:  # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting
                if "No RPC credentials" not in str(e):
                    raise
            time.sleep(1.0 / poll_per_s)
        self._raise_assertion_error("Unable to connect to bitcoind after {}s".format(self.rpc_timeout))

    def wait_for_cookie_credentials(self):
        """Ensures auth cookie credentials can be read, e.g. for testing CLI with -rpcwait before RPC connection is up."""
        self.log.debug("Waiting for cookie credentials")
        # Poll at a rate of four times per second.
        poll_per_s = 4
        for _ in range(poll_per_s * self.rpc_timeout):
            try:
                get_auth_cookie(self.datadir_path, self.chain)
                self.log.debug("Cookie credentials successfully retrieved")
                return
            except ValueError:  # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting
                pass  # so we continue polling until RPC credentials are retrieved
            time.sleep(1.0 / poll_per_s)
        self._raise_assertion_error("Unable to retrieve cookie credentials after {}s".format(self.rpc_timeout))
def generate ( self , nblocks , maxtries = 1000000 , * * kwargs ) :
2018-10-05 16:51:10 +09:00
self . log . debug ( " TestNode.generate() dispatches `generate` call to `generatetoaddress` " )
2021-07-27 13:59:55 +02:00
return self . generatetoaddress ( nblocks = nblocks , address = self . get_deterministic_priv_key ( ) . address , maxtries = maxtries , * * kwargs )
2025-01-03 19:09:03 +05:30
def generateblock ( self , * args , called_by_framework , * * kwargs ) :
assert called_by_framework , " Direct call of this mining RPC is discouraged. Please use one of the self.generate* methods on the test framework, which sync the nodes to avoid intermittent test issues. You may use sync_fun=self.no_op to disable the sync explicitly. "
2021-07-27 13:59:55 +02:00
return self . __getattr__ ( ' generateblock ' ) ( * args , * * kwargs )
2025-01-03 19:09:03 +05:30
def generatetoaddress ( self , * args , called_by_framework , * * kwargs ) :
assert called_by_framework , " Direct call of this mining RPC is discouraged. Please use one of the self.generate* methods on the test framework, which sync the nodes to avoid intermittent test issues. You may use sync_fun=self.no_op to disable the sync explicitly. "
2021-07-27 13:59:55 +02:00
return self . __getattr__ ( ' generatetoaddress ' ) ( * args , * * kwargs )
2025-01-03 19:09:03 +05:30
def generatetodescriptor ( self , * args , called_by_framework , * * kwargs ) :
assert called_by_framework , " Direct call of this mining RPC is discouraged. Please use one of the self.generate* methods on the test framework, which sync the nodes to avoid intermittent test issues. You may use sync_fun=self.no_op to disable the sync explicitly. "
2021-07-27 13:59:55 +02:00
return self . __getattr__ ( ' generatetodescriptor ' ) ( * args , * * kwargs )
2018-10-05 16:51:10 +09:00

    def setmocktime(self, timestamp):
        """Wrapper for setmocktime RPC, sets self.mocktime"""
        if timestamp == 0:
            # setmocktime(0) resets to system time.
            self.mocktime = None
        else:
            self.mocktime = timestamp
        return self.__getattr__('setmocktime')(timestamp)

    def get_wallet_rpc(self, wallet_name):
        if self.use_cli:
            return RPCOverloadWrapper(self.cli("-rpcwallet={}".format(wallet_name)), True, self.descriptors)
        else:
            assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
            wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
            return RPCOverloadWrapper(self.rpc / wallet_path, descriptors=self.descriptors)

    def version_is_at_least(self, ver):
        return self.version is None or self.version >= ver
def stop_node ( self , expected_stderr = ' ' , * , wait = 0 , wait_until_stopped = True ) :
2017-06-02 14:30:36 -04:00
""" Stop the node. """
if not self . running :
return
self . log . debug ( " Stopping node " )
try :
2022-11-29 15:27:51 +01:00
# Do not use wait argument when testing older nodes, e.g. in wallet_backwards_compatibility.py
2020-05-04 20:06:38 -04:00
if self . version_is_at_least ( 180000 ) :
2019-01-05 20:20:42 +01:00
self . stop ( wait = wait )
else :
self . stop ( )
2017-06-02 14:30:36 -04:00
except http . client . CannotSendRequest :
self . log . exception ( " Unable to stop node. " )
2018-02-07 10:51:13 -05:00
2018-10-19 12:28:47 -04:00
# If there are any running perf processes, stop them.
for profile_name in tuple ( self . perf_subprocesses . keys ( ) ) :
self . _stop_perf ( profile_name )
2017-03-27 09:42:17 -04:00
del self . p2ps [ : ]
2017-06-02 14:30:36 -04:00
2023-07-04 14:07:55 +02:00
assert ( not expected_stderr ) or wait_until_stopped # Must wait to check stderr
2020-12-17 13:39:03 +01:00
if wait_until_stopped :
2023-07-04 14:07:55 +02:00
self . wait_until_stopped ( expected_stderr = expected_stderr )
2020-12-17 13:39:03 +01:00
2023-07-04 14:07:55 +02:00

    def is_node_stopped(self, *, expected_stderr="", expected_ret_code=0):
        """Checks whether the node has stopped.

        Returns True if the node has stopped. False otherwise.
        This method is responsible for freeing resources (self.process)."""
        if not self.running:
            return True
        return_code = self.process.poll()
        if return_code is None:
            return False

        # process has stopped. Assert that it didn't return an error code.
        assert return_code == expected_ret_code, self._node_msg(
            f"Node returned unexpected exit code ({return_code}) vs ({expected_ret_code}) when stopping")
        # Check that stderr is as expected
        self.stderr.seek(0)
        stderr = self.stderr.read().decode('utf-8').strip()
        if stderr != expected_stderr:
            raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))

        self.stdout.close()
        self.stderr.close()

        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.log.debug("Node stopped")
        return True

    def wait_until_stopped(self, *, timeout=BITCOIND_PROC_WAIT_TIMEOUT, expect_error=False, **kwargs):
        if "expected_ret_code" not in kwargs:
            kwargs["expected_ret_code"] = 1 if expect_error else 0  # Whether node shutdown return EXIT_FAILURE or EXIT_SUCCESS
        self.wait_until(lambda: self.is_node_stopped(**kwargs), timeout=timeout)

    def replace_in_config(self, replacements):
        """
        Perform replacements in the configuration file.
        The substitutions are passed as a list of search-replace-tuples, e.g.
        [("old", "new"), ("foo", "bar"), ...]
        """
        with open(self.bitcoinconf, 'r', encoding='utf8') as conf:
            conf_data = conf.read()
        for replacement in replacements:
            assert_equal(len(replacement), 2)
            old, new = replacement[0], replacement[1]
            conf_data = conf_data.replace(old, new)
        with open(self.bitcoinconf, 'w', encoding='utf8') as conf:
            conf.write(conf_data)

    @property
    def chain_path(self) -> Path:
        return self.datadir_path / self.chain

    @property
    def debug_log_path(self) -> Path:
        return self.chain_path / 'debug.log'

    @property
    def blocks_path(self) -> Path:
        return self.chain_path / "blocks"

    @property
    def blocks_key_path(self) -> Path:
        return self.blocks_path / "xor.dat"

    def read_xor_key(self) -> bytes:
        with open(self.blocks_key_path, "rb") as xor_f:
            return xor_f.read(NUM_XOR_BYTES)
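
    # A minimal sketch (for illustration only; not used by the framework) of how
    # a test might de-obfuscate data read from a block file, assuming the blocks
    # directory XORs file contents with the repeating 8-byte key stored in xor.dat:
    #
    #     key = node.read_xor_key()
    #     raw = (node.blocks_path / "blk00000.dat").read_bytes()
    #     plain = bytes(b ^ key[i % NUM_XOR_BYTES] for i, b in enumerate(raw))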

    @property
    def wallets_path(self) -> Path:
        return self.chain_path / "wallets"

    def debug_log_size(self, **kwargs) -> int:
        with open(self.debug_log_path, **kwargs) as dl:
            dl.seek(0, 2)
            return dl.tell()

    @contextlib.contextmanager
    def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2):
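        # A minimal usage sketch (hypothetical test code): assert that the
        # wrapped action logs the expected fragment and never the unexpected one:
        #
        #     with node.assert_debug_log(expected_msgs=["Loaded best chain"],
        #                                unexpected_msgs=["ERROR"]):
        #         node.some_rpc_under_test()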
        if unexpected_msgs is None:
            unexpected_msgs = []
        assert_equal(type(expected_msgs), list)
        assert_equal(type(unexpected_msgs), list)

        time_end = time.time() + timeout * self.timeout_factor
        prev_size = self.debug_log_size(encoding="utf-8")  # Must use same encoding that is used to read() below

        yield

        while True:
            found = True
            with open(self.debug_log_path, encoding="utf-8", errors="replace") as dl:
                dl.seek(prev_size)
                log = dl.read()
            print_log = " - " + "\n - ".join(log.splitlines())
            for unexpected_msg in unexpected_msgs:
                if re.search(re.escape(unexpected_msg), log, flags=re.MULTILINE):
                    self._raise_assertion_error('Unexpected message "{}" partially matches log:\n\n{}\n\n'.format(unexpected_msg, print_log))
            for expected_msg in expected_msgs:
                if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
                    found = False
            if found:
                return
            if time.time() >= time_end:
                break
            time.sleep(0.05)
        self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))

    @contextlib.contextmanager
    def busy_wait_for_debug_log(self, expected_msgs, timeout=60):
        """
        Block until we see a particular debug log message fragment or until we exceed the timeout.
        Return:
            the number of log lines we encountered when matching
        """
        time_end = time.time() + timeout * self.timeout_factor
        prev_size = self.debug_log_size(mode="rb")  # Must use same mode that is used to read() below

        yield

        while True:
            found = True
            with open(self.debug_log_path, "rb") as dl:
                dl.seek(prev_size)
                log = dl.read()

            for expected_msg in expected_msgs:
                if expected_msg not in log:
                    found = False
            if found:
                return

            if time.time() >= time_end:
                print_log = " - " + "\n - ".join(log.decode("utf8", errors="replace").splitlines())
                break

            # No sleep here because we want to detect the message fragment as fast as
            # possible.

        self._raise_assertion_error(
            'Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(
                str(expected_msgs), print_log))

    @contextlib.contextmanager
    def wait_for_new_peer(self, timeout=5):
        """
        Wait until the node is connected to at least one new peer. We detect this
        by watching for an increased highest peer id, using the `getpeerinfo` RPC call.
        Note that the simpler approach of only accounting for the number of peers
        suffers from race conditions, as disconnects from unrelated previous peers
        could happen anytime in-between.
        """
        def get_highest_peer_id():
            peer_info = self.getpeerinfo()
            return peer_info[-1]["id"] if peer_info else -1

        initial_peer_id = get_highest_peer_id()
        yield
        self.wait_until(lambda: get_highest_peer_id() > initial_peer_id, timeout=timeout)

    @contextlib.contextmanager
    def profile_with_perf(self, profile_name: str):
        """
        Context manager that allows easy profiling of node activity using `perf`.

        See `test/functional/README.md` for details on perf usage.

        Args:
            profile_name: This string will be appended to the
                profile data filename generated by perf.
        """
        subp = self._start_perf(profile_name)
        yield
        if subp:
            self._stop_perf(profile_name)

    def _start_perf(self, profile_name=None):
        """Start a perf process to profile this node.

        Returns the subprocess running perf."""
        subp = None

        def test_success(cmd):
            return subprocess.call(
                # shell=True required for pipe use below
                cmd, shell=True,
                stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0

        if platform.system() != 'Linux':
            self.log.warning("Can't profile with perf; only available on Linux platforms")
            return None

        if not test_success('which perf'):
            self.log.warning("Can't profile with perf; must install perf-tools")
            return None

        if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
            self.log.warning(
                "perf output won't be very useful without debug symbols compiled into bitcoind")

        output_path = tempfile.NamedTemporaryFile(
            dir=self.datadir_path,
            prefix="{}.perf.data.".format(profile_name or 'test'),
            delete=False,
        ).name

        cmd = [
            'perf', 'record',
            '-g',                     # Record the callgraph.
            '--call-graph', 'dwarf',  # Compatibility for gcc's --fomit-frame-pointer.
            '-F', '101',              # Sampling frequency in Hz.
            '-p', str(self.process.pid),
            '-o', output_path,
        ]
        subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.perf_subprocesses[profile_name] = subp

        return subp

    def _stop_perf(self, profile_name):
        """Stop (and pop) a perf subprocess."""
        subp = self.perf_subprocesses.pop(profile_name)
        output_path = subp.args[subp.args.index('-o') + 1]

        subp.terminate()
        subp.wait(timeout=10)

        stderr = subp.stderr.read().decode()
        if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
            self.log.warning(
                "perf couldn't collect data! Try "
                "'sudo sysctl -w kernel.perf_event_paranoid=-1'")
        else:
            report_cmd = "perf report -i {}".format(output_path)
            self.log.info("See perf output by running '{}'".format(report_cmd))

    def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
        """Attempt to start the node and expect it to raise an error.

        extra_args: extra arguments to pass through to bitcoind
        expected_msg: regex that stderr should match when bitcoind fails

        Will throw if bitcoind starts without an error.
        Will throw if an expected_msg is provided and it does not match bitcoind's stderr."""
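        # A minimal usage sketch (hypothetical arguments and error text):
        #
        #     node.assert_start_raises_init_error(
        #         ["-datadir=/nonexistent"],
        #         "Specified data directory .* does not exist",
        #         match=ErrorMatch.PARTIAL_REGEX,
        #     )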
        assert not self.running
        with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
             tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
            try:
                self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
                ret = self.process.wait(timeout=self.rpc_timeout)
                self.log.debug(self._node_msg(f'bitcoind exited with status {ret} during initialization'))
                assert ret != 0  # Exit code must indicate failure
                self.running = False
                self.process = None
                # Check stderr for expected message
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode('utf-8').strip()
                    if match == ErrorMatch.PARTIAL_REGEX:
                        if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_REGEX:
                        if re.fullmatch(expected_msg, stderr) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_TEXT:
                        if expected_msg != stderr:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
            except subprocess.TimeoutExpired:
                self.process.kill()
                self.running = False
                self.process = None
                assert_msg = f'bitcoind should have exited within {self.rpc_timeout}s '
                if expected_msg is None:
                    assert_msg += "with an error"
                else:
                    assert_msg += "with expected error " + expected_msg
                self._raise_assertion_error(assert_msg)

    def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, send_version=True, supports_v2_p2p=None, wait_for_v2_handshake=True, expect_success=True, **kwargs):
        """Add an inbound p2p connection to the node.

        This method adds the p2p connection to the self.p2ps list and also
        returns the connection to the caller.

        When self.use_v2transport is True, TestNode advertises NODE_P2P_V2 service flag

        An inbound connection is made from TestNode <------ P2PConnection
        - if TestNode doesn't advertise NODE_P2P_V2 service, P2PConnection sends version message and v1 P2P is followed
        - if TestNode advertises NODE_P2P_V2 service, (and if P2PConnection supports v2 P2P)
          P2PConnection sends ellswift bytes and v2 P2P is followed
        """
        if 'dstport' not in kwargs:
            kwargs['dstport'] = p2p_port(self.index)
        if 'dstaddr' not in kwargs:
            kwargs['dstaddr'] = '127.0.0.1'
        if supports_v2_p2p is None:
            supports_v2_p2p = self.use_v2transport
        if self.use_v2transport:
            kwargs['services'] = kwargs.get('services', P2P_SERVICES) | NODE_P2P_V2
        supports_v2_p2p = self.use_v2transport and supports_v2_p2p
        p2p_conn.peer_connect(**kwargs, send_version=send_version, net=self.chain, timeout_factor=self.timeout_factor, supports_v2_p2p=supports_v2_p2p)()

        self.p2ps.append(p2p_conn)
        if not expect_success:
            return p2p_conn
        p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False)
        if supports_v2_p2p and wait_for_v2_handshake:
            p2p_conn.wait_until(lambda: p2p_conn.v2_state.tried_v2_handshake)
        if send_version:
            p2p_conn.wait_until(lambda: not p2p_conn.on_connection_send_msg)
        if wait_for_verack:
            # Wait for the node to send us the version and verack
            p2p_conn.wait_for_verack()
            # At this point we have sent our version message and received the version and verack, however the full node
            # has not yet received the verack from us (in reply to their version). So, the connection is not yet fully
            # established (fSuccessfullyConnected).
            #
            # This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the
            # message we send. However, it might lead to races where we are expecting to receive a message. E.g. a
            # transaction that will be added to the mempool as soon as we return here.
            #
            # So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds)
            # in comparison to the upside of making tests less fragile and unexpected intermittent errors less likely.
            p2p_conn.sync_with_ping()

        # Consistency check that the node received our user agent string.
        # Find our connection in getpeerinfo by our address:port and theirs, as this combination is unique.
        sockname = p2p_conn._transport.get_extra_info("socket").getsockname()
        our_addr_and_port = f"{sockname[0]}:{sockname[1]}"
        dst_addr_and_port = f"{p2p_conn.dstaddr}:{p2p_conn.dstport}"
        info = [peer for peer in self.getpeerinfo() if peer["addr"] == our_addr_and_port and peer["addrbind"] == dst_addr_and_port]
        assert_equal(len(info), 1)
        assert_equal(info[0]["subver"], P2P_SUBVERSION)

        return p2p_conn

    def add_outbound_p2p_connection(self, p2p_conn, *, wait_for_verack=True, wait_for_disconnect=False, p2p_idx, connection_type="outbound-full-relay", supports_v2_p2p=None, advertise_v2_p2p=None, **kwargs):
        """Add an outbound p2p connection from node. Must be an
        "outbound-full-relay", "block-relay-only", "addr-fetch" or "feeler" connection.

        This method adds the p2p connection to the self.p2ps list and returns
        the connection to the caller.

        p2p_idx must be different for simultaneously connected peers. When reusing it for the next peer
        after disconnecting the previous one, it is necessary to wait for the disconnect to finish to avoid
        a race condition.

        Parameters:
            supports_v2_p2p: whether p2p_conn supports v2 P2P or not
            advertise_v2_p2p: whether p2p_conn is advertised to support v2 P2P or not

        An outbound connection is made from TestNode ------> P2PConnection
        - if P2PConnection doesn't advertise_v2_p2p, TestNode sends version message and v1 P2P is followed
        - if P2PConnection both supports_v2_p2p and advertise_v2_p2p, TestNode sends ellswift bytes and v2 P2P is followed
        - if P2PConnection doesn't supports_v2_p2p but advertise_v2_p2p,
          TestNode sends ellswift bytes and P2PConnection disconnects,
          TestNode reconnects by sending version message and v1 P2P is followed
        """
        def addconnection_callback(address, port):
            self.log.debug("Connecting to %s:%d %s" % (address, port, connection_type))
            self.addconnection('%s:%d' % (address, port), connection_type, advertise_v2_p2p)

        if supports_v2_p2p is None:
            supports_v2_p2p = self.use_v2transport
        if advertise_v2_p2p is None:
            advertise_v2_p2p = self.use_v2transport

        if advertise_v2_p2p:
            kwargs['services'] = kwargs.get('services', P2P_SERVICES) | NODE_P2P_V2
            assert self.use_v2transport  # only a v2 TestNode could make a v2 outbound connection

        # if P2PConnection is advertised to support v2 P2P when it doesn't actually support v2 P2P,
        # reconnection needs to be attempted using v1 P2P by sending version message
        reconnect = advertise_v2_p2p and not supports_v2_p2p
        # P2PConnection needs to be advertised to support v2 P2P so that ellswift bytes are sent instead of msg_version
        supports_v2_p2p = supports_v2_p2p and advertise_v2_p2p
        p2p_conn.peer_accept_connection(connect_cb=addconnection_callback, connect_id=p2p_idx + 1, net=self.chain, timeout_factor=self.timeout_factor, supports_v2_p2p=supports_v2_p2p, reconnect=reconnect, **kwargs)()

        if reconnect:
            p2p_conn.wait_for_reconnect()

        if connection_type == "feeler" or wait_for_disconnect:
            # feeler connections are closed as soon as the node receives a `version` message
            p2p_conn.wait_until(lambda: p2p_conn.message_count["version"] == 1, check_connected=False)
            p2p_conn.wait_until(lambda: not p2p_conn.is_connected, check_connected=False)
        else:
            p2p_conn.wait_for_connect()
            self.p2ps.append(p2p_conn)

            if supports_v2_p2p:
                p2p_conn.wait_until(lambda: p2p_conn.v2_state.tried_v2_handshake)
            p2p_conn.wait_until(lambda: not p2p_conn.on_connection_send_msg)
            if wait_for_verack:
                p2p_conn.wait_for_verack()
                p2p_conn.sync_with_ping()

        return p2p_conn

    def num_test_p2p_connections(self):
        """Return number of test framework p2p connections to the node."""
        return len([peer for peer in self.getpeerinfo() if peer['subver'] == P2P_SUBVERSION])

    def disconnect_p2ps(self):
        """Close all p2p connections to the node.
        Use only after each p2p has sent a version message to ensure the wait works."""
        for p in self.p2ps:
            p.peer_disconnect()
        del self.p2ps[:]

        self.wait_until(lambda: self.num_test_p2p_connections() == 0)

    def bumpmocktime(self, seconds):
        """Fast forward using setmocktime to self.mocktime + seconds. Requires setmocktime to have
        been called at some point in the past."""
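        # A minimal usage sketch (hypothetical test code):
        #
        #     node.setmocktime(int(time.time()))
        #     node.bumpmocktime(3600)  # advance the node's clock by one hour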
        assert self.mocktime
        self.mocktime += seconds
        self.setmocktime(self.mocktime)
def wait_until ( self , test_function , timeout = 60 , check_interval = 0.05 ) :
return wait_until_helper_internal ( test_function , timeout = timeout , timeout_factor = self . timeout_factor , check_interval = check_interval )
2023-12-13 11:24:03 +01:00
2020-03-02 14:14:30 -05:00
2017-12-21 04:54:43 -05:00


class TestNodeCLIAttr:
    def __init__(self, cli, command):
        self.cli = cli
        self.command = command

    def __call__(self, *args, **kwargs):
        return self.cli.send_cli(self.command, *args, **kwargs)

    def get_request(self, *args, **kwargs):
        return lambda: self(*args, **kwargs)


def arg_to_cli(arg):
    if isinstance(arg, bool):
        return str(arg).lower()
    elif arg is None:
        return 'null'
    elif isinstance(arg, dict) or isinstance(arg, list):
        return json.dumps(arg, default=serialization_fallback)
    else:
        return str(arg)


class TestNodeCLI():
    """Interface to bitcoin-cli for an individual node"""
    def __init__(self, binary, datadir):
        self.options = []
        self.binary = binary
        self.datadir = datadir
        self.input = None
        self.log = logging.getLogger('TestFramework.bitcoincli')

    def __call__(self, *options, input=None):
        # TestNodeCLI is callable with bitcoin-cli command-line options
        cli = TestNodeCLI(self.binary, self.datadir)
        cli.options = [str(o) for o in options]
        cli.input = input
        return cli
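
    # A minimal usage sketch (hypothetical test code): calling the CLI object
    # returns a copy configured with extra command-line options:
    #
    #     node.cli("-rpcwallet=w1").getbalance()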

    def __getattr__(self, command):
        return TestNodeCLIAttr(self, command)

    def batch(self, requests):
        results = []
        for request in requests:
            try:
                results.append(dict(result=request()))
            except JSONRPCException as e:
                results.append(dict(error=e))
        return results

    def send_cli(self, clicommand=None, *args, **kwargs):
        """Run bitcoin-cli command. Deserializes returned string as python object."""
        pos_args = [arg_to_cli(arg) for arg in args]
        named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()]
        p_args = [self.binary, f"-datadir={self.datadir}"] + self.options
        if named_args:
            p_args += ["-named"]
        if clicommand is not None:
            p_args += [clicommand]
        p_args += pos_args + named_args
        self.log.debug("Running bitcoin-cli {}".format(p_args[2:]))
        process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        cli_stdout, cli_stderr = process.communicate(input=self.input)
        returncode = process.poll()
        if returncode:
            match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
            if match:
                code, message = match.groups()
                raise JSONRPCException(dict(code=int(code), message=message))
            # Ignore cli_stdout, raise with cli_stderr
            raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
        try:
            return json.loads(cli_stdout, parse_float=decimal.Decimal)
        except (json.JSONDecodeError, decimal.InvalidOperation):
            return cli_stdout.rstrip("\n")


class RPCOverloadWrapper():
    def __init__(self, rpc, cli=False, descriptors=False):
        self.rpc = rpc
        self.is_cli = cli
        self.descriptors = descriptors

    def __getattr__(self, name):
        return getattr(self.rpc, name)
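
    # The wrapper is transparent for any RPC it does not overload: unknown
    # attributes resolve through __getattr__ above, while the overloads below
    # translate legacy wallet calls into importdescriptors requests on
    # descriptor wallets. A minimal sketch (hypothetical test code):
    #
    #     w = node.get_wallet_rpc("w1")  # returns an RPCOverloadWrapper
    #     w.getbalance()                 # plain passthrough
    #     w.importprivkey(privkey)       # overloaded below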

    def createwallet_passthrough(self, *args, **kwargs):
        return self.__getattr__("createwallet")(*args, **kwargs)

    def createwallet(self, wallet_name, disable_private_keys=None, blank=None, passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None, external_signer=None):
        if descriptors is None:
            descriptors = self.descriptors
        return self.__getattr__('createwallet')(wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors, load_on_startup, external_signer)

    def importprivkey(self, privkey, label=None, rescan=None):
        wallet_info = self.getwalletinfo()
        if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
            return self.__getattr__('importprivkey')(privkey, label, rescan)
        desc = descsum_create('combo(' + privkey + ')')
        req = [{
            'desc': desc,
            'timestamp': 0 if rescan else 'now',
            'label': label if label else ''
        }]
        import_res = self.importdescriptors(req)
        if not import_res[0]['success']:
            raise JSONRPCException(import_res[0]['error'])

    def addmultisigaddress(self, nrequired, keys, label=None, address_type=None):
        wallet_info = self.getwalletinfo()
        if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
            return self.__getattr__('addmultisigaddress')(nrequired, keys, label, address_type)
        cms = self.createmultisig(nrequired, keys, address_type)
        req = [{
            'desc': cms['descriptor'],
            'timestamp': 0,
            'label': label if label else ''
        }]
        import_res = self.importdescriptors(req)
        if not import_res[0]['success']:
            raise JSONRPCException(import_res[0]['error'])
        return cms

    def importpubkey(self, pubkey, label=None, rescan=None):
        wallet_info = self.getwalletinfo()
        if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
            return self.__getattr__('importpubkey')(pubkey, label, rescan)
        desc = descsum_create('combo(' + pubkey + ')')
        req = [{
            'desc': desc,
            'timestamp': 0 if rescan else 'now',
            'label': label if label else ''
        }]
        import_res = self.importdescriptors(req)
        if not import_res[0]['success']:
            raise JSONRPCException(import_res[0]['error'])

    def importaddress(self, address, label=None, rescan=None, p2sh=None):
        wallet_info = self.getwalletinfo()
        if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
            return self.__getattr__('importaddress')(address, label, rescan, p2sh)
        is_hex = False
        try:
            int(address, 16)
            is_hex = True
            desc = descsum_create('raw(' + address + ')')
        except Exception:
            desc = descsum_create('addr(' + address + ')')
        reqs = [{
            'desc': desc,
            'timestamp': 0 if rescan else 'now',
            'label': label if label else ''
        }]
        if is_hex and p2sh:
            reqs.append({
                'desc': descsum_create('p2sh(raw(' + address + '))'),
                'timestamp': 0 if rescan else 'now',
                'label': label if label else ''
            })
        import_res = self.importdescriptors(reqs)
        for res in import_res:
            if not res['success']:
                raise JSONRPCException(res['error'])