Mirror of https://github.com/bitcoin/bitcoin.git, synced 2025-02-02 14:37:42 -03:00
Merge bitcoin/bitcoin#28487: [25.1] Final backports

45a5fcb165 http: bugfix: track closed connection (stickies-v)
752a456fa8 http: log connection instead of request count (stickies-v)
ae86adabe4 http: refactor: use encapsulated HTTPRequestTracker (stickies-v)
f31899d19a gui: macOS, make appMenuBar part of the main app window (furszy)
64ffa94231 gui: macOS, do not process dock icon actions during shutdown (furszy)
e270f3f857 depends: fix unusable memory_resource in macos qt build (fanquake)
a6683945ca build, macos: Fix `qt` package build with new Xcode 15 linker (Hennadii Stepanov)
b3517cb1b5 test: Test loading wallets with conflicts without a chain (Andrew Chow)
d63478cb50 wallet: Check last block and conflict height are valid in MarkConflicted (Andrew Chow)
5e51a9cc72 ci: Nuke Android APK task, Use credits for tsan (MarcoFalke)
910c36253e test: ensure old fee_estimate.dat not read on restart and flushed (ismaelsadeeq)
37764d3300 tx fees, policy: read stale fee estimates with a regtest-only option (ismaelsadeeq)
16bb9161fa tx fees, policy: do not read estimates of old fee_estimates.dat (ismaelsadeeq)
c4dd5989b3 tx fees, policy: periodically flush fee estimates to fee_estimates.dat (ismaelsadeeq)
c36770cefd test: wallet, verify migration doesn't crash for an invalid script (furszy)
0d2a33e05c wallet: disallow migration of invalid or not-watched scripts (furszy)
2c51a07c08 Do not use std::vector = {} to release memory (Pieter Wuille)

Pull request description:

Further backports for the `25.x` branch. Currently:

* https://github.com/bitcoin/bitcoin/pull/27622
* https://github.com/bitcoin/bitcoin/pull/27834
* https://github.com/bitcoin/bitcoin/pull/28125
* https://github.com/bitcoin/bitcoin/pull/28452
* https://github.com/bitcoin/bitcoin/pull/28542
* https://github.com/bitcoin/bitcoin/pull/28543
* https://github.com/bitcoin/bitcoin/pull/28551
* https://github.com/bitcoin/bitcoin/pull/28571
* https://github.com/bitcoin-core/gui/pull/751

ACKs for top commit:
  hebasto: re-ACK 45a5fcb165, only #28551 has been backported since my recent [review](https://github.com/bitcoin/bitcoin/pull/28487#pullrequestreview-1655584132).
  dergoegge: reACK 45a5fcb165
  willcl-ark: reACK 45a5fcb165

Tree-SHA512: 0f5807aa364b7c2a2039fef11d5cd5e168372c3bf5b0e941350fcd92e7db4a1662801b97bb4f68e29788c77d24bbf97385a483c4501ca72d93fa25327d5694fa
This commit is contained in: commit 9f8d501cb8

21 changed files with 563 additions and 54 deletions

.cirrus.yml (19 changes)
@@ -244,6 +244,7 @@ task:
     docker_arguments:
       CI_IMAGE_NAME_TAG: ubuntu:lunar
       FILE_ENV: "./ci/test/00_setup_env_native_tsan.sh"
+  << : *CREDITS_TEMPLATE
   env:
     << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV
@@ -343,21 +344,3 @@ task:
     CI_USE_APT_INSTALL: "no"
     PACKAGE_MANAGER_INSTALL: "echo" # Nothing to do
     FILE_ENV: "./ci/test/00_setup_env_mac_native_arm64.sh"
-
-task:
-  name: 'ARM64 Android APK [jammy]'
-  << : *CONTAINER_DEPENDS_TEMPLATE
-  container:
-    docker_arguments:
-      CI_IMAGE_NAME_TAG: ubuntu:jammy
-      FILE_ENV: "./ci/test/00_setup_env_android.sh"
-  << : *CREDITS_TEMPLATE
-  android_sdk_cache:
-    folder: "depends/SDKs/android"
-    fingerprint_key: "ANDROID_API_LEVEL=28 ANDROID_BUILD_TOOLS_VERSION=28.0.3 ANDROID_NDK_VERSION=23.2.8568313"
-  depends_sources_cache:
-    folder: "depends/sources"
-    fingerprint_script: git rev-parse HEAD:depends/packages
-  << : *MAIN_TEMPLATE
-  env:
-    << : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV
depends/packages/qt.mk

@@ -22,6 +22,8 @@ $(package)_patches += rcc_hardcode_timestamp.patch
 $(package)_patches += duplicate_lcqpafonts.patch
 $(package)_patches += fast_fixed_dtoa_no_optimize.patch
 $(package)_patches += guix_cross_lib_path.patch
+$(package)_patches += fix-macos-linker.patch
+$(package)_patches += memory_resource.patch

 $(package)_qttranslations_file_name=qttranslations-$($(package)_suffix)
 $(package)_qttranslations_sha256_hash=c92af4171397a0ed272330b4fa0669790fcac8d050b07c8b8cc565ebeba6735e

@@ -238,6 +240,7 @@ endef
 define $(package)_preprocess_cmds
   cp $($(package)_patch_dir)/qt.pro qt.pro && \
   cp $($(package)_patch_dir)/qttools_src.pro qttools/src/src.pro && \
+  patch -p1 -i $($(package)_patch_dir)/fix-macos-linker.patch && \
   patch -p1 -i $($(package)_patch_dir)/dont_hardcode_pwd.patch && \
   patch -p1 -i $($(package)_patch_dir)/fix_qt_pkgconfig.patch && \
   patch -p1 -i $($(package)_patch_dir)/fix_android_jni_static.patch && \

@@ -246,6 +249,7 @@ define $(package)_preprocess_cmds
   patch -p1 -i $($(package)_patch_dir)/qtbase-moc-ignore-gcc-macro.patch && \
   patch -p1 -i $($(package)_patch_dir)/fix_montery_include.patch && \
   patch -p1 -i $($(package)_patch_dir)/use_android_ndk23.patch && \
+  patch -p1 -i $($(package)_patch_dir)/memory_resource.patch && \
   patch -p1 -i $($(package)_patch_dir)/rcc_hardcode_timestamp.patch && \
   patch -p1 -i $($(package)_patch_dir)/duplicate_lcqpafonts.patch && \
   patch -p1 -i $($(package)_patch_dir)/fast_fixed_dtoa_no_optimize.patch && \
depends/patches/qt/fix-macos-linker.patch (new file, 55 lines)
@@ -0,0 +1,55 @@
qmake: Don't error out if QMAKE_DEFAULT_LIBDIRS is empty on macOS

The new linker in Xcode 15 doesn't provide any default linker or
framework paths when requested via -v, but still seems to use the
default paths documented in the ld man page.

We trust that linker will do the right thing, even if we don't
know of its default linker paths.

We also need to opt out of the default fallback logic to
set the libdirs to /lib and /usr/lib.

This may result in UnixMakefileGenerator::findLibraries finding
different libraries than expected, if additional paths are
passed with -L, which will then take precedence for qmake,
even if the linker itself will use the library from the
SDK's default paths. This should hopefully not be an issue
in practice, as we don't turn -lFoo into absolute paths in
qmake, so the only risk is that we're picking up the wrong
prl files and adding additional dependencies that the lib
in the SDK doesn't have.

Upstream commits:
 - Qt 5.15.16: Not yet publicly available.
 - Qt dev: cdf64b0e47115cc473e1afd1472b4b09e130b2a5

For other Qt branches see
https://codereview.qt-project.org/q/I2347b26e2df0828471373b0e15b8c9089274c65d

--- old/qtbase/mkspecs/features/toolchain.prf
+++ new/qtbase/mkspecs/features/toolchain.prf
@@ -283,9 +283,12 @@ isEmpty($${target_prefix}.INCDIRS) {
             }
         }
     }
-    isEmpty(QMAKE_DEFAULT_LIBDIRS)|isEmpty(QMAKE_DEFAULT_INCDIRS): \
+    isEmpty(QMAKE_DEFAULT_INCDIRS): \
         !integrity: \
-            error("failed to parse default search paths from compiler output")
+            error("failed to parse default include paths from compiler output")
+    isEmpty(QMAKE_DEFAULT_LIBDIRS): \
+        !integrity:!darwin: \
+            error("failed to parse default library paths from compiler output")
     QMAKE_DEFAULT_LIBDIRS = $$unique(QMAKE_DEFAULT_LIBDIRS)
 } else: ghs {
     cmd = $$QMAKE_CXX $$QMAKE_CXXFLAGS -$${LITERAL_HASH} -o /tmp/fake_output /tmp/fake_input.cpp
@@ -407,7 +410,7 @@ isEmpty($${target_prefix}.INCDIRS) {
         QMAKE_DEFAULT_INCDIRS = $$split(INCLUDE, $$QMAKE_DIRLIST_SEP)
     }

-    unix:if(!cross_compile|host_build) {
+    unix:!darwin:if(!cross_compile|host_build) {
         isEmpty(QMAKE_DEFAULT_INCDIRS): QMAKE_DEFAULT_INCDIRS = /usr/include /usr/local/include
         isEmpty(QMAKE_DEFAULT_LIBDIRS): QMAKE_DEFAULT_LIBDIRS = /lib /usr/lib
     }
depends/patches/qt/memory_resource.patch (new file, 49 lines)
@@ -0,0 +1,49 @@
Fix unusable memory_resource on macos

See https://bugreports.qt.io/browse/QTBUG-117484
and https://bugreports.qt.io/browse/QTBUG-114316

--- a/qtbase/src/corelib/tools/qduplicatetracker_p.h
+++ b/qtbase/src/corelib/tools/qduplicatetracker_p.h
@@ -52,7 +52,7 @@

 #include <qglobal.h>

-#if QT_HAS_INCLUDE(<memory_resource>) && __cplusplus > 201402L
+#ifdef __cpp_lib_memory_resource
 #  include <unordered_set>
 #  include <memory_resource>
 #else

--- a/qtbase/src/corelib/global/qcompilerdetection.h
+++ b/qtbase/src/corelib/global/qcompilerdetection.h
@@ -1041,16 +1041,22 @@
 #    endif // !_HAS_CONSTEXPR
 #   endif // !__GLIBCXX__ && !_LIBCPP_VERSION
 #  endif // Q_OS_QNX
-#  if (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC) && defined(__GNUC_LIBSTD__) \
-      && ((__GNUC_LIBSTD__-0) * 100 + __GNUC_LIBSTD_MINOR__-0 <= 402)
+#  if (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC)
+#    if defined(__GNUC_LIBSTD__) && ((__GNUC_LIBSTD__-0) * 100 + __GNUC_LIBSTD_MINOR__-0 <= 402)
 // Apple has not updated libstdc++ since 2007, which means it does not have
 // <initializer_list> or std::move. Let's disable these features
-#    undef Q_COMPILER_INITIALIZER_LISTS
-#    undef Q_COMPILER_RVALUE_REFS
-#    undef Q_COMPILER_REF_QUALIFIERS
+#      undef Q_COMPILER_INITIALIZER_LISTS
+#      undef Q_COMPILER_RVALUE_REFS
+#      undef Q_COMPILER_REF_QUALIFIERS
 // Also disable <atomic>, since it's clearly not there
-#    undef Q_COMPILER_ATOMICS
-#  endif
+#      undef Q_COMPILER_ATOMICS
+#    endif
+#    if defined(__cpp_lib_memory_resource) \
+        && ((defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED < 140000) \
+            || (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED < 170000))
+#      undef __cpp_lib_memory_resource // Only supported on macOS 14 and iOS 17
+#    endif
+#  endif // (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC)
 #  if defined(Q_CC_CLANG) && defined(Q_CC_INTEL) && Q_CC_INTEL >= 1500
 // ICC 15.x and 16.0 have their own implementation of std::atomic, which is activated when in Clang mode
 // (probably because libc++'s <atomic> on OS X failed to compile), but they're missing some
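The patch works because downstream code selects <memory_resource> based on the standard __cpp_lib_memory_resource feature-test macro, which the second half of the patch undefines on pre-macOS-14 / pre-iOS-17 deployment targets. A minimal standalone sketch of that guard pattern, assuming nothing beyond the standard library (names here are illustrative, not Qt code):

// Sketch of the guard the patch relies on: code picks a std::pmr-backed
// container only when the standard library actually ships memory_resource.
#include <cstdio>
#include <unordered_set>
#ifdef __cpp_lib_memory_resource
#include <memory_resource>
#endif

int main()
{
#ifdef __cpp_lib_memory_resource
    std::pmr::monotonic_buffer_resource pool;
    std::pmr::unordered_set<int> seen{&pool}; // allocations come from `pool`
#else
    std::unordered_set<int> seen; // fallback path, default allocator
#endif
    seen.insert(42);
    std::printf("tracked %zu value(s)\n", seen.size());
}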
src/headerssync.cpp

@@ -7,6 +7,7 @@
 #include <pow.h>
 #include <timedata.h>
 #include <util/check.h>
+#include <util/vector.h>

 // The two constants below are computed using the simulation script on
 // https://gist.github.com/sipa/016ae445c132cdf65a2791534dfb7ae1

@@ -51,9 +52,9 @@ HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus
 void HeadersSyncState::Finalize()
 {
     Assume(m_download_state != State::FINAL);
-    m_header_commitments = {};
+    ClearShrink(m_header_commitments);
     m_last_header_received.SetNull();
-    m_redownloaded_headers = {};
+    ClearShrink(m_redownloaded_headers);
     m_redownload_buffer_last_hash.SetNull();
     m_redownload_buffer_first_prev_hash.SetNull();
     m_process_all_remaining_headers = false;
src/httpserver.cpp

@@ -15,6 +15,7 @@
 #include <rpc/protocol.h> // For HTTP status codes
 #include <shutdown.h>
 #include <sync.h>
+#include <util/check.h>
 #include <util/strencodings.h>
 #include <util/syscall_sandbox.h>
 #include <util/system.h>

@@ -28,7 +29,7 @@
 #include <memory>
 #include <optional>
 #include <string>
-#include <unordered_set>
+#include <unordered_map>

 #include <sys/types.h>
 #include <sys/stat.h>
@@ -149,10 +150,61 @@ static GlobalMutex g_httppathhandlers_mutex;
 static std::vector<HTTPPathHandler> pathHandlers GUARDED_BY(g_httppathhandlers_mutex);
 //! Bound listening sockets
 static std::vector<evhttp_bound_socket *> boundSockets;

+/**
+ * @brief Helps keep track of open `evhttp_connection`s with active `evhttp_requests`
+ *
+ */
+class HTTPRequestTracker
+{
+private:
+    mutable Mutex m_mutex;
+    mutable std::condition_variable m_cv;
+    //! For each connection, keep a counter of how many requests are open
+    std::unordered_map<const evhttp_connection*, size_t> m_tracker GUARDED_BY(m_mutex);
+
+    void RemoveConnectionInternal(const decltype(m_tracker)::iterator it) EXCLUSIVE_LOCKS_REQUIRED(m_mutex)
+    {
+        m_tracker.erase(it);
+        if (m_tracker.empty()) m_cv.notify_all();
+    }
+public:
+    //! Increase request counter for the associated connection by 1
+    void AddRequest(evhttp_request* req) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
+    {
+        const evhttp_connection* conn{Assert(evhttp_request_get_connection(Assert(req)))};
+        WITH_LOCK(m_mutex, ++m_tracker[conn]);
+    }
+    //! Decrease request counter for the associated connection by 1, remove connection if counter is 0
+    void RemoveRequest(evhttp_request* req) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
+    {
+        const evhttp_connection* conn{Assert(evhttp_request_get_connection(Assert(req)))};
+        LOCK(m_mutex);
+        auto it{m_tracker.find(conn)};
+        if (it != m_tracker.end() && it->second > 0) {
+            if (--(it->second) == 0) RemoveConnectionInternal(it);
+        }
+    }
+    //! Remove a connection entirely
+    void RemoveConnection(const evhttp_connection* conn) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
+    {
+        LOCK(m_mutex);
+        auto it{m_tracker.find(Assert(conn))};
+        if (it != m_tracker.end()) RemoveConnectionInternal(it);
+    }
+    size_t CountActiveConnections() const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
+    {
+        return WITH_LOCK(m_mutex, return m_tracker.size());
+    }
+    //! Wait until there are no more connections with active requests in the tracker
+    void WaitUntilEmpty() const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
+    {
+        WAIT_LOCK(m_mutex, lock);
+        m_cv.wait(lock, [this]() EXCLUSIVE_LOCKS_REQUIRED(m_mutex) { return m_tracker.empty(); });
+    }
+};
 //! Track active requests
-static GlobalMutex g_requests_mutex;
-static std::condition_variable g_requests_cv;
-static std::unordered_set<evhttp_request*> g_requests GUARDED_BY(g_requests_mutex);
+static HTTPRequestTracker g_requests;

 /** Check if a network address is allowed to access the HTTP server */
 static bool ClientAllowed(const CNetAddr& netaddr)
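At its core, HTTPRequestTracker is a per-connection request counter behind a mutex, plus a condition variable that fires once the map drains; WaitUntilEmpty() is what StopHTTPServer() blocks on further down. A minimal compilable sketch of the same pattern, with illustrative names (CounterTracker, int keys standing in for evhttp_connection pointers), not Core code:

// Toy version of the tracker pattern: count per key, notify once empty.
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <unordered_map>

class CounterTracker
{
    std::mutex m_mutex;
    std::condition_variable m_cv;
    std::unordered_map<int, size_t> m_counts; // key -> open request count

public:
    void Add(int key)
    {
        std::lock_guard<std::mutex> lock{m_mutex};
        ++m_counts[key];
    }
    void Remove(int key)
    {
        std::lock_guard<std::mutex> lock{m_mutex};
        auto it = m_counts.find(key);
        if (it != m_counts.end() && --(it->second) == 0) {
            m_counts.erase(it);
            if (m_counts.empty()) m_cv.notify_all();
        }
    }
    void WaitUntilEmpty()
    {
        std::unique_lock<std::mutex> lock{m_mutex};
        m_cv.wait(lock, [this] { return m_counts.empty(); });
    }
};

int main()
{
    CounterTracker tracker;
    tracker.Add(1);
    tracker.Add(1); // two requests on the same connection -> one tracked entry
    std::thread worker{[&] { tracker.Remove(1); tracker.Remove(1); }};
    tracker.WaitUntilEmpty(); // returns once the worker drains both requests
    worker.join();
    std::puts("all connections drained");
}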
@@ -214,20 +266,20 @@ std::string RequestMethodString(HTTPRequest::RequestMethod m)
 /** HTTP request callback */
 static void http_request_cb(struct evhttp_request* req, void* arg)
 {
-    // Track requests and notify when a request is completed.
+    evhttp_connection* conn{evhttp_request_get_connection(req)};
+    // Track active requests
     {
-        WITH_LOCK(g_requests_mutex, g_requests.insert(req));
-        g_requests_cv.notify_all();
+        g_requests.AddRequest(req);
         evhttp_request_set_on_complete_cb(req, [](struct evhttp_request* req, void*) {
-            auto n{WITH_LOCK(g_requests_mutex, return g_requests.erase(req))};
-            assert(n == 1);
-            g_requests_cv.notify_all();
+            g_requests.RemoveRequest(req);
         }, nullptr);
+        evhttp_connection_set_closecb(conn, [](evhttp_connection* conn, void* arg) {
+            g_requests.RemoveConnection(conn);
+        }, nullptr);
     }

     // Disable reading to work around a libevent bug, fixed in 2.2.0.
     if (event_get_version_number() >= 0x02010600 && event_get_version_number() < 0x02020001) {
-        evhttp_connection* conn = evhttp_request_get_connection(req);
         if (conn) {
             bufferevent* bev = evhttp_connection_get_bufferevent(conn);
             if (bev) {
@@ -477,13 +529,10 @@ void StopHTTPServer()
     }
     boundSockets.clear();
     {
-        WAIT_LOCK(g_requests_mutex, lock);
-        if (!g_requests.empty()) {
-            LogPrint(BCLog::HTTP, "Waiting for %d requests to stop HTTP server\n", g_requests.size());
+        if (const auto n_connections{g_requests.CountActiveConnections()}; n_connections != 0) {
+            LogPrint(BCLog::HTTP, "Waiting for %d connections to stop HTTP server\n", n_connections);
         }
-        g_requests_cv.wait(lock, []() EXCLUSIVE_LOCKS_REQUIRED(g_requests_mutex) {
-            return g_requests.empty();
-        });
+        g_requests.WaitUntilEmpty();
     }
     if (eventHTTP) {
         // Schedule a callback to call evhttp_free in the event base thread, so
src/init.cpp (14 changes)
@@ -79,6 +79,7 @@
 #include <util/system.h>
 #include <util/thread.h>
 #include <util/threadnames.h>
+#include <util/time.h>
 #include <util/translation.h>
 #include <validation.h>
 #include <validationinterface.h>
@@ -579,6 +580,7 @@ void SetupServerArgs(ArgsManager& argsman)
     argsman.AddArg("-acceptnonstdtxn", strprintf("Relay and mine \"non-standard\" transactions (%sdefault: %u)", "testnet/regtest only; ", !testnetChainParams->RequireStandard()), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
     argsman.AddArg("-incrementalrelayfee=<amt>", strprintf("Fee rate (in %s/kvB) used to define cost of relay, used for mempool limiting and replacement policy. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_INCREMENTAL_RELAY_FEE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
     argsman.AddArg("-dustrelayfee=<amt>", strprintf("Fee rate (in %s/kvB) used to define dust, the value of an output such that it will cost more than its value in fees at this fee rate to spend it. (default: %s)", CURRENCY_UNIT, FormatMoney(DUST_RELAY_TX_FEE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::NODE_RELAY);
+    argsman.AddArg("-acceptstalefeeestimates", strprintf("Read fee estimates even if they are stale (%sdefault: %u) fee estimates are considered stale if they are %s hours old", "regtest only; ", DEFAULT_ACCEPT_STALE_FEE_ESTIMATES, Ticks<std::chrono::hours>(MAX_FILE_AGE)), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
     argsman.AddArg("-bytespersigop", strprintf("Equivalent bytes per sigop in transactions for relay and mining (default: %u)", DEFAULT_BYTES_PER_SIGOP), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     argsman.AddArg("-datacarrier", strprintf("Relay and mine data carrier transactions (default: %u)", DEFAULT_ACCEPT_DATACARRIER), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     argsman.AddArg("-datacarriersize", strprintf("Maximum size of data in data carrier transactions we relay and mine (default: %u)", MAX_OP_RETURN_RELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
@@ -1251,7 +1253,17 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
     assert(!node.fee_estimator);
     // Don't initialize fee estimation with old data if we don't relay transactions,
     // as they would never get updated.
-    if (!ignores_incoming_txs) node.fee_estimator = std::make_unique<CBlockPolicyEstimator>(FeeestPath(args));
+    if (!ignores_incoming_txs) {
+        bool read_stale_estimates = args.GetBoolArg("-acceptstalefeeestimates", DEFAULT_ACCEPT_STALE_FEE_ESTIMATES);
+        if (read_stale_estimates && (chainparams.NetworkIDString() != CBaseChainParams::REGTEST)) {
+            return InitError(strprintf(_("acceptstalefeeestimates is not supported on %s chain."), chainparams.NetworkIDString()));
+        }
+        node.fee_estimator = std::make_unique<CBlockPolicyEstimator>(FeeestPath(args), read_stale_estimates);
+
+        // Flush estimates to disk periodically
+        CBlockPolicyEstimator* fee_estimator = node.fee_estimator.get();
+        node.scheduler->scheduleEvery([fee_estimator] { fee_estimator->FlushFeeEstimates(); }, FEE_FLUSH_INTERVAL);
+    }

     // Check port numbers
     for (const std::string port_option : {
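This registers FlushFeeEstimates() to run every FEE_FLUSH_INTERVAL, so the estimates on disk are at most an hour stale after an unclean shutdown. A rough standalone sketch of the same periodic-flush idea, using a plain thread and a shortened interval instead of Core's CScheduler (all names here are stand-ins):

// Sketch: run a flush callback on a fixed interval, as scheduleEvery does.
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

int main()
{
    std::atomic<bool> stop{false};
    // Stand-in for CBlockPolicyEstimator::FlushFeeEstimates().
    auto flush = [] { std::puts("flushed fee estimates"); };

    std::thread scheduler{[&] {
        using namespace std::chrono_literals;
        while (!stop) {
            std::this_thread::sleep_for(100ms); // FEE_FLUSH_INTERVAL is 1h in Core
            if (!stop) flush();
        }
    }};

    std::this_thread::sleep_for(std::chrono::milliseconds(350));
    stop = true;
    scheduler.join();
}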
src/policy/fees.cpp

@@ -24,6 +24,7 @@

 #include <algorithm>
 #include <cassert>
+#include <chrono>
 #include <cmath>
 #include <cstddef>
 #include <cstdint>
@@ -527,7 +528,7 @@ bool CBlockPolicyEstimator::_removeTx(const uint256& hash, bool inBlock)
     }
 }

-CBlockPolicyEstimator::CBlockPolicyEstimator(const fs::path& estimation_filepath)
+CBlockPolicyEstimator::CBlockPolicyEstimator(const fs::path& estimation_filepath, const bool read_stale_estimates)
     : m_estimation_filepath{estimation_filepath}
 {
     static_assert(MIN_BUCKET_FEERATE > 0, "Min feerate must be nonzero");
@@ -545,9 +546,22 @@ CBlockPolicyEstimator::CBlockPolicyEstimator(const fs::path& estimation_filepath
     shortStats = std::unique_ptr<TxConfirmStats>(new TxConfirmStats(buckets, bucketMap, SHORT_BLOCK_PERIODS, SHORT_DECAY, SHORT_SCALE));
     longStats = std::unique_ptr<TxConfirmStats>(new TxConfirmStats(buckets, bucketMap, LONG_BLOCK_PERIODS, LONG_DECAY, LONG_SCALE));

     // If the fee estimation file is present, read recorded estimations
     AutoFile est_file{fsbridge::fopen(m_estimation_filepath, "rb")};
-    if (est_file.IsNull() || !Read(est_file)) {
+
+    // Whenever the fee estimation file is not present return early
+    if (est_file.IsNull()) {
+        LogPrintf("%s is not found. Continue anyway.\n", fs::PathToString(m_estimation_filepath));
+        return;
+    }
+
+    std::chrono::hours file_age = GetFeeEstimatorFileAge();
+    // fee estimate file must not be too old to avoid wrong fee estimates.
+    if (file_age > MAX_FILE_AGE && !read_stale_estimates) {
+        LogPrintf("Fee estimation file %s too old (age=%lld > %lld hours) and will not be used to avoid serving stale estimates.\n", fs::PathToString(m_estimation_filepath), Ticks<std::chrono::hours>(file_age), Ticks<std::chrono::hours>(MAX_FILE_AGE));
+        return;
+    }
+
+    if (!Read(est_file)) {
         LogPrintf("Failed to read fee estimates from %s. Continue anyway.\n", fs::PathToString(m_estimation_filepath));
     }
 }
@@ -903,10 +917,16 @@ CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, FeeCalculation

 void CBlockPolicyEstimator::Flush() {
     FlushUnconfirmed();
+    FlushFeeEstimates();
+}

+void CBlockPolicyEstimator::FlushFeeEstimates()
+{
     AutoFile est_file{fsbridge::fopen(m_estimation_filepath, "wb")};
     if (est_file.IsNull() || !Write(est_file)) {
         LogPrintf("Failed to write fee estimates to %s. Continue anyway.\n", fs::PathToString(m_estimation_filepath));
+    } else {
+        LogPrintf("Flushed fee estimates to %s.\n", fs::PathToString(m_estimation_filepath.filename()));
     }
 }
@@ -1011,6 +1031,13 @@ void CBlockPolicyEstimator::FlushUnconfirmed()
     LogPrint(BCLog::ESTIMATEFEE, "Recorded %u unconfirmed txs from mempool in %gs\n", num_entries, Ticks<SecondsDouble>(endclear - startclear));
 }

+std::chrono::hours CBlockPolicyEstimator::GetFeeEstimatorFileAge()
+{
+    auto file_time = std::filesystem::last_write_time(m_estimation_filepath);
+    auto now = std::filesystem::file_time_type::clock::now();
+    return std::chrono::duration_cast<std::chrono::hours>(now - file_time);
+}
+
 static std::set<double> MakeFeeSet(const CFeeRate& min_incremental_fee,
                                    double max_filter_fee_rate,
                                    double fee_filter_spacing)
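GetFeeEstimatorFileAge() is plain std::filesystem arithmetic. A self-contained sketch of the staleness check end to end, assuming a hypothetical fee_estimates.dat in the working directory:

// Sketch: compute how many hours ago a file was last written and compare
// against the 60-hour threshold, mirroring GetFeeEstimatorFileAge() above.
#include <chrono>
#include <cstdio>
#include <filesystem>

int main()
{
    const std::filesystem::path path{"fee_estimates.dat"};
    if (!std::filesystem::exists(path)) return 0;

    const auto file_time = std::filesystem::last_write_time(path);
    const auto now = std::filesystem::file_time_type::clock::now();
    const auto age = std::chrono::duration_cast<std::chrono::hours>(now - file_time);

    constexpr std::chrono::hours MAX_FILE_AGE{60};
    std::printf("age=%lld hours, stale=%s\n",
                static_cast<long long>(age.count()),
                age > MAX_FILE_AGE ? "yes" : "no");
}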
src/policy/fees.h

@@ -14,12 +14,25 @@
 #include <util/fs.h>

 #include <array>
+#include <chrono>
 #include <map>
 #include <memory>
 #include <set>
 #include <string>
 #include <vector>

+// How often to flush fee estimates to fee_estimates.dat.
+static constexpr std::chrono::hours FEE_FLUSH_INTERVAL{1};
+
+/** fee_estimates.dat that are more than 60 hours (2.5 days) will not be read,
+ * as the estimates in the file are stale.
+ */
+static constexpr std::chrono::hours MAX_FILE_AGE{60};
+
+// Whether we allow importing a fee_estimates file older than MAX_FILE_AGE.
+static constexpr bool DEFAULT_ACCEPT_STALE_FEE_ESTIMATES{false};
+
 class AutoFile;
 class CTxMemPoolEntry;
 class TxConfirmStats;
@@ -183,7 +196,7 @@ private:
     const fs::path m_estimation_filepath;
 public:
     /** Create new BlockPolicyEstimator and initialize stats tracking classes with default values */
-    CBlockPolicyEstimator(const fs::path& estimation_filepath);
+    CBlockPolicyEstimator(const fs::path& estimation_filepath, const bool read_stale_estimates);
     ~CBlockPolicyEstimator();

     /** Process all the transactions that have been included in a block */
@@ -239,6 +252,13 @@ public:
     void Flush()
         EXCLUSIVE_LOCKS_REQUIRED(!m_cs_fee_estimator);

+    /** Record current fee estimations. */
+    void FlushFeeEstimates()
+        EXCLUSIVE_LOCKS_REQUIRED(!m_cs_fee_estimator);
+
+    /** Calculates the age of the file, since last modified */
+    std::chrono::hours GetFeeEstimatorFileAge();
+
 private:
     mutable Mutex m_cs_fee_estimator;
src/qt/bitcoingui.cpp

@@ -238,7 +238,6 @@ BitcoinGUI::~BitcoinGUI()
     trayIcon->hide();
 #ifdef Q_OS_MACOS
     delete m_app_nap_inhibitor;
-    delete appMenuBar;
     MacDockIconHandler::cleanup();
 #endif
@@ -466,13 +465,7 @@ void BitcoinGUI::createActions()

 void BitcoinGUI::createMenuBar()
 {
-#ifdef Q_OS_MACOS
-    // Create a decoupled menu bar on Mac which stays even if the window is closed
-    appMenuBar = new QMenuBar();
-#else
-    // Get the main window's menu bar on other platforms
     appMenuBar = menuBar();
-#endif

     // Configure the menus
     QMenu *file = appMenuBar->addMenu(tr("&File"));
@@ -860,6 +853,7 @@ void BitcoinGUI::createTrayIconMenu()
     // Note: On macOS, the Dock icon is used to provide the tray's functionality.
     MacDockIconHandler* dockIconHandler = MacDockIconHandler::instance();
     connect(dockIconHandler, &MacDockIconHandler::dockIconClicked, [this] {
+        if (m_node.shutdownRequested()) return; // nothing to show, node is shutting down.
         show();
         activateWindow();
     });
@@ -871,6 +865,8 @@ void BitcoinGUI::createTrayIconMenu()
         // See https://bugreports.qt.io/browse/QTBUG-91697
         trayIconMenu.get(), &QMenu::aboutToShow,
         [this, show_hide_action, send_action, receive_action, sign_action, verify_action, options_action, node_window_action, quit_action] {
+            if (m_node.shutdownRequested()) return; // nothing to do, node is shutting down.
+
             if (show_hide_action) show_hide_action->setText(
                 (!isHidden() && !isMinimized() && !GUIUtil::isObscured(this)) ?
                 tr("&Hide") :
src/test/fuzz/policy_estimator.cpp

@@ -31,7 +31,7 @@ void initialize_policy_estimator()
 FUZZ_TARGET_INIT(policy_estimator, initialize_policy_estimator)
 {
     FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
-    CBlockPolicyEstimator block_policy_estimator{FeeestPath(*g_setup->m_node.args)};
+    CBlockPolicyEstimator block_policy_estimator{FeeestPath(*g_setup->m_node.args), DEFAULT_ACCEPT_STALE_FEE_ESTIMATES};
     LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10000) {
         CallOneOf(
             fuzzed_data_provider,
src/test/fuzz/policy_estimator_io.cpp

@@ -28,7 +28,7 @@ FUZZ_TARGET_INIT(policy_estimator_io, initialize_policy_estimator_io)
     FuzzedAutoFileProvider fuzzed_auto_file_provider = ConsumeAutoFile(fuzzed_data_provider);
     AutoFile fuzzed_auto_file{fuzzed_auto_file_provider.open()};
     // Re-using block_policy_estimator across runs to avoid costly creation of CBlockPolicyEstimator object.
-    static CBlockPolicyEstimator block_policy_estimator{FeeestPath(*g_setup->m_node.args)};
+    static CBlockPolicyEstimator block_policy_estimator{FeeestPath(*g_setup->m_node.args), DEFAULT_ACCEPT_STALE_FEE_ESTIMATES};
     if (block_policy_estimator.Read(fuzzed_auto_file)) {
         block_policy_estimator.Write(fuzzed_auto_file);
     }
src/test/util/setup_common.cpp

@@ -173,7 +173,7 @@ ChainTestingSetup::ChainTestingSetup(const std::string& chainName, const std::ve
     m_node.scheduler->m_service_thread = std::thread(util::TraceThread, "scheduler", [&] { m_node.scheduler->serviceQueue(); });
     GetMainSignals().RegisterBackgroundSignalScheduler(*m_node.scheduler);

-    m_node.fee_estimator = std::make_unique<CBlockPolicyEstimator>(FeeestPath(*m_node.args));
+    m_node.fee_estimator = std::make_unique<CBlockPolicyEstimator>(FeeestPath(*m_node.args), DEFAULT_ACCEPT_STALE_FEE_ESTIMATES);
     m_node.mempool = std::make_unique<CTxMemPool>(MemPoolOptionsForTest(m_node));

     m_cache_sizes = CalculateCacheSizes(m_args);
src/test/util_tests.cpp

@@ -1791,4 +1791,29 @@ BOOST_AUTO_TEST_CASE(util_WriteBinaryFile)
     BOOST_CHECK(valid);
     BOOST_CHECK_EQUAL(actual_text, expected_text);
 }

+BOOST_AUTO_TEST_CASE(clearshrink_test)
+{
+    {
+        std::vector<uint8_t> v = {1, 2, 3};
+        ClearShrink(v);
+        BOOST_CHECK_EQUAL(v.size(), 0);
+        BOOST_CHECK_EQUAL(v.capacity(), 0);
+    }
+
+    {
+        std::vector<bool> v = {false, true, false, false, true, true};
+        ClearShrink(v);
+        BOOST_CHECK_EQUAL(v.size(), 0);
+        BOOST_CHECK_EQUAL(v.capacity(), 0);
+    }
+
+    {
+        std::deque<int> v = {1, 3, 3, 7};
+        ClearShrink(v);
+        BOOST_CHECK_EQUAL(v.size(), 0);
+        // std::deque has no capacity() we can observe.
+    }
+}
+
 BOOST_AUTO_TEST_SUITE_END()
src/util/vector.h

@@ -49,4 +49,22 @@ inline V Cat(V v1, const V& v2)
     return v1;
 }

+/** Clear a vector (or std::deque) and release its allocated memory. */
+template<typename V>
+inline void ClearShrink(V& v) noexcept
+{
+    // There are various ways to clear a vector and release its memory:
+    //
+    // 1. V{}.swap(v)
+    // 2. v = V{}
+    // 3. v = {}; v.shrink_to_fit();
+    // 4. v.clear(); v.shrink_to_fit();
+    //
+    // (2) does not appear to release memory in glibc debug mode, even if v.shrink_to_fit()
+    // follows. (3) and (4) rely on std::vector::shrink_to_fit, which is only a non-binding
+    // request. Therefore, we use method (1).
+
+    V{}.swap(v);
+}
+
 #endif // BITCOIN_UTIL_VECTOR_H
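The effect described in the comment block can be observed directly: after ClearShrink the capacity matches a freshly constructed vector (zero in practice, as the clearshrink_test above also checks), while plain assignment only empties the vector. A small standalone demonstration mirroring the template:

// Demonstration: swapping with a temporary frees the old buffer.
#include <cstdio>
#include <vector>

template <typename V>
void ClearShrink(V& v) noexcept
{
    V{}.swap(v); // the temporary's empty buffer is swapped in, old buffer freed
}

int main()
{
    std::vector<int> v(1000, 42);
    std::printf("before: size=%zu capacity=%zu\n", v.size(), v.capacity());
    ClearShrink(v);
    std::printf("after:  size=%zu capacity=%zu\n", v.size(), v.capacity()); // capacity 0
}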
src/wallet/scriptpubkeyman.cpp

@@ -1712,11 +1712,26 @@ std::unordered_set<CScript, SaltedSipHasher> LegacyScriptPubKeyMan::GetScriptPub
     }

     // All watchonly scripts are raw
-    spks.insert(setWatchOnly.begin(), setWatchOnly.end());
+    for (const CScript& script : setWatchOnly) {
+        // As the legacy wallet allowed to import any script, we need to verify the validity here.
+        // LegacyScriptPubKeyMan::IsMine() return 'ISMINE_NO' for invalid or not watched scripts (IsMineResult::INVALID or IsMineResult::NO).
+        // e.g. a "sh(sh(pkh()))" which legacy wallets allowed to import!.
+        if (IsMine(script) != ISMINE_NO) spks.insert(script);
+    }

     return spks;
 }

+std::unordered_set<CScript, SaltedSipHasher> LegacyScriptPubKeyMan::GetNotMineScriptPubKeys() const
+{
+    LOCK(cs_KeyStore);
+    std::unordered_set<CScript, SaltedSipHasher> spks;
+    for (const CScript& script : setWatchOnly) {
+        if (IsMine(script) == ISMINE_NO) spks.insert(script);
+    }
+    return spks;
+}
+
 std::optional<MigrationData> LegacyScriptPubKeyMan::MigrateToDescriptor()
 {
     LOCK(cs_KeyStore);
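Migration thus partitions setWatchOnly using IsMine() as the predicate: GetScriptPubKeys() keeps what the wallet actually watches and GetNotMineScriptPubKeys() returns the remainder. A generic sketch of that partition step, with stand-in types and a toy predicate in place of IsMine():

// Sketch: copy only the elements a predicate accepts, analogous to keeping
// scripts where IsMine(script) != ISMINE_NO.
#include <algorithm>
#include <cstdio>
#include <iterator>
#include <unordered_set>

int main()
{
    std::unordered_set<int> watch_only{1, 2, 3, 4}; // stand-in for setWatchOnly
    auto is_mine = [](int script) { return script % 2 == 0; }; // toy predicate
    std::unordered_set<int> spks;
    std::copy_if(watch_only.begin(), watch_only.end(),
                 std::inserter(spks, spks.end()), is_mine);
    std::printf("kept %zu of %zu scripts\n", spks.size(), watch_only.size());
}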
src/wallet/scriptpubkeyman.h

@@ -517,6 +517,12 @@ public:
     std::set<CKeyID> GetKeys() const override;
     std::unordered_set<CScript, SaltedSipHasher> GetScriptPubKeys() const override;

+    /**
+     * Retrieves scripts that were imported by bugs into the legacy spkm and are
+     * simply invalid, such as a sh(sh(pkh())) script, or not watched.
+     */
+    std::unordered_set<CScript, SaltedSipHasher> GetNotMineScriptPubKeys() const;
+
     /** Get the DescriptorScriptPubKeyMans (with private keys) that have the same scriptPubKeys as this LegacyScriptPubKeyMan.
      * Does not modify this ScriptPubKeyMan. */
     std::optional<MigrationData> MigrateToDescriptor();
src/wallet/wallet.cpp

@@ -1323,11 +1323,14 @@ void CWallet::MarkConflicted(const uint256& hashBlock, int conflicting_height, c
 {
     LOCK(cs_wallet);

-    int conflictconfirms = (m_last_block_processed_height - conflicting_height + 1) * -1;
     // If number of conflict confirms cannot be determined, this means
     // that the block is still unknown or not yet part of the main chain,
     // for example when loading the wallet during a reindex. Do nothing in that
     // case.
+    if (m_last_block_processed_height < 0 || conflicting_height < 0) {
+        return;
+    }
+    int conflictconfirms = (m_last_block_processed_height - conflicting_height + 1) * -1;
     if (conflictconfirms >= 0)
         return;
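For concreteness, the depth arithmetic guarded by the new check, with made-up heights:

// Worked example: depth is expressed as a negative confirmation count.
#include <cstdio>

int main()
{
    const int last_block_processed_height = 100; // wallet's current tip
    const int conflicting_height = 98;           // block containing the conflicting tx
    // -(100 - 98 + 1) = -3: the conflict is buried three blocks deep.
    const int conflictconfirms = (last_block_processed_height - conflicting_height + 1) * -1;
    std::printf("conflictconfirms=%d\n", conflictconfirms);
    // When either height is still -1 (unknown, e.g. wallet loaded without a
    // chain), the formula is meaningless, hence the new early return above.
}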
@@ -3899,6 +3902,13 @@ bool CWallet::ApplyMigrationData(MigrationData& data, bilingual_str& error)
         return false;
     }

+    // Get all invalid or non-watched scripts that will not be migrated
+    std::set<CTxDestination> not_migrated_dests;
+    for (const auto& script : legacy_spkm->GetNotMineScriptPubKeys()) {
+        CTxDestination dest;
+        if (ExtractDestination(script, dest)) not_migrated_dests.emplace(dest);
+    }
+
     for (auto& desc_spkm : data.desc_spkms) {
         if (m_spk_managers.count(desc_spkm->GetID()) > 0) {
             error = _("Error: Duplicate descriptors created during migration. Your wallet may be corrupted.");
@@ -4004,6 +4014,13 @@ bool CWallet::ApplyMigrationData(MigrationData& data, bilingual_str& error)
                 continue;
             }
         }

+        // Skip invalid/non-watched scripts that will not be migrated
+        if (not_migrated_dests.count(addr_pair.first) > 0) {
+            dests_to_delete.push_back(addr_pair.first);
+            continue;
+        }
+
         // Not ours, not in watchonly wallet, and not in solvable
         error = _("Error: Address book data in wallet cannot be identified to belong to migrated wallets");
         return false;
test/functional/feature_fee_estimation.py

@@ -7,6 +7,7 @@ from copy import deepcopy
 from decimal import Decimal
 import os
 import random
+import time

 from test_framework.messages import (
     COIN,

@@ -21,6 +22,8 @@ from test_framework.util import (
 )
 from test_framework.wallet import MiniWallet

+MAX_FILE_AGE = 60
+SECONDS_PER_HOUR = 60 * 60
+
 def small_txpuzzle_randfee(
     wallet, from_node, conflist, unconflist, amount, min_fee, fee_increment, batch_reqs
@@ -290,6 +293,95 @@ class EstimateFeeTest(BitcoinTestFramework):
         est_feerate = node.estimatesmartfee(2)["feerate"]
         assert_equal(est_feerate, high_feerate_kvb)

+    def test_old_fee_estimate_file(self):
+        # Get the initial fee rate while node is running
+        fee_rate = self.nodes[0].estimatesmartfee(1)["feerate"]
+
+        # Restart node to ensure fee_estimate.dat file is read
+        self.restart_node(0)
+        assert_equal(self.nodes[0].estimatesmartfee(1)["feerate"], fee_rate)
+
+        fee_dat = self.nodes[0].chain_path / "fee_estimates.dat"
+
+        # Stop the node and backdate the fee_estimates.dat file more than MAX_FILE_AGE
+        self.stop_node(0)
+        last_modified_time = time.time() - (MAX_FILE_AGE + 1) * SECONDS_PER_HOUR
+        os.utime(fee_dat, (last_modified_time, last_modified_time))
+
+        # Start node and ensure the fee_estimates.dat file was not read
+        self.start_node(0)
+        assert_equal(self.nodes[0].estimatesmartfee(1)["errors"], ["Insufficient data or no feerate found"])
+
+    def test_estimate_dat_is_flushed_periodically(self):
+        fee_dat = self.nodes[0].chain_path / "fee_estimates.dat"
+        os.remove(fee_dat) if os.path.exists(fee_dat) else None
+
+        # Verify that fee_estimates.dat does not exist
+        assert_equal(os.path.isfile(fee_dat), False)
+
+        # Verify if the string "Flushed fee estimates to fee_estimates.dat." is present in the debug log file.
+        # If present, it indicates that fee estimates have been successfully flushed to disk.
+        with self.nodes[0].assert_debug_log(expected_msgs=["Flushed fee estimates to fee_estimates.dat."], timeout=1):
+            # Mock the scheduler for an hour to flush fee estimates to fee_estimates.dat
+            self.nodes[0].mockscheduler(SECONDS_PER_HOUR)
+
+        # Verify that fee estimates were flushed and fee_estimates.dat file is created
+        assert_equal(os.path.isfile(fee_dat), True)
+
+        # Verify that the estimates remain the same if there are no blocks in the flush interval
+        block_hash_before = self.nodes[0].getbestblockhash()
+        fee_dat_initial_content = open(fee_dat, "rb").read()
+        with self.nodes[0].assert_debug_log(expected_msgs=["Flushed fee estimates to fee_estimates.dat."], timeout=1):
+            # Mock the scheduler for an hour to flush fee estimates to fee_estimates.dat
+            self.nodes[0].mockscheduler(SECONDS_PER_HOUR)
+
+        # Verify that there were no blocks in between the flush interval
+        assert_equal(block_hash_before, self.nodes[0].getbestblockhash())
+
+        fee_dat_current_content = open(fee_dat, "rb").read()
+        assert_equal(fee_dat_current_content, fee_dat_initial_content)
+
+        # Verify that the estimates remain the same after shutdown with no blocks before shutdown
+        self.restart_node(0)
+        fee_dat_current_content = open(fee_dat, "rb").read()
+        assert_equal(fee_dat_current_content, fee_dat_initial_content)
+
+        # Verify that the estimates are not the same if new blocks were produced in the flush interval
+        with self.nodes[0].assert_debug_log(expected_msgs=["Flushed fee estimates to fee_estimates.dat."], timeout=1):
+            # Mock the scheduler for an hour to flush fee estimates to fee_estimates.dat
+            self.generate(self.nodes[0], 5, sync_fun=self.no_op)
+            self.nodes[0].mockscheduler(SECONDS_PER_HOUR)
+
+        fee_dat_current_content = open(fee_dat, "rb").read()
+        assert fee_dat_current_content != fee_dat_initial_content
+
+        fee_dat_initial_content = fee_dat_current_content
+
+        # Generate blocks before shutdown and verify that the fee estimates are not the same
+        self.generate(self.nodes[0], 5, sync_fun=self.no_op)
+        self.restart_node(0)
+        fee_dat_current_content = open(fee_dat, "rb").read()
+        assert fee_dat_current_content != fee_dat_initial_content
+
+    def test_acceptstalefeeestimates_option(self):
+        # Get the initial fee rate while node is running
+        fee_rate = self.nodes[0].estimatesmartfee(1)["feerate"]
+
+        self.stop_node(0)
+
+        fee_dat = self.nodes[0].chain_path / "fee_estimates.dat"
+
+        # Stop the node and backdate the fee_estimates.dat file more than MAX_FILE_AGE
+        last_modified_time = time.time() - (MAX_FILE_AGE + 1) * SECONDS_PER_HOUR
+        os.utime(fee_dat, (last_modified_time, last_modified_time))
+
+        # Restart node with -acceptstalefeeestimates option to ensure fee_estimate.dat file is read
+        self.start_node(0,extra_args=["-acceptstalefeeestimates"])
+        assert_equal(self.nodes[0].estimatesmartfee(1)["feerate"], fee_rate)
+
     def run_test(self):
         self.log.info("This test is time consuming, please be patient")
         self.log.info("Splitting inputs so we can generate tx's")
@@ -312,12 +404,21 @@ class EstimateFeeTest(BitcoinTestFramework):
         self.log.info("Testing estimates with single transactions.")
         self.sanity_check_estimates_range()

+        self.log.info("Test fee_estimates.dat is flushed periodically")
+        self.test_estimate_dat_is_flushed_periodically()
+
         # check that the effective feerate is greater than or equal to the mempoolminfee even for high mempoolminfee
         self.log.info(
             "Test fee rate estimation after restarting node with high MempoolMinFee"
         )
         self.test_feerate_mempoolminfee()

+        self.log.info("Test acceptstalefeeestimates option")
+        self.test_acceptstalefeeestimates_option()
+
+        self.log.info("Test reading old fee_estimates.dat")
+        self.test_old_fee_estimate_file()
+
         self.log.info("Restarting node with fresh estimation")
         self.stop_node(0)
         fee_dat = os.path.join(self.nodes[0].datadir, self.chain, "fee_estimates.dat")
test/functional/tool_wallet.py

@@ -400,6 +400,62 @@ class ToolWalletTest(BitcoinTestFramework):
         self.assert_raises_tool_error('Error: Checksum is not the correct size', '-wallet=badload', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump')
         assert not os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest/wallets", "badload"))

+    def test_chainless_conflicts(self):
+        self.log.info("Test wallet tool when wallet contains conflicting transactions")
+        self.restart_node(0)
+        self.generate(self.nodes[0], 101)
+
+        def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
+
+        self.nodes[0].createwallet("conflicts")
+        wallet = self.nodes[0].get_wallet_rpc("conflicts")
+        def_wallet.sendtoaddress(wallet.getnewaddress(), 10)
+        self.generate(self.nodes[0], 1)
+
+        # parent tx
+        parent_txid = wallet.sendtoaddress(wallet.getnewaddress(), 9)
+        parent_txid_bytes = bytes.fromhex(parent_txid)[::-1]
+        conflict_utxo = wallet.gettransaction(txid=parent_txid, verbose=True)["decoded"]["vin"][0]
+
+        # The specific assertion in MarkConflicted being tested requires that the parent tx is already loaded
+        # by the time the child tx is loaded. Since transactions end up being loaded in txid order due to how both
+        # bdb and sqlite store things, we can just grind the child tx until it has a txid that is greater than the parent's.
+        locktime = 500000000 # Use locktime as nonce, starting at unix timestamp minimum
+        addr = wallet.getnewaddress()
+        while True:
+            child_send_res = wallet.send(outputs=[{addr: 8}], options={"add_to_wallet": False, "locktime": locktime})
+            child_txid = child_send_res["txid"]
+            child_txid_bytes = bytes.fromhex(child_txid)[::-1]
+            if (child_txid_bytes > parent_txid_bytes):
+                wallet.sendrawtransaction(child_send_res["hex"])
+                break
+            locktime += 1
+
+        # conflict with parent
+        conflict_unsigned = self.nodes[0].createrawtransaction(inputs=[conflict_utxo], outputs=[{wallet.getnewaddress(): 9.9999}])
+        conflict_signed = wallet.signrawtransactionwithwallet(conflict_unsigned)["hex"]
+        conflict_txid = self.nodes[0].sendrawtransaction(conflict_signed)
+        self.generate(self.nodes[0], 1)
+        assert_equal(wallet.gettransaction(txid=parent_txid)["confirmations"], -1)
+        assert_equal(wallet.gettransaction(txid=child_txid)["confirmations"], -1)
+        assert_equal(wallet.gettransaction(txid=conflict_txid)["confirmations"], 1)
+
+        self.stop_node(0)
+
+        # Wallet tool should successfully give info for this wallet
+        expected_output = textwrap.dedent(f'''\
+            Wallet info
+            ===========
+            Name: conflicts
+            Format: {"sqlite" if self.options.descriptors else "bdb"}
+            Descriptors: {"yes" if self.options.descriptors else "no"}
+            Encrypted: no
+            HD (hd seed available): yes
+            Keypool Size: {"8" if self.options.descriptors else "1"}
+            Transactions: 4
+            Address Book: 4
+        ''')
+        self.assert_tool_output(expected_output, "-wallet=conflicts", "info")
+
     def run_test(self):
         self.wallet_path = os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename)

@@ -413,6 +469,7 @@ class ToolWalletTest(BitcoinTestFramework):
         # Salvage is a legacy wallet only thing
         self.test_salvage()
         self.test_dump_createfromdump()
+        self.test_chainless_conflicts()

 if __name__ == '__main__':
     ToolWalletTest().main()
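The grinding loop works because wallet databases load records in key (txid byte) order, and displayed txid hex is byte-reversed relative to that internal order, hence the [::-1] before comparing. A small sketch of that comparison, with toy two-byte values in place of real 32-byte txids (illustrative only):

// Sketch: compare two txids on the reversed (internal byte order) form of
// the displayed hex string, as the test's grinding loop does.
#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

static std::vector<unsigned char> InternalBytes(const std::string& hex)
{
    std::vector<unsigned char> bytes;
    for (size_t i = 0; i < hex.size(); i += 2) {
        bytes.push_back(static_cast<unsigned char>(std::stoi(hex.substr(i, 2), nullptr, 16)));
    }
    std::reverse(bytes.begin(), bytes.end()); // displayed txids are byte-reversed
    return bytes;
}

int main()
{
    const std::string parent{"00ff"}; // stand-ins for 32-byte txids
    const std::string child{"ff00"};
    // Display order and internal order disagree: "ff00" looks larger as hex,
    // but sorts first once the bytes are reversed.
    const bool child_greater = InternalBytes(child) > InternalBytes(parent);
    std::printf("child sorts after parent internally: %s\n", child_greater ? "yes" : "no");
}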
test/functional/wallet_migration.py

@@ -6,6 +6,7 @@

 import os
 import random
+from test_framework.address import script_to_p2sh
 from test_framework.descriptors import descsum_create
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.messages import COIN, CTransaction, CTxOut

@@ -640,6 +641,21 @@ class WalletMigrationTest(BitcoinTestFramework):
         wallet.rpc.importaddress(address=script_wsh_pkh.hex(), label="raw_spk2", rescan=True, p2sh=False)
         assert_equal(wallet.getbalances()['watchonly']['trusted'], 5)

+        # Import sh(pkh()) script, by using importaddress(), with the p2sh flag enabled.
+        # This will wrap the script under another sh level, which is invalid!, and store it inside the wallet.
+        # The migration process must skip the invalid scripts and the addressbook records linked to them.
+        # They are not being watched by the current wallet, nor should be watched by the migrated one.
+        label_sh_pkh = "raw_sh_pkh"
+        script_pkh = key_to_p2pkh_script(df_wallet.getaddressinfo(df_wallet.getnewaddress())["pubkey"])
+        script_sh_pkh = script_to_p2sh_script(script_pkh)
+        addy_script_sh_pkh = script_to_p2sh(script_pkh) # valid script address
+        addy_script_double_sh_pkh = script_to_p2sh(script_sh_pkh) # invalid script address
+
+        # Note: 'importaddress()' will add two scripts, a valid one sh(pkh()) and an invalid one 'sh(sh(pkh()))'.
+        #       Both of them will be stored with the same addressbook label. And only the latter one should
+        #       be discarded during migration. The first one must be migrated.
+        wallet.rpc.importaddress(address=script_sh_pkh.hex(), label=label_sh_pkh, rescan=False, p2sh=True)
+
         # Migrate wallet and re-check balance
         info_migration = wallet.migratewallet()
         wallet_wo = self.nodes[0].get_wallet_rpc(info_migration["watchonly_name"])

@@ -649,11 +665,68 @@ class WalletMigrationTest(BitcoinTestFramework):
         # The watch-only scripts are no longer part of the main wallet
         assert_equal(wallet.getbalances()['mine']['trusted'], 0)

+        # The invalid sh(sh(pk())) script label must not be part of the main wallet anymore
+        assert label_sh_pkh not in wallet.listlabels()
+        # But, the standard sh(pkh()) script should be part of the watch-only wallet.
+        addrs_by_label = wallet_wo.getaddressesbylabel(label_sh_pkh)
+        assert addy_script_sh_pkh in addrs_by_label
+        assert addy_script_double_sh_pkh not in addrs_by_label
+
+        # Also, the watch-only wallet should have the descriptor for the standard sh(pkh())
+        desc = descsum_create(f"addr({addy_script_sh_pkh})")
+        assert next(it['desc'] for it in wallet_wo.listdescriptors()['descriptors'] if it['desc'] == desc)
+        # And doesn't have a descriptor for the invalid one
+        desc_invalid = descsum_create(f"addr({addy_script_double_sh_pkh})")
+        assert_equal(next((it['desc'] for it in wallet_wo.listdescriptors()['descriptors'] if it['desc'] == desc_invalid), None), None)
+
+        # Just in case, also verify wallet restart
+        self.nodes[0].unloadwallet(info_migration["watchonly_name"])
+        self.nodes[0].loadwallet(info_migration["watchonly_name"])
+        assert_equal(wallet_wo.getbalances()['mine']['trusted'], 5)
+
+    def test_conflict_txs(self):
+        self.log.info("Test migration when wallet contains conflicting transactions")
+        def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
+
+        wallet = self.create_legacy_wallet("conflicts")
+        def_wallet.sendtoaddress(wallet.getnewaddress(), 10)
+        self.generate(self.nodes[0], 1)
+
+        # parent tx
+        parent_txid = wallet.sendtoaddress(wallet.getnewaddress(), 9)
+        parent_txid_bytes = bytes.fromhex(parent_txid)[::-1]
+        conflict_utxo = wallet.gettransaction(txid=parent_txid, verbose=True)["decoded"]["vin"][0]
+
+        # The specific assertion in MarkConflicted being tested requires that the parent tx is already loaded
+        # by the time the child tx is loaded. Since transactions end up being loaded in txid order due to how both
+        # bdb and sqlite store things, we can just grind the child tx until it has a txid that is greater than the parent's.
+        locktime = 500000000 # Use locktime as nonce, starting at unix timestamp minimum
+        addr = wallet.getnewaddress()
+        while True:
+            child_send_res = wallet.send(outputs=[{addr: 8}], options={"add_to_wallet": False, "locktime": locktime})
+            child_txid = child_send_res["txid"]
+            child_txid_bytes = bytes.fromhex(child_txid)[::-1]
+            if (child_txid_bytes > parent_txid_bytes):
+                wallet.sendrawtransaction(child_send_res["hex"])
+                break
+            locktime += 1
+
+        # conflict with parent
+        conflict_unsigned = self.nodes[0].createrawtransaction(inputs=[conflict_utxo], outputs=[{wallet.getnewaddress(): 9.9999}])
+        conflict_signed = wallet.signrawtransactionwithwallet(conflict_unsigned)["hex"]
+        conflict_txid = self.nodes[0].sendrawtransaction(conflict_signed)
+        self.generate(self.nodes[0], 1)
+        assert_equal(wallet.gettransaction(txid=parent_txid)["confirmations"], -1)
+        assert_equal(wallet.gettransaction(txid=child_txid)["confirmations"], -1)
+        assert_equal(wallet.gettransaction(txid=conflict_txid)["confirmations"], 1)
+
+        wallet.migratewallet()
+        assert_equal(wallet.gettransaction(txid=parent_txid)["confirmations"], -1)
+        assert_equal(wallet.gettransaction(txid=child_txid)["confirmations"], -1)
+        assert_equal(wallet.gettransaction(txid=conflict_txid)["confirmations"], 1)
+
+        wallet.unloadwallet()
+
     def run_test(self):
         self.generate(self.nodes[0], 101)

@@ -668,6 +741,7 @@ class WalletMigrationTest(BitcoinTestFramework):
         self.test_unloaded_by_path()
         self.test_addressbook()
         self.test_migrate_raw_p2sh()
+        self.test_conflict_txs()

 if __name__ == '__main__':
     WalletMigrationTest().main()