diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 30a1a562dd..204db1ef0e 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -246,6 +246,7 @@ add_library(bitcoin_node STATIC EXCLUDE_FROM_ALL node/psbt.cpp node/timeoffsets.cpp node/transaction.cpp + node/txdownloadman_impl.cpp node/txreconciliation.cpp node/utxo_snapshot.cpp node/warnings.cpp diff --git a/src/net_processing.cpp b/src/net_processing.cpp index be16884011..170352f729 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -88,22 +89,6 @@ static constexpr auto PING_INTERVAL{2min}; static const unsigned int MAX_LOCATOR_SZ = 101; /** The maximum number of entries in an 'inv' protocol message */ static const unsigned int MAX_INV_SZ = 50000; -/** Maximum number of in-flight transaction requests from a peer. It is not a hard limit, but the threshold at which - * point the OVERLOADED_PEER_TX_DELAY kicks in. */ -static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT = 100; -/** Maximum number of transactions to consider for requesting, per peer. It provides a reasonable DoS limit to - * per-peer memory usage spent on announcements, while covering peers continuously sending INVs at the maximum - * rate (by our own policy, see INVENTORY_BROADCAST_PER_SECOND) for several minutes, while not receiving - * the actual transaction (from any peer) in response to requests for them. */ -static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 5000; -/** How long to delay requesting transactions via txids, if we have wtxid-relaying peers */ -static constexpr auto TXID_RELAY_DELAY{2s}; -/** How long to delay requesting transactions from non-preferred peers */ -static constexpr auto NONPREF_PEER_TX_DELAY{2s}; -/** How long to delay requesting transactions from overloaded peers (see MAX_PEER_TX_REQUEST_IN_FLIGHT). */ -static constexpr auto OVERLOADED_PEER_TX_DELAY{2s}; -/** How long to wait before downloading a transaction from an additional peer */ -static constexpr auto GETDATA_TX_INTERVAL{60s}; /** Limit to avoid sending big packets. Not used in processing incoming GETDATA for compatibility */ static const unsigned int MAX_GETDATA_SZ = 1000; /** Number of blocks that can be requested at any given time from a single peer. */ @@ -155,7 +140,7 @@ static constexpr unsigned int INVENTORY_BROADCAST_TARGET = INVENTORY_BROADCAST_P /** Maximum number of inventory items to send per transmission. */ static constexpr unsigned int INVENTORY_BROADCAST_MAX = 1000; static_assert(INVENTORY_BROADCAST_MAX >= INVENTORY_BROADCAST_TARGET, "INVENTORY_BROADCAST_MAX too low"); -static_assert(INVENTORY_BROADCAST_MAX <= MAX_PEER_TX_ANNOUNCEMENTS, "INVENTORY_BROADCAST_MAX too high"); +static_assert(INVENTORY_BROADCAST_MAX <= node::MAX_PEER_TX_ANNOUNCEMENTS, "INVENTORY_BROADCAST_MAX too high"); /** Average delay between feefilter broadcasts in seconds. */ static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min}; /** Maximum feefilter broadcast delay after significant change. */ @@ -580,12 +565,18 @@ private: bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer); /** Handle a transaction whose result was not MempoolAcceptResult::ResultType::VALID. - * @param[in] maybe_add_extra_compact_tx Whether this tx should be added to vExtraTxnForCompact. + * @param[in] first_time_failure Whether we should consider inserting into vExtraTxnForCompact, adding + * a new orphan to resolve, or looking for a package to submit. 
+ * Set to true for transactions just received over p2p. * Set to false if the tx has already been rejected before, - * e.g. is an orphan, to avoid adding duplicate entries. - * Updates m_txrequest, m_lazy_recent_rejects, m_lazy_recent_rejects_reconsiderable, m_orphanage, and vExtraTxnForCompact. */ - void ProcessInvalidTx(NodeId nodeid, const CTransactionRef& tx, const TxValidationState& result, - bool maybe_add_extra_compact_tx) + * e.g. is already in the orphanage, to avoid adding duplicate entries. + * Updates m_txrequest, m_lazy_recent_rejects, m_lazy_recent_rejects_reconsiderable, m_orphanage, and vExtraTxnForCompact. + * + * @returns a PackageToValidate if this transaction has a reconsiderable failure and an eligible package was found, + * or std::nullopt otherwise. + */ + std::optional ProcessInvalidTx(NodeId nodeid, const CTransactionRef& tx, const TxValidationState& result, + bool first_time_failure) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex); /** Handle a transaction whose result was MempoolAcceptResult::ResultType::VALID. @@ -593,40 +584,10 @@ private: void ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list& replaced_transactions) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex); - struct PackageToValidate { - const Package m_txns; - const std::vector m_senders; - /** Construct a 1-parent-1-child package. */ - explicit PackageToValidate(const CTransactionRef& parent, - const CTransactionRef& child, - NodeId parent_sender, - NodeId child_sender) : - m_txns{parent, child}, - m_senders {parent_sender, child_sender} - {} - - std::string ToString() const { - Assume(m_txns.size() == 2); - return strprintf("parent %s (wtxid=%s, sender=%d) + child %s (wtxid=%s, sender=%d)", - m_txns.front()->GetHash().ToString(), - m_txns.front()->GetWitnessHash().ToString(), - m_senders.front(), - m_txns.back()->GetHash().ToString(), - m_txns.back()->GetWitnessHash().ToString(), - m_senders.back()); - } - }; - /** Handle the results of package validation: calls ProcessValidTx and ProcessInvalidTx for * individual transactions, and caches rejection for the package as a group. */ - void ProcessPackageResult(const PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result) - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex); - - /** Look for a child of this transaction in the orphanage to form a 1-parent-1-child package, - * skipping any combinations that have already been tried. Return the resulting package along with - * the senders of its respective transactions, or std::nullopt if no package is found. */ - std::optional Find1P1CPackage(const CTransactionRef& ptx, NodeId nodeid) + void ProcessPackageResult(const node::PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex); /** @@ -719,12 +680,6 @@ private: void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req); - /** Register with TxRequestTracker that an INV has been received from a - * peer. The announcement parameters are decided in PeerManager and then - * passed to TxRequestTracker. 
*/ - void AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time) - EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_tx_download_mutex); - /** Send a message to a peer */ void PushMessage(CNode& node, CSerializedNetMsg&& msg) const { m_connman.PushMessage(&node, std::move(msg)); } template @@ -781,7 +736,8 @@ private: * - Each data structure's limits hold (m_orphanage max size, m_txrequest per-peer limits, etc). */ Mutex m_tx_download_mutex ACQUIRED_BEFORE(m_mempool.cs); - TxRequestTracker m_txrequest GUARDED_BY(m_tx_download_mutex); + node::TxDownloadManager m_txdownloadman GUARDED_BY(m_tx_download_mutex); + std::unique_ptr m_txreconciliation; /** The height of the best chain */ @@ -852,124 +808,6 @@ private: /** Stalling timeout for blocks in IBD */ std::atomic m_block_stalling_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT}; - /** Check whether we already have this gtxid in: - * - mempool - * - orphanage - * - m_lazy_recent_rejects - * - m_lazy_recent_rejects_reconsiderable (if include_reconsiderable = true) - * - m_lazy_recent_confirmed_transactions - * */ - bool AlreadyHaveTx(const GenTxid& gtxid, bool include_reconsiderable) - EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex); - - /** - * Filter for transactions that were recently rejected by the mempool. - * These are not rerequested until the chain tip changes, at which point - * the entire filter is reset. - * - * Without this filter we'd be re-requesting txs from each of our peers, - * increasing bandwidth consumption considerably. For instance, with 100 - * peers, half of which relay a tx we don't accept, that might be a 50x - * bandwidth increase. A flooding attacker attempting to roll-over the - * filter using minimum-sized, 60byte, transactions might manage to send - * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a - * two minute window to send invs to us. - * - * Decreasing the false positive rate is fairly cheap, so we pick one in a - * million to make it highly unlikely for users to have issues with this - * filter. - * - * We typically only add wtxids to this filter. For non-segwit - * transactions, the txid == wtxid, so this only prevents us from - * re-downloading non-segwit transactions when communicating with - * non-wtxidrelay peers -- which is important for avoiding malleation - * attacks that could otherwise interfere with transaction relay from - * non-wtxidrelay peers. For communicating with wtxidrelay peers, having - * the reject filter store wtxids is exactly what we want to avoid - * redownload of a rejected transaction. - * - * In cases where we can tell that a segwit transaction will fail - * validation no matter the witness, we may add the txid of such - * transaction to the filter as well. This can be helpful when - * communicating with txid-relay peers or if we were to otherwise fetch a - * transaction via txid (eg in our orphan handling). - * - * Memory used: 1.3 MB - */ - std::unique_ptr m_lazy_recent_rejects GUARDED_BY(m_tx_download_mutex){nullptr}; - - CRollingBloomFilter& RecentRejectsFilter() EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex) - { - AssertLockHeld(m_tx_download_mutex); - - if (!m_lazy_recent_rejects) { - m_lazy_recent_rejects = std::make_unique(120'000, 0.000'001); - } - - return *m_lazy_recent_rejects; - } - - /** - * Filter for: - * (1) wtxids of transactions that were recently rejected by the mempool but are - * eligible for reconsideration if submitted with other transactions. 
- * (2) packages (see GetPackageHash) we have already rejected before and should not retry. - * - * Similar to m_lazy_recent_rejects, this filter is used to save bandwidth when e.g. all of our peers - * have larger mempools and thus lower minimum feerates than us. - * - * When a transaction's error is TxValidationResult::TX_RECONSIDERABLE (in a package or by - * itself), add its wtxid to this filter. When a package fails for any reason, add the combined - * hash to this filter. - * - * Upon receiving an announcement for a transaction, if it exists in this filter, do not - * download the txdata. When considering packages, if it exists in this filter, drop it. - * - * Reset this filter when the chain tip changes. - * - * Parameters are picked to be the same as m_lazy_recent_rejects, with the same rationale. - */ - std::unique_ptr m_lazy_recent_rejects_reconsiderable GUARDED_BY(m_tx_download_mutex){nullptr}; - - CRollingBloomFilter& RecentRejectsReconsiderableFilter() EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex) - { - AssertLockHeld(m_tx_download_mutex); - - if (!m_lazy_recent_rejects_reconsiderable) { - m_lazy_recent_rejects_reconsiderable = std::make_unique(120'000, 0.000'001); - } - - return *m_lazy_recent_rejects_reconsiderable; - } - - /* - * Filter for transactions that have been recently confirmed. - * We use this to avoid requesting transactions that have already been - * confirnmed. - * - * Blocks don't typically have more than 4000 transactions, so this should - * be at least six blocks (~1 hr) worth of transactions that we can store, - * inserting both a txid and wtxid for every observed transaction. - * If the number of transactions appearing in a block goes up, or if we are - * seeing getdata requests more than an hour after initial announcement, we - * can increase this number. - * The false positive rate of 1/1M should come out to less than 1 - * transaction per day that would be inadvertently ignored (which is the - * same probability that we have in the reject filter). - */ - std::unique_ptr m_lazy_recent_confirmed_transactions GUARDED_BY(m_tx_download_mutex){nullptr}; - - CRollingBloomFilter& RecentConfirmedTransactionsFilter() EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex) - { - AssertLockHeld(m_tx_download_mutex); - - if (!m_lazy_recent_confirmed_transactions) { - m_lazy_recent_confirmed_transactions = std::make_unique(48'000, 0.000'001); - } - - return *m_lazy_recent_confirmed_transactions; - } - /** * For sending `inv`s to inbound peers, we use a single (exponentially * distributed) timer for all peers. If we used a separate timer for each @@ -1104,9 +942,6 @@ private: /** Number of peers from which we're downloading blocks. */ int m_peers_downloading_from GUARDED_BY(cs_main) = 0; - /** Storage for orphan information */ - TxOrphanage m_orphanage GUARDED_BY(m_tx_download_mutex); - void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); /** Orphan/conflicted/etc transactions that are kept for compact block reconstruction. 
@@ -1678,34 +1513,6 @@ void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer) } } -void PeerManagerImpl::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time) -{ - AssertLockHeld(::cs_main); // for State - AssertLockHeld(m_tx_download_mutex); // For m_txrequest - NodeId nodeid = node.GetId(); - if (!node.HasPermission(NetPermissionFlags::Relay) && m_txrequest.Count(nodeid) >= MAX_PEER_TX_ANNOUNCEMENTS) { - // Too many queued announcements from this peer - return; - } - const CNodeState* state = State(nodeid); - - // Decide the TxRequestTracker parameters for this announcement: - // - "preferred": if fPreferredDownload is set (= outbound, or NetPermissionFlags::NoBan permission) - // - "reqtime": current time plus delays for: - // - NONPREF_PEER_TX_DELAY for announcements from non-preferred connections - // - TXID_RELAY_DELAY for txid announcements while wtxid peers are available - // - OVERLOADED_PEER_TX_DELAY for announcements from peers which have at least - // MAX_PEER_TX_REQUEST_IN_FLIGHT requests in flight (and don't have NetPermissionFlags::Relay). - auto delay{0us}; - const bool preferred = state->fPreferredDownload; - if (!preferred) delay += NONPREF_PEER_TX_DELAY; - if (!gtxid.IsWtxid() && m_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY; - const bool overloaded = !node.HasPermission(NetPermissionFlags::Relay) && - m_txrequest.CountInFlight(nodeid) >= MAX_PEER_TX_REQUEST_IN_FLIGHT; - if (overloaded) delay += OVERLOADED_PEER_TX_DELAY; - m_txrequest.ReceivedInv(nodeid, gtxid, preferred, current_time + delay); -} - void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) { LOCK(cs_main); @@ -1720,10 +1527,7 @@ void PeerManagerImpl::InitializeNode(const CNode& node, ServiceFlags our_service LOCK(cs_main); // For m_node_states m_node_states.try_emplace(m_node_states.end(), nodeid); } - { - LOCK(m_tx_download_mutex); - assert(m_txrequest.Count(nodeid) == 0); - } + WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty(nodeid)); if (NetPermissions::HasFlag(node.m_permission_flags, NetPermissionFlags::BloomFilter)) { our_services = static_cast(our_services | NODE_BLOOM); @@ -1791,8 +1595,7 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) } { LOCK(m_tx_download_mutex); - m_orphanage.EraseForPeer(nodeid); - m_txrequest.DisconnectedPeer(nodeid); + m_txdownloadman.DisconnectedPeer(nodeid); } if (m_txreconciliation) m_txreconciliation->ForgetPeer(nodeid); m_num_preferred_download_peers -= state->fPreferredDownload; @@ -1810,9 +1613,7 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) assert(m_peers_downloading_from == 0); assert(m_outbound_peers_with_protect_from_disconnect == 0); assert(m_wtxid_relay_peers == 0); - LOCK(m_tx_download_mutex); - assert(m_txrequest.Size() == 0); - assert(m_orphanage.Size() == 0); + WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty()); } } // cs_main if (node.fSuccessfullyConnected && @@ -1921,7 +1722,7 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c std::vector PeerManagerImpl::GetOrphanTransactions() { LOCK(m_tx_download_mutex); - return m_orphanage.GetOrphanTransactions(); + return m_txdownloadman.GetOrphanTransactions(); } PeerManagerInfo PeerManagerImpl::GetInfo() const @@ -2088,6 +1889,7 @@ PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman, m_banman(banman), m_chainman(chainman), m_mempool(pool), + m_txdownloadman(node::TxDownloadOptions{pool, m_rng, opts.max_orphan_txs, 
opts.deterministic_rng}), m_warnings{warnings}, m_opts{opts} { @@ -2124,8 +1926,7 @@ void PeerManagerImpl::ActiveTipChange(const CBlockIndex& new_tip, bool is_ibd) // If the chain tip has changed, previously rejected transactions might now be valid, e.g. due // to a timelock. Reset the rejection filters to give those transactions another chance if we // see them again. - RecentRejectsFilter().reset(); - RecentRejectsReconsiderableFilter().reset(); + m_txdownloadman.ActiveTipChange(); } } @@ -2160,30 +1961,13 @@ void PeerManagerImpl::BlockConnected( return; } LOCK(m_tx_download_mutex); - m_orphanage.EraseForBlock(*pblock); - - for (const auto& ptx : pblock->vtx) { - RecentConfirmedTransactionsFilter().insert(ptx->GetHash().ToUint256()); - if (ptx->HasWitness()) { - RecentConfirmedTransactionsFilter().insert(ptx->GetWitnessHash().ToUint256()); - } - m_txrequest.ForgetTxHash(ptx->GetHash()); - m_txrequest.ForgetTxHash(ptx->GetWitnessHash()); - } + m_txdownloadman.BlockConnected(pblock); } void PeerManagerImpl::BlockDisconnected(const std::shared_ptr &block, const CBlockIndex* pindex) { - // To avoid relay problems with transactions that were previously - // confirmed, clear our filter of recently confirmed transactions whenever - // there's a reorg. - // This means that in a 1-block reorg (where 1 block is disconnected and - // then another block reconnected), our filter will drop to having only one - // block's worth of transactions in it, but that should be fine, since - // presumably the most common case of relaying a confirmed transaction - // should be just after a new block containing it is found. LOCK(m_tx_download_mutex); - RecentConfirmedTransactionsFilter().reset(); + m_txdownloadman.BlockDisconnected(); } /** @@ -2319,38 +2103,6 @@ void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationSta // Messages // - -bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid, bool include_reconsiderable) -{ - AssertLockHeld(m_tx_download_mutex); - - const uint256& hash = gtxid.GetHash(); - - if (gtxid.IsWtxid()) { - // Normal query by wtxid. - if (m_orphanage.HaveTx(Wtxid::FromUint256(hash))) return true; - } else { - // Never query by txid: it is possible that the transaction in the orphanage has the same - // txid but a different witness, which would give us a false positive result. If we decided - // not to request the transaction based on this result, an attacker could prevent us from - // downloading a transaction by intentionally creating a malleated version of it. While - // only one (or none!) of these transactions can ultimately be confirmed, we have no way of - // discerning which one that is, so the orphanage can store multiple transactions with the - // same txid. - // - // While we won't query by txid, we can try to "guess" what the wtxid is based on the txid. - // A non-segwit transaction's txid == wtxid. Query this txid "casted" to a wtxid. This will - // help us find non-segwit transactions, saving bandwidth, and should have no false positives. 
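In other words, the lookup relies on the fact that a transaction without witness data serializes identically with and without the witness, so its txid and wtxid coincide. A minimal statement of that invariant, using only accessors that already exist on CTransaction:

// Holds for any transaction with no witness data; this is what makes the
// txid-"casted"-to-wtxid orphanage lookup below safe for non-segwit transactions.
if (!tx.HasWitness()) {
    assert(tx.GetHash().ToUint256() == tx.GetWitnessHash().ToUint256());
}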
- if (m_orphanage.HaveTx(Wtxid::FromUint256(hash))) return true; - } - - if (include_reconsiderable && RecentRejectsReconsiderableFilter().contains(hash)) return true; - - if (RecentConfirmedTransactionsFilter().contains(hash)) return true; - - return RecentRejectsFilter().contains(hash) || m_mempool.exists(gtxid); -} - bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash) { return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr; @@ -3206,70 +2958,33 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer, return; } -void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx, const TxValidationState& state, - bool maybe_add_extra_compact_tx) +std::optional PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx, const TxValidationState& state, + bool first_time_failure) { AssertLockNotHeld(m_peer_mutex); AssertLockHeld(g_msgproc_mutex); AssertLockHeld(m_tx_download_mutex); + PeerRef peer{GetPeerRef(nodeid)}; + LogDebug(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n", ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString(), nodeid, state.ToString()); - if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) { - return; - } else if (state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) { - // We can add the wtxid of this transaction to our reject filter. - // Do not add txids of witness transactions or witness-stripped - // transactions to the filter, as they can have been malleated; - // adding such txids to the reject filter would potentially - // interfere with relay of valid transactions from peers that - // do not support wtxid-based relay. See - // https://github.com/bitcoin/bitcoin/issues/8279 for details. - // We can remove this restriction (and always add wtxids to - // the filter even for witness stripped transactions) once - // wtxid-based relay is broadly deployed. - // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034 - // for concerns around weakening security of unupgraded nodes - // if we start doing this too early. - if (state.GetResult() == TxValidationResult::TX_RECONSIDERABLE) { - // If the result is TX_RECONSIDERABLE, add it to m_lazy_recent_rejects_reconsiderable - // because we should not download or submit this transaction by itself again, but may - // submit it as part of a package later. - RecentRejectsReconsiderableFilter().insert(ptx->GetWitnessHash().ToUint256()); - } else { - RecentRejectsFilter().insert(ptx->GetWitnessHash().ToUint256()); - } - m_txrequest.ForgetTxHash(ptx->GetWitnessHash()); - // If the transaction failed for TX_INPUTS_NOT_STANDARD, - // then we know that the witness was irrelevant to the policy - // failure, since this check depends only on the txid - // (the scriptPubKey being spent is covered by the txid). - // Add the txid to the reject filter to prevent repeated - // processing of this transaction in the event that child - // transactions are later received (resulting in - // parent-fetching by txid via the orphan-handling logic). - // We only add the txid if it differs from the wtxid, to avoid wasting entries in the - // rolling bloom filter. 
- if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && ptx->HasWitness()) { - RecentRejectsFilter().insert(ptx->GetHash().ToUint256()); - m_txrequest.ForgetTxHash(ptx->GetHash()); - } - if (maybe_add_extra_compact_tx && RecursiveDynamicUsage(*ptx) < 100000) { - AddToCompactExtraTransactions(ptx); - } + const auto& [add_extra_compact_tx, unique_parents, package_to_validate] = m_txdownloadman.MempoolRejectedTx(ptx, state, nodeid, first_time_failure); + + if (add_extra_compact_tx && RecursiveDynamicUsage(*ptx) < 100000) { + AddToCompactExtraTransactions(ptx); + } + for (const uint256& parent_txid : unique_parents) { + if (peer) AddKnownTx(*peer, parent_txid); } MaybePunishNodeForTx(nodeid, state); - // If the tx failed in ProcessOrphanTx, it should be removed from the orphanage unless the - // tx was still missing inputs. If the tx was not in the orphanage, EraseTx does nothing and returns 0. - if (Assume(state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) && m_orphanage.EraseTx(ptx->GetWitnessHash()) > 0) { - LogDebug(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s)\n", ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString()); - } + return package_to_validate; } void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list& replaced_transactions) @@ -3278,14 +2993,7 @@ void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, c AssertLockHeld(g_msgproc_mutex); AssertLockHeld(m_tx_download_mutex); - // As this version of the transaction was acceptable, we can forget about any requests for it. - // No-op if the tx is not in txrequest. - m_txrequest.ForgetTxHash(tx->GetHash()); - m_txrequest.ForgetTxHash(tx->GetWitnessHash()); - - m_orphanage.AddChildrenToWorkSet(*tx); - // If it came from the orphanage, remove it. No-op if the tx is not in txorphanage. - m_orphanage.EraseTx(tx->GetWitnessHash()); + m_txdownloadman.MempoolAcceptedTx(tx); LogDebug(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n", nodeid, @@ -3300,7 +3008,7 @@ void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, c } } -void PeerManagerImpl::ProcessPackageResult(const PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result) +void PeerManagerImpl::ProcessPackageResult(const node::PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result) { AssertLockNotHeld(m_peer_mutex); AssertLockHeld(g_msgproc_mutex); @@ -3310,7 +3018,7 @@ void PeerManagerImpl::ProcessPackageResult(const PackageToValidate& package_to_v const auto& senders = package_to_validate.m_senders; if (package_result.m_state.IsInvalid()) { - RecentRejectsReconsiderableFilter().insert(GetPackageHash(package)); + m_txdownloadman.MempoolRejectedPackage(package); } // We currently only expect to process 1-parent-1-child packages. Remove if this changes. if (!Assume(package.size() == 2)) return; @@ -3340,7 +3048,7 @@ void PeerManagerImpl::ProcessPackageResult(const PackageToValidate& package_to_v // added there when added to the orphanage or rejected for TX_RECONSIDERABLE. // This should be updated if package submission is ever used for transactions // that haven't already been validated before. 
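A brief note on the flag passed below: as the MempoolRejectedTx implementation near the end of this patch shows, first_time_failure seeds the returned m_should_add_extra_compact_tx, which is why passing false here keeps package members from being re-added to vExtraTxnForCompact.

// From MempoolRejectedTx in node/txdownloadman_impl.cpp further down:
//     bool add_extra_compact_tx{first_time_failure};
// so the false passed here propagates into RejectedTxTodo::m_should_add_extra_compact_tx.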
- ProcessInvalidTx(nodeid, tx, tx_result.m_state, /*maybe_add_extra_compact_tx=*/false); + ProcessInvalidTx(nodeid, tx, tx_result.m_state, /*first_time_failure=*/false); break; } case MempoolAcceptResult::ResultType::MEMPOOL_ENTRY: @@ -3356,60 +3064,6 @@ void PeerManagerImpl::ProcessPackageResult(const PackageToValidate& package_to_v } } -std::optional PeerManagerImpl::Find1P1CPackage(const CTransactionRef& ptx, NodeId nodeid) -{ - AssertLockNotHeld(m_peer_mutex); - AssertLockHeld(g_msgproc_mutex); - AssertLockHeld(m_tx_download_mutex); - - const auto& parent_wtxid{ptx->GetWitnessHash()}; - - Assume(RecentRejectsReconsiderableFilter().contains(parent_wtxid.ToUint256())); - - // Prefer children from this peer. This helps prevent censorship attempts in which an attacker - // sends lots of fake children for the parent, and we (unluckily) keep selecting the fake - // children instead of the real one provided by the honest peer. - const auto cpfp_candidates_same_peer{m_orphanage.GetChildrenFromSamePeer(ptx, nodeid)}; - - // These children should be sorted from newest to oldest. In the (probably uncommon) case - // of children that replace each other, this helps us accept the highest feerate (probably the - // most recent) one efficiently. - for (const auto& child : cpfp_candidates_same_peer) { - Package maybe_cpfp_package{ptx, child}; - if (!RecentRejectsReconsiderableFilter().contains(GetPackageHash(maybe_cpfp_package))) { - return PeerManagerImpl::PackageToValidate{ptx, child, nodeid, nodeid}; - } - } - - // If no suitable candidate from the same peer is found, also try children that were provided by - // a different peer. This is useful because sometimes multiple peers announce both transactions - // to us, and we happen to download them from different peers (we wouldn't have known that these - // 2 transactions are related). We still want to find 1p1c packages then. - // - // If we start tracking all announcers of orphans, we can restrict this logic to parent + child - // pairs in which both were provided by the same peer, i.e. delete this step. - const auto cpfp_candidates_different_peer{m_orphanage.GetChildrenFromDifferentPeer(ptx, nodeid)}; - - // Find the first 1p1c that hasn't already been rejected. We randomize the order to not - // create a bias that attackers can use to delay package acceptance. - // - // Create a random permutation of the indices. - std::vector tx_indices(cpfp_candidates_different_peer.size()); - std::iota(tx_indices.begin(), tx_indices.end(), 0); - std::shuffle(tx_indices.begin(), tx_indices.end(), m_rng); - - for (const auto index : tx_indices) { - // If we already tried a package and failed for any reason, the combined hash was - // cached in m_lazy_recent_rejects_reconsiderable. 
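For reference, the caching referred to here keys on the combined hash from GetPackageHash (policy/packages.h); the old code inserted it directly, and the new ProcessPackageResult hunk above delegates the same insertion to MempoolRejectedPackage. A minimal sketch of the skip check, with parent_tx and child_tx as placeholder names for the pair being considered:

// Skip a 1p1c combination whose combined hash was cached after a failed package attempt.
Package maybe_cpfp_package{parent_tx, child_tx};
if (RecentRejectsReconsiderableFilter().contains(GetPackageHash(maybe_cpfp_package))) {
    // already tried and rejected as a package; don't retry this exact pairing
}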
- const auto [child_tx, child_sender] = cpfp_candidates_different_peer.at(index); - Package maybe_cpfp_package{ptx, child_tx}; - if (!RecentRejectsReconsiderableFilter().contains(GetPackageHash(maybe_cpfp_package))) { - return PeerManagerImpl::PackageToValidate{ptx, child_tx, nodeid, child_sender}; - } - } - return std::nullopt; -} - bool PeerManagerImpl::ProcessOrphanTx(Peer& peer) { AssertLockHeld(g_msgproc_mutex); @@ -3417,7 +3071,7 @@ bool PeerManagerImpl::ProcessOrphanTx(Peer& peer) CTransactionRef porphanTx = nullptr; - while (CTransactionRef porphanTx = m_orphanage.GetTxToReconsider(peer.m_id)) { + while (CTransactionRef porphanTx = m_txdownloadman.GetTxToReconsider(peer.m_id)) { const MempoolAcceptResult result = m_chainman.ProcessTransaction(porphanTx); const TxValidationState& state = result.m_state; const Txid& orphanHash = porphanTx->GetHash(); @@ -3438,7 +3092,7 @@ bool PeerManagerImpl::ProcessOrphanTx(Peer& peer) state.GetResult() != TxValidationResult::TX_UNKNOWN && state.GetResult() != TxValidationResult::TX_NO_MEMPOOL && state.GetResult() != TxValidationResult::TX_RESULT_UNSET)) { - ProcessInvalidTx(peer.m_id, porphanTx, state, /*maybe_add_extra_compact_tx=*/false); + ProcessInvalidTx(peer.m_id, porphanTx, state, /*first_time_failure=*/false); } return true; } @@ -3999,6 +3653,16 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, tx_relay->m_next_inv_send_time == 0s)); } + { + LOCK2(::cs_main, m_tx_download_mutex); + const CNodeState* state = State(pfrom.GetId()); + m_txdownloadman.ConnectedPeer(pfrom.GetId(), node::TxDownloadConnectionInfo { + .m_preferred = state->fPreferredDownload, + .m_relay_permissions = pfrom.HasPermission(NetPermissionFlags::Relay), + .m_wtxid_relay = peer->m_wtxid_relay, + }); + } + pfrom.fSuccessfullyConnected = true; return; } @@ -4268,12 +3932,11 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } const GenTxid gtxid = ToGenTxid(inv); - const bool fAlreadyHave = AlreadyHaveTx(gtxid, /*include_reconsiderable=*/true); - LogDebug(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId()); - AddKnownTx(*peer, inv.hash); - if (!fAlreadyHave && !m_chainman.IsInitialBlockDownload()) { - AddTxAnnouncement(pfrom, gtxid, current_time); + + if (!m_chainman.IsInitialBlockDownload()) { + const bool fAlreadyHave{m_txdownloadman.AddTxAnnouncement(pfrom.GetId(), gtxid, current_time, /*p2p_inv=*/true)}; + LogDebug(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId()); } } else { LogDebug(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId()); @@ -4565,22 +4228,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, LOCK2(cs_main, m_tx_download_mutex); - m_txrequest.ReceivedResponse(pfrom.GetId(), txid); - if (tx.HasWitness()) m_txrequest.ReceivedResponse(pfrom.GetId(), wtxid); - - // We do the AlreadyHaveTx() check using wtxid, rather than txid - in the - // absence of witness malleation, this is strictly better, because the - // recent rejects filter may contain the wtxid but rarely contains - // the txid of a segwit transaction that has been rejected. 
- // In the presence of witness malleation, it's possible that by only - // doing the check with wtxid, we could overlook a transaction which - // was confirmed with a different witness, or exists in our mempool - // with a different witness, but this has limited downside: - // mempool validation does its own lookup of whether we have the txid - // already; and an adversary can already relay us old transactions - // (older than our recency filter) if trying to DoS us, without any need - // for witness malleation. - if (AlreadyHaveTx(GenTxid::Wtxid(wtxid), /*include_reconsiderable=*/true)) { + const auto& [should_validate, package_to_validate] = m_txdownloadman.ReceivedTx(pfrom.GetId(), ptx); + if (!should_validate) { if (pfrom.HasPermission(NetPermissionFlags::ForceRelay)) { // Always relay transactions received from peers with forcerelay // permission, even if they were already in the mempool, allowing @@ -4595,37 +4244,18 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } } - if (RecentRejectsReconsiderableFilter().contains(wtxid)) { - // When a transaction is already in m_lazy_recent_rejects_reconsiderable, we shouldn't submit - // it by itself again. However, look for a matching child in the orphanage, as it is - // possible that they succeed as a package. - LogDebug(BCLog::TXPACKAGES, "found tx %s (wtxid=%s) in reconsiderable rejects, looking for child in orphanage\n", - txid.ToString(), wtxid.ToString()); - if (auto package_to_validate{Find1P1CPackage(ptx, pfrom.GetId())}) { - const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)}; - LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(), - package_result.m_state.IsValid() ? "package accepted" : "package rejected"); - ProcessPackageResult(package_to_validate.value(), package_result); - } + if (package_to_validate) { + const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)}; + LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(), + package_result.m_state.IsValid() ? "package accepted" : "package rejected"); + ProcessPackageResult(package_to_validate.value(), package_result); } - // If a tx is detected by m_lazy_recent_rejects it is ignored. Because we haven't - // submitted the tx to our mempool, we won't have computed a DoS - // score for it or determined exactly why we consider it invalid. - // - // This means we won't penalize any peer subsequently relaying a DoSy - // tx (even if we penalized the first peer who gave it to us) because - // we have to account for m_lazy_recent_rejects showing false positives. In - // other words, we shouldn't penalize a peer if we aren't *sure* they - // submitted a DoSy tx. - // - // Note that m_lazy_recent_rejects doesn't just record DoSy or invalid - // transactions, but any tx not accepted by the mempool, which may be - // due to node policy (vs. consensus). So we can't blanket penalize a - // peer simply for relaying a tx that our m_lazy_recent_rejects has caught, - // regardless of false positives. return; } + // ReceivedTx should not be telling us to validate the tx and a package. 
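Spelled out, the pair returned by ReceivedTx (documented in node/txdownloadman.h below) admits only three meaningful combinations, which is what the Assume below encodes:

//   {false, std::nullopt}  -> we already have (or have rejected) this tx; nothing more to do here
//   {false, package}       -> don't resubmit the tx on its own, but do try the returned 1p1c package
//   {true,  std::nullopt}  -> fall through to ProcessTransaction below
//   {true,  package}       -> not expected; hence Assume(!package_to_validate.has_value())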
+ Assume(!package_to_validate.has_value()); + const MempoolAcceptResult result = m_chainman.ProcessTransaction(ptx); const TxValidationState& state = result.m_state; @@ -4633,90 +4263,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, ProcessValidTx(pfrom.GetId(), ptx, result.m_replaced_transactions); pfrom.m_last_tx_time = GetTime(); } - else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) - { - bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected - - // Deduplicate parent txids, so that we don't have to loop over - // the same parent txid more than once down below. - std::vector unique_parents; - unique_parents.reserve(tx.vin.size()); - for (const CTxIn& txin : tx.vin) { - // We start with all parents, and then remove duplicates below. - unique_parents.push_back(txin.prevout.hash); - } - std::sort(unique_parents.begin(), unique_parents.end()); - unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end()); - - // Distinguish between parents in m_lazy_recent_rejects and m_lazy_recent_rejects_reconsiderable. - // We can tolerate having up to 1 parent in m_lazy_recent_rejects_reconsiderable since we - // submit 1p1c packages. However, fail immediately if any are in m_lazy_recent_rejects. - std::optional rejected_parent_reconsiderable; - for (const uint256& parent_txid : unique_parents) { - if (RecentRejectsFilter().contains(parent_txid)) { - fRejectedParents = true; - break; - } else if (RecentRejectsReconsiderableFilter().contains(parent_txid) && !m_mempool.exists(GenTxid::Txid(parent_txid))) { - // More than 1 parent in m_lazy_recent_rejects_reconsiderable: 1p1c will not be - // sufficient to accept this package, so just give up here. - if (rejected_parent_reconsiderable.has_value()) { - fRejectedParents = true; - break; - } - rejected_parent_reconsiderable = parent_txid; - } - } - if (!fRejectedParents) { - const auto current_time{GetTime()}; - - for (const uint256& parent_txid : unique_parents) { - // Here, we only have the txid (and not wtxid) of the - // inputs, so we only request in txid mode, even for - // wtxidrelay peers. - // Eventually we should replace this with an improved - // protocol for getting all unconfirmed parents. - const auto gtxid{GenTxid::Txid(parent_txid)}; - AddKnownTx(*peer, parent_txid); - // Exclude m_lazy_recent_rejects_reconsiderable: the missing parent may have been - // previously rejected for being too low feerate. This orphan might CPFP it. - if (!AlreadyHaveTx(gtxid, /*include_reconsiderable=*/false)) AddTxAnnouncement(pfrom, gtxid, current_time); - } - - if (m_orphanage.AddTx(ptx, pfrom.GetId())) { - AddToCompactExtraTransactions(ptx); - } - - // Once added to the orphan pool, a tx is considered AlreadyHave, and we shouldn't request it anymore. - m_txrequest.ForgetTxHash(tx.GetHash()); - m_txrequest.ForgetTxHash(tx.GetWitnessHash()); - - // DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789) - m_orphanage.LimitOrphans(m_opts.max_orphan_txs, m_rng); - } else { - LogDebug(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s (wtxid=%s)\n", - tx.GetHash().ToString(), - tx.GetWitnessHash().ToString()); - // We will continue to reject this tx since it has rejected - // parents so avoid re-requesting it from other peers. 
- // Here we add both the txid and the wtxid, as we know that - // regardless of what witness is provided, we will not accept - // this, so we don't need to allow for redownload of this txid - // from any of our non-wtxidrelay peers. - RecentRejectsFilter().insert(tx.GetHash().ToUint256()); - RecentRejectsFilter().insert(tx.GetWitnessHash().ToUint256()); - m_txrequest.ForgetTxHash(tx.GetHash()); - m_txrequest.ForgetTxHash(tx.GetWitnessHash()); - } - } if (state.IsInvalid()) { - ProcessInvalidTx(pfrom.GetId(), ptx, state, /*maybe_add_extra_compact_tx=*/true); - } - // When a transaction fails for TX_RECONSIDERABLE, look for a matching child in the - // orphanage, as it is possible that they succeed as a package. - if (state.GetResult() == TxValidationResult::TX_RECONSIDERABLE) { - LogDebug(BCLog::TXPACKAGES, "tx %s (wtxid=%s) failed but reconsiderable, looking for child in orphanage\n", - txid.ToString(), wtxid.ToString()); - if (auto package_to_validate{Find1P1CPackage(ptx, pfrom.GetId())}) { + if (auto package_to_validate{ProcessInvalidTx(pfrom.GetId(), ptx, state, /*first_time_failure=*/true)}) { const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)}; LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(), package_result.m_state.IsValid() ? "package accepted" : "package rejected"); @@ -5319,16 +4867,16 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if (msg_type == NetMsgType::NOTFOUND) { std::vector vInv; vRecv >> vInv; - if (vInv.size() <= MAX_PEER_TX_ANNOUNCEMENTS + MAX_BLOCKS_IN_TRANSIT_PER_PEER) { - LOCK(m_tx_download_mutex); + std::vector tx_invs; + if (vInv.size() <= node::MAX_PEER_TX_ANNOUNCEMENTS + MAX_BLOCKS_IN_TRANSIT_PER_PEER) { for (CInv &inv : vInv) { if (inv.IsGenTxMsg()) { - // If we receive a NOTFOUND message for a tx we requested, mark the announcement for it as - // completed in TxRequestTracker. - m_txrequest.ReceivedResponse(pfrom.GetId(), inv.hash); + tx_invs.emplace_back(inv.hash); } } } + LOCK(m_tx_download_mutex); + m_txdownloadman.ReceivedNotFound(pfrom.GetId(), tx_invs); return; } @@ -5447,7 +4995,7 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic& interrupt // the extra work may not be noticed, possibly resulting in an // unnecessary 100ms delay) LOCK(m_tx_download_mutex); - if (m_orphanage.HaveTxToReconsider(peer->m_id)) fMoreWork = true; + if (m_txdownloadman.HaveMoreWork(peer->m_id)) fMoreWork = true; } catch (const std::exception& e) { LogDebug(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name()); } catch (...) { @@ -6343,31 +5891,14 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // { LOCK(m_tx_download_mutex); - std::vector> expired; - auto requestable = m_txrequest.GetRequestable(pto->GetId(), current_time, &expired); - for (const auto& entry : expired) { - LogDebug(BCLog::NET, "timeout of inflight %s %s from peer=%d\n", entry.second.IsWtxid() ? "wtx" : "tx", - entry.second.GetHash().ToString(), entry.first); - } - for (const GenTxid& gtxid : requestable) { - // Exclude m_lazy_recent_rejects_reconsiderable: we may be requesting a missing parent - // that was previously rejected for being too low feerate. - if (!AlreadyHaveTx(gtxid, /*include_reconsiderable=*/false)) { - LogDebug(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? 
"wtx" : "tx", - gtxid.GetHash().ToString(), pto->GetId()); - vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*peer)), gtxid.GetHash()); - if (vGetData.size() >= MAX_GETDATA_SZ) { - MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData); - vGetData.clear(); - } - m_txrequest.RequestedTx(pto->GetId(), gtxid.GetHash(), current_time + GETDATA_TX_INTERVAL); - } else { - // We have already seen this transaction, no need to download. This is just a belt-and-suspenders, as - // this should already be called whenever a transaction becomes AlreadyHaveTx(). - m_txrequest.ForgetTxHash(gtxid.GetHash()); + for (const GenTxid& gtxid : m_txdownloadman.GetRequestsToSend(pto->GetId(), current_time)) { + vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*peer)), gtxid.GetHash()); + if (vGetData.size() >= MAX_GETDATA_SZ) { + MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData); + vGetData.clear(); } } - } // release m_tx_download_mutex + } if (!vGetData.empty()) MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData); diff --git a/src/node/txdownloadman.h b/src/node/txdownloadman.h new file mode 100644 index 0000000000..28ca90c554 --- /dev/null +++ b/src/node/txdownloadman.h @@ -0,0 +1,178 @@ +// Copyright (c) 2024 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_NODE_TXDOWNLOADMAN_H +#define BITCOIN_NODE_TXDOWNLOADMAN_H + +#include +#include +#include + +#include +#include + +class CBlock; +class CRollingBloomFilter; +class CTxMemPool; +class GenTxid; +class TxRequestTracker; +namespace node { +class TxDownloadManagerImpl; + +/** Maximum number of in-flight transaction requests from a peer. It is not a hard limit, but the threshold at which + * point the OVERLOADED_PEER_TX_DELAY kicks in. */ +static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT = 100; +/** Maximum number of transactions to consider for requesting, per peer. It provides a reasonable DoS limit to + * per-peer memory usage spent on announcements, while covering peers continuously sending INVs at the maximum + * rate (by our own policy, see INVENTORY_BROADCAST_PER_SECOND) for several minutes, while not receiving + * the actual transaction (from any peer) in response to requests for them. */ +static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 5000; +/** How long to delay requesting transactions via txids, if we have wtxid-relaying peers */ +static constexpr auto TXID_RELAY_DELAY{2s}; +/** How long to delay requesting transactions from non-preferred peers */ +static constexpr auto NONPREF_PEER_TX_DELAY{2s}; +/** How long to delay requesting transactions from overloaded peers (see MAX_PEER_TX_REQUEST_IN_FLIGHT). */ +static constexpr auto OVERLOADED_PEER_TX_DELAY{2s}; +/** How long to wait before downloading a transaction from an additional peer */ +static constexpr auto GETDATA_TX_INTERVAL{60s}; +struct TxDownloadOptions { + /** Read-only reference to mempool. */ + const CTxMemPool& m_mempool; + /** RNG provided by caller. */ + FastRandomContext& m_rng; + /** Maximum number of transactions allowed in orphanage. */ + const uint32_t m_max_orphan_txs; + /** Instantiate TxRequestTracker as deterministic (used for tests). */ + bool m_deterministic_txrequest{false}; +}; +struct TxDownloadConnectionInfo { + /** Whether this peer is preferred for transaction download. */ + const bool m_preferred; + /** Whether this peer has Relay permissions. 
*/ + const bool m_relay_permissions; + /** Whether this peer supports wtxid relay. */ + const bool m_wtxid_relay; +}; +struct PackageToValidate { + Package m_txns; + std::vector m_senders; + /** Construct a 1-parent-1-child package. */ + explicit PackageToValidate(const CTransactionRef& parent, + const CTransactionRef& child, + NodeId parent_sender, + NodeId child_sender) : + m_txns{parent, child}, + m_senders{parent_sender, child_sender} + {} + + // Move ctor + PackageToValidate(PackageToValidate&& other) : m_txns{std::move(other.m_txns)}, m_senders{std::move(other.m_senders)} {} + // Copy ctor + PackageToValidate(const PackageToValidate& other) = default; + + // Move assignment + PackageToValidate& operator=(PackageToValidate&& other) { + this->m_txns = std::move(other.m_txns); + this->m_senders = std::move(other.m_senders); + return *this; + } + + std::string ToString() const { + Assume(m_txns.size() == 2); + return strprintf("parent %s (wtxid=%s, sender=%d) + child %s (wtxid=%s, sender=%d)", + m_txns.front()->GetHash().ToString(), + m_txns.front()->GetWitnessHash().ToString(), + m_senders.front(), + m_txns.back()->GetHash().ToString(), + m_txns.back()->GetWitnessHash().ToString(), + m_senders.back()); + } +}; +struct RejectedTxTodo +{ + bool m_should_add_extra_compact_tx; + std::vector m_unique_parents; + std::optional m_package_to_validate; +}; + + +/** + * Class responsible for deciding what transactions to request and, once + * downloaded, whether and how to validate them. It is also responsible for + * deciding what transaction packages to validate and how to resolve orphan + * transactions. Its data structures include TxRequestTracker for scheduling + * requests, rolling bloom filters for remembering transactions that have + * already been {accepted, rejected, confirmed}, an orphanage, and a registry of + * each peer's transaction relay-related information. + * + * Caller needs to interact with TxDownloadManager: + * - ValidationInterface callbacks. + * - When a potential transaction relay peer connects or disconnects. + * - When a transaction or package is accepted or rejected from mempool + * - When a inv, notfound, or tx message is received + * - To get instructions for which getdata messages to send + * + * This class is not thread-safe. Access must be synchronized using an + * external mutex. + */ +class TxDownloadManager { + const std::unique_ptr m_impl; + +public: + explicit TxDownloadManager(const TxDownloadOptions& options); + ~TxDownloadManager(); + + // Responses to chain events. TxDownloadManager is not an actual client of ValidationInterface, these are called through PeerManager. + void ActiveTipChange(); + void BlockConnected(const std::shared_ptr& pblock); + void BlockDisconnected(); + + /** Creates a new PeerInfo. Saves the connection info to calculate tx announcement delays later. */ + void ConnectedPeer(NodeId nodeid, const TxDownloadConnectionInfo& info); + + /** Deletes all txrequest announcements and orphans for a given peer. */ + void DisconnectedPeer(NodeId nodeid); + + /** New inv has been received. May be added as a candidate to txrequest. + * @param[in] p2p_inv When true, only add this announcement if we don't already have the tx. + * Returns true if this was a dropped inv (p2p_inv=true and we already have the tx), false otherwise. */ + bool AddTxAnnouncement(NodeId peer, const GenTxid& gtxid, std::chrono::microseconds now, bool p2p_inv); + + /** Get getdata requests to send. 
*/ + std::vector GetRequestsToSend(NodeId nodeid, std::chrono::microseconds current_time); + + /** Should be called when a notfound for a tx has been received. */ + void ReceivedNotFound(NodeId nodeid, const std::vector& txhashes); + + /** Respond to successful transaction submission to mempool */ + void MempoolAcceptedTx(const CTransactionRef& tx); + + /** Respond to transaction rejected from mempool */ + RejectedTxTodo MempoolRejectedTx(const CTransactionRef& ptx, const TxValidationState& state, NodeId nodeid, bool first_time_failure); + + /** Respond to package rejected from mempool */ + void MempoolRejectedPackage(const Package& package); + + /** Marks a tx as ReceivedResponse in txrequest and checks whether AlreadyHaveTx. + * Return a bool indicating whether this tx should be validated. If false, optionally, a + * PackageToValidate. */ + std::pair> ReceivedTx(NodeId nodeid, const CTransactionRef& ptx); + + /** Whether there are any orphans to reconsider for this peer. */ + bool HaveMoreWork(NodeId nodeid) const; + + /** Returns next orphan tx to consider, or nullptr if none exist. */ + CTransactionRef GetTxToReconsider(NodeId nodeid); + + /** Check that all data structures are empty. */ + void CheckIsEmpty() const; + + /** Check that all data structures that track per-peer information have nothing for this peer. */ + void CheckIsEmpty(NodeId nodeid) const; + + /** Wrapper for TxOrphanage::GetOrphanTransactions */ + std::vector GetOrphanTransactions() const; +}; +} // namespace node +#endif // BITCOIN_NODE_TXDOWNLOADMAN_H diff --git a/src/node/txdownloadman_impl.cpp b/src/node/txdownloadman_impl.cpp new file mode 100644 index 0000000000..f9635d049a --- /dev/null +++ b/src/node/txdownloadman_impl.cpp @@ -0,0 +1,536 @@ +// Copyright (c) 2024 +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
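Before the implementation, a rough view of how net_processing drives the interface declared above, mirroring the hunks earlier in this patch (nodeid, gtxid, ptx and current_time are placeholder locals; every call is made under m_tx_download_mutex):

// Rough call pattern over a peer's lifetime, as wired up in net_processing above.
m_txdownloadman.ConnectedPeer(nodeid, node::TxDownloadConnectionInfo{
    .m_preferred = true, .m_relay_permissions = false, .m_wtxid_relay = true});
m_txdownloadman.AddTxAnnouncement(nodeid, gtxid, current_time, /*p2p_inv=*/true);
for (const GenTxid& gtxid_to_request : m_txdownloadman.GetRequestsToSend(nodeid, current_time)) {
    // queue a GETDATA entry for gtxid_to_request
}
const auto& [should_validate, package] = m_txdownloadman.ReceivedTx(nodeid, ptx);
// ... ProcessTransaction, then MempoolAcceptedTx / MempoolRejectedTx accordingly ...
m_txdownloadman.DisconnectedPeer(nodeid);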
+ +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace node { +// TxDownloadManager wrappers +TxDownloadManager::TxDownloadManager(const TxDownloadOptions& options) : + m_impl{std::make_unique(options)} +{} +TxDownloadManager::~TxDownloadManager() = default; + +void TxDownloadManager::ActiveTipChange() +{ + m_impl->ActiveTipChange(); +} +void TxDownloadManager::BlockConnected(const std::shared_ptr& pblock) +{ + m_impl->BlockConnected(pblock); +} +void TxDownloadManager::BlockDisconnected() +{ + m_impl->BlockDisconnected(); +} +void TxDownloadManager::ConnectedPeer(NodeId nodeid, const TxDownloadConnectionInfo& info) +{ + m_impl->ConnectedPeer(nodeid, info); +} +void TxDownloadManager::DisconnectedPeer(NodeId nodeid) +{ + m_impl->DisconnectedPeer(nodeid); +} +bool TxDownloadManager::AddTxAnnouncement(NodeId peer, const GenTxid& gtxid, std::chrono::microseconds now, bool p2p_inv) +{ + return m_impl->AddTxAnnouncement(peer, gtxid, now, p2p_inv); +} +std::vector TxDownloadManager::GetRequestsToSend(NodeId nodeid, std::chrono::microseconds current_time) +{ + return m_impl->GetRequestsToSend(nodeid, current_time); +} +void TxDownloadManager::ReceivedNotFound(NodeId nodeid, const std::vector& txhashes) +{ + m_impl->ReceivedNotFound(nodeid, txhashes); +} +void TxDownloadManager::MempoolAcceptedTx(const CTransactionRef& tx) +{ + m_impl->MempoolAcceptedTx(tx); +} +RejectedTxTodo TxDownloadManager::MempoolRejectedTx(const CTransactionRef& ptx, const TxValidationState& state, NodeId nodeid, bool first_time_failure) +{ + return m_impl->MempoolRejectedTx(ptx, state, nodeid, first_time_failure); +} +void TxDownloadManager::MempoolRejectedPackage(const Package& package) +{ + m_impl->MempoolRejectedPackage(package); +} +std::pair> TxDownloadManager::ReceivedTx(NodeId nodeid, const CTransactionRef& ptx) +{ + return m_impl->ReceivedTx(nodeid, ptx); +} +bool TxDownloadManager::HaveMoreWork(NodeId nodeid) const +{ + return m_impl->HaveMoreWork(nodeid); +} +CTransactionRef TxDownloadManager::GetTxToReconsider(NodeId nodeid) +{ + return m_impl->GetTxToReconsider(nodeid); +} +void TxDownloadManager::CheckIsEmpty() const +{ + m_impl->CheckIsEmpty(); +} +void TxDownloadManager::CheckIsEmpty(NodeId nodeid) const +{ + m_impl->CheckIsEmpty(nodeid); +} +std::vector TxDownloadManager::GetOrphanTransactions() const +{ + return m_impl->GetOrphanTransactions(); +} + +// TxDownloadManagerImpl +void TxDownloadManagerImpl::ActiveTipChange() +{ + RecentRejectsFilter().reset(); + RecentRejectsReconsiderableFilter().reset(); +} + +void TxDownloadManagerImpl::BlockConnected(const std::shared_ptr& pblock) +{ + m_orphanage.EraseForBlock(*pblock); + + for (const auto& ptx : pblock->vtx) { + RecentConfirmedTransactionsFilter().insert(ptx->GetHash().ToUint256()); + if (ptx->HasWitness()) { + RecentConfirmedTransactionsFilter().insert(ptx->GetWitnessHash().ToUint256()); + } + m_txrequest.ForgetTxHash(ptx->GetHash()); + m_txrequest.ForgetTxHash(ptx->GetWitnessHash()); + } +} + +void TxDownloadManagerImpl::BlockDisconnected() +{ + // To avoid relay problems with transactions that were previously + // confirmed, clear our filter of recently confirmed transactions whenever + // there's a reorg. 
+ // This means that in a 1-block reorg (where 1 block is disconnected and + // then another block reconnected), our filter will drop to having only one + // block's worth of transactions in it, but that should be fine, since + // presumably the most common case of relaying a confirmed transaction + // should be just after a new block containing it is found. + RecentConfirmedTransactionsFilter().reset(); +} + +bool TxDownloadManagerImpl::AlreadyHaveTx(const GenTxid& gtxid, bool include_reconsiderable) +{ + const uint256& hash = gtxid.GetHash(); + + if (gtxid.IsWtxid()) { + // Normal query by wtxid. + if (m_orphanage.HaveTx(Wtxid::FromUint256(hash))) return true; + } else { + // Never query by txid: it is possible that the transaction in the orphanage has the same + // txid but a different witness, which would give us a false positive result. If we decided + // not to request the transaction based on this result, an attacker could prevent us from + // downloading a transaction by intentionally creating a malleated version of it. While + // only one (or none!) of these transactions can ultimately be confirmed, we have no way of + // discerning which one that is, so the orphanage can store multiple transactions with the + // same txid. + // + // While we won't query by txid, we can try to "guess" what the wtxid is based on the txid. + // A non-segwit transaction's txid == wtxid. Query this txid "casted" to a wtxid. This will + // help us find non-segwit transactions, saving bandwidth, and should have no false positives. + if (m_orphanage.HaveTx(Wtxid::FromUint256(hash))) return true; + } + + if (include_reconsiderable && RecentRejectsReconsiderableFilter().contains(hash)) return true; + + if (RecentConfirmedTransactionsFilter().contains(hash)) return true; + + return RecentRejectsFilter().contains(hash) || m_opts.m_mempool.exists(gtxid); +} + +void TxDownloadManagerImpl::ConnectedPeer(NodeId nodeid, const TxDownloadConnectionInfo& info) +{ + // If already connected (shouldn't happen in practice), exit early. + if (m_peer_info.contains(nodeid)) return; + + m_peer_info.try_emplace(nodeid, info); + if (info.m_wtxid_relay) m_num_wtxid_peers += 1; +} + +void TxDownloadManagerImpl::DisconnectedPeer(NodeId nodeid) +{ + m_orphanage.EraseForPeer(nodeid); + m_txrequest.DisconnectedPeer(nodeid); + + if (auto it = m_peer_info.find(nodeid); it != m_peer_info.end()) { + if (it->second.m_connection_info.m_wtxid_relay) m_num_wtxid_peers -= 1; + m_peer_info.erase(it); + } + +} + +bool TxDownloadManagerImpl::AddTxAnnouncement(NodeId peer, const GenTxid& gtxid, std::chrono::microseconds now, bool p2p_inv) +{ + // If this is an inv received from a peer and we already have it, we can drop it. + // If this is a request for the parent of an orphan, we don't drop transactions that we already have. In particular, + // we *do* want to request parents that are in m_lazy_recent_rejects_reconsiderable, since they can be CPFP'd. 
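To make the scheduling computed a few lines below concrete: the delays are additive, so a txid announcement from a non-preferred peer while wtxid-relay peers are connected is not requested until 4 seconds after receipt, and the same announcement from an overloaded peer would be pushed out to 6 seconds. A worked example using the constants from txdownloadman.h above:

// Worked example of the reqtime computed below.
auto delay{0us};
delay += NONPREF_PEER_TX_DELAY;       // +2s: announcement from a non-preferred connection
delay += TXID_RELAY_DELAY;            // +2s: txid (not wtxid) announcement while wtxid peers exist
// delay += OVERLOADED_PEER_TX_DELAY; // a further +2s once >=100 requests are in flight to the peer
// reqtime = now + delay              // here: now + 4s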
+ if (p2p_inv && AlreadyHaveTx(gtxid, /*include_reconsiderable=*/true)) return true; + + auto it = m_peer_info.find(peer); + if (it == m_peer_info.end()) return false; + const auto& info = it->second.m_connection_info; + if (!info.m_relay_permissions && m_txrequest.Count(peer) >= MAX_PEER_TX_ANNOUNCEMENTS) { + // Too many queued announcements for this peer + return false; + } + // Decide the TxRequestTracker parameters for this announcement: + // - "preferred": if fPreferredDownload is set (= outbound, or NetPermissionFlags::NoBan permission) + // - "reqtime": current time plus delays for: + // - NONPREF_PEER_TX_DELAY for announcements from non-preferred connections + // - TXID_RELAY_DELAY for txid announcements while wtxid peers are available + // - OVERLOADED_PEER_TX_DELAY for announcements from peers which have at least + // MAX_PEER_TX_REQUEST_IN_FLIGHT requests in flight (and don't have NetPermissionFlags::Relay). + auto delay{0us}; + if (!info.m_preferred) delay += NONPREF_PEER_TX_DELAY; + if (!gtxid.IsWtxid() && m_num_wtxid_peers > 0) delay += TXID_RELAY_DELAY; + const bool overloaded = !info.m_relay_permissions && m_txrequest.CountInFlight(peer) >= MAX_PEER_TX_REQUEST_IN_FLIGHT; + if (overloaded) delay += OVERLOADED_PEER_TX_DELAY; + + m_txrequest.ReceivedInv(peer, gtxid, info.m_preferred, now + delay); + + return false; +} + +std::vector TxDownloadManagerImpl::GetRequestsToSend(NodeId nodeid, std::chrono::microseconds current_time) +{ + std::vector requests; + std::vector> expired; + auto requestable = m_txrequest.GetRequestable(nodeid, current_time, &expired); + for (const auto& entry : expired) { + LogDebug(BCLog::NET, "timeout of inflight %s %s from peer=%d\n", entry.second.IsWtxid() ? "wtx" : "tx", + entry.second.GetHash().ToString(), entry.first); + } + for (const GenTxid& gtxid : requestable) { + if (!AlreadyHaveTx(gtxid, /*include_reconsiderable=*/false)) { + LogDebug(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx", + gtxid.GetHash().ToString(), nodeid); + requests.emplace_back(gtxid); + m_txrequest.RequestedTx(nodeid, gtxid.GetHash(), current_time + GETDATA_TX_INTERVAL); + } else { + // We have already seen this transaction, no need to download. This is just a belt-and-suspenders, as + // this should already be called whenever a transaction becomes AlreadyHaveTx(). + m_txrequest.ForgetTxHash(gtxid.GetHash()); + } + } + return requests; +} + +void TxDownloadManagerImpl::ReceivedNotFound(NodeId nodeid, const std::vector& txhashes) +{ + for (const auto& txhash : txhashes) { + // If we receive a NOTFOUND message for a tx we requested, mark the announcement for it as + // completed in TxRequestTracker. + m_txrequest.ReceivedResponse(nodeid, txhash); + } +} + +std::optional TxDownloadManagerImpl::Find1P1CPackage(const CTransactionRef& ptx, NodeId nodeid) +{ + const auto& parent_wtxid{ptx->GetWitnessHash()}; + + Assume(RecentRejectsReconsiderableFilter().contains(parent_wtxid.ToUint256())); + + // Prefer children from this peer. This helps prevent censorship attempts in which an attacker + // sends lots of fake children for the parent, and we (unluckily) keep selecting the fake + // children instead of the real one provided by the honest peer. + const auto cpfp_candidates_same_peer{m_orphanage.GetChildrenFromSamePeer(ptx, nodeid)}; + + // These children should be sorted from newest to oldest. In the (probably uncommon) case + // of children that replace each other, this helps us accept the highest feerate (probably the + // most recent) one efficiently. 
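The reqtime computation in AddTxAnnouncement() above simply sums whichever penalties apply, so a non-preferred, overloaded peer announcing by txid waits for all three delays. A standalone sketch of that composition; the constant values and the in-flight cap below are illustrative placeholders, not the definitions the patch uses:

    #include <chrono>

    using namespace std::chrono_literals;

    constexpr auto NONPREF_DELAY_SKETCH{2s};     // placeholder values for illustration only
    constexpr auto TXID_DELAY_SKETCH{2s};
    constexpr auto OVERLOADED_DELAY_SKETCH{2s};
    constexpr int MAX_IN_FLIGHT_SKETCH{100};

    std::chrono::microseconds AnnouncementDelay(bool preferred, bool is_wtxid, int num_wtxid_peers,
                                                bool relay_permissions, int requests_in_flight)
    {
        std::chrono::microseconds delay{0};
        if (!preferred) delay += NONPREF_DELAY_SKETCH;                    // non-preferred connection
        if (!is_wtxid && num_wtxid_peers > 0) delay += TXID_DELAY_SKETCH; // txid announcement while wtxid peers exist
        const bool overloaded{!relay_permissions && requests_in_flight >= MAX_IN_FLIGHT_SKETCH};
        if (overloaded) delay += OVERLOADED_DELAY_SKETCH;                 // too many requests already in flight
        return delay;
    }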
+ for (const auto& child : cpfp_candidates_same_peer) { + Package maybe_cpfp_package{ptx, child}; + if (!RecentRejectsReconsiderableFilter().contains(GetPackageHash(maybe_cpfp_package)) && + !RecentRejectsFilter().contains(child->GetHash().ToUint256())) { + return PackageToValidate{ptx, child, nodeid, nodeid}; + } + } + + // If no suitable candidate from the same peer is found, also try children that were provided by + // a different peer. This is useful because sometimes multiple peers announce both transactions + // to us, and we happen to download them from different peers (we wouldn't have known that these + // 2 transactions are related). We still want to find 1p1c packages then. + // + // If we start tracking all announcers of orphans, we can restrict this logic to parent + child + // pairs in which both were provided by the same peer, i.e. delete this step. + const auto cpfp_candidates_different_peer{m_orphanage.GetChildrenFromDifferentPeer(ptx, nodeid)}; + + // Find the first 1p1c that hasn't already been rejected. We randomize the order to not + // create a bias that attackers can use to delay package acceptance. + // + // Create a random permutation of the indices. + std::vector tx_indices(cpfp_candidates_different_peer.size()); + std::iota(tx_indices.begin(), tx_indices.end(), 0); + std::shuffle(tx_indices.begin(), tx_indices.end(), m_opts.m_rng); + + for (const auto index : tx_indices) { + // If we already tried a package and failed for any reason, the combined hash was + // cached in m_lazy_recent_rejects_reconsiderable. + const auto [child_tx, child_sender] = cpfp_candidates_different_peer.at(index); + Package maybe_cpfp_package{ptx, child_tx}; + if (!RecentRejectsReconsiderableFilter().contains(GetPackageHash(maybe_cpfp_package)) && + !RecentRejectsFilter().contains(child_tx->GetHash().ToUint256())) { + return PackageToValidate{ptx, child_tx, nodeid, child_sender}; + } + } + return std::nullopt; +} + +void TxDownloadManagerImpl::MempoolAcceptedTx(const CTransactionRef& tx) +{ + // As this version of the transaction was acceptable, we can forget about any requests for it. + // No-op if the tx is not in txrequest. + m_txrequest.ForgetTxHash(tx->GetHash()); + m_txrequest.ForgetTxHash(tx->GetWitnessHash()); + + m_orphanage.AddChildrenToWorkSet(*tx); + // If it came from the orphanage, remove it. No-op if the tx is not in txorphanage. + m_orphanage.EraseTx(tx->GetWitnessHash()); +} + +node::RejectedTxTodo TxDownloadManagerImpl::MempoolRejectedTx(const CTransactionRef& ptx, const TxValidationState& state, NodeId nodeid, bool first_time_failure) +{ + const CTransaction& tx{*ptx}; + // Results returned to caller + // Whether we should call AddToCompactExtraTransactions at the end + bool add_extra_compact_tx{first_time_failure}; + // Hashes to pass to AddKnownTx later + std::vector unique_parents; + // Populated if failure is reconsiderable and eligible package is found. + std::optional package_to_validate; + + if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) { + // Only process a new orphan if this is a first time failure, as otherwise it must be either + // already in orphanage or from 1p1c processing. + if (first_time_failure && !RecentRejectsFilter().contains(ptx->GetWitnessHash().ToUint256())) { + bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected + + // Deduplicate parent txids, so that we don't have to loop over + // the same parent txid more than once down below. 
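Find1P1CPackage() above deliberately randomizes the order in which other peers' children are tried, so an attacker flooding us with fake children cannot bias which combination is attempted first. A self-contained sketch of that shuffle-then-scan pattern (the string candidates, the rejected list and std::mt19937 are stand-ins for the real transaction, filter and RNG types):

    #include <algorithm>
    #include <cstddef>
    #include <numeric>
    #include <random>
    #include <string>
    #include <vector>

    std::string PickUnrejectedCandidate(const std::vector<std::string>& candidates,
                                        const std::vector<std::string>& already_rejected,
                                        std::mt19937& rng)
    {
        // Visit candidates in a random permutation of their indices.
        std::vector<size_t> indices(candidates.size());
        std::iota(indices.begin(), indices.end(), size_t{0});
        std::shuffle(indices.begin(), indices.end(), rng);

        for (const auto i : indices) {
            const auto& candidate = candidates.at(i);
            if (std::find(already_rejected.begin(), already_rejected.end(), candidate) == already_rejected.end()) {
                return candidate; // first combination we have not already tried
            }
        }
        return {}; // nothing usable; mirrors returning std::nullopt
    }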
+ unique_parents.reserve(tx.vin.size()); + for (const CTxIn& txin : tx.vin) { + // We start with all parents, and then remove duplicates below. + unique_parents.push_back(txin.prevout.hash); + } + std::sort(unique_parents.begin(), unique_parents.end()); + unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end()); + + // Distinguish between parents in m_lazy_recent_rejects and m_lazy_recent_rejects_reconsiderable. + // We can tolerate having up to 1 parent in m_lazy_recent_rejects_reconsiderable since we + // submit 1p1c packages. However, fail immediately if any are in m_lazy_recent_rejects. + std::optional rejected_parent_reconsiderable; + for (const uint256& parent_txid : unique_parents) { + if (RecentRejectsFilter().contains(parent_txid)) { + fRejectedParents = true; + break; + } else if (RecentRejectsReconsiderableFilter().contains(parent_txid) && + !m_opts.m_mempool.exists(GenTxid::Txid(parent_txid))) { + // More than 1 parent in m_lazy_recent_rejects_reconsiderable: 1p1c will not be + // sufficient to accept this package, so just give up here. + if (rejected_parent_reconsiderable.has_value()) { + fRejectedParents = true; + break; + } + rejected_parent_reconsiderable = parent_txid; + } + } + if (!fRejectedParents) { + const auto current_time{GetTime()}; + + for (const uint256& parent_txid : unique_parents) { + // Here, we only have the txid (and not wtxid) of the + // inputs, so we only request in txid mode, even for + // wtxidrelay peers. + // Eventually we should replace this with an improved + // protocol for getting all unconfirmed parents. + const auto gtxid{GenTxid::Txid(parent_txid)}; + // Exclude m_lazy_recent_rejects_reconsiderable: the missing parent may have been + // previously rejected for being too low feerate. This orphan might CPFP it. + if (!AlreadyHaveTx(gtxid, /*include_reconsiderable=*/false)) { + AddTxAnnouncement(nodeid, gtxid, current_time, /*p2p_inv=*/false); + } + } + + // Potentially flip add_extra_compact_tx to false if AddTx returns false because the tx was already there + add_extra_compact_tx &= m_orphanage.AddTx(ptx, nodeid); + + // Once added to the orphan pool, a tx is considered AlreadyHave, and we shouldn't request it anymore. + m_txrequest.ForgetTxHash(tx.GetHash()); + m_txrequest.ForgetTxHash(tx.GetWitnessHash()); + + // DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789) + m_orphanage.LimitOrphans(m_opts.m_max_orphan_txs, m_opts.m_rng); + } else { + unique_parents.clear(); + LogDebug(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s (wtxid=%s)\n", + tx.GetHash().ToString(), + tx.GetWitnessHash().ToString()); + // We will continue to reject this tx since it has rejected + // parents so avoid re-requesting it from other peers. + // Here we add both the txid and the wtxid, as we know that + // regardless of what witness is provided, we will not accept + // this, so we don't need to allow for redownload of this txid + // from any of our non-wtxidrelay peers. + RecentRejectsFilter().insert(tx.GetHash().ToUint256()); + RecentRejectsFilter().insert(tx.GetWitnessHash().ToUint256()); + m_txrequest.ForgetTxHash(tx.GetHash()); + m_txrequest.ForgetTxHash(tx.GetWitnessHash()); + } + } + } else if (state.GetResult() == TxValidationResult::TX_WITNESS_STRIPPED) { + add_extra_compact_tx = false; + } else { + // We can add the wtxid of this transaction to our reject filter. 
+ // Do not add txids of witness transactions or witness-stripped + // transactions to the filter, as they can have been malleated; + // adding such txids to the reject filter would potentially + // interfere with relay of valid transactions from peers that + // do not support wtxid-based relay. See + // https://github.com/bitcoin/bitcoin/issues/8279 for details. + // We can remove this restriction (and always add wtxids to + // the filter even for witness stripped transactions) once + // wtxid-based relay is broadly deployed. + // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034 + // for concerns around weakening security of unupgraded nodes + // if we start doing this too early. + if (state.GetResult() == TxValidationResult::TX_RECONSIDERABLE) { + // If the result is TX_RECONSIDERABLE, add it to m_lazy_recent_rejects_reconsiderable + // because we should not download or submit this transaction by itself again, but may + // submit it as part of a package later. + RecentRejectsReconsiderableFilter().insert(ptx->GetWitnessHash().ToUint256()); + + if (first_time_failure) { + // When a transaction fails for TX_RECONSIDERABLE, look for a matching child in the + // orphanage, as it is possible that they succeed as a package. + LogDebug(BCLog::TXPACKAGES, "tx %s (wtxid=%s) failed but reconsiderable, looking for child in orphanage\n", + ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString()); + package_to_validate = Find1P1CPackage(ptx, nodeid); + } + } else { + RecentRejectsFilter().insert(ptx->GetWitnessHash().ToUint256()); + } + m_txrequest.ForgetTxHash(ptx->GetWitnessHash()); + // If the transaction failed for TX_INPUTS_NOT_STANDARD, + // then we know that the witness was irrelevant to the policy + // failure, since this check depends only on the txid + // (the scriptPubKey being spent is covered by the txid). + // Add the txid to the reject filter to prevent repeated + // processing of this transaction in the event that child + // transactions are later received (resulting in + // parent-fetching by txid via the orphan-handling logic). + // We only add the txid if it differs from the wtxid, to avoid wasting entries in the + // rolling bloom filter. + if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && ptx->HasWitness()) { + RecentRejectsFilter().insert(ptx->GetHash().ToUint256()); + m_txrequest.ForgetTxHash(ptx->GetHash()); + } + } + + // If the tx failed in ProcessOrphanTx, it should be removed from the orphanage unless the + // tx was still missing inputs. If the tx was not in the orphanage, EraseTx does nothing and returns 0. 
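Summarizing the branches above: which filter a failed transaction's hashes end up in depends entirely on the validation result. A simplified standalone model of just that caching decision; the hash strings and enum are stand-ins, and the accompanying m_txrequest and orphanage bookkeeping is omitted:

    #include <string>
    #include <vector>

    // "reject" models m_lazy_recent_rejects, "reconsider" models m_lazy_recent_rejects_reconsiderable.
    enum class Failure { MissingInputs, WitnessStripped, Reconsiderable, InputsNotStandard, Other };

    struct RejectCaching {
        std::vector<std::string> reject;
        std::vector<std::string> reconsider;
    };

    RejectCaching ModelRejectCaching(Failure failure, const std::string& txid, const std::string& wtxid,
                                     bool has_witness, bool parents_rejected)
    {
        RejectCaching out;
        switch (failure) {
        case Failure::MissingInputs:
            // Orphan handling; only cached when we give up because a parent was already rejected.
            if (parents_rejected) out.reject = {txid, wtxid};
            break;
        case Failure::WitnessStripped:
            // Cannot tell whether the witness-carrying version is valid, so cache nothing.
            break;
        case Failure::Reconsiderable:
            // May still succeed later inside a CPFP package; only block solo resubmission.
            out.reconsider = {wtxid};
            break;
        case Failure::InputsNotStandard:
            // The failure is independent of the witness, so the txid can be cached too
            // (skipped when txid == wtxid to avoid wasting filter entries).
            out.reject = has_witness ? std::vector<std::string>{wtxid, txid}
                                     : std::vector<std::string>{wtxid};
            break;
        case Failure::Other:
            out.reject = {wtxid};
            break;
        }
        return out;
    }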
+ if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS && m_orphanage.EraseTx(ptx->GetWitnessHash()) > 0) { + LogDebug(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s)\n", ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString()); + } + + return RejectedTxTodo{ + .m_should_add_extra_compact_tx = add_extra_compact_tx, + .m_unique_parents = std::move(unique_parents), + .m_package_to_validate = std::move(package_to_validate) + }; +} + +void TxDownloadManagerImpl::MempoolRejectedPackage(const Package& package) +{ + RecentRejectsReconsiderableFilter().insert(GetPackageHash(package)); +} + +std::pair> TxDownloadManagerImpl::ReceivedTx(NodeId nodeid, const CTransactionRef& ptx) +{ + const uint256& txid = ptx->GetHash(); + const uint256& wtxid = ptx->GetWitnessHash(); + + // Mark that we have received a response + m_txrequest.ReceivedResponse(nodeid, txid); + if (ptx->HasWitness()) m_txrequest.ReceivedResponse(nodeid, wtxid); + + // First check if we should drop this tx. + // We do the AlreadyHaveTx() check using wtxid, rather than txid - in the + // absence of witness malleation, this is strictly better, because the + // recent rejects filter may contain the wtxid but rarely contains + // the txid of a segwit transaction that has been rejected. + // In the presence of witness malleation, it's possible that by only + // doing the check with wtxid, we could overlook a transaction which + // was confirmed with a different witness, or exists in our mempool + // with a different witness, but this has limited downside: + // mempool validation does its own lookup of whether we have the txid + // already; and an adversary can already relay us old transactions + // (older than our recency filter) if trying to DoS us, without any need + // for witness malleation. + if (AlreadyHaveTx(GenTxid::Wtxid(wtxid), /*include_reconsiderable=*/false)) { + // If a tx is detected by m_lazy_recent_rejects it is ignored. Because we haven't + // submitted the tx to our mempool, we won't have computed a DoS + // score for it or determined exactly why we consider it invalid. + // + // This means we won't penalize any peer subsequently relaying a DoSy + // tx (even if we penalized the first peer who gave it to us) because + // we have to account for m_lazy_recent_rejects showing false positives. In + // other words, we shouldn't penalize a peer if we aren't *sure* they + // submitted a DoSy tx. + // + // Note that m_lazy_recent_rejects doesn't just record DoSy or invalid + // transactions, but any tx not accepted by the mempool, which may be + // due to node policy (vs. consensus). So we can't blanket penalize a + // peer simply for relaying a tx that our m_lazy_recent_rejects has caught, + // regardless of false positives. + return {false, std::nullopt}; + } else if (RecentRejectsReconsiderableFilter().contains(wtxid)) { + // When a transaction is already in m_lazy_recent_rejects_reconsiderable, we shouldn't submit + // it by itself again. However, look for a matching child in the orphanage, as it is + // possible that they succeed as a package. 
+ LogDebug(BCLog::TXPACKAGES, "found tx %s (wtxid=%s) in reconsiderable rejects, looking for child in orphanage\n", + txid.ToString(), wtxid.ToString()); + return {false, Find1P1CPackage(ptx, nodeid)}; + } + + + return {true, std::nullopt}; +} + +bool TxDownloadManagerImpl::HaveMoreWork(NodeId nodeid) +{ + return m_orphanage.HaveTxToReconsider(nodeid); +} + +CTransactionRef TxDownloadManagerImpl::GetTxToReconsider(NodeId nodeid) +{ + return m_orphanage.GetTxToReconsider(nodeid); +} + +void TxDownloadManagerImpl::CheckIsEmpty(NodeId nodeid) +{ + assert(m_txrequest.Count(nodeid) == 0); +} +void TxDownloadManagerImpl::CheckIsEmpty() +{ + assert(m_orphanage.Size() == 0); + assert(m_txrequest.Size() == 0); + assert(m_num_wtxid_peers == 0); +} +std::vector TxDownloadManagerImpl::GetOrphanTransactions() const +{ + return m_orphanage.GetOrphanTransactions(); +} +} // namespace node diff --git a/src/node/txdownloadman_impl.h b/src/node/txdownloadman_impl.h new file mode 100644 index 0000000000..48f02e607a --- /dev/null +++ b/src/node/txdownloadman_impl.h @@ -0,0 +1,192 @@ +// Copyright (c) 2024 +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. +#ifndef BITCOIN_NODE_TXDOWNLOADMAN_IMPL_H +#define BITCOIN_NODE_TXDOWNLOADMAN_IMPL_H + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +class CTxMemPool; +namespace node { +class TxDownloadManagerImpl { +public: + TxDownloadOptions m_opts; + + /** Manages unvalidated tx data (orphan transactions for which we are downloading ancestors). */ + TxOrphanage m_orphanage; + /** Tracks candidates for requesting and downloading transaction data. */ + TxRequestTracker m_txrequest; + + /** + * Filter for transactions that were recently rejected by the mempool. + * These are not rerequested until the chain tip changes, at which point + * the entire filter is reset. + * + * Without this filter we'd be re-requesting txs from each of our peers, + * increasing bandwidth consumption considerably. For instance, with 100 + * peers, half of which relay a tx we don't accept, that might be a 50x + * bandwidth increase. A flooding attacker attempting to roll-over the + * filter using minimum-sized, 60byte, transactions might manage to send + * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a + * two minute window to send invs to us. + * + * Decreasing the false positive rate is fairly cheap, so we pick one in a + * million to make it highly unlikely for users to have issues with this + * filter. + * + * We typically only add wtxids to this filter. For non-segwit + * transactions, the txid == wtxid, so this only prevents us from + * re-downloading non-segwit transactions when communicating with + * non-wtxidrelay peers -- which is important for avoiding malleation + * attacks that could otherwise interfere with transaction relay from + * non-wtxidrelay peers. For communicating with wtxidrelay peers, having + * the reject filter store wtxids is exactly what we want to avoid + * redownload of a rejected transaction. + * + * In cases where we can tell that a segwit transaction will fail + * validation no matter the witness, we may add the txid of such + * transaction to the filter as well. This can be helpful when + * communicating with txid-relay peers or if we were to otherwise fetch a + * transaction via txid (eg in our orphan handling). 
+ * + * Memory used: 1.3 MB + */ + std::unique_ptr m_lazy_recent_rejects{nullptr}; + + CRollingBloomFilter& RecentRejectsFilter() + { + if (!m_lazy_recent_rejects) { + m_lazy_recent_rejects = std::make_unique(120'000, 0.000'001); + } + + return *m_lazy_recent_rejects; + } + + /** + * Filter for: + * (1) wtxids of transactions that were recently rejected by the mempool but are + * eligible for reconsideration if submitted with other transactions. + * (2) packages (see GetPackageHash) we have already rejected before and should not retry. + * + * Similar to m_lazy_recent_rejects, this filter is used to save bandwidth when e.g. all of our peers + * have larger mempools and thus lower minimum feerates than us. + * + * When a transaction's error is TxValidationResult::TX_RECONSIDERABLE (in a package or by + * itself), add its wtxid to this filter. When a package fails for any reason, add the combined + * hash to this filter. + * + * Upon receiving an announcement for a transaction, if it exists in this filter, do not + * download the txdata. When considering packages, if it exists in this filter, drop it. + * + * Reset this filter when the chain tip changes. + * + * Parameters are picked to be the same as m_lazy_recent_rejects, with the same rationale. + */ + std::unique_ptr m_lazy_recent_rejects_reconsiderable{nullptr}; + + CRollingBloomFilter& RecentRejectsReconsiderableFilter() + { + if (!m_lazy_recent_rejects_reconsiderable) { + m_lazy_recent_rejects_reconsiderable = std::make_unique(120'000, 0.000'001); + } + + return *m_lazy_recent_rejects_reconsiderable; + } + + /* + * Filter for transactions that have been recently confirmed. + * We use this to avoid requesting transactions that have already been + * confirmed. + * + * Blocks don't typically have more than 4000 transactions, so this should + * be at least six blocks (~1 hr) worth of transactions that we can store, + * inserting both a txid and wtxid for every observed transaction. + * If the number of transactions appearing in a block goes up, or if we are + * seeing getdata requests more than an hour after initial announcement, we + * can increase this number. + * The false positive rate of 1/1M should come out to less than 1 + * transaction per day that would be inadvertently ignored (which is the + * same probability that we have in the reject filter). + */ + std::unique_ptr m_lazy_recent_confirmed_transactions{nullptr}; + + CRollingBloomFilter& RecentConfirmedTransactionsFilter() + { + if (!m_lazy_recent_confirmed_transactions) { + m_lazy_recent_confirmed_transactions = std::make_unique(48'000, 0.000'001); + } + + return *m_lazy_recent_confirmed_transactions; + } + + TxDownloadManagerImpl(const TxDownloadOptions& options) : m_opts{options}, m_txrequest{options.m_deterministic_txrequest} {} + + struct PeerInfo { + /** Information relevant to scheduling tx requests. */ + const TxDownloadConnectionInfo m_connection_info; + + PeerInfo(const TxDownloadConnectionInfo& info) : m_connection_info{info} {} + }; + + /** Information for all of the peers we may download transactions from. This is not necessarily + * all peers we are connected to (no block-relay-only and temporary connections). */ + std::map m_peer_info; + + /** Number of wtxid relay peers we have in m_peer_info. 
*/ + uint32_t m_num_wtxid_peers{0}; + + void ActiveTipChange(); + void BlockConnected(const std::shared_ptr& pblock); + void BlockDisconnected(); + + /** Check whether we already have this gtxid in: + * - mempool + * - orphanage + * - m_recent_rejects + * - m_recent_rejects_reconsiderable (if include_reconsiderable = true) + * - m_recent_confirmed_transactions + * */ + bool AlreadyHaveTx(const GenTxid& gtxid, bool include_reconsiderable); + + void ConnectedPeer(NodeId nodeid, const TxDownloadConnectionInfo& info); + void DisconnectedPeer(NodeId nodeid); + + /** New inv has been received. May be added as a candidate to txrequest. */ + bool AddTxAnnouncement(NodeId peer, const GenTxid& gtxid, std::chrono::microseconds now, bool p2p_inv); + + /** Get getdata requests to send. */ + std::vector GetRequestsToSend(NodeId nodeid, std::chrono::microseconds current_time); + + /** Marks a tx as ReceivedResponse in txrequest. */ + void ReceivedNotFound(NodeId nodeid, const std::vector& txhashes); + + /** Look for a child of this transaction in the orphanage to form a 1-parent-1-child package, + * skipping any combinations that have already been tried. Return the resulting package along with + * the senders of its respective transactions, or std::nullopt if no package is found. */ + std::optional Find1P1CPackage(const CTransactionRef& ptx, NodeId nodeid); + + void MempoolAcceptedTx(const CTransactionRef& tx); + RejectedTxTodo MempoolRejectedTx(const CTransactionRef& ptx, const TxValidationState& state, NodeId nodeid, bool first_time_failure); + void MempoolRejectedPackage(const Package& package); + + std::pair> ReceivedTx(NodeId nodeid, const CTransactionRef& ptx); + + bool HaveMoreWork(NodeId nodeid); + CTransactionRef GetTxToReconsider(NodeId nodeid); + + void CheckIsEmpty(); + void CheckIsEmpty(NodeId nodeid); + + std::vector GetOrphanTransactions() const; +}; +} // namespace node +#endif // BITCOIN_NODE_TXDOWNLOADMAN_IMPL_H diff --git a/src/test/CMakeLists.txt b/src/test/CMakeLists.txt index c23fbae92f..c376c1905a 100644 --- a/src/test/CMakeLists.txt +++ b/src/test/CMakeLists.txt @@ -125,6 +125,7 @@ add_executable(test_bitcoin torcontrol_tests.cpp transaction_tests.cpp translation_tests.cpp + txdownload_tests.cpp txindex_tests.cpp txpackage_tests.cpp txreconciliation_tests.cpp diff --git a/src/test/fuzz/CMakeLists.txt b/src/test/fuzz/CMakeLists.txt index 1c7b0d5c25..2d5f93b4f1 100644 --- a/src/test/fuzz/CMakeLists.txt +++ b/src/test/fuzz/CMakeLists.txt @@ -117,6 +117,7 @@ add_executable(fuzz timeoffsets.cpp torcontrol.cpp transaction.cpp + txdownloadman.cpp tx_in.cpp tx_out.cpp tx_pool.cpp diff --git a/src/test/fuzz/txdownloadman.cpp b/src/test/fuzz/txdownloadman.cpp new file mode 100644 index 0000000000..eb903bb470 --- /dev/null +++ b/src/test/fuzz/txdownloadman.cpp @@ -0,0 +1,445 @@ +// Copyright (c) 2023 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
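The fuzz harness below exercises this interface one call at a time. For orientation, a minimal hypothetical caller might drive it as follows; the header path, the surrounding setup (pool, rng, peer, timestamps) and the validation step are illustrative assumptions, and only the TxDownloadManager/TxDownloadOptions calls mirror the API added above:

    #include <node/txdownloadman.h> // assumed location of the public TxDownloadManager declaration

    #include <chrono>

    void ExampleTxDownloadFlow(CTxMemPool& pool, FastRandomContext& rng, NodeId peer,
                               const CTransactionRef& announced_tx, std::chrono::microseconds now)
    {
        node::TxDownloadManager txdownloadman{node::TxDownloadOptions{pool, rng, /*max_orphan_txs=*/100, /*deterministic=*/false}};

        // A tx-relay peer connects and announces one wtxid; later we ask what to request.
        txdownloadman.ConnectedPeer(peer, node::TxDownloadConnectionInfo{.m_preferred = true,
                                                                         .m_relay_permissions = false,
                                                                         .m_wtxid_relay = true});
        txdownloadman.AddTxAnnouncement(peer, GenTxid::Wtxid(announced_tx->GetWitnessHash()), now, /*p2p_inv=*/true);
        const auto to_request{txdownloadman.GetRequestsToSend(peer, now)}; // send getdata for these announcements

        // The tx arrives: validate it alone, validate a 1p1c package, or drop it.
        const auto& [should_validate, maybe_package] = txdownloadman.ReceivedTx(peer, announced_tx);
        if (should_validate) {
            // ... run mempool validation, then report back with MempoolAcceptedTx()
            // or MempoolRejectedTx() so the filters and orphanage stay consistent.
            txdownloadman.MempoolAcceptedTx(announced_tx);
        } else if (maybe_package.has_value()) {
            // ... validate the package; on failure, MempoolRejectedPackage() caches the combined hash.
        }

        txdownloadman.DisconnectedPeer(peer);
        txdownloadman.CheckIsEmpty(peer);
    }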
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace { + +const TestingSetup* g_setup; + +constexpr size_t NUM_COINS{50}; +COutPoint COINS[NUM_COINS]; + +static TxValidationResult TESTED_TX_RESULTS[] = { + // Skip TX_RESULT_UNSET + TxValidationResult::TX_CONSENSUS, + TxValidationResult::TX_RECENT_CONSENSUS_CHANGE, + TxValidationResult::TX_INPUTS_NOT_STANDARD, + TxValidationResult::TX_NOT_STANDARD, + TxValidationResult::TX_MISSING_INPUTS, + TxValidationResult::TX_PREMATURE_SPEND, + TxValidationResult::TX_WITNESS_MUTATED, + TxValidationResult::TX_WITNESS_STRIPPED, + TxValidationResult::TX_CONFLICT, + TxValidationResult::TX_MEMPOOL_POLICY, + // Skip TX_NO_MEMPOOL + TxValidationResult::TX_RECONSIDERABLE, + TxValidationResult::TX_UNKNOWN, +}; + +// Precomputed transactions. Some may conflict with each other. +std::vector TRANSACTIONS; + +// Limit the total number of peers because we don't expect coverage to change much with lots more peers. +constexpr int NUM_PEERS = 16; + +// Precomputed random durations (positive and negative, each ~exponentially distributed). +std::chrono::microseconds TIME_SKIPS[128]; + +static CTransactionRef MakeTransactionSpending(const std::vector& outpoints, size_t num_outputs, bool add_witness) +{ + CMutableTransaction tx; + // If no outpoints are given, create a random one. + for (const auto& outpoint : outpoints) { + tx.vin.emplace_back(outpoint); + } + if (add_witness) { + tx.vin[0].scriptWitness.stack.push_back({1}); + } + for (size_t o = 0; o < num_outputs; ++o) tx.vout.emplace_back(CENT, P2WSH_OP_TRUE); + return MakeTransactionRef(tx); +} +static std::vector PickCoins(FuzzedDataProvider& fuzzed_data_provider) +{ + std::vector ret; + ret.push_back(fuzzed_data_provider.PickValueInArray(COINS)); + LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10) { + ret.push_back(fuzzed_data_provider.PickValueInArray(COINS)); + } + return ret; +} + +void initialize() +{ + static const auto testing_setup = MakeNoLogFileContext(); + g_setup = testing_setup.get(); + for (uint32_t i = 0; i < uint32_t{NUM_COINS}; ++i) { + COINS[i] = COutPoint{Txid::FromUint256((HashWriter() << i).GetHash()), i}; + } + size_t outpoints_index = 0; + // 2 transactions same txid different witness + { + auto tx1{MakeTransactionSpending({COINS[outpoints_index]}, /*num_outputs=*/5, /*add_witness=*/false)}; + auto tx2{MakeTransactionSpending({COINS[outpoints_index]}, /*num_outputs=*/5, /*add_witness=*/true)}; + Assert(tx1->GetHash() == tx2->GetHash()); + TRANSACTIONS.emplace_back(tx1); + TRANSACTIONS.emplace_back(tx2); + outpoints_index += 1; + } + // 2 parents 1 child + { + auto tx_parent_1{MakeTransactionSpending({COINS[outpoints_index++]}, /*num_outputs=*/1, /*add_witness=*/true)}; + TRANSACTIONS.emplace_back(tx_parent_1); + auto tx_parent_2{MakeTransactionSpending({COINS[outpoints_index++]}, /*num_outputs=*/1, /*add_witness=*/false)}; + TRANSACTIONS.emplace_back(tx_parent_2); + TRANSACTIONS.emplace_back(MakeTransactionSpending({COutPoint{tx_parent_1->GetHash(), 0}, COutPoint{tx_parent_2->GetHash(), 0}}, + /*num_outputs=*/1, /*add_witness=*/true)); + } + // 1 parent 2 children + { + auto tx_parent{MakeTransactionSpending({COINS[outpoints_index++]}, /*num_outputs=*/2, /*add_witness=*/true)}; + TRANSACTIONS.emplace_back(tx_parent); + TRANSACTIONS.emplace_back(MakeTransactionSpending({COutPoint{tx_parent->GetHash(), 0}}, + /*num_outputs=*/1, 
/*add_witness=*/true)); + TRANSACTIONS.emplace_back(MakeTransactionSpending({COutPoint{tx_parent->GetHash(), 1}}, + /*num_outputs=*/1, /*add_witness=*/true)); + } + // chain of 5 segwit + { + COutPoint& last_outpoint = COINS[outpoints_index++]; + for (auto i{0}; i < 5; ++i) { + auto tx{MakeTransactionSpending({last_outpoint}, /*num_outputs=*/1, /*add_witness=*/true)}; + TRANSACTIONS.emplace_back(tx); + last_outpoint = COutPoint{tx->GetHash(), 0}; + } + } + // chain of 5 non-segwit + { + COutPoint& last_outpoint = COINS[outpoints_index++]; + for (auto i{0}; i < 5; ++i) { + auto tx{MakeTransactionSpending({last_outpoint}, /*num_outputs=*/1, /*add_witness=*/false)}; + TRANSACTIONS.emplace_back(tx); + last_outpoint = COutPoint{tx->GetHash(), 0}; + } + } + // Also create a loose tx for each outpoint. Some of these transactions conflict with the above + // or have the same txid. + for (const auto& outpoint : COINS) { + TRANSACTIONS.emplace_back(MakeTransactionSpending({outpoint}, /*num_outputs=*/1, /*add_witness=*/true)); + } + + // Create random-looking time jumps + int i = 0; + // TIME_SKIPS[N] for N=0..15 is just N microseconds. + for (; i < 16; ++i) { + TIME_SKIPS[i] = std::chrono::microseconds{i}; + } + // TIME_SKIPS[N] for N=16..127 has randomly-looking but roughly exponentially increasing values up to + // 198.416453 seconds. + for (; i < 128; ++i) { + int diff_bits = ((i - 10) * 2) / 9; + uint64_t diff = 1 + (CSipHasher(0, 0).Write(i).Finalize() >> (64 - diff_bits)); + TIME_SKIPS[i] = TIME_SKIPS[i - 1] + std::chrono::microseconds{diff}; + } +} + +void CheckPackageToValidate(const node::PackageToValidate& package_to_validate, NodeId peer) +{ + Assert(package_to_validate.m_senders.size() == 2); + Assert(package_to_validate.m_senders.front() == peer); + Assert(package_to_validate.m_senders.back() < NUM_PEERS); + + // Package is a 1p1c + const auto& package = package_to_validate.m_txns; + Assert(IsChildWithParents(package)); + Assert(package.size() == 2); +} + +FUZZ_TARGET(txdownloadman, .init = initialize) +{ + FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); + + // Initialize txdownloadman + bilingual_str error; + CTxMemPool pool{MemPoolOptionsForTest(g_setup->m_node), error}; + const auto max_orphan_count = fuzzed_data_provider.ConsumeIntegralInRange(0, 300); + FastRandomContext det_rand{true}; + node::TxDownloadManager txdownloadman{node::TxDownloadOptions{pool, det_rand, max_orphan_count, true}}; + + std::chrono::microseconds time{244466666}; + + LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10000) + { + NodeId rand_peer = fuzzed_data_provider.ConsumeIntegralInRange(0, NUM_PEERS - 1); + + // Transaction can be one of the premade ones or a randomly generated one + auto rand_tx = fuzzed_data_provider.ConsumeBool() ? 
+ MakeTransactionSpending(PickCoins(fuzzed_data_provider), + /*num_outputs=*/fuzzed_data_provider.ConsumeIntegralInRange(1, 500), + /*add_witness=*/fuzzed_data_provider.ConsumeBool()) : + TRANSACTIONS.at(fuzzed_data_provider.ConsumeIntegralInRange(0, TRANSACTIONS.size() - 1)); + + CallOneOf( + fuzzed_data_provider, + [&] { + node::TxDownloadConnectionInfo info{ + .m_preferred = fuzzed_data_provider.ConsumeBool(), + .m_relay_permissions = fuzzed_data_provider.ConsumeBool(), + .m_wtxid_relay = fuzzed_data_provider.ConsumeBool() + }; + txdownloadman.ConnectedPeer(rand_peer, info); + }, + [&] { + txdownloadman.DisconnectedPeer(rand_peer); + txdownloadman.CheckIsEmpty(rand_peer); + }, + [&] { + txdownloadman.ActiveTipChange(); + }, + [&] { + CBlock block; + block.vtx.push_back(rand_tx); + txdownloadman.BlockConnected(std::make_shared(block)); + }, + [&] { + txdownloadman.BlockDisconnected(); + }, + [&] { + txdownloadman.MempoolAcceptedTx(rand_tx); + }, + [&] { + TxValidationState state; + state.Invalid(fuzzed_data_provider.PickValueInArray(TESTED_TX_RESULTS), ""); + bool first_time_failure{fuzzed_data_provider.ConsumeBool()}; + + node::RejectedTxTodo todo = txdownloadman.MempoolRejectedTx(rand_tx, state, rand_peer, first_time_failure); + Assert(first_time_failure || !todo.m_should_add_extra_compact_tx); + }, + [&] { + GenTxid gtxid = fuzzed_data_provider.ConsumeBool() ? + GenTxid::Txid(rand_tx->GetHash()) : + GenTxid::Wtxid(rand_tx->GetWitnessHash()); + txdownloadman.AddTxAnnouncement(rand_peer, gtxid, time, /*p2p_inv=*/fuzzed_data_provider.ConsumeBool()); + }, + [&] { + txdownloadman.GetRequestsToSend(rand_peer, time); + }, + [&] { + txdownloadman.ReceivedTx(rand_peer, rand_tx); + const auto& [should_validate, maybe_package] = txdownloadman.ReceivedTx(rand_peer, rand_tx); + // The only possible results should be: + // - Don't validate the tx, no package. + // - Don't validate the tx, package. + // - Validate the tx, no package. + // The only combination that doesn't make sense is validate both tx and package. + Assert(!(should_validate && maybe_package.has_value())); + if (maybe_package.has_value()) CheckPackageToValidate(*maybe_package, rand_peer); + }, + [&] { + txdownloadman.ReceivedNotFound(rand_peer, {rand_tx->GetWitnessHash()}); + }, + [&] { + const bool expect_work{txdownloadman.HaveMoreWork(rand_peer)}; + const auto ptx = txdownloadman.GetTxToReconsider(rand_peer); + // expect_work=true doesn't necessarily mean the next item from the workset isn't a + // nullptr, as the transaction could have been removed from orphanage without being + // removed from the peer's workset. + if (ptx) { + // However, if there was a non-null tx in the workset, HaveMoreWork should have + // returned true. + Assert(expect_work); + } + } + ); + // Jump forwards or backwards + auto time_skip = fuzzed_data_provider.PickValueInArray(TIME_SKIPS); + if (fuzzed_data_provider.ConsumeBool()) time_skip *= -1; + time += time_skip; + } + // Disconnect everybody, check that all data structures are empty. + for (NodeId nodeid = 0; nodeid < NUM_PEERS; ++nodeid) { + txdownloadman.DisconnectedPeer(nodeid); + txdownloadman.CheckIsEmpty(nodeid); + } + txdownloadman.CheckIsEmpty(); +} + +// Give node 0 relay permissions, and nobody else. This helps us remember who is a RelayPermissions +// peer without tracking anything (this is only for the txdownload_impl target). 
+static bool HasRelayPermissions(NodeId peer) { return peer == 0; } + +static void CheckInvariants(const node::TxDownloadManagerImpl& txdownload_impl, size_t max_orphan_count) +{ + const TxOrphanage& orphanage = txdownload_impl.m_orphanage; + + // Orphanage usage should never exceed what is allowed + Assert(orphanage.Size() <= max_orphan_count); + + // We should never have more than the maximum in-flight requests out for a peer. + for (NodeId peer = 0; peer < NUM_PEERS; ++peer) { + if (!HasRelayPermissions(peer)) { + Assert(txdownload_impl.m_txrequest.CountInFlight(peer) <= node::MAX_PEER_TX_REQUEST_IN_FLIGHT); + } + } + txdownload_impl.m_txrequest.SanityCheck(); +} + +FUZZ_TARGET(txdownloadman_impl, .init = initialize) +{ + FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); + + // Initialize a TxDownloadManagerImpl + bilingual_str error; + CTxMemPool pool{MemPoolOptionsForTest(g_setup->m_node), error}; + const auto max_orphan_count = fuzzed_data_provider.ConsumeIntegralInRange(0, 300); + FastRandomContext det_rand{true}; + node::TxDownloadManagerImpl txdownload_impl{node::TxDownloadOptions{pool, det_rand, max_orphan_count, true}}; + + std::chrono::microseconds time{244466666}; + + LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10000) + { + NodeId rand_peer = fuzzed_data_provider.ConsumeIntegralInRange(0, NUM_PEERS - 1); + + // Transaction can be one of the premade ones or a randomly generated one + auto rand_tx = fuzzed_data_provider.ConsumeBool() ? + MakeTransactionSpending(PickCoins(fuzzed_data_provider), + /*num_outputs=*/fuzzed_data_provider.ConsumeIntegralInRange(1, 500), + /*add_witness=*/fuzzed_data_provider.ConsumeBool()) : + TRANSACTIONS.at(fuzzed_data_provider.ConsumeIntegralInRange(0, TRANSACTIONS.size() - 1)); + + CallOneOf( + fuzzed_data_provider, + [&] { + node::TxDownloadConnectionInfo info{ + .m_preferred = fuzzed_data_provider.ConsumeBool(), + .m_relay_permissions = HasRelayPermissions(rand_peer), + .m_wtxid_relay = fuzzed_data_provider.ConsumeBool() + }; + txdownload_impl.ConnectedPeer(rand_peer, info); + }, + [&] { + txdownload_impl.DisconnectedPeer(rand_peer); + txdownload_impl.CheckIsEmpty(rand_peer); + }, + [&] { + txdownload_impl.ActiveTipChange(); + // After a block update, nothing should be in the rejection caches + for (const auto& tx : TRANSACTIONS) { + Assert(!txdownload_impl.RecentRejectsFilter().contains(tx->GetWitnessHash().ToUint256())); + Assert(!txdownload_impl.RecentRejectsFilter().contains(tx->GetHash().ToUint256())); + Assert(!txdownload_impl.RecentRejectsReconsiderableFilter().contains(tx->GetWitnessHash().ToUint256())); + Assert(!txdownload_impl.RecentRejectsReconsiderableFilter().contains(tx->GetHash().ToUint256())); + } + }, + [&] { + CBlock block; + block.vtx.push_back(rand_tx); + txdownload_impl.BlockConnected(std::make_shared(block)); + // Block transactions must be removed from orphanage + Assert(!txdownload_impl.m_orphanage.HaveTx(rand_tx->GetWitnessHash())); + }, + [&] { + txdownload_impl.BlockDisconnected(); + Assert(!txdownload_impl.RecentConfirmedTransactionsFilter().contains(rand_tx->GetWitnessHash().ToUint256())); + Assert(!txdownload_impl.RecentConfirmedTransactionsFilter().contains(rand_tx->GetHash().ToUint256())); + }, + [&] { + txdownload_impl.MempoolAcceptedTx(rand_tx); + }, + [&] { + TxValidationState state; + state.Invalid(fuzzed_data_provider.PickValueInArray(TESTED_TX_RESULTS), ""); + bool first_time_failure{fuzzed_data_provider.ConsumeBool()}; + + bool 
reject_contains_wtxid{txdownload_impl.RecentRejectsFilter().contains(rand_tx->GetWitnessHash().ToUint256())}; + + node::RejectedTxTodo todo = txdownload_impl.MempoolRejectedTx(rand_tx, state, rand_peer, first_time_failure); + Assert(first_time_failure || !todo.m_should_add_extra_compact_tx); + if (!reject_contains_wtxid) Assert(todo.m_unique_parents.size() <= rand_tx->vin.size()); + }, + [&] { + GenTxid gtxid = fuzzed_data_provider.ConsumeBool() ? + GenTxid::Txid(rand_tx->GetHash()) : + GenTxid::Wtxid(rand_tx->GetWitnessHash()); + txdownload_impl.AddTxAnnouncement(rand_peer, gtxid, time, /*p2p_inv=*/fuzzed_data_provider.ConsumeBool()); + }, + [&] { + const auto getdata_requests = txdownload_impl.GetRequestsToSend(rand_peer, time); + // TxDownloadManager should not be telling us to request things we already have. + // Exclude m_lazy_recent_rejects_reconsiderable because it may request low-feerate parent of orphan. + for (const auto& gtxid : getdata_requests) { + Assert(!txdownload_impl.AlreadyHaveTx(gtxid, /*include_reconsiderable=*/false)); + } + }, + [&] { + const auto& [should_validate, maybe_package] = txdownload_impl.ReceivedTx(rand_peer, rand_tx); + // The only possible results should be: + // - Don't validate the tx, no package. + // - Don't validate the tx, package. + // - Validate the tx, no package. + // The only combination that doesn't make sense is validate both tx and package. + Assert(!(should_validate && maybe_package.has_value())); + if (should_validate) { + Assert(!txdownload_impl.AlreadyHaveTx(GenTxid::Wtxid(rand_tx->GetWitnessHash()), /*include_reconsiderable=*/true)); + } + if (maybe_package.has_value()) { + CheckPackageToValidate(*maybe_package, rand_peer); + + const auto& package = maybe_package->m_txns; + // Parent is in m_lazy_recent_rejects_reconsiderable and child is in m_orphanage + Assert(txdownload_impl.RecentRejectsReconsiderableFilter().contains(rand_tx->GetWitnessHash().ToUint256())); + Assert(txdownload_impl.m_orphanage.HaveTx(maybe_package->m_txns.back()->GetWitnessHash())); + // Package has not been rejected + Assert(!txdownload_impl.RecentRejectsReconsiderableFilter().contains(GetPackageHash(package))); + // Neither is in m_lazy_recent_rejects + Assert(!txdownload_impl.RecentRejectsFilter().contains(package.front()->GetWitnessHash().ToUint256())); + Assert(!txdownload_impl.RecentRejectsFilter().contains(package.back()->GetWitnessHash().ToUint256())); + } + }, + [&] { + txdownload_impl.ReceivedNotFound(rand_peer, {rand_tx->GetWitnessHash()}); + }, + [&] { + const bool expect_work{txdownload_impl.HaveMoreWork(rand_peer)}; + const auto ptx{txdownload_impl.GetTxToReconsider(rand_peer)}; + // expect_work=true doesn't necessarily mean the next item from the workset isn't a + // nullptr, as the transaction could have been removed from orphanage without being + // removed from the peer's workset. + if (ptx) { + // However, if there was a non-null tx in the workset, HaveMoreWork should have + // returned true. + Assert(expect_work); + Assert(txdownload_impl.AlreadyHaveTx(GenTxid::Wtxid(ptx->GetWitnessHash()), /*include_reconsiderable=*/false)); + // Presumably we have validated this tx. Use "missing inputs" to keep it in the + // orphanage longer. Later iterations might call MempoolAcceptedTx or + // MempoolRejectedTx with a different error. 
+ TxValidationState state_missing_inputs; + state_missing_inputs.Invalid(TxValidationResult::TX_MISSING_INPUTS, ""); + txdownload_impl.MempoolRejectedTx(ptx, state_missing_inputs, rand_peer, fuzzed_data_provider.ConsumeBool()); + } + } + ); + + // Jump ahead in time + time += fuzzed_data_provider.PickValueInArray(TIME_SKIPS); + CheckInvariants(txdownload_impl, max_orphan_count); + } + // Disconnect everybody, check that all data structures are empty. + for (NodeId nodeid = 0; nodeid < NUM_PEERS; ++nodeid) { + txdownload_impl.DisconnectedPeer(nodeid); + txdownload_impl.CheckIsEmpty(nodeid); + } + txdownload_impl.CheckIsEmpty(); +} + +} // namespace diff --git a/src/test/txdownload_tests.cpp b/src/test/txdownload_tests.cpp new file mode 100644 index 0000000000..3eb57a6353 --- /dev/null +++ b/src/test/txdownload_tests.cpp @@ -0,0 +1,337 @@ +// Copyright (c) 2011-2022 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include +#include +#include +#include +#include +#include