Merge bitcoin/bitcoin#21160: net/net processing: Move tx inventory into net_processing

1066d10f71 scripted-diff: rename TxRelay members (John Newbery)
575bbd0dea [net processing] Move tx relay data to Peer (John Newbery)
785f55f7ee [net processing] Move m_wtxid_relay to Peer (John Newbery)
36346703f8 [net] Add CNode.m_relays_txs and CNode.m_bloom_filter_loaded (John Newbery)

Pull request description:

  This continues the work of moving application layer data into net_processing, by moving all tx data into the new Peer object added in #19607.

  For motivation, see #19398.
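  For reference while reading the diff below, the shape of the change: the per-peer transaction relay state (relay flag, bloom filter, inventory-to-send set, fee filters, wtxid-relay flag) moves from the net-layer CNode into the net_processing-owned Peer, and CNode keeps only two atomic summary flags for the inbound eviction logic. A minimal sketch of the resulting layout, using simplified stand-in types (TxHash, BloomFilter, std::mutex) rather than the real Bitcoin Core ones, with most members and lock annotations omitted:

```cpp
// Sketch only: simplified stand-ins for the types used in the diff below
// (uint256, CBloomFilter, CRollingBloomFilter, CAmount, RecursiveMutex).
#include <atomic>
#include <cstdint>
#include <memory>
#include <mutex>
#include <set>
#include <string>

using TxHash = std::string;  // stand-in for uint256
struct BloomFilter {};       // stand-in for CBloomFilter

// Application-layer per-peer state, owned by net_processing (struct Peer).
struct PeerSketch {
    struct TxRelay {
        std::mutex m_bloom_filter_mutex;
        bool m_relay_txs{false};                       // peer wants tx invs (VERSION fRelay / filter* messages)
        std::unique_ptr<BloomFilter> m_bloom_filter;

        std::mutex m_tx_inventory_mutex;
        std::set<TxHash> m_tx_inventory_known_filter;  // stand-in for CRollingBloomFilter
        std::set<TxHash> m_tx_inventory_to_send;       // invs queued for the next trickle
        std::atomic<int64_t> m_fee_filter_received{0}; // stand-in for std::atomic<CAmount>
    };
    std::atomic<bool> m_wtxid_relay{false};
    std::unique_ptr<TxRelay> m_tx_relay;               // nullptr for block-relay-only peers

    explicit PeerSketch(bool tx_relay)
        : m_tx_relay(tx_relay ? std::make_unique<TxRelay>() : nullptr) {}
};

// Network-layer object (CNode) keeps only two summary flags for eviction.
struct CNodeSketch {
    std::atomic<bool> m_relays_txs{false};
    std::atomic<bool> m_bloom_filter_loaded{false};
};
```

  Keeping the two flags on CNode means the inbound eviction code in net.cpp no longer has to reach into net_processing's per-peer data.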

ACKs for top commit:
  dergoegge:
    ACK 1066d10f71 - This is a good layer separation improvement with no behavior changes.
  glozow:
    utACK 1066d10f71

Tree-SHA512: 0c9d6b8a0a05e2d816b6d6588b7df133842ec960ae67667813422aa7bd8eb5308599c714f3822a98ddbdf364ffab9050b055079277ba4aff24092557ff99ebcc
fanquake 2022-03-25 14:54:47 +00:00
commit 9344697e57
GPG key ID: 2EEB9F5CC09526C1
13 changed files with 188 additions and 220 deletions
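
The net_processing.cpp hunks further down also change how a transaction is announced to peers: RelayTransaction no longer takes cs_main and walks CConnman's node list via _RelayTransaction and CNodeState, but iterates the peer map under net_processing's own peer mutex and queues the inv on each tx-relaying peer. A compilable sketch of that flow, using hypothetical stand-in names and types (RelayTransactionSketch, g_peer_map, TxHash) in place of PeerManagerImpl's actual members:

```cpp
// Sketch only: stand-in types and globals, not the real PeerManagerImpl members.
#include <atomic>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <string>

using TxHash = std::string;  // stand-in for uint256

struct PeerSketch {
    struct TxRelay {
        std::mutex m_tx_inventory_mutex;
        std::set<TxHash> m_tx_inventory_known_filter;  // stand-in for CRollingBloomFilter
        std::set<TxHash> m_tx_inventory_to_send;       // announced on the next trickle
    };
    std::atomic<bool> m_wtxid_relay{false};
    std::unique_ptr<TxRelay> m_tx_relay;               // nullptr for block-relay-only peers
};

std::mutex g_peer_mutex;                               // stand-in for PeerManagerImpl::m_peer_mutex
std::map<int, std::shared_ptr<PeerSketch>> g_peer_map; // stand-in for PeerManagerImpl::m_peer_map

// After this PR, announcing a tx needs neither cs_main nor a CNodeState lookup:
// choose txid vs wtxid per peer and queue it unless the peer already knows it.
void RelayTransactionSketch(const TxHash& txid, const TxHash& wtxid)
{
    std::lock_guard<std::mutex> peer_lock(g_peer_mutex);
    for (const auto& [id, peer] : g_peer_map) {
        (void)id;
        if (!peer->m_tx_relay) continue;  // block-relay-only peers get no tx invs
        const TxHash& hash = peer->m_wtxid_relay ? wtxid : txid;
        std::lock_guard<std::mutex> inv_lock(peer->m_tx_relay->m_tx_inventory_mutex);
        if (peer->m_tx_relay->m_tx_inventory_known_filter.count(hash) == 0) {
            peer->m_tx_relay->m_tx_inventory_to_send.insert(hash);
        }
    }
}
```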


@ -626,12 +626,6 @@ void CNode::CopyStats(CNodeStats& stats)
X(addr);
X(addrBind);
stats.m_network = ConnectedThroughNetwork();
if (m_tx_relay != nullptr) {
LOCK(m_tx_relay->cs_filter);
stats.fRelayTxes = m_tx_relay->fRelayTxes;
} else {
stats.fRelayTxes = false;
}
X(m_last_send);
X(m_last_recv);
X(m_last_tx_time);
@ -658,11 +652,6 @@ void CNode::CopyStats(CNodeStats& stats)
X(nRecvBytes);
}
X(m_permissionFlags);
if (m_tx_relay != nullptr) {
stats.minFeeFilter = m_tx_relay->minFeeFilter;
} else {
stats.minFeeFilter = 0;
}
X(m_last_ping_time);
X(m_min_ping_time);
@ -913,7 +902,7 @@ static bool CompareNodeTXTime(const NodeEvictionCandidate &a, const NodeEviction
{
// There is a fall-through here because it is common for a node to have more than a few peers that have not yet relayed txn.
if (a.m_last_tx_time != b.m_last_tx_time) return a.m_last_tx_time < b.m_last_tx_time;
if (a.fRelayTxes != b.fRelayTxes) return b.fRelayTxes;
if (a.m_relay_txs != b.m_relay_txs) return b.m_relay_txs;
if (a.fBloomFilter != b.fBloomFilter) return a.fBloomFilter;
return a.m_connected > b.m_connected;
}
@ -921,7 +910,7 @@ static bool CompareNodeTXTime(const NodeEvictionCandidate &a, const NodeEviction
// Pick out the potential block-relay only peers, and sort them by last block time.
static bool CompareNodeBlockRelayOnlyTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b)
{
if (a.fRelayTxes != b.fRelayTxes) return a.fRelayTxes;
if (a.m_relay_txs != b.m_relay_txs) return a.m_relay_txs;
if (a.m_last_block_time != b.m_last_block_time) return a.m_last_block_time < b.m_last_block_time;
if (a.fRelevantServices != b.fRelevantServices) return b.fRelevantServices;
return a.m_connected > b.m_connected;
@ -1046,7 +1035,7 @@ void ProtectEvictionCandidatesByRatio(std::vector<NodeEvictionCandidate>& evicti
EraseLastKElements(vEvictionCandidates, CompareNodeTXTime, 4);
// Protect up to 8 non-tx-relay peers that have sent us novel blocks.
EraseLastKElements(vEvictionCandidates, CompareNodeBlockRelayOnlyTime, 8,
[](const NodeEvictionCandidate& n) { return !n.fRelayTxes && n.fRelevantServices; });
[](const NodeEvictionCandidate& n) { return !n.m_relay_txs && n.fRelevantServices; });
// Protect 4 nodes that most recently sent us novel blocks.
// An attacker cannot manipulate this metric without performing useful work.
@ -1112,18 +1101,11 @@ bool CConnman::AttemptToEvictConnection()
continue;
if (node->fDisconnect)
continue;
bool peer_relay_txes = false;
bool peer_filter_not_null = false;
if (node->m_tx_relay != nullptr) {
LOCK(node->m_tx_relay->cs_filter);
peer_relay_txes = node->m_tx_relay->fRelayTxes;
peer_filter_not_null = node->m_tx_relay->pfilter != nullptr;
}
NodeEvictionCandidate candidate = {node->GetId(), node->m_connected, node->m_min_ping_time,
node->m_last_block_time, node->m_last_tx_time,
HasAllDesirableServiceFlags(node->nServices),
peer_relay_txes, peer_filter_not_null, node->nKeyedNetGroup,
node->m_prefer_evict, node->addr.IsLocal(),
node->m_relays_txs.load(), node->m_bloom_filter_loaded.load(),
node->nKeyedNetGroup, node->m_prefer_evict, node->addr.IsLocal(),
node->ConnectedThroughNetwork()};
vEvictionCandidates.push_back(candidate);
}
@ -3031,9 +3013,6 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, std::shared_ptr<Sock> s
nLocalServices(nLocalServicesIn)
{
if (inbound_onion) assert(conn_type_in == ConnectionType::INBOUND);
if (conn_type_in != ConnectionType::BLOCK_RELAY) {
m_tx_relay = std::make_unique<TxRelay>();
}
for (const std::string &msg : getAllNetMessageTypes())
mapRecvBytesPerMsgCmd[msg] = 0;


@ -257,7 +257,6 @@ class CNodeStats
public:
NodeId nodeid;
ServiceFlags nServices;
bool fRelayTxes;
std::chrono::seconds m_last_send;
std::chrono::seconds m_last_recv;
std::chrono::seconds m_last_tx_time;
@ -278,7 +277,6 @@ public:
NetPermissionFlags m_permissionFlags;
std::chrono::microseconds m_last_ping_time;
std::chrono::microseconds m_min_ping_time;
CAmount minFeeFilter;
// Our address, as reported by the peer
std::string addrLocal;
// Address of this peer
@ -555,34 +553,15 @@ public:
// Peer selected us as (compact blocks) high-bandwidth peer (BIP152)
std::atomic<bool> m_bip152_highbandwidth_from{false};
struct TxRelay {
mutable RecursiveMutex cs_filter;
// We use fRelayTxes for two purposes -
// a) it allows us to not relay tx invs before receiving the peer's version message
// b) the peer may tell us in its version message that we should not relay tx invs
// unless it loads a bloom filter.
bool fRelayTxes GUARDED_BY(cs_filter){false};
std::unique_ptr<CBloomFilter> pfilter PT_GUARDED_BY(cs_filter) GUARDED_BY(cs_filter){nullptr};
/** Whether we should relay transactions to this peer (their version
* message did not include fRelay=false and this is not a block-relay-only
* connection). This only changes from false to true. It will never change
* back to false. Used only in inbound eviction logic. */
std::atomic_bool m_relays_txs{false};
mutable RecursiveMutex cs_tx_inventory;
CRollingBloomFilter filterInventoryKnown GUARDED_BY(cs_tx_inventory){50000, 0.000001};
// Set of transaction ids we still have to announce.
// They are sorted by the mempool before relay, so the order is not important.
std::set<uint256> setInventoryTxToSend;
// Used for BIP35 mempool sending
bool fSendMempool GUARDED_BY(cs_tx_inventory){false};
// Last time a "MEMPOOL" request was serviced.
std::atomic<std::chrono::seconds> m_last_mempool_req{0s};
std::chrono::microseconds nNextInvSend{0};
/** Minimum fee rate with which to filter inv's to this node */
std::atomic<CAmount> minFeeFilter{0};
CAmount lastSentFeeFilter{0};
std::chrono::microseconds m_next_send_feefilter{0};
};
// m_tx_relay == nullptr if we're not relaying transactions with this peer
std::unique_ptr<TxRelay> m_tx_relay;
/** Whether this peer has loaded a bloom filter. Used only in inbound
* eviction logic. */
std::atomic_bool m_bloom_filter_loaded{false};
/** UNIX epoch time of the last block received from this peer that we had
* not yet seen (e.g. not already received from another peer), that passed
@ -658,23 +637,6 @@ public:
nRefCount--;
}
void AddKnownTx(const uint256& hash)
{
if (m_tx_relay != nullptr) {
LOCK(m_tx_relay->cs_tx_inventory);
m_tx_relay->filterInventoryKnown.insert(hash);
}
}
void PushTxInventory(const uint256& hash)
{
if (m_tx_relay == nullptr) return;
LOCK(m_tx_relay->cs_tx_inventory);
if (!m_tx_relay->filterInventoryKnown.contains(hash)) {
m_tx_relay->setInventoryTxToSend.insert(hash);
}
}
void CloseSocketDisconnect();
void CopyStats(CNodeStats& stats);
@ -1308,7 +1270,7 @@ struct NodeEvictionCandidate
std::chrono::seconds m_last_block_time;
std::chrono::seconds m_last_tx_time;
bool fRelevantServices;
bool fRelayTxes;
bool m_relay_txs;
bool fBloomFilter;
uint64_t nKeyedNetGroup;
bool prefer_evict;


@ -233,6 +233,39 @@ struct Peer {
/** Whether a ping has been requested by the user */
std::atomic<bool> m_ping_queued{false};
/** Whether this peer relays txs via wtxid */
std::atomic<bool> m_wtxid_relay{false};
struct TxRelay {
mutable RecursiveMutex m_bloom_filter_mutex;
// We use m_relay_txs for two purposes -
// a) it allows us to not relay tx invs before receiving the peer's version message
// b) the peer may tell us in its version message that we should not relay tx invs
// unless it loads a bloom filter.
bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr};
mutable RecursiveMutex m_tx_inventory_mutex;
CRollingBloomFilter m_tx_inventory_known_filter GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
// Set of transaction ids we still have to announce.
// They are sorted by the mempool before relay, so the order is not important.
std::set<uint256> m_tx_inventory_to_send;
// Used for BIP35 mempool sending
bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
// Last time a "MEMPOOL" request was serviced.
std::atomic<std::chrono::seconds> m_last_mempool_req{0s};
std::chrono::microseconds m_next_inv_send_time{0};
/** Minimum fee rate with which to filter inv's to this node */
std::atomic<CAmount> m_fee_filter_received{0};
CAmount m_fee_filter_sent{0};
std::chrono::microseconds m_next_send_feefilter{0};
};
/** Transaction relay data. Will be a nullptr if we're not relaying
* transactions with this peer (e.g. if it's a block-relay-only peer) */
std::unique_ptr<TxRelay> m_tx_relay;
/** A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND. */
std::vector<CAddress> m_addrs_to_send;
/** Probabilistic filter to track recent addr messages relayed with this
@ -291,8 +324,9 @@ struct Peer {
/** Work queue of items requested by this peer **/
std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
explicit Peer(NodeId id)
explicit Peer(NodeId id, bool tx_relay)
: m_id(id)
, m_tx_relay(tx_relay ? std::make_unique<TxRelay>() : nullptr)
{}
};
@ -332,9 +366,6 @@ public:
const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override;
private:
void _RelayTransaction(const uint256& txid, const uint256& wtxid)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Consider evicting an outbound peer based on the amount of time they've been behind our tip */
void ConsiderEviction(CNode& pto, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@ -395,7 +426,7 @@ private:
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
/** Send a version message to a peer */
void PushNodeVersion(CNode& pnode);
void PushNodeVersion(CNode& pnode, Peer& peer);
/** Send a ping message every PING_INTERVAL or if requested via RPC. May
* mark the peer to be disconnected if a ping has timed out.
@ -416,7 +447,7 @@ private:
void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable);
/** Send `feefilter` message. */
void MaybeSendFeefilter(CNode& node, std::chrono::microseconds current_time);
void MaybeSendFeefilter(CNode& node, Peer& peer, std::chrono::microseconds current_time);
const CChainParams& m_chainparams;
CConnman& m_connman;
@ -465,7 +496,7 @@ private:
std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);
/** Number of peers with wtxid relay. */
int m_wtxid_relay_peers GUARDED_BY(cs_main) = 0;
std::atomic<int> m_wtxid_relay_peers{0};
/** Number of outbound peers with m_chain_sync.m_protect. */
int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
@ -780,9 +811,6 @@ struct CNodeState {
//! A rolling bloom filter of all announced tx CInvs to this peer.
CRollingBloomFilter m_recently_announced_invs = CRollingBloomFilter{INVENTORY_MAX_RECENT_RELAY, 0.000001};
//! Whether this peer relays txs via wtxid
bool m_wtxid_relay{false};
CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
};
@ -827,6 +855,14 @@ static void PushAddress(Peer& peer, const CAddress& addr, FastRandomContext& ins
}
}
static void AddKnownTx(Peer& peer, const uint256& hash)
{
if (peer.m_tx_relay != nullptr) {
LOCK(peer.m_tx_relay->m_tx_inventory_mutex);
peer.m_tx_relay->m_tx_inventory_known_filter.insert(hash);
}
}
static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
nPreferredDownload -= state->fPreferredDownload;
@ -1122,7 +1158,7 @@ void PeerManagerImpl::FindNextBlocksToDownload(NodeId nodeid, unsigned int count
} // namespace
void PeerManagerImpl::PushNodeVersion(CNode& pnode)
void PeerManagerImpl::PushNodeVersion(CNode& pnode, Peer& peer)
{
// Note that pnode->GetLocalServices() is a reflection of the local
// services we were offering when the CNode object was created for this
@ -1137,7 +1173,7 @@ void PeerManagerImpl::PushNodeVersion(CNode& pnode)
CService addr_you = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ? addr : CService();
uint64_t your_services{addr.nServices};
const bool tx_relay = !m_ignore_incoming_txs && pnode.m_tx_relay != nullptr && !pnode.IsFeelerConn();
const bool tx_relay = !m_ignore_incoming_txs && peer.m_tx_relay != nullptr && !pnode.IsFeelerConn();
m_connman.PushMessage(&pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, my_services, nTime,
your_services, addr_you, // Together the pre-version-31402 serialization of CAddress "addrYou" (without nTime)
my_services, CService(), // Together the pre-version-31402 serialization of CAddress "addrMe" (without nTime)
@ -1194,13 +1230,13 @@ void PeerManagerImpl::InitializeNode(CNode *pnode)
mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(pnode->IsInboundConn()));
assert(m_txrequest.Count(nodeid) == 0);
}
PeerRef peer = std::make_shared<Peer>(nodeid, /*tx_relay=*/ !pnode->IsBlockOnlyConn());
{
PeerRef peer = std::make_shared<Peer>(nodeid);
LOCK(m_peer_mutex);
m_peer_map.emplace_hint(m_peer_map.end(), nodeid, std::move(peer));
m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
}
if (!pnode->IsInboundConn()) {
PushNodeVersion(*pnode);
PushNodeVersion(*pnode, *peer);
}
}
@ -1212,8 +1248,7 @@ void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler)
CTransactionRef tx = m_mempool.get(txid);
if (tx != nullptr) {
LOCK(cs_main);
_RelayTransaction(txid, tx->GetWitnessHash());
RelayTransaction(txid, tx->GetWitnessHash());
} else {
m_mempool.RemoveUnbroadcastTx(txid, true);
}
@ -1240,6 +1275,8 @@ void PeerManagerImpl::FinalizeNode(const CNode& node)
PeerRef peer = RemovePeer(nodeid);
assert(peer != nullptr);
misbehavior = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score);
m_wtxid_relay_peers -= peer->m_wtxid_relay;
assert(m_wtxid_relay_peers >= 0);
}
CNodeState *state = State(nodeid);
assert(state != nullptr);
@ -1257,8 +1294,6 @@ void PeerManagerImpl::FinalizeNode(const CNode& node)
assert(m_peers_downloading_from >= 0);
m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
assert(m_outbound_peers_with_protect_from_disconnect >= 0);
m_wtxid_relay_peers -= state->m_wtxid_relay;
assert(m_wtxid_relay_peers >= 0);
mapNodeState.erase(nodeid);
@ -1331,6 +1366,14 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c
ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
}
if (peer->m_tx_relay != nullptr) {
stats.m_relay_txs = WITH_LOCK(peer->m_tx_relay->m_bloom_filter_mutex, return peer->m_tx_relay->m_relay_txs);
stats.m_fee_filter_received = peer->m_tx_relay->m_fee_filter_received.load();
} else {
stats.m_relay_txs = false;
stats.m_fee_filter_received = 0;
}
stats.m_ping_wait = ping_wait;
stats.m_addr_processed = peer->m_addr_processed.load();
stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
@ -1746,22 +1789,17 @@ void PeerManagerImpl::SendPings()
void PeerManagerImpl::RelayTransaction(const uint256& txid, const uint256& wtxid)
{
WITH_LOCK(cs_main, _RelayTransaction(txid, wtxid););
}
LOCK(m_peer_mutex);
for(auto& it : m_peer_map) {
Peer& peer = *it.second;
if (!peer.m_tx_relay) continue;
void PeerManagerImpl::_RelayTransaction(const uint256& txid, const uint256& wtxid)
{
m_connman.ForEachNode([&txid, &wtxid](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
AssertLockHeld(::cs_main);
CNodeState* state = State(pnode->GetId());
if (state == nullptr) return;
if (state->m_wtxid_relay) {
pnode->PushTxInventory(wtxid);
} else {
pnode->PushTxInventory(txid);
const uint256& hash{peer.m_wtxid_relay ? wtxid : txid};
LOCK(peer.m_tx_relay->m_tx_inventory_mutex);
if (!peer.m_tx_relay->m_tx_inventory_known_filter.contains(hash)) {
peer.m_tx_relay->m_tx_inventory_to_send.insert(hash);
}
});
};
}
void PeerManagerImpl::RelayAddress(NodeId originator,
@ -1907,11 +1945,11 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv&
} else if (inv.IsMsgFilteredBlk()) {
bool sendMerkleBlock = false;
CMerkleBlock merkleBlock;
if (pfrom.m_tx_relay != nullptr) {
LOCK(pfrom.m_tx_relay->cs_filter);
if (pfrom.m_tx_relay->pfilter) {
if (peer.m_tx_relay != nullptr) {
LOCK(peer.m_tx_relay->m_bloom_filter_mutex);
if (peer.m_tx_relay->m_bloom_filter) {
sendMerkleBlock = true;
merkleBlock = CMerkleBlock(*pblock, *pfrom.m_tx_relay->pfilter);
merkleBlock = CMerkleBlock(*pblock, *peer.m_tx_relay->m_bloom_filter);
}
}
if (sendMerkleBlock) {
@ -2000,7 +2038,7 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic
const auto now{GetTime<std::chrono::seconds>()};
// Get last mempool request time
const auto mempool_req = pfrom.m_tx_relay != nullptr ? pfrom.m_tx_relay->m_last_mempool_req.load() : std::chrono::seconds::min();
const auto mempool_req = peer.m_tx_relay != nullptr ? peer.m_tx_relay->m_last_mempool_req.load() : std::chrono::seconds::min();
// Process as many TX items from the front of the getdata queue as
// possible, since they're common and it's efficient to batch process
@ -2013,7 +2051,7 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic
const CInv &inv = *it++;
if (pfrom.m_tx_relay == nullptr) {
if (peer.m_tx_relay == nullptr) {
// Ignore GETDATA requests for transactions from blocks-only peers.
continue;
}
@ -2041,7 +2079,7 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic
}
for (const uint256& parent_txid : parent_ids_to_add) {
// Relaying a transaction with a recent but unconfirmed parent.
if (WITH_LOCK(pfrom.m_tx_relay->cs_tx_inventory, return !pfrom.m_tx_relay->filterInventoryKnown.contains(parent_txid))) {
if (WITH_LOCK(peer.m_tx_relay->m_tx_inventory_mutex, return !peer.m_tx_relay->m_tx_inventory_known_filter.contains(parent_txid))) {
LOCK(cs_main);
State(pfrom.GetId())->m_recently_announced_invs.insert(parent_txid);
}
@ -2321,7 +2359,7 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set)
if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString());
_RelayTransaction(orphanHash, porphanTx->GetWitnessHash());
RelayTransaction(orphanHash, porphanTx->GetWitnessHash());
m_orphanage.AddChildrenToWorkSet(*porphanTx, orphan_work_set);
m_orphanage.EraseTx(orphanHash);
for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) {
@ -2637,7 +2675,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// Inbound peers send us their version message when they connect.
// We send our version message in response.
if (pfrom.IsInboundConn()) {
PushNodeVersion(pfrom);
PushNodeVersion(pfrom, *peer);
}
// Change version
@ -2676,9 +2714,10 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// set nodes not capable of serving the complete blockchain history as "limited nodes"
pfrom.m_limited_node = (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED));
if (pfrom.m_tx_relay != nullptr) {
LOCK(pfrom.m_tx_relay->cs_filter);
pfrom.m_tx_relay->fRelayTxes = fRelay; // set to true after we get the first filter* message
if (peer->m_tx_relay != nullptr) {
LOCK(peer->m_tx_relay->m_bloom_filter_mutex);
peer->m_tx_relay->m_relay_txs = fRelay; // set to true after we get the first filter* message
if (fRelay) pfrom.m_relays_txs = true;
}
if((nServices & NODE_WITNESS))
@ -2867,9 +2906,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
return;
}
if (pfrom.GetCommonVersion() >= WTXID_RELAY_VERSION) {
LOCK(cs_main);
if (!State(pfrom.GetId())->m_wtxid_relay) {
State(pfrom.GetId())->m_wtxid_relay = true;
if (!peer->m_wtxid_relay) {
peer->m_wtxid_relay = true;
m_wtxid_relay_peers++;
} else {
LogPrint(BCLog::NET, "ignoring duplicate wtxidrelay from peer=%d\n", pfrom.GetId());
@ -3005,7 +3043,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// Reject tx INVs when the -blocksonly setting is enabled, or this is a
// block-relay-only peer
bool reject_tx_invs{m_ignore_incoming_txs || (pfrom.m_tx_relay == nullptr)};
bool reject_tx_invs{m_ignore_incoming_txs || (peer->m_tx_relay == nullptr)};
// Allow peers with relay permission to send data other than blocks in blocks only mode
if (pfrom.HasPermission(NetPermissionFlags::Relay)) {
@ -3023,7 +3061,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// Ignore INVs that don't match wtxidrelay setting.
// Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting.
// This is fine as no INV messages are involved in that process.
if (State(pfrom.GetId())->m_wtxid_relay) {
if (peer->m_wtxid_relay) {
if (inv.IsMsgTx()) continue;
} else {
if (inv.IsMsgWtx()) continue;
@ -3052,7 +3090,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
const bool fAlreadyHave = AlreadyHaveTx(gtxid);
LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
pfrom.AddKnownTx(inv.hash);
AddKnownTx(*peer, inv.hash);
if (!fAlreadyHave && !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
AddTxAnnouncement(pfrom, gtxid, current_time);
}
@ -3282,8 +3320,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// Stop processing the transaction early if
// 1) We are in blocks only mode and peer has no relay permission
// 2) This peer is a block-relay-only peer
if ((m_ignore_incoming_txs && !pfrom.HasPermission(NetPermissionFlags::Relay)) || (pfrom.m_tx_relay == nullptr))
{
if ((m_ignore_incoming_txs && !pfrom.HasPermission(NetPermissionFlags::Relay)) || (peer->m_tx_relay == nullptr)) {
LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId());
pfrom.fDisconnect = true;
return;
@ -3301,21 +3338,19 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
const uint256& txid = ptx->GetHash();
const uint256& wtxid = ptx->GetWitnessHash();
LOCK2(cs_main, g_cs_orphans);
CNodeState* nodestate = State(pfrom.GetId());
const uint256& hash = nodestate->m_wtxid_relay ? wtxid : txid;
pfrom.AddKnownTx(hash);
if (nodestate->m_wtxid_relay && txid != wtxid) {
// Insert txid into filterInventoryKnown, even for
const uint256& hash = peer->m_wtxid_relay ? wtxid : txid;
AddKnownTx(*peer, hash);
if (peer->m_wtxid_relay && txid != wtxid) {
// Insert txid into m_tx_inventory_known_filter, even for
// wtxidrelay peers. This prevents re-adding of
// unconfirmed parents to the recently_announced
// filter, when a child tx is requested. See
// ProcessGetData().
pfrom.AddKnownTx(txid);
AddKnownTx(*peer, txid);
}
LOCK2(cs_main, g_cs_orphans);
m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
if (tx.HasWitness()) m_txrequest.ReceivedResponse(pfrom.GetId(), wtxid);
@ -3340,7 +3375,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
LogPrintf("Not relaying non-mempool transaction %s from forcerelay peer=%d\n", tx.GetHash().ToString(), pfrom.GetId());
} else {
LogPrintf("Force relaying tx %s from peer=%d\n", tx.GetHash().ToString(), pfrom.GetId());
_RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
}
}
return;
@ -3354,7 +3389,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// requests for it.
m_txrequest.ForgetTxHash(tx.GetHash());
m_txrequest.ForgetTxHash(tx.GetWitnessHash());
_RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
m_orphanage.AddChildrenToWorkSet(tx, peer->m_orphan_work_set);
pfrom.m_last_tx_time = GetTime<std::chrono::seconds>();
@ -3401,7 +3436,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// Eventually we should replace this with an improved
// protocol for getting all unconfirmed parents.
const auto gtxid{GenTxid::Txid(parent_txid)};
pfrom.AddKnownTx(parent_txid);
AddKnownTx(*peer, parent_txid);
if (!AlreadyHaveTx(gtxid)) AddTxAnnouncement(pfrom, gtxid, current_time);
}
@ -3897,9 +3932,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
return;
}
if (pfrom.m_tx_relay != nullptr) {
LOCK(pfrom.m_tx_relay->cs_tx_inventory);
pfrom.m_tx_relay->fSendMempool = true;
if (peer->m_tx_relay != nullptr) {
LOCK(peer->m_tx_relay->m_tx_inventory_mutex);
peer->m_tx_relay->m_send_mempool = true;
}
return;
}
@ -3993,11 +4028,13 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// There is no excuse for sending a too-large filter
Misbehaving(pfrom.GetId(), 100, "too-large bloom filter");
}
else if (pfrom.m_tx_relay != nullptr)
else if (peer->m_tx_relay != nullptr)
{
LOCK(pfrom.m_tx_relay->cs_filter);
pfrom.m_tx_relay->pfilter.reset(new CBloomFilter(filter));
pfrom.m_tx_relay->fRelayTxes = true;
LOCK(peer->m_tx_relay->m_bloom_filter_mutex);
peer->m_tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
pfrom.m_bloom_filter_loaded = true;
peer->m_tx_relay->m_relay_txs = true;
pfrom.m_relays_txs = true;
}
return;
}
@ -4016,10 +4053,10 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
bool bad = false;
if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
bad = true;
} else if (pfrom.m_tx_relay != nullptr) {
LOCK(pfrom.m_tx_relay->cs_filter);
if (pfrom.m_tx_relay->pfilter) {
pfrom.m_tx_relay->pfilter->insert(vData);
} else if (peer->m_tx_relay != nullptr) {
LOCK(peer->m_tx_relay->m_bloom_filter_mutex);
if (peer->m_tx_relay->m_bloom_filter) {
peer->m_tx_relay->m_bloom_filter->insert(vData);
} else {
bad = true;
}
@ -4036,12 +4073,14 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
pfrom.fDisconnect = true;
return;
}
if (pfrom.m_tx_relay == nullptr) {
if (peer->m_tx_relay == nullptr) {
return;
}
LOCK(pfrom.m_tx_relay->cs_filter);
pfrom.m_tx_relay->pfilter = nullptr;
pfrom.m_tx_relay->fRelayTxes = true;
LOCK(peer->m_tx_relay->m_bloom_filter_mutex);
peer->m_tx_relay->m_bloom_filter = nullptr;
pfrom.m_bloom_filter_loaded = false;
peer->m_tx_relay->m_relay_txs = true;
pfrom.m_relays_txs = true;
return;
}
@ -4049,8 +4088,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
CAmount newFeeFilter = 0;
vRecv >> newFeeFilter;
if (MoneyRange(newFeeFilter)) {
if (pfrom.m_tx_relay != nullptr) {
pfrom.m_tx_relay->minFeeFilter = newFeeFilter;
if (peer->m_tx_relay != nullptr) {
peer->m_tx_relay->m_fee_filter_received = newFeeFilter;
}
LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
}
@ -4508,10 +4547,10 @@ void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::micros
}
}
void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, std::chrono::microseconds current_time)
void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::microseconds current_time)
{
if (m_ignore_incoming_txs) return;
if (!pto.m_tx_relay) return;
if (!peer.m_tx_relay) return;
if (pto.GetCommonVersion() < FEEFILTER_VERSION) return;
// peers with the forcerelay permission should not filter txs to us
if (pto.HasPermission(NetPermissionFlags::ForceRelay)) return;
@ -4525,27 +4564,27 @@ void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, std::chrono::microseconds c
currentFilter = MAX_MONEY;
} else {
static const CAmount MAX_FILTER{g_filter_rounder.round(MAX_MONEY)};
if (pto.m_tx_relay->lastSentFeeFilter == MAX_FILTER) {
if (peer.m_tx_relay->m_fee_filter_sent == MAX_FILTER) {
// Send the current filter if we sent MAX_FILTER previously
// and made it out of IBD.
pto.m_tx_relay->m_next_send_feefilter = 0us;
peer.m_tx_relay->m_next_send_feefilter = 0us;
}
}
if (current_time > pto.m_tx_relay->m_next_send_feefilter) {
if (current_time > peer.m_tx_relay->m_next_send_feefilter) {
CAmount filterToSend = g_filter_rounder.round(currentFilter);
// We always have a fee filter of at least minRelayTxFee
filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
if (filterToSend != pto.m_tx_relay->lastSentFeeFilter) {
if (filterToSend != peer.m_tx_relay->m_fee_filter_sent) {
m_connman.PushMessage(&pto, CNetMsgMaker(pto.GetCommonVersion()).Make(NetMsgType::FEEFILTER, filterToSend));
pto.m_tx_relay->lastSentFeeFilter = filterToSend;
peer.m_tx_relay->m_fee_filter_sent = filterToSend;
}
pto.m_tx_relay->m_next_send_feefilter = GetExponentialRand(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL);
peer.m_tx_relay->m_next_send_feefilter = GetExponentialRand(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL);
}
// If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
// until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < pto.m_tx_relay->m_next_send_feefilter &&
(currentFilter < 3 * pto.m_tx_relay->lastSentFeeFilter / 4 || currentFilter > 4 * pto.m_tx_relay->lastSentFeeFilter / 3)) {
pto.m_tx_relay->m_next_send_feefilter = current_time + GetRandomDuration<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY);
else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < peer.m_tx_relay->m_next_send_feefilter &&
(currentFilter < 3 * peer.m_tx_relay->m_fee_filter_sent / 4 || currentFilter > 4 * peer.m_tx_relay->m_fee_filter_sent / 3)) {
peer.m_tx_relay->m_next_send_feefilter = current_time + GetRandomDuration<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY);
}
}
@ -4812,45 +4851,45 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
peer->m_blocks_for_inv_relay.clear();
}
if (pto->m_tx_relay != nullptr) {
LOCK(pto->m_tx_relay->cs_tx_inventory);
if (peer->m_tx_relay != nullptr) {
LOCK(peer->m_tx_relay->m_tx_inventory_mutex);
// Check whether periodic sends should happen
bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan);
if (pto->m_tx_relay->nNextInvSend < current_time) {
if (peer->m_tx_relay->m_next_inv_send_time < current_time) {
fSendTrickle = true;
if (pto->IsInboundConn()) {
pto->m_tx_relay->nNextInvSend = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
peer->m_tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
} else {
pto->m_tx_relay->nNextInvSend = GetExponentialRand(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
peer->m_tx_relay->m_next_inv_send_time = GetExponentialRand(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
}
}
// Time to send but the peer has requested we not relay transactions.
if (fSendTrickle) {
LOCK(pto->m_tx_relay->cs_filter);
if (!pto->m_tx_relay->fRelayTxes) pto->m_tx_relay->setInventoryTxToSend.clear();
LOCK(peer->m_tx_relay->m_bloom_filter_mutex);
if (!peer->m_tx_relay->m_relay_txs) peer->m_tx_relay->m_tx_inventory_to_send.clear();
}
// Respond to BIP35 mempool requests
if (fSendTrickle && pto->m_tx_relay->fSendMempool) {
if (fSendTrickle && peer->m_tx_relay->m_send_mempool) {
auto vtxinfo = m_mempool.infoAll();
pto->m_tx_relay->fSendMempool = false;
const CFeeRate filterrate{pto->m_tx_relay->minFeeFilter.load()};
peer->m_tx_relay->m_send_mempool = false;
const CFeeRate filterrate{peer->m_tx_relay->m_fee_filter_received.load()};
LOCK(pto->m_tx_relay->cs_filter);
LOCK(peer->m_tx_relay->m_bloom_filter_mutex);
for (const auto& txinfo : vtxinfo) {
const uint256& hash = state.m_wtxid_relay ? txinfo.tx->GetWitnessHash() : txinfo.tx->GetHash();
CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
pto->m_tx_relay->setInventoryTxToSend.erase(hash);
const uint256& hash = peer->m_wtxid_relay ? txinfo.tx->GetWitnessHash() : txinfo.tx->GetHash();
CInv inv(peer->m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
peer->m_tx_relay->m_tx_inventory_to_send.erase(hash);
// Don't send transactions that peers will not put into their mempool
if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
continue;
}
if (pto->m_tx_relay->pfilter) {
if (!pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
if (peer->m_tx_relay->m_bloom_filter) {
if (!peer->m_tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
}
pto->m_tx_relay->filterInventoryKnown.insert(hash);
peer->m_tx_relay->m_tx_inventory_known_filter.insert(hash);
// Responses to MEMPOOL requests bypass the m_recently_announced_invs filter.
vInv.push_back(inv);
if (vInv.size() == MAX_INV_SZ) {
@ -4858,37 +4897,37 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
vInv.clear();
}
}
pto->m_tx_relay->m_last_mempool_req = std::chrono::duration_cast<std::chrono::seconds>(current_time);
peer->m_tx_relay->m_last_mempool_req = std::chrono::duration_cast<std::chrono::seconds>(current_time);
}
// Determine transactions to relay
if (fSendTrickle) {
// Produce a vector with all candidates for sending
std::vector<std::set<uint256>::iterator> vInvTx;
vInvTx.reserve(pto->m_tx_relay->setInventoryTxToSend.size());
for (std::set<uint256>::iterator it = pto->m_tx_relay->setInventoryTxToSend.begin(); it != pto->m_tx_relay->setInventoryTxToSend.end(); it++) {
vInvTx.reserve(peer->m_tx_relay->m_tx_inventory_to_send.size());
for (std::set<uint256>::iterator it = peer->m_tx_relay->m_tx_inventory_to_send.begin(); it != peer->m_tx_relay->m_tx_inventory_to_send.end(); it++) {
vInvTx.push_back(it);
}
const CFeeRate filterrate{pto->m_tx_relay->minFeeFilter.load()};
const CFeeRate filterrate{peer->m_tx_relay->m_fee_filter_received.load()};
// Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
// A heap is used so that not all items need sorting if only a few are being sent.
CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, state.m_wtxid_relay);
CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, peer->m_wtxid_relay);
std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
// No reason to drain out at many times the network's capacity,
// especially since we have many peers and some will draw much shorter delays.
unsigned int nRelayedTransactions = 0;
LOCK(pto->m_tx_relay->cs_filter);
LOCK(peer->m_tx_relay->m_bloom_filter_mutex);
while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
// Fetch the top element from the heap
std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
std::set<uint256>::iterator it = vInvTx.back();
vInvTx.pop_back();
uint256 hash = *it;
CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
CInv inv(peer->m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
// Remove it from the to-be-sent set
pto->m_tx_relay->setInventoryTxToSend.erase(it);
peer->m_tx_relay->m_tx_inventory_to_send.erase(it);
// Check if not in the filter already
if (pto->m_tx_relay->filterInventoryKnown.contains(hash)) {
if (peer->m_tx_relay->m_tx_inventory_known_filter.contains(hash)) {
continue;
}
// Not in the mempool anymore? don't bother sending it.
@ -4902,7 +4941,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
continue;
}
if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
if (peer->m_tx_relay->m_bloom_filter && !peer->m_tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
// Send
State(pto->GetId())->m_recently_announced_invs.insert(hash);
vInv.push_back(inv);
@ -4929,14 +4968,14 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
vInv.clear();
}
pto->m_tx_relay->filterInventoryKnown.insert(hash);
peer->m_tx_relay->m_tx_inventory_known_filter.insert(hash);
if (hash != txid) {
// Insert txid into filterInventoryKnown, even for
// Insert txid into m_tx_inventory_known_filter, even for
// wtxidrelay peers. This prevents re-adding of
// unconfirmed parents to the recently_announced
// filter, when a child tx is requested. See
// ProcessGetData().
pto->m_tx_relay->filterInventoryKnown.insert(txid);
peer->m_tx_relay->m_tx_inventory_known_filter.insert(txid);
}
}
}
@ -5057,6 +5096,6 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (!vGetData.empty())
m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
} // release cs_main
MaybeSendFeefilter(*pto, current_time);
MaybeSendFeefilter(*pto, *peer, current_time);
return true;
}


@ -29,6 +29,8 @@ struct CNodeStateStats {
int m_starting_height = -1;
std::chrono::microseconds m_ping_wait;
std::vector<int> vHeightInFlight;
bool m_relay_txs;
CAmount m_fee_filter_received;
uint64_t m_addr_processed = 0;
uint64_t m_addr_rate_limited = 0;
bool m_addr_relay_enabled{false};


@ -1169,7 +1169,6 @@ void RPCConsole::updateDetailWidget()
peerAddrDetails += "<br />" + tr("via %1").arg(QString::fromStdString(stats->nodeStats.addrLocal));
ui->peerHeading->setText(peerAddrDetails);
ui->peerServices->setText(GUIUtil::formatServicesStr(stats->nodeStats.nServices));
ui->peerRelayTxes->setText(stats->nodeStats.fRelayTxes ? ts.yes : ts.no);
QString bip152_hb_settings;
if (stats->nodeStats.m_bip152_highbandwidth_to) bip152_hb_settings = ts.to;
if (stats->nodeStats.m_bip152_highbandwidth_from) bip152_hb_settings += (bip152_hb_settings.isEmpty() ? ts.from : QLatin1Char('/') + ts.from);
@ -1221,6 +1220,7 @@ void RPCConsole::updateDetailWidget()
ui->peerAddrRelayEnabled->setText(stats->nodeStateStats.m_addr_relay_enabled ? ts.yes : ts.no);
ui->peerAddrProcessed->setText(QString::number(stats->nodeStateStats.m_addr_processed));
ui->peerAddrRateLimited->setText(QString::number(stats->nodeStateStats.m_addr_rate_limited));
ui->peerRelayTxes->setText(stats->nodeStateStats.m_relay_txs ? ts.yes : ts.no);
}
ui->peersTabRightPanel->show();


@ -197,7 +197,6 @@ static RPCHelpMan getpeerinfo()
}
obj.pushKV("services", strprintf("%016x", stats.nServices));
obj.pushKV("servicesnames", GetServicesNames(stats.nServices));
obj.pushKV("relaytxes", stats.fRelayTxes);
obj.pushKV("lastsend", count_seconds(stats.m_last_send));
obj.pushKV("lastrecv", count_seconds(stats.m_last_recv));
obj.pushKV("last_transaction", count_seconds(stats.m_last_tx_time));
@ -232,6 +231,8 @@ static RPCHelpMan getpeerinfo()
heights.push_back(height);
}
obj.pushKV("inflight", heights);
obj.pushKV("relaytxes", statestats.m_relay_txs);
obj.pushKV("minfeefilter", ValueFromAmount(statestats.m_fee_filter_received));
obj.pushKV("addr_relay_enabled", statestats.m_addr_relay_enabled);
obj.pushKV("addr_processed", statestats.m_addr_processed);
obj.pushKV("addr_rate_limited", statestats.m_addr_rate_limited);
@ -241,7 +242,6 @@ static RPCHelpMan getpeerinfo()
permissions.push_back(permission);
}
obj.pushKV("permissions", permissions);
obj.pushKV("minfeefilter", ValueFromAmount(stats.minFeeFilter));
UniValue sendPerMsgCmd(UniValue::VOBJ);
for (const auto& i : stats.mapSendBytesPerMsgCmd) {


@ -51,16 +51,6 @@ FUZZ_TARGET_INIT(net, initialize_net)
node.Release();
}
},
[&] {
const std::optional<CInv> inv_opt = ConsumeDeserializable<CInv>(fuzzed_data_provider);
if (!inv_opt) {
return;
}
node.AddKnownTx(inv_opt->hash);
},
[&] {
node.PushTxInventory(ConsumeUInt256(fuzzed_data_provider));
},
[&] {
const std::optional<CService> service_opt = ConsumeDeserializable<CService>(fuzzed_data_provider);
if (!service_opt) {


@ -26,7 +26,7 @@ FUZZ_TARGET(node_eviction)
/*m_last_block_time=*/std::chrono::seconds{fuzzed_data_provider.ConsumeIntegral<int64_t>()},
/*m_last_tx_time=*/std::chrono::seconds{fuzzed_data_provider.ConsumeIntegral<int64_t>()},
/*fRelevantServices=*/fuzzed_data_provider.ConsumeBool(),
/*fRelayTxes=*/fuzzed_data_provider.ConsumeBool(),
/*m_relay_txs=*/fuzzed_data_provider.ConsumeBool(),
/*fBloomFilter=*/fuzzed_data_provider.ConsumeBool(),
/*nKeyedNetGroup=*/fuzzed_data_provider.ConsumeIntegral<uint64_t>(),
/*prefer_evict=*/fuzzed_data_provider.ConsumeBool(),


@ -255,10 +255,6 @@ void FillNode(FuzzedDataProvider& fuzzed_data_provider, ConnmanTestMsg& connman,
assert(node.nVersion == version);
assert(node.GetCommonVersion() == std::min(version, PROTOCOL_VERSION));
assert(node.nServices == remote_services);
if (node.m_tx_relay != nullptr) {
LOCK(node.m_tx_relay->cs_filter);
assert(node.m_tx_relay->fRelayTxes == filter_txs);
}
node.m_permissionFlags = permission_flags;
if (successfully_connected) {
CSerializedNetMsg msg_verack{mm.Make(NetMsgType::VERACK)};


@ -627,7 +627,7 @@ BOOST_AUTO_TEST_CASE(peer_eviction_test)
number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) {
candidate.m_last_block_time = std::chrono::seconds{number_of_nodes - candidate.id};
if (candidate.id <= 7) {
candidate.fRelayTxes = false;
candidate.m_relay_txs = false;
candidate.fRelevantServices = true;
}
},
@ -646,7 +646,7 @@ BOOST_AUTO_TEST_CASE(peer_eviction_test)
number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) {
candidate.m_last_block_time = std::chrono::seconds{number_of_nodes - candidate.id};
if (candidate.id <= 7) {
candidate.fRelayTxes = false;
candidate.m_relay_txs = false;
candidate.fRelevantServices = true;
}
},


@ -52,7 +52,7 @@ std::vector<NodeEvictionCandidate> GetRandomNodeEvictionCandidates(int n_candida
/*m_last_block_time=*/std::chrono::seconds{random_context.randrange(100)},
/*m_last_tx_time=*/std::chrono::seconds{random_context.randrange(100)},
/*fRelevantServices=*/random_context.randbool(),
/*fRelayTxes=*/random_context.randbool(),
/*m_relay_txs=*/random_context.randbool(),
/*fBloomFilter=*/random_context.randbool(),
/*nKeyedNetGroup=*/random_context.randrange(100),
/*prefer_evict=*/random_context.randbool(),


@ -94,7 +94,7 @@ class P2PBlocksOnly(BitcoinTestFramework):
self.nodes[0].sendrawtransaction(tx_hex)
# Bump time forward to ensure nNextInvSend timer pops
# Bump time forward to ensure m_next_inv_send_time timer pops
self.nodes[0].setmocktime(int(time.time()) + 60)
conn.sync_send_with_ping()


@ -38,7 +38,7 @@ class ResendWalletTransactionsTest(BitcoinTestFramework):
# Can take a few seconds due to transaction trickling
peer_first.wait_for_broadcast([txid])
# Add a second peer since txs aren't rebroadcast to the same peer (see filterInventoryKnown)
# Add a second peer since txs aren't rebroadcast to the same peer (see m_tx_inventory_known_filter)
peer_second = node.add_p2p_connection(P2PTxInvStore())
self.log.info("Create a block")