net, net_processing: use existing RNG objects more

PeerManagerImpl, as well as several net functions, already have existing
FastRandomContext objects. Reuse them instead of constructing new ones.
Pieter Wuille 2024-06-18 11:23:10 -04:00
parent d5fcbe966b
commit 8e31cf9c9b
2 changed files with 13 additions and 13 deletions
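
The change is mechanical: instead of constructing a throwaway FastRandomContext for every delay draw, each call site draws from an RNG object that already exists in scope (a function-local rng in net.cpp, the m_rng member in PeerManagerImpl). A minimal standalone sketch of the before/after shape, assuming a stand-in Rng type whose rand_exp_duration only mimics the shape of FastRandomContext's API (illustration, not Bitcoin Core code):

// Minimal sketch of the refactor (not Bitcoin Core code): draw delays from a
// long-lived RNG instead of constructing a throwaway generator per call.
#include <chrono>
#include <cstdint>
#include <random>

struct Rng {
    std::mt19937_64 gen{std::random_device{}()};
    // Exponentially distributed duration with the given mean.
    std::chrono::microseconds rand_exp_duration(std::chrono::microseconds mean) {
        std::exponential_distribution<double> dist(1.0 / mean.count());
        return std::chrono::microseconds{static_cast<std::int64_t>(dist(gen))};
    }
};

std::chrono::microseconds next_send_before(std::chrono::microseconds now,
                                           std::chrono::microseconds avg) {
    return now + Rng{}.rand_exp_duration(avg); // fresh generator every call
}

std::chrono::microseconds next_send_after(Rng& rng, std::chrono::microseconds now,
                                          std::chrono::microseconds avg) {
    return now + rng.rand_exp_duration(avg);   // reuse the caller's existing RNG
}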

src/net.cpp

@@ -2481,9 +2481,9 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
     auto start = GetTime<std::chrono::microseconds>();
     // Minimum time before next feeler connection (in microseconds).
-    auto next_feeler = start + FastRandomContext().rand_exp_duration(FEELER_INTERVAL);
-    auto next_extra_block_relay = start + FastRandomContext().rand_exp_duration(EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
-    auto next_extra_network_peer{start + FastRandomContext().rand_exp_duration(EXTRA_NETWORK_PEER_INTERVAL)};
+    auto next_feeler = start + rng.rand_exp_duration(FEELER_INTERVAL);
+    auto next_extra_block_relay = start + rng.rand_exp_duration(EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
+    auto next_extra_network_peer{start + rng.rand_exp_duration(EXTRA_NETWORK_PEER_INTERVAL)};
     const bool dnsseed = gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED);
     bool add_fixed_seeds = gArgs.GetBoolArg("-fixedseeds", DEFAULT_FIXEDSEEDS);
     const bool use_seednodes{gArgs.IsArgSet("-seednode")};
@@ -2642,10 +2642,10 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
             // Because we can promote these connections to block-relay-only
             // connections, they do not get their own ConnectionType enum
             // (similar to how we deal with extra outbound peers).
-            next_extra_block_relay = now + FastRandomContext().rand_exp_duration(EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
+            next_extra_block_relay = now + rng.rand_exp_duration(EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
             conn_type = ConnectionType::BLOCK_RELAY;
         } else if (now > next_feeler) {
-            next_feeler = now + FastRandomContext().rand_exp_duration(FEELER_INTERVAL);
+            next_feeler = now + rng.rand_exp_duration(FEELER_INTERVAL);
             conn_type = ConnectionType::FEELER;
             fFeeler = true;
         } else if (nOutboundFullRelay == m_max_outbound_full_relay &&
@@ -2658,7 +2658,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
             // This is not attempted if the user changed -maxconnections to a value
             // so low that less than MAX_OUTBOUND_FULL_RELAY_CONNECTIONS are made,
             // to prevent interactions with otherwise protected outbound peers.
-            next_extra_network_peer = now + FastRandomContext().rand_exp_duration(EXTRA_NETWORK_PEER_INTERVAL);
+            next_extra_network_peer = now + rng.rand_exp_duration(EXTRA_NETWORK_PEER_INTERVAL);
         } else {
             // skip to next iteration of while loop
             continue;
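
All three net.cpp hunks sit in the connection-opening loop: next_feeler, next_extra_block_relay, and next_extra_network_peer each hold an absolute deadline that, once passed, is re-armed by adding a fresh exponentially distributed interval, now drawn from the rng object already in scope in that function. A condensed, runnable sketch of that re-arming shape (constants, helpers, and output are illustrative only, not CConnman's actual logic):

// Condensed sketch of the deadline re-arming pattern; the mean interval here
// is a small demo value, not the real feeler interval.
#include <chrono>
#include <cstdio>
#include <random>
#include <thread>

int main() {
    using namespace std::chrono_literals;
    using Clock = std::chrono::steady_clock;

    std::mt19937_64 rng{std::random_device{}()}; // single RNG reused for every draw
    const std::chrono::duration<double> mean_interval = 50ms;
    std::exponential_distribution<double> exp_secs(1.0 / mean_interval.count());

    auto rand_exp = [&] {
        return std::chrono::duration_cast<Clock::duration>(
            std::chrono::duration<double>(exp_secs(rng)));
    };

    auto next_feeler = Clock::now() + rand_exp();
    for (int i = 0; i < 20; ++i) {
        std::this_thread::sleep_for(10ms);
        const auto now = Clock::now();
        if (now > next_feeler) {
            std::puts("feeler deadline hit; attempting one feeler connection");
            next_feeler = now + rand_exp(); // re-arm from the same rng object
        }
    }
}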

src/net_processing.cpp

@@ -936,7 +936,7 @@ private:
      * accurately determine when we received the transaction (and potentially
      * determine the transaction's origin). */
     std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now,
-                                                std::chrono::seconds average_interval);
+                                                std::chrono::seconds average_interval) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
     // All of the following cache a recent block, and are protected by m_most_recent_block_mutex
@@ -1244,7 +1244,7 @@ std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::micros
         // If this function were called from multiple threads simultaneously
         // it would possible that both update the next send variable, and return a different result to their caller.
         // This is not possible in practice as only the net processing thread invokes this function.
-        m_next_inv_to_inbounds = now + FastRandomContext().rand_exp_duration(average_interval);
+        m_next_inv_to_inbounds = now + m_rng.rand_exp_duration(average_interval);
     }
     return m_next_inv_to_inbounds;
 }
@@ -5654,13 +5654,13 @@ void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::micros
             CAddress local_addr{*local_service, peer.m_our_services, Now<NodeSeconds>()};
             PushAddress(peer, local_addr);
         }
-        peer.m_next_local_addr_send = current_time + FastRandomContext().rand_exp_duration(AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
+        peer.m_next_local_addr_send = current_time + m_rng.rand_exp_duration(AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
     }
 
     // We sent an `addr` message to this peer recently. Nothing more to do.
     if (current_time <= peer.m_next_addr_send) return;
 
-    peer.m_next_addr_send = current_time + FastRandomContext().rand_exp_duration(AVG_ADDRESS_BROADCAST_INTERVAL);
+    peer.m_next_addr_send = current_time + m_rng.rand_exp_duration(AVG_ADDRESS_BROADCAST_INTERVAL);
 
     if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) {
         // Should be impossible since we always check size before adding to
@@ -5747,13 +5747,13 @@ void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::mi
             MakeAndPushMessage(pto, NetMsgType::FEEFILTER, filterToSend);
             peer.m_fee_filter_sent = filterToSend;
         }
-        peer.m_next_send_feefilter = current_time + FastRandomContext().rand_exp_duration(AVG_FEEFILTER_BROADCAST_INTERVAL);
+        peer.m_next_send_feefilter = current_time + m_rng.rand_exp_duration(AVG_FEEFILTER_BROADCAST_INTERVAL);
     }
     // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
     // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
     else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < peer.m_next_send_feefilter &&
              (currentFilter < 3 * peer.m_fee_filter_sent / 4 || currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
-        peer.m_next_send_feefilter = current_time + FastRandomContext().randrange<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY);
+        peer.m_next_send_feefilter = current_time + m_rng.randrange<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY);
     }
 }
@@ -6059,7 +6059,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
                 if (pto->IsInboundConn()) {
                     tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
                 } else {
-                    tx_relay->m_next_inv_send_time = current_time + FastRandomContext().rand_exp_duration(OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
+                    tx_relay->m_next_inv_send_time = current_time + m_rng.rand_exp_duration(OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
                 }
             }
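
On the net_processing side the reused object is the PeerManagerImpl member m_rng, and NextInvToInbounds also gains an EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) annotation, in line with the existing comment that only the net processing thread invokes it. The mechanism the surrounding comments describe, a single next-send deadline shared by all inbound peers so that inv timing does not reveal when a transaction was first received, can be sketched standalone as follows (types and members below are illustrative, not PeerManagerImpl's actual ones, and the sketch uses an atomic rather than the lock annotation):

// Standalone sketch of a shared inbound "next inv send" deadline drawn from a
// member RNG; not PeerManagerImpl's actual members or locking.
#include <atomic>
#include <chrono>
#include <cstdint>
#include <random>

class InvScheduler {
    std::mt19937_64 m_rng{std::random_device{}()};
    std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{std::chrono::microseconds{0}};

    // Exponentially distributed duration with the given mean (stand-in for
    // FastRandomContext::rand_exp_duration).
    std::chrono::microseconds RandExp(std::chrono::microseconds mean) {
        std::exponential_distribution<double> dist(1.0 / mean.count());
        return std::chrono::microseconds{static_cast<std::int64_t>(dist(m_rng))};
    }

public:
    // All inbound peers share one deadline; it only advances once the previous
    // deadline has passed, so no single inbound peer can infer from inv timing
    // exactly when we first learned of a transaction.
    std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now,
                                                std::chrono::seconds average_interval) {
        if (m_next_inv_to_inbounds.load() < now) {
            m_next_inv_to_inbounds = now + RandExp(average_interval); // seconds convert to microseconds losslessly
        }
        return m_next_inv_to_inbounds;
    }
};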