2010-08-29 12:58:15 -04:00
|
|
|
// Copyright (c) 2009-2010 Satoshi Nakamoto
|
2022-12-24 20:49:50 -03:00
|
|
|
// Copyright (c) 2009-2022 The Bitcoin Core developers
|
2014-12-13 01:09:33 -03:00
|
|
|
// Distributed under the MIT software license, see the accompanying
|
2012-05-18 10:02:28 -04:00
|
|
|
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
2013-04-13 02:13:08 -03:00
|
|
|
|
2011-05-15 03:11:04 -04:00
|
|
|
#ifndef BITCOIN_NET_H
|
|
|
|
#define BITCOIN_NET_H
|
|
|
|
|
2023-07-27 15:10:34 -04:00
|
|
|
#include <bip324.h>
|
2020-06-08 22:26:22 -04:00
|
|
|
#include <chainparams.h>
|
2021-09-29 12:09:55 -03:00
|
|
|
#include <common/bloom.h>
|
2022-06-28 08:27:57 -04:00
|
|
|
#include <compat/compat.h>
|
2021-09-10 23:29:00 -03:00
|
|
|
#include <consensus/amount.h>
|
2018-08-24 18:48:23 -03:00
|
|
|
#include <crypto/siphash.h>
|
2017-11-09 21:57:53 -03:00
|
|
|
#include <hash.h>
|
2020-12-04 14:03:05 -03:00
|
|
|
#include <i2p.h>
|
2023-09-06 10:55:14 -03:00
|
|
|
#include <kernel/messagestartchars.h>
|
2019-06-20 05:37:51 -04:00
|
|
|
#include <net_permissions.h>
|
2020-06-29 14:15:06 -04:00
|
|
|
#include <netaddress.h>
|
2019-10-16 14:37:19 -03:00
|
|
|
#include <netbase.h>
|
2021-08-31 13:40:18 -04:00
|
|
|
#include <netgroup.h>
|
2023-09-06 10:55:14 -03:00
|
|
|
#include <node/connection_types.h>
|
2023-11-28 13:56:26 -03:00
|
|
|
#include <node/protocol_version.h>
|
2017-11-09 21:57:53 -03:00
|
|
|
#include <policy/feerate.h>
|
|
|
|
#include <protocol.h>
|
|
|
|
#include <random.h>
|
2020-07-13 13:20:47 -04:00
|
|
|
#include <span.h>
|
2017-11-09 21:57:53 -03:00
|
|
|
#include <streams.h>
|
|
|
|
#include <sync.h>
|
2020-04-11 11:47:17 -04:00
|
|
|
#include <uint256.h>
|
2020-10-12 17:57:37 -03:00
|
|
|
#include <util/check.h>
|
2021-04-23 06:15:15 -04:00
|
|
|
#include <util/sock.h>
|
2022-10-11 04:33:22 -03:00
|
|
|
#include <util/threadinterrupt.h>
|
2013-04-13 02:13:08 -03:00
|
|
|
|
2016-05-20 12:19:26 -04:00
|
|
|
#include <atomic>
|
2020-10-12 17:57:37 -03:00
|
|
|
#include <condition_variable>
|
2019-12-29 18:04:02 -03:00
|
|
|
#include <cstdint>
|
2011-05-15 03:11:04 -04:00
|
|
|
#include <deque>
|
2021-07-22 12:23:21 -04:00
|
|
|
#include <functional>
|
2022-04-01 07:23:01 -03:00
|
|
|
#include <list>
|
2020-05-16 21:05:44 -04:00
|
|
|
#include <map>
|
2016-04-16 15:47:18 -03:00
|
|
|
#include <memory>
|
2021-03-15 00:59:05 -03:00
|
|
|
#include <optional>
|
2023-01-06 07:23:46 -03:00
|
|
|
#include <queue>
|
2020-10-12 17:57:37 -03:00
|
|
|
#include <thread>
|
2022-11-30 17:55:22 -03:00
|
|
|
#include <unordered_set>
|
2020-09-18 09:47:08 -03:00
|
|
|
#include <vector>
|
2011-05-15 03:11:04 -04:00
|
|
|
|
2021-11-30 10:49:43 -03:00
|
|
|
class AddrMan;
|
2017-10-05 17:40:43 -03:00
|
|
|
class BanMan;
|
2023-09-12 08:42:36 -03:00
|
|
|
class CChainParams;
|
2021-11-30 10:49:43 -03:00
|
|
|
class CNode;
|
|
|
|
class CScheduler;
|
2020-04-11 11:47:17 -04:00
|
|
|
struct bilingual_str;
|
2010-08-29 12:58:15 -04:00
|
|
|
|
2019-08-16 04:45:13 -04:00
|
|
|
/** Default for -whitelistrelay. */
|
|
|
|
static const bool DEFAULT_WHITELISTRELAY = true;
|
|
|
|
/** Default for -whitelistforcerelay. */
|
|
|
|
static const bool DEFAULT_WHITELISTFORCERELAY = false;
|
|
|
|
|
2013-10-14 19:34:20 -03:00
|
|
|
/** Time after which to disconnect, after waiting for a ping response (or inactivity). */
|
2020-07-10 12:19:11 -04:00
|
|
|
static constexpr std::chrono::minutes TIMEOUT_INTERVAL{20};
|
2020-09-30 00:19:57 -03:00
|
|
|
/** Run the feeler connection loop once every 2 minutes. **/
|
|
|
|
static constexpr auto FEELER_INTERVAL = 2min;
|
2020-09-01 17:05:47 -04:00
|
|
|
/** Run the extra block-relay-only connection loop once every 5 minutes. **/
|
2020-09-30 00:19:57 -03:00
|
|
|
static constexpr auto EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL = 5min;
|
2016-01-03 14:54:50 -03:00
|
|
|
/** Maximum length of incoming protocol messages (no message over 4 MB is currently acceptable). */
|
|
|
|
static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH = 4 * 1000 * 1000;
|
2019-03-23 12:34:40 -03:00
|
|
|
/** Maximum length of the user agent string in `version` message */
|
2015-07-31 13:05:42 -03:00
|
|
|
static const unsigned int MAX_SUBVERSION_LENGTH = 256;
|
2019-03-09 14:55:06 -03:00
|
|
|
/** Maximum number of automatic outgoing nodes over which we'll relay everything (blocks, tx, addrs, etc) */
|
|
|
|
static const int MAX_OUTBOUND_FULL_RELAY_CONNECTIONS = 8;
|
2016-12-11 01:39:26 -03:00
|
|
|
/** Maximum number of addnode outgoing nodes */
|
|
|
|
static const int MAX_ADDNODE_CONNECTIONS = 8;
|
2019-03-09 14:55:06 -03:00
|
|
|
/** Maximum number of block-relay-only outgoing connections */
|
2020-06-09 01:04:07 -04:00
|
|
|
static const int MAX_BLOCK_RELAY_ONLY_CONNECTIONS = 2;
|
2019-05-08 04:34:01 -04:00
|
|
|
/** Maximum number of feeler connections */
|
|
|
|
static const int MAX_FEELER_CONNECTIONS = 1;
|
2014-05-29 07:02:22 -04:00
|
|
|
/** -listen default */
|
|
|
|
static const bool DEFAULT_LISTEN = true;
|
2015-08-01 14:41:21 -03:00
|
|
|
/** The maximum number of peer connections to maintain. */
|
|
|
|
static const unsigned int DEFAULT_MAX_PEER_CONNECTIONS = 125;
|
2015-11-05 20:05:06 -03:00
|
|
|
/** The default for -maxuploadtarget. 0 = Unlimited */
|
2021-11-17 07:47:30 -03:00
|
|
|
static const std::string DEFAULT_MAX_UPLOAD_TARGET{"0M"};
|
2015-11-14 09:47:53 -03:00
|
|
|
/** Default for blocks only*/
|
|
|
|
static const bool DEFAULT_BLOCKSONLY = false;
|
2018-11-15 20:30:26 -03:00
|
|
|
/** -peertimeout default */
|
|
|
|
static const int64_t DEFAULT_PEER_CONNECT_TIMEOUT = 60;
|
2020-07-13 13:20:47 -04:00
|
|
|
/** Number of file descriptors required for message capture **/
|
|
|
|
static const int NUM_FDS_MESSAGE_CAPTURE = 1;
|
2023-05-05 05:14:51 -04:00
|
|
|
/** Interval for ASMap Health Check **/
|
|
|
|
static constexpr std::chrono::hours ASMAP_HEALTH_CHECK_INTERVAL{24};
|
2010-08-29 12:58:15 -04:00
|
|
|
|
2021-05-24 15:57:50 -04:00
|
|
|
static constexpr bool DEFAULT_FORCEDNSSEED{false};
|
|
|
|
static constexpr bool DEFAULT_DNSSEED{true};
|
|
|
|
static constexpr bool DEFAULT_FIXEDSEEDS{true};
|
2015-06-27 16:21:41 -03:00
|
|
|
static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000;
|
|
|
|
static const size_t DEFAULT_MAXSENDBUFFER = 1 * 1000;
|
|
|
|
|
2023-08-22 17:50:59 -04:00
|
|
|
static constexpr bool DEFAULT_V2_TRANSPORT{false};
|
|
|
|
|
2017-04-10 16:00:23 -03:00
|
|
|
typedef int64_t NodeId;
|
2016-05-06 15:50:24 -03:00
|
|
|
|
2021-12-28 18:26:20 -03:00
|
|
|
struct AddedNodeParams {
|
|
|
|
std::string m_added_node;
|
|
|
|
bool m_use_v2transport;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct AddedNodeInfo {
|
|
|
|
AddedNodeParams m_params;
|
2016-04-16 19:12:58 -03:00
|
|
|
CService resolvedAddress;
|
|
|
|
bool fConnected;
|
|
|
|
bool fInbound;
|
|
|
|
};
|
|
|
|
|
2016-04-16 19:30:03 -03:00
|
|
|
class CNodeStats;
|
2016-05-25 21:26:46 -04:00
|
|
|
class CClientUIInterface;
|
|
|
|
|
2022-03-24 07:44:08 -03:00
|
|
|
struct CSerializedNetMsg {
|
2016-11-10 19:05:23 -03:00
|
|
|
CSerializedNetMsg() = default;
|
|
|
|
CSerializedNetMsg(CSerializedNetMsg&&) = default;
|
|
|
|
CSerializedNetMsg& operator=(CSerializedNetMsg&&) = default;
|
2022-03-24 07:44:08 -03:00
|
|
|
// No implicit copying, only moves.
|
2016-11-10 19:05:23 -03:00
|
|
|
CSerializedNetMsg(const CSerializedNetMsg& msg) = delete;
|
|
|
|
CSerializedNetMsg& operator=(const CSerializedNetMsg&) = delete;
|
|
|
|
|
2022-03-24 07:44:08 -03:00
|
|
|
CSerializedNetMsg Copy() const
|
|
|
|
{
|
|
|
|
CSerializedNetMsg copy;
|
|
|
|
copy.data = data;
|
|
|
|
copy.m_type = m_type;
|
|
|
|
return copy;
|
|
|
|
}
|
|
|
|
|
2016-11-10 19:05:23 -03:00
|
|
|
std::vector<unsigned char> data;
|
2020-05-10 13:48:11 -04:00
|
|
|
std::string m_type;
|
2023-07-24 13:23:39 -04:00
|
|
|
|
|
|
|
/** Compute total memory usage of this object (own memory + any dynamic memory). */
|
|
|
|
size_t GetMemoryUsage() const noexcept;
|
2016-11-10 19:05:23 -03:00
|
|
|
};
|
|
|
|
|
2020-10-19 10:32:54 -03:00
|
|
|
/**
|
|
|
|
* Look up IP addresses from all interfaces on the machine and add them to the
|
|
|
|
* list of local addresses to self-advertise.
|
|
|
|
* The loopback interface is skipped and only the first address from each
|
|
|
|
* interface is used.
|
|
|
|
*/
|
2021-01-06 03:39:04 -03:00
|
|
|
void Discover();
|
2020-10-19 10:32:54 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
uint16_t GetListenPort();
|
2016-06-19 21:42:15 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
enum
|
|
|
|
{
|
|
|
|
LOCAL_NONE, // unknown
|
|
|
|
LOCAL_IF, // address a local interface listens on
|
|
|
|
LOCAL_BIND, // address explicit bound to
|
2020-02-22 20:34:13 -03:00
|
|
|
LOCAL_MAPPED, // address reported by UPnP or NAT-PMP
|
2021-01-06 03:39:04 -03:00
|
|
|
LOCAL_MANUAL, // address explicitly specified (-externalip=)
|
2016-04-16 20:13:12 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
LOCAL_MAX
|
|
|
|
};
|
2016-04-16 18:43:11 -03:00
|
|
|
|
2022-07-04 12:02:28 -04:00
|
|
|
/** Returns a local address that we should advertise to this peer. */
|
|
|
|
std::optional<CService> GetLocalAddrForPeer(CNode& node);
|
2017-10-23 14:36:15 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
bool AddLocal(const CService& addr, int nScore = LOCAL_NONE);
|
|
|
|
bool AddLocal(const CNetAddr& addr, int nScore = LOCAL_NONE);
|
|
|
|
void RemoveLocal(const CService& addr);
|
|
|
|
bool SeenLocal(const CService& addr);
|
|
|
|
bool IsLocal(const CService& addr);
|
2023-04-11 15:09:31 -04:00
|
|
|
CService GetLocalAddress(const CNode& peer);
|
2016-04-16 19:12:58 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
extern bool fDiscover;
|
|
|
|
extern bool fListen;
|
2016-04-16 19:30:03 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
/** Subversion as sent to the P2P network in `version` messages */
|
|
|
|
extern std::string strSubVersion;
|
2016-04-19 01:04:58 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
struct LocalServiceInfo {
|
|
|
|
int nScore;
|
2021-03-01 17:35:28 -03:00
|
|
|
uint16_t nPort;
|
2021-01-06 03:39:04 -03:00
|
|
|
};
|
2016-04-18 22:44:42 -03:00
|
|
|
|
2022-04-20 03:10:13 -04:00
|
|
|
extern GlobalMutex g_maplocalhost_mutex;
|
2022-01-19 07:04:52 -03:00
|
|
|
extern std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(g_maplocalhost_mutex);
|
2016-04-18 22:44:42 -03:00
|
|
|
|
2022-04-07 07:43:52 -04:00
|
|
|
extern const std::string NET_MESSAGE_TYPE_OTHER;
|
2022-01-29 10:28:07 -03:00
|
|
|
using mapMsgTypeSize = std::map</* message type */ std::string, /* total bytes */ uint64_t>;
|
2016-04-18 22:44:42 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
class CNodeStats
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
NodeId nodeid;
|
2020-07-10 12:19:11 -04:00
|
|
|
std::chrono::seconds m_last_send;
|
|
|
|
std::chrono::seconds m_last_recv;
|
2021-12-13 08:32:28 -03:00
|
|
|
std::chrono::seconds m_last_tx_time;
|
|
|
|
std::chrono::seconds m_last_block_time;
|
|
|
|
std::chrono::seconds m_connected;
|
2021-01-06 03:39:04 -03:00
|
|
|
int64_t nTimeOffset;
|
2021-08-26 04:39:10 -04:00
|
|
|
std::string m_addr_name;
|
2021-01-06 03:39:04 -03:00
|
|
|
int nVersion;
|
|
|
|
std::string cleanSubVer;
|
|
|
|
bool fInbound;
|
2023-05-16 15:36:38 -04:00
|
|
|
// We requested high bandwidth connection to peer
|
2021-01-06 03:39:04 -03:00
|
|
|
bool m_bip152_highbandwidth_to;
|
2023-05-16 15:36:38 -04:00
|
|
|
// Peer requested high bandwidth connection
|
2021-01-06 03:39:04 -03:00
|
|
|
bool m_bip152_highbandwidth_from;
|
|
|
|
int m_starting_height;
|
|
|
|
uint64_t nSendBytes;
|
2022-04-07 07:43:52 -04:00
|
|
|
mapMsgTypeSize mapSendBytesPerMsgType;
|
2021-01-06 03:39:04 -03:00
|
|
|
uint64_t nRecvBytes;
|
2022-04-07 07:43:52 -04:00
|
|
|
mapMsgTypeSize mapRecvBytesPerMsgType;
|
2022-09-01 04:50:26 -04:00
|
|
|
NetPermissionFlags m_permission_flags;
|
2020-09-29 23:11:53 -03:00
|
|
|
std::chrono::microseconds m_last_ping_time;
|
|
|
|
std::chrono::microseconds m_min_ping_time;
|
2021-01-06 03:39:04 -03:00
|
|
|
// Our address, as reported by the peer
|
|
|
|
std::string addrLocal;
|
|
|
|
// Address of this peer
|
|
|
|
CAddress addr;
|
|
|
|
// Bind address of our side of the connection
|
|
|
|
CAddress addrBind;
|
|
|
|
// Network the peer connected through
|
|
|
|
Network m_network;
|
|
|
|
uint32_t m_mapped_as;
|
2021-01-02 06:44:03 -03:00
|
|
|
ConnectionType m_conn_type;
|
2023-07-30 23:26:04 -04:00
|
|
|
/** Transport protocol type. */
|
|
|
|
TransportProtocolType m_transport_type;
|
|
|
|
/** BIP324 session id string in hex, if any. */
|
|
|
|
std::string m_session_id;
|
2021-01-06 03:39:04 -03:00
|
|
|
};
|
2016-04-18 22:44:42 -03:00
|
|
|
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
/** Transport protocol agnostic message container.
|
|
|
|
* Ideally it should only contain receive time, payload,
|
2022-01-15 15:59:19 -03:00
|
|
|
* type and size.
|
2021-01-06 03:39:04 -03:00
|
|
|
*/
|
2023-11-16 08:53:31 -03:00
|
|
|
class CNetMessage
|
|
|
|
{
|
2021-01-06 03:39:04 -03:00
|
|
|
public:
|
2023-11-16 08:53:31 -03:00
|
|
|
DataStream m_recv; //!< received message data
|
2021-01-06 03:39:04 -03:00
|
|
|
std::chrono::microseconds m_time{0}; //!< time of message receipt
|
|
|
|
uint32_t m_message_size{0}; //!< size of the payload
|
|
|
|
uint32_t m_raw_message_size{0}; //!< used wire size of the message (including header/checksum)
|
2022-01-15 15:59:19 -03:00
|
|
|
std::string m_type;
|
2017-01-14 21:00:00 -03:00
|
|
|
|
2023-11-16 08:53:31 -03:00
|
|
|
explicit CNetMessage(DataStream&& recv_in) : m_recv(std::move(recv_in)) {}
|
2023-03-24 09:13:07 -03:00
|
|
|
// Only one CNetMessage object will exist for the same message on either
|
|
|
|
// the receive or processing queue. For performance reasons we therefore
|
|
|
|
// delete the copy constructor and assignment operator to avoid the
|
|
|
|
// possibility of copying CNetMessage objects.
|
|
|
|
CNetMessage(CNetMessage&&) = default;
|
|
|
|
CNetMessage(const CNetMessage&) = delete;
|
|
|
|
CNetMessage& operator=(CNetMessage&&) = default;
|
|
|
|
CNetMessage& operator=(const CNetMessage&) = delete;
|
2021-01-06 03:39:04 -03:00
|
|
|
};
|
2018-05-21 15:02:40 -04:00
|
|
|
|
2023-07-05 16:22:52 -04:00
|
|
|
/** The Transport converts one connection's sent messages to wire bytes, and received bytes back. */
|
|
|
|
class Transport {
|
2021-01-06 03:39:04 -03:00
|
|
|
public:
|
2023-07-05 16:22:52 -04:00
|
|
|
virtual ~Transport() {}
|
|
|
|
|
2023-07-30 23:26:04 -04:00
|
|
|
struct Info
|
|
|
|
{
|
|
|
|
TransportProtocolType transport_type;
|
|
|
|
std::optional<uint256> session_id;
|
|
|
|
};
|
|
|
|
|
|
|
|
/** Retrieve information about this transport. */
|
|
|
|
virtual Info GetInfo() const noexcept = 0;
|
|
|
|
|
2023-07-05 16:22:52 -04:00
|
|
|
// 1. Receiver side functions, for decoding bytes received on the wire into transport protocol
|
2023-07-26 13:19:31 -04:00
|
|
|
// agnostic CNetMessage (message type & payload) objects.
|
2023-07-05 16:22:52 -04:00
|
|
|
|
2023-08-14 16:37:05 -04:00
|
|
|
/** Returns true if the current message is complete (so GetReceivedMessage can be called). */
|
|
|
|
virtual bool ReceivedMessageComplete() const = 0;
|
2023-07-29 13:59:35 -04:00
|
|
|
|
|
|
|
/** Feed wire bytes to the transport.
|
|
|
|
*
|
|
|
|
* @return false if some bytes were invalid, in which case the transport can't be used anymore.
|
|
|
|
*
|
|
|
|
* Consumed bytes are chopped off the front of msg_bytes.
|
|
|
|
*/
|
|
|
|
virtual bool ReceivedBytes(Span<const uint8_t>& msg_bytes) = 0;
|
|
|
|
|
|
|
|
/** Retrieve a completed message from transport.
|
|
|
|
*
|
|
|
|
* This can only be called when ReceivedMessageComplete() is true.
|
|
|
|
*
|
|
|
|
* If reject_message=true is returned the message itself is invalid, but (other than false
|
|
|
|
* returned by ReceivedBytes) the transport is not in an inconsistent state.
|
|
|
|
*/
|
2023-08-14 16:37:05 -04:00
|
|
|
virtual CNetMessage GetReceivedMessage(std::chrono::microseconds time, bool& reject_message) = 0;
|
2023-07-05 16:22:52 -04:00
|
|
|
|
2023-07-21 16:31:59 -04:00
|
|
|
// 2. Sending side functions, for converting messages into bytes to be sent over the wire.
|
2023-07-05 16:22:52 -04:00
|
|
|
|
2023-07-21 16:31:59 -04:00
|
|
|
/** Set the next message to send.
|
|
|
|
*
|
|
|
|
* If no message can currently be set (perhaps because the previous one is not yet done being
|
|
|
|
* sent), returns false, and msg will be unmodified. Otherwise msg is enqueued (and
|
|
|
|
* possibly moved-from) and true is returned.
|
|
|
|
*/
|
|
|
|
virtual bool SetMessageToSend(CSerializedNetMsg& msg) noexcept = 0;
|
|
|
|
|
|
|
|
/** Return type for GetBytesToSend, consisting of:
|
|
|
|
* - Span<const uint8_t> to_send: span of bytes to be sent over the wire (possibly empty).
|
|
|
|
* - bool more: whether there will be more bytes to be sent after the ones in to_send are
|
|
|
|
* all sent (as signaled by MarkBytesSent()).
|
2023-07-27 15:10:34 -04:00
|
|
|
* - const std::string& m_type: message type on behalf of which this is being sent
|
|
|
|
* ("" for bytes that are not on behalf of any message).
|
2023-07-21 16:31:59 -04:00
|
|
|
*/
|
|
|
|
using BytesToSend = std::tuple<
|
|
|
|
Span<const uint8_t> /*to_send*/,
|
|
|
|
bool /*more*/,
|
|
|
|
const std::string& /*m_type*/
|
|
|
|
>;
|
|
|
|
|
net: add have_next_message argument to Transport::GetBytesToSend()
Before this commit, there are only two possibly outcomes for the "more" prediction
in Transport::GetBytesToSend():
* true: the transport itself has more to send, so the answer is certainly yes.
* false: the transport has nothing further to send, but if vSendMsg has more message(s)
left, that still will result in more wire bytes after the next
SetMessageToSend().
For the BIP324 v2 transport, there will arguably be a third state:
* definitely not: the transport has nothing further to send, but even if vSendMsg has
more messages left, they can't be sent (right now). This happens
before the handshake is complete.
To implement this, we move the entire decision logic to the Transport, by adding a
boolean to GetBytesToSend(), called have_next_message, which informs the transport
whether more messages are available. The return values are still true and false, but
they mean "definitely yes" and "definitely no", rather than "yes" and "maybe".
2023-08-16 13:21:35 -04:00
|
|
|
/** Get bytes to send on the wire, if any, along with other information about it.
|
2023-07-21 16:31:59 -04:00
|
|
|
*
|
|
|
|
* As a const function, it does not modify the transport's observable state, and is thus safe
|
|
|
|
* to be called multiple times.
|
|
|
|
*
|
net: add have_next_message argument to Transport::GetBytesToSend()
Before this commit, there are only two possibly outcomes for the "more" prediction
in Transport::GetBytesToSend():
* true: the transport itself has more to send, so the answer is certainly yes.
* false: the transport has nothing further to send, but if vSendMsg has more message(s)
left, that still will result in more wire bytes after the next
SetMessageToSend().
For the BIP324 v2 transport, there will arguably be a third state:
* definitely not: the transport has nothing further to send, but even if vSendMsg has
more messages left, they can't be sent (right now). This happens
before the handshake is complete.
To implement this, we move the entire decision logic to the Transport, by adding a
boolean to GetBytesToSend(), called have_next_message, which informs the transport
whether more messages are available. The return values are still true and false, but
they mean "definitely yes" and "definitely no", rather than "yes" and "maybe".
2023-08-16 13:21:35 -04:00
|
|
|
* @param[in] have_next_message If true, the "more" return value reports whether more will
|
|
|
|
* be sendable after a SetMessageToSend call. It is set by the caller when they know
|
|
|
|
* they have another message ready to send, and only care about what happens
|
|
|
|
* after that. The have_next_message argument only affects this "more" return value
|
|
|
|
* and nothing else.
|
2023-07-21 16:31:59 -04:00
|
|
|
*
|
net: add have_next_message argument to Transport::GetBytesToSend()
Before this commit, there are only two possibly outcomes for the "more" prediction
in Transport::GetBytesToSend():
* true: the transport itself has more to send, so the answer is certainly yes.
* false: the transport has nothing further to send, but if vSendMsg has more message(s)
left, that still will result in more wire bytes after the next
SetMessageToSend().
For the BIP324 v2 transport, there will arguably be a third state:
* definitely not: the transport has nothing further to send, but even if vSendMsg has
more messages left, they can't be sent (right now). This happens
before the handshake is complete.
To implement this, we move the entire decision logic to the Transport, by adding a
boolean to GetBytesToSend(), called have_next_message, which informs the transport
whether more messages are available. The return values are still true and false, but
they mean "definitely yes" and "definitely no", rather than "yes" and "maybe".
2023-08-16 13:21:35 -04:00
|
|
|
* Effectively, there are three possible outcomes about whether there are more bytes
|
|
|
|
* to send:
|
|
|
|
* - Yes: the transport itself has more bytes to send later. For example, for
|
|
|
|
* V1Transport this happens during the sending of the header of a
|
|
|
|
* message, when there is a non-empty payload that follows.
|
|
|
|
* - No: the transport itself has no more bytes to send, but will have bytes to
|
|
|
|
* send if handed a message through SetMessageToSend. In V1Transport this
|
|
|
|
* happens when sending the payload of a message.
|
|
|
|
* - Blocked: the transport itself has no more bytes to send, and is also incapable
|
|
|
|
* of sending anything more at all now, if it were handed another
|
2023-07-27 15:10:34 -04:00
|
|
|
* message to send. This occurs in V2Transport before the handshake is
|
|
|
|
* complete, as the encryption ciphers are not set up for sending
|
|
|
|
* messages before that point.
|
net: add have_next_message argument to Transport::GetBytesToSend()
Before this commit, there are only two possibly outcomes for the "more" prediction
in Transport::GetBytesToSend():
* true: the transport itself has more to send, so the answer is certainly yes.
* false: the transport has nothing further to send, but if vSendMsg has more message(s)
left, that still will result in more wire bytes after the next
SetMessageToSend().
For the BIP324 v2 transport, there will arguably be a third state:
* definitely not: the transport has nothing further to send, but even if vSendMsg has
more messages left, they can't be sent (right now). This happens
before the handshake is complete.
To implement this, we move the entire decision logic to the Transport, by adding a
boolean to GetBytesToSend(), called have_next_message, which informs the transport
whether more messages are available. The return values are still true and false, but
they mean "definitely yes" and "definitely no", rather than "yes" and "maybe".
2023-08-16 13:21:35 -04:00
|
|
|
*
|
|
|
|
* The boolean 'more' is true for Yes, false for Blocked, and have_next_message
|
|
|
|
* controls what is returned for No.
|
|
|
|
*
|
|
|
|
* @return a BytesToSend object. The to_send member returned acts as a stream which is only
|
|
|
|
* ever appended to. This means that with the exception of MarkBytesSent (which pops
|
|
|
|
* bytes off the front of later to_sends), operations on the transport can only append
|
|
|
|
* to what is being returned. Also note that m_type and to_send refer to data that is
|
|
|
|
* internal to the transport, and calling any non-const function on this object may
|
|
|
|
* invalidate them.
|
2023-07-21 16:31:59 -04:00
|
|
|
*/
|
net: add have_next_message argument to Transport::GetBytesToSend()
Before this commit, there are only two possibly outcomes for the "more" prediction
in Transport::GetBytesToSend():
* true: the transport itself has more to send, so the answer is certainly yes.
* false: the transport has nothing further to send, but if vSendMsg has more message(s)
left, that still will result in more wire bytes after the next
SetMessageToSend().
For the BIP324 v2 transport, there will arguably be a third state:
* definitely not: the transport has nothing further to send, but even if vSendMsg has
more messages left, they can't be sent (right now). This happens
before the handshake is complete.
To implement this, we move the entire decision logic to the Transport, by adding a
boolean to GetBytesToSend(), called have_next_message, which informs the transport
whether more messages are available. The return values are still true and false, but
they mean "definitely yes" and "definitely no", rather than "yes" and "maybe".
2023-08-16 13:21:35 -04:00
|
|
|
virtual BytesToSend GetBytesToSend(bool have_next_message) const noexcept = 0;
|
2023-07-21 16:31:59 -04:00
|
|
|
|
|
|
|
/** Report how many bytes returned by the last GetBytesToSend() have been sent.
|
|
|
|
*
|
|
|
|
* bytes_sent cannot exceed to_send.size() of the last GetBytesToSend() result.
|
|
|
|
*
|
|
|
|
* If bytes_sent=0, this call has no effect.
|
|
|
|
*/
|
|
|
|
virtual void MarkBytesSent(size_t bytes_sent) noexcept = 0;
|
2023-07-24 13:23:39 -04:00
|
|
|
|
|
|
|
/** Return the memory usage of this transport attributable to buffered data to send. */
|
|
|
|
virtual size_t GetSendMemoryUsage() const noexcept = 0;
|
2023-08-22 20:42:24 -04:00
|
|
|
|
|
|
|
// 3. Miscellaneous functions.
|
|
|
|
|
|
|
|
/** Whether upon disconnections, a reconnect with V1 is warranted. */
|
|
|
|
virtual bool ShouldReconnectV1() const noexcept = 0;
|
2021-01-06 03:39:04 -03:00
|
|
|
};
|
2019-12-24 15:18:44 -03:00
|
|
|
|
2023-07-05 16:22:52 -04:00
|
|
|
class V1Transport final : public Transport
|
2021-01-06 03:39:04 -03:00
|
|
|
{
|
2016-04-16 15:47:18 -03:00
|
|
|
private:
|
2023-11-11 09:04:18 -03:00
|
|
|
const MessageStartChars m_magic_bytes;
|
2021-01-06 03:39:04 -03:00
|
|
|
const NodeId m_node_id; // Only for logging
|
2023-07-26 13:19:31 -04:00
|
|
|
mutable Mutex m_recv_mutex; //!< Lock for receive state
|
|
|
|
mutable CHash256 hasher GUARDED_BY(m_recv_mutex);
|
|
|
|
mutable uint256 data_hash GUARDED_BY(m_recv_mutex);
|
|
|
|
bool in_data GUARDED_BY(m_recv_mutex); // parsing header (false) or data (true)
|
2023-11-16 08:53:31 -03:00
|
|
|
DataStream hdrbuf GUARDED_BY(m_recv_mutex){}; // partially received header
|
2023-07-26 13:19:31 -04:00
|
|
|
CMessageHeader hdr GUARDED_BY(m_recv_mutex); // complete header
|
2023-11-16 08:53:31 -03:00
|
|
|
DataStream vRecv GUARDED_BY(m_recv_mutex){}; // received message data
|
2023-07-26 13:19:31 -04:00
|
|
|
unsigned int nHdrPos GUARDED_BY(m_recv_mutex);
|
|
|
|
unsigned int nDataPos GUARDED_BY(m_recv_mutex);
|
|
|
|
|
|
|
|
const uint256& GetMessageHash() const EXCLUSIVE_LOCKS_REQUIRED(m_recv_mutex);
|
|
|
|
int readHeader(Span<const uint8_t> msg_bytes) EXCLUSIVE_LOCKS_REQUIRED(m_recv_mutex);
|
|
|
|
int readData(Span<const uint8_t> msg_bytes) EXCLUSIVE_LOCKS_REQUIRED(m_recv_mutex);
|
|
|
|
|
|
|
|
void Reset() EXCLUSIVE_LOCKS_REQUIRED(m_recv_mutex) {
|
|
|
|
AssertLockHeld(m_recv_mutex);
|
2021-01-06 03:39:04 -03:00
|
|
|
vRecv.clear();
|
|
|
|
hdrbuf.clear();
|
|
|
|
hdrbuf.resize(24);
|
|
|
|
in_data = false;
|
|
|
|
nHdrPos = 0;
|
|
|
|
nDataPos = 0;
|
|
|
|
data_hash.SetNull();
|
|
|
|
hasher.Reset();
|
|
|
|
}
|
2016-04-16 16:59:10 -03:00
|
|
|
|
2023-07-26 13:19:31 -04:00
|
|
|
bool CompleteInternal() const noexcept EXCLUSIVE_LOCKS_REQUIRED(m_recv_mutex)
|
|
|
|
{
|
|
|
|
AssertLockHeld(m_recv_mutex);
|
|
|
|
if (!in_data) return false;
|
|
|
|
return hdr.nMessageSize == nDataPos;
|
|
|
|
}
|
|
|
|
|
2023-07-21 16:31:59 -04:00
|
|
|
/** Lock for sending state. */
|
|
|
|
mutable Mutex m_send_mutex;
|
|
|
|
/** The header of the message currently being sent. */
|
|
|
|
std::vector<uint8_t> m_header_to_send GUARDED_BY(m_send_mutex);
|
|
|
|
/** The data of the message currently being sent. */
|
|
|
|
CSerializedNetMsg m_message_to_send GUARDED_BY(m_send_mutex);
|
|
|
|
/** Whether we're currently sending header bytes or message bytes. */
|
|
|
|
bool m_sending_header GUARDED_BY(m_send_mutex) {false};
|
|
|
|
/** How many bytes have been sent so far (from m_header_to_send, or from m_message_to_send.data). */
|
|
|
|
size_t m_bytes_sent GUARDED_BY(m_send_mutex) {0};
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
public:
|
2023-11-16 08:53:31 -03:00
|
|
|
explicit V1Transport(const NodeId node_id) noexcept;
|
2016-04-18 22:44:42 -03:00
|
|
|
|
2023-08-14 16:37:05 -04:00
|
|
|
bool ReceivedMessageComplete() const override EXCLUSIVE_LOCKS_REQUIRED(!m_recv_mutex)
|
2021-01-06 03:39:04 -03:00
|
|
|
{
|
2023-07-26 13:19:31 -04:00
|
|
|
AssertLockNotHeld(m_recv_mutex);
|
|
|
|
return WITH_LOCK(m_recv_mutex, return CompleteInternal());
|
2021-01-06 03:39:04 -03:00
|
|
|
}
|
2023-07-26 13:19:31 -04:00
|
|
|
|
2023-07-30 23:26:04 -04:00
|
|
|
Info GetInfo() const noexcept override;
|
|
|
|
|
2023-07-29 13:59:35 -04:00
|
|
|
bool ReceivedBytes(Span<const uint8_t>& msg_bytes) override EXCLUSIVE_LOCKS_REQUIRED(!m_recv_mutex)
|
2021-01-06 03:39:04 -03:00
|
|
|
{
|
2023-07-26 13:19:31 -04:00
|
|
|
AssertLockNotHeld(m_recv_mutex);
|
|
|
|
LOCK(m_recv_mutex);
|
2021-01-06 03:39:04 -03:00
|
|
|
int ret = in_data ? readData(msg_bytes) : readHeader(msg_bytes);
|
|
|
|
if (ret < 0) {
|
|
|
|
Reset();
|
|
|
|
} else {
|
|
|
|
msg_bytes = msg_bytes.subspan(ret);
|
|
|
|
}
|
2023-07-29 13:59:35 -04:00
|
|
|
return ret >= 0;
|
2021-01-06 03:39:04 -03:00
|
|
|
}
|
2023-08-14 16:37:05 -04:00
|
|
|
|
|
|
|
CNetMessage GetReceivedMessage(std::chrono::microseconds time, bool& reject_message) override EXCLUSIVE_LOCKS_REQUIRED(!m_recv_mutex);
|
2020-09-12 12:03:06 -03:00
|
|
|
|
2023-07-21 16:31:59 -04:00
|
|
|
bool SetMessageToSend(CSerializedNetMsg& msg) noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex);
|
net: add have_next_message argument to Transport::GetBytesToSend()
Before this commit, there are only two possibly outcomes for the "more" prediction
in Transport::GetBytesToSend():
* true: the transport itself has more to send, so the answer is certainly yes.
* false: the transport has nothing further to send, but if vSendMsg has more message(s)
left, that still will result in more wire bytes after the next
SetMessageToSend().
For the BIP324 v2 transport, there will arguably be a third state:
* definitely not: the transport has nothing further to send, but even if vSendMsg has
more messages left, they can't be sent (right now). This happens
before the handshake is complete.
To implement this, we move the entire decision logic to the Transport, by adding a
boolean to GetBytesToSend(), called have_next_message, which informs the transport
whether more messages are available. The return values are still true and false, but
they mean "definitely yes" and "definitely no", rather than "yes" and "maybe".
2023-08-16 13:21:35 -04:00
|
|
|
BytesToSend GetBytesToSend(bool have_next_message) const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex);
|
2023-07-21 16:31:59 -04:00
|
|
|
void MarkBytesSent(size_t bytes_sent) noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex);
|
2023-07-24 13:23:39 -04:00
|
|
|
size_t GetSendMemoryUsage() const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex);
|
2023-08-22 20:42:24 -04:00
|
|
|
bool ShouldReconnectV1() const noexcept override { return false; }
|
2021-01-06 03:39:04 -03:00
|
|
|
};
|
2016-04-18 22:44:42 -03:00
|
|
|
|
2023-07-27 15:10:34 -04:00
|
|
|
class V2Transport final : public Transport
|
|
|
|
{
|
|
|
|
private:
|
|
|
|
/** Contents of the version packet to send. BIP324 stipulates that senders should leave this
|
|
|
|
* empty, and receivers should ignore it. Future extensions can change what is sent as long as
|
|
|
|
* an empty version packet contents is interpreted as no extensions supported. */
|
|
|
|
static constexpr std::array<std::byte, 0> VERSION_CONTENTS = {};
|
|
|
|
|
2023-08-29 22:37:18 -04:00
|
|
|
/** The length of the V1 prefix to match bytes initially received by responders with to
|
|
|
|
* determine if their peer is speaking V1 or V2. */
|
2023-10-03 15:41:55 -03:00
|
|
|
static constexpr size_t V1_PREFIX_LEN = 16;
|
2023-08-29 22:37:18 -04:00
|
|
|
|
2023-07-27 15:10:34 -04:00
|
|
|
// The sender side and receiver side of V2Transport are state machines that are transitioned
|
|
|
|
// through, based on what has been received. The receive state corresponds to the contents of,
|
|
|
|
// and bytes received to, the receive buffer. The send state controls what can be appended to
|
2023-08-29 22:37:18 -04:00
|
|
|
// the send buffer and what can be sent from it.
|
2023-07-27 15:10:34 -04:00
|
|
|
|
|
|
|
/** State type that defines the current contents of the receive buffer and/or how the next
|
|
|
|
* received bytes added to it will be interpreted.
|
|
|
|
*
|
|
|
|
* Diagram:
|
|
|
|
*
|
2023-08-29 22:37:18 -04:00
|
|
|
* start(responder)
|
|
|
|
* |
|
2023-09-23 20:50:01 -03:00
|
|
|
* | start(initiator) /---------\
|
|
|
|
* | | | |
|
|
|
|
* v v v |
|
|
|
|
* KEY_MAYBE_V1 -> KEY -> GARB_GARBTERM -> VERSION -> APP -> APP_READY
|
2023-08-29 22:37:18 -04:00
|
|
|
* |
|
|
|
|
* \-------> V1
|
2023-07-27 15:10:34 -04:00
|
|
|
*/
|
|
|
|
enum class RecvState : uint8_t {
|
2023-08-29 22:37:18 -04:00
|
|
|
/** (Responder only) either v2 public key or v1 header.
|
|
|
|
*
|
|
|
|
* This is the initial state for responders, before data has been received to distinguish
|
|
|
|
* v1 from v2 connections. When that happens, the state becomes either KEY (for v2) or V1
|
|
|
|
* (for v1). */
|
|
|
|
KEY_MAYBE_V1,
|
|
|
|
|
2023-07-27 15:10:34 -04:00
|
|
|
/** Public key.
|
|
|
|
*
|
2023-08-29 22:37:18 -04:00
|
|
|
* This is the initial state for initiators, during which the other side's public key is
|
2023-07-27 15:10:34 -04:00
|
|
|
* received. When that information arrives, the ciphers get initialized and the state
|
|
|
|
* becomes GARB_GARBTERM. */
|
|
|
|
KEY,
|
|
|
|
|
|
|
|
/** Garbage and garbage terminator.
|
|
|
|
*
|
|
|
|
* Whenever a byte is received, the last 16 bytes are compared with the expected garbage
|
2023-09-23 20:50:01 -03:00
|
|
|
* terminator. When that happens, the state becomes VERSION. If no matching terminator is
|
2023-07-27 15:10:34 -04:00
|
|
|
* received in 4111 bytes (4095 for the maximum garbage length, and 16 bytes for the
|
|
|
|
* terminator), the connection aborts. */
|
|
|
|
GARB_GARBTERM,
|
|
|
|
|
|
|
|
/** Version packet.
|
|
|
|
*
|
2023-09-23 20:50:01 -03:00
|
|
|
* A packet is received, and decrypted/verified. If that fails, the connection aborts. The
|
|
|
|
* first received packet in this state (whether it's a decoy or not) is expected to
|
|
|
|
* authenticate the garbage received during the GARB_GARBTERM state as associated
|
|
|
|
* authenticated data (AAD). The first non-decoy packet in this state is interpreted as
|
|
|
|
* version negotiation (currently, that means ignoring the contents, but it can be used for
|
|
|
|
* negotiating future extensions), and afterwards the state becomes APP. */
|
2023-07-27 15:10:34 -04:00
|
|
|
VERSION,
|
|
|
|
|
|
|
|
/** Application packet.
|
|
|
|
*
|
|
|
|
* A packet is received, and decrypted/verified. If that succeeds, the state becomes
|
|
|
|
* APP_READY and the decrypted contents is kept in m_recv_decode_buffer until it is
|
|
|
|
* retrieved as a message by GetMessage(). */
|
|
|
|
APP,
|
|
|
|
|
|
|
|
/** Nothing (an application packet is available for GetMessage()).
|
|
|
|
*
|
|
|
|
* Nothing can be received in this state. When the message is retrieved by GetMessage,
|
|
|
|
* the state becomes APP again. */
|
|
|
|
APP_READY,
|
2023-08-29 22:37:18 -04:00
|
|
|
|
|
|
|
/** Nothing (this transport is using v1 fallback).
|
|
|
|
*
|
|
|
|
* All receive operations are redirected to m_v1_fallback. */
|
|
|
|
V1,
|
2023-07-27 15:10:34 -04:00
|
|
|
};
|
|
|
|
|
|
|
|
/** State type that controls the sender side.
|
|
|
|
*
|
|
|
|
* Diagram:
|
|
|
|
*
|
2023-08-29 22:37:18 -04:00
|
|
|
* start(responder)
|
|
|
|
* |
|
|
|
|
* | start(initiator)
|
|
|
|
* | |
|
|
|
|
* v v
|
|
|
|
* MAYBE_V1 -> AWAITING_KEY -> READY
|
|
|
|
* |
|
|
|
|
* \-----> V1
|
2023-07-27 15:10:34 -04:00
|
|
|
*/
|
|
|
|
enum class SendState : uint8_t {
|
2023-08-29 22:37:18 -04:00
|
|
|
/** (Responder only) Not sending until v1 or v2 is detected.
|
|
|
|
*
|
2023-09-08 14:55:47 -03:00
|
|
|
* This is the initial state for responders. The send buffer is empty.
|
|
|
|
* When the receiver determines whether this
|
2023-08-29 22:37:18 -04:00
|
|
|
* is a V1 or V2 connection, the sender state becomes AWAITING_KEY (for v2) or V1 (for v1).
|
|
|
|
*/
|
|
|
|
MAYBE_V1,
|
|
|
|
|
2023-07-27 15:10:34 -04:00
|
|
|
/** Waiting for the other side's public key.
|
|
|
|
*
|
2023-09-08 15:45:56 -03:00
|
|
|
* This is the initial state for initiators. The public key and garbage is sent out. When
|
|
|
|
* the receiver receives the other side's public key and transitions to GARB_GARBTERM, the
|
|
|
|
* sender state becomes READY. */
|
2023-07-27 15:10:34 -04:00
|
|
|
AWAITING_KEY,
|
|
|
|
|
|
|
|
/** Normal sending state.
|
|
|
|
*
|
|
|
|
* In this state, the ciphers are initialized, so packets can be sent. When this state is
|
2023-09-23 20:50:01 -03:00
|
|
|
* entered, the garbage terminator and version packet are appended to the send buffer (in
|
|
|
|
* addition to the key and garbage which may still be there). In this state a message can be
|
|
|
|
* provided if the send buffer is empty. */
|
2023-07-27 15:10:34 -04:00
|
|
|
READY,
|
2023-08-29 22:37:18 -04:00
|
|
|
|
|
|
|
/** This transport is using v1 fallback.
|
|
|
|
*
|
|
|
|
* All send operations are redirected to m_v1_fallback. */
|
|
|
|
V1,
|
2023-07-27 15:10:34 -04:00
|
|
|
};
|
|
|
|
|
|
|
|
/** Cipher state. */
|
|
|
|
BIP324Cipher m_cipher;
|
|
|
|
/** Whether we are the initiator side. */
|
|
|
|
const bool m_initiating;
|
|
|
|
/** NodeId (for debug logging). */
|
|
|
|
const NodeId m_nodeid;
|
2023-08-29 22:37:18 -04:00
|
|
|
/** Encapsulate a V1Transport to fall back to. */
|
|
|
|
V1Transport m_v1_fallback;
|
2023-07-27 15:10:34 -04:00
|
|
|
|
|
|
|
/** Lock for receiver-side fields. */
|
|
|
|
mutable Mutex m_recv_mutex ACQUIRED_BEFORE(m_send_mutex);
|
2023-09-23 20:50:01 -03:00
|
|
|
/** In {VERSION, APP}, the decrypted packet length, if m_recv_buffer.size() >=
|
2023-07-27 15:10:34 -04:00
|
|
|
* BIP324Cipher::LENGTH_LEN. Unspecified otherwise. */
|
|
|
|
uint32_t m_recv_len GUARDED_BY(m_recv_mutex) {0};
|
|
|
|
/** Receive buffer; meaning is determined by m_recv_state. */
|
|
|
|
std::vector<uint8_t> m_recv_buffer GUARDED_BY(m_recv_mutex);
|
2023-09-26 08:26:27 -03:00
|
|
|
/** AAD expected in next received packet (currently used only for garbage). */
|
|
|
|
std::vector<uint8_t> m_recv_aad GUARDED_BY(m_recv_mutex);
|
2023-07-27 15:10:34 -04:00
|
|
|
/** Buffer to put decrypted contents in, for converting to CNetMessage. */
|
|
|
|
std::vector<uint8_t> m_recv_decode_buffer GUARDED_BY(m_recv_mutex);
|
|
|
|
/** Current receiver state. */
|
|
|
|
RecvState m_recv_state GUARDED_BY(m_recv_mutex);
|
|
|
|
|
|
|
|
/** Lock for sending-side fields. If both sending and receiving fields are accessed,
|
|
|
|
* m_recv_mutex must be acquired before m_send_mutex. */
|
|
|
|
mutable Mutex m_send_mutex ACQUIRED_AFTER(m_recv_mutex);
|
|
|
|
/** The send buffer; meaning is determined by m_send_state. */
|
|
|
|
std::vector<uint8_t> m_send_buffer GUARDED_BY(m_send_mutex);
|
|
|
|
/** How many bytes from the send buffer have been sent so far. */
|
|
|
|
uint32_t m_send_pos GUARDED_BY(m_send_mutex) {0};
|
2023-09-08 14:55:47 -03:00
|
|
|
/** The garbage sent, or to be sent (MAYBE_V1 and AWAITING_KEY state only). */
|
|
|
|
std::vector<uint8_t> m_send_garbage GUARDED_BY(m_send_mutex);
|
2023-07-27 15:10:34 -04:00
|
|
|
/** Type of the message being sent. */
|
|
|
|
std::string m_send_type GUARDED_BY(m_send_mutex);
|
|
|
|
/** Current sender state. */
|
|
|
|
SendState m_send_state GUARDED_BY(m_send_mutex);
|
2023-08-22 20:42:24 -04:00
|
|
|
/** Whether we've sent at least 24 bytes (which would trigger disconnect for V1 peers). */
|
|
|
|
bool m_sent_v1_header_worth GUARDED_BY(m_send_mutex) {false};
|
2023-07-27 15:10:34 -04:00
|
|
|
|
|
|
|
/** Change the receive state. */
|
|
|
|
void SetReceiveState(RecvState recv_state) noexcept EXCLUSIVE_LOCKS_REQUIRED(m_recv_mutex);
|
|
|
|
/** Change the send state. */
|
|
|
|
void SetSendState(SendState send_state) noexcept EXCLUSIVE_LOCKS_REQUIRED(m_send_mutex);
|
|
|
|
/** Given a packet's contents, find the message type (if valid), and strip it from contents. */
|
|
|
|
static std::optional<std::string> GetMessageType(Span<const uint8_t>& contents) noexcept;
|
|
|
|
/** Determine how many received bytes can be processed in one go (not allowed in V1 state). */
|
|
|
|
size_t GetMaxBytesToProcess() noexcept EXCLUSIVE_LOCKS_REQUIRED(m_recv_mutex);
|
2023-09-08 14:55:47 -03:00
|
|
|
/** Put our public key + garbage in the send buffer. */
|
|
|
|
void StartSendingHandshake() noexcept EXCLUSIVE_LOCKS_REQUIRED(m_send_mutex);
|
2023-08-29 22:37:18 -04:00
|
|
|
/** Process bytes in m_recv_buffer, while in KEY_MAYBE_V1 state. */
|
|
|
|
void ProcessReceivedMaybeV1Bytes() noexcept EXCLUSIVE_LOCKS_REQUIRED(m_recv_mutex, !m_send_mutex);
|
2023-07-27 15:10:34 -04:00
|
|
|
/** Process bytes in m_recv_buffer, while in KEY state. */
|
2023-09-06 00:38:15 -03:00
|
|
|
bool ProcessReceivedKeyBytes() noexcept EXCLUSIVE_LOCKS_REQUIRED(m_recv_mutex, !m_send_mutex);
|
2023-07-27 15:10:34 -04:00
|
|
|
/** Process bytes in m_recv_buffer, while in GARB_GARBTERM state. */
|
|
|
|
bool ProcessReceivedGarbageBytes() noexcept EXCLUSIVE_LOCKS_REQUIRED(m_recv_mutex);
|
2023-09-23 20:50:01 -03:00
|
|
|
/** Process bytes in m_recv_buffer, while in VERSION/APP state. */
|
2023-07-27 15:10:34 -04:00
|
|
|
bool ProcessReceivedPacketBytes() noexcept EXCLUSIVE_LOCKS_REQUIRED(m_recv_mutex);
|
|
|
|
|
|
|
|
public:
|
|
|
|
static constexpr uint32_t MAX_GARBAGE_LEN = 4095;
|
|
|
|
|
|
|
|
/** Construct a V2 transport with securely generated random keys.
|
|
|
|
*
|
|
|
|
* @param[in] nodeid the node's NodeId (only for debug log output).
|
|
|
|
* @param[in] initiating whether we are the initiator side.
|
|
|
|
*/
|
2023-11-16 08:53:31 -03:00
|
|
|
V2Transport(NodeId nodeid, bool initiating) noexcept;
|
2023-07-27 15:10:34 -04:00
|
|
|
|
2023-07-30 11:43:10 -04:00
|
|
|
/** Construct a V2 transport with specified keys and garbage (test use only). */
|
2023-11-16 08:53:31 -03:00
|
|
|
V2Transport(NodeId nodeid, bool initiating, const CKey& key, Span<const std::byte> ent32, std::vector<uint8_t> garbage) noexcept;
|
2023-07-27 15:10:34 -04:00
|
|
|
|
|
|
|
// Receive side functions.
|
|
|
|
bool ReceivedMessageComplete() const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_recv_mutex);
|
|
|
|
bool ReceivedBytes(Span<const uint8_t>& msg_bytes) noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_recv_mutex, !m_send_mutex);
|
|
|
|
CNetMessage GetReceivedMessage(std::chrono::microseconds time, bool& reject_message) noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_recv_mutex);
|
|
|
|
|
|
|
|
// Send side functions.
|
|
|
|
bool SetMessageToSend(CSerializedNetMsg& msg) noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex);
|
|
|
|
BytesToSend GetBytesToSend(bool have_next_message) const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex);
|
|
|
|
void MarkBytesSent(size_t bytes_sent) noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex);
|
|
|
|
size_t GetSendMemoryUsage() const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex);
|
2023-08-22 20:42:24 -04:00
|
|
|
|
|
|
|
// Miscellaneous functions.
|
|
|
|
bool ShouldReconnectV1() const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_recv_mutex, !m_send_mutex);
|
2023-07-30 23:26:04 -04:00
|
|
|
Info GetInfo() const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_recv_mutex);
|
2023-07-27 15:10:34 -04:00
|
|
|
};
|
|
|
|
|
2022-08-31 03:04:13 -04:00
|
|
|
struct CNodeOptions
|
|
|
|
{
|
2022-09-01 04:44:07 -04:00
|
|
|
NetPermissionFlags permission_flags = NetPermissionFlags::None;
|
2022-08-31 03:04:13 -04:00
|
|
|
std::unique_ptr<i2p::sam::Session> i2p_sam_session = nullptr;
|
2022-09-01 04:44:33 -04:00
|
|
|
bool prefer_evict = false;
|
2023-03-24 11:45:50 -03:00
|
|
|
size_t recv_flood_size{DEFAULT_MAXRECEIVEBUFFER * 1000};
|
2023-08-21 16:55:47 -04:00
|
|
|
bool use_v2transport = false;
|
2022-08-31 03:04:13 -04:00
|
|
|
};
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
/** Information about a peer */
|
|
|
|
class CNode
|
|
|
|
{
|
|
|
|
public:
|
2023-07-21 16:31:59 -04:00
|
|
|
/** Transport serializer/deserializer. The receive side functions are only called under cs_vRecv, while
|
|
|
|
* the sending side functions are only called under cs_vSend. */
|
2023-07-05 16:22:52 -04:00
|
|
|
const std::unique_ptr<Transport> m_transport;
|
2018-11-15 20:30:26 -03:00
|
|
|
|
2022-09-01 04:50:26 -04:00
|
|
|
const NetPermissionFlags m_permission_flags;
|
2021-04-23 09:15:23 -04:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Socket used for communication with the node.
|
|
|
|
* May not own a Sock object (after `CloseSocketDisconnect()` or during tests).
|
|
|
|
* `shared_ptr` (instead of `unique_ptr`) is used to avoid premature close of
|
|
|
|
* the underlying file descriptor by one thread while another thread is
|
|
|
|
* poll(2)-ing it for activity.
|
|
|
|
* @see https://github.com/bitcoin/bitcoin/issues/21744 for details.
|
|
|
|
*/
|
2021-04-23 09:30:46 -04:00
|
|
|
std::shared_ptr<Sock> m_sock GUARDED_BY(m_sock_mutex);
|
2021-04-23 09:15:23 -04:00
|
|
|
|
net: move message conversion to wire bytes from PushMessage to SocketSendData
This furthers transport abstraction by removing the assumption that a message
can always immediately be converted to wire bytes. This assumption does not hold
for the v2 transport proposed by BIP324, as no messages can be sent before the
handshake completes.
This is done by only keeping (complete) CSerializedNetMsg objects in vSendMsg,
rather than the resulting bytes (for header and payload) that need to be sent.
In SocketSendData, these objects are handed to the transport as permitted by it,
and sending out the bytes the transport tells us to send. This also removes the
nSendOffset member variable in CNode, as keeping track of how much has been sent
is now a responsability of the transport.
This is not a pure refactor, and has the following effects even for the current
v1 transport:
* Checksum calculation now happens in SocketSendData rather than PushMessage.
For non-optimistic-send messages, that means this computation now happens in
the network thread rather than the message handler thread (generally a good
thing, as the message handler thread is more of a computational bottleneck).
* Checksum calculation now happens while holding the cs_vSend lock. This is
technically unnecessary for the v1 transport, as messages are encoded
independent from one another, but is untenable for the v2 transport anyway.
* Statistics updates about per-message sent bytes now happen when those bytes
are actually handed to the OS, rather than at PushMessage time.
2023-08-16 13:31:50 -04:00
|
|
|
/** Sum of GetMemoryUsage of all vSendMsg entries. */
|
2023-07-24 13:23:39 -04:00
|
|
|
size_t m_send_memusage GUARDED_BY(cs_vSend){0};
|
net: move message conversion to wire bytes from PushMessage to SocketSendData
This furthers transport abstraction by removing the assumption that a message
can always immediately be converted to wire bytes. This assumption does not hold
for the v2 transport proposed by BIP324, as no messages can be sent before the
handshake completes.
This is done by only keeping (complete) CSerializedNetMsg objects in vSendMsg,
rather than the resulting bytes (for header and payload) that need to be sent.
In SocketSendData, these objects are handed to the transport as permitted by it,
and sending out the bytes the transport tells us to send. This also removes the
nSendOffset member variable in CNode, as keeping track of how much has been sent
is now a responsability of the transport.
This is not a pure refactor, and has the following effects even for the current
v1 transport:
* Checksum calculation now happens in SocketSendData rather than PushMessage.
For non-optimistic-send messages, that means this computation now happens in
the network thread rather than the message handler thread (generally a good
thing, as the message handler thread is more of a computational bottleneck).
* Checksum calculation now happens while holding the cs_vSend lock. This is
technically unnecessary for the v1 transport, as messages are encoded
independent from one another, but is untenable for the v2 transport anyway.
* Statistics updates about per-message sent bytes now happen when those bytes
are actually handed to the OS, rather than at PushMessage time.
2023-08-16 13:31:50 -04:00
|
|
|
/** Total number of bytes sent on the wire to this peer. */
|
2021-01-06 03:39:04 -03:00
|
|
|
uint64_t nSendBytes GUARDED_BY(cs_vSend){0};
|
net: move message conversion to wire bytes from PushMessage to SocketSendData
This furthers transport abstraction by removing the assumption that a message
can always immediately be converted to wire bytes. This assumption does not hold
for the v2 transport proposed by BIP324, as no messages can be sent before the
handshake completes.
This is done by only keeping (complete) CSerializedNetMsg objects in vSendMsg,
rather than the resulting bytes (for header and payload) that need to be sent.
In SocketSendData, these objects are handed to the transport as permitted by it,
and sending out the bytes the transport tells us to send. This also removes the
nSendOffset member variable in CNode, as keeping track of how much has been sent
is now a responsability of the transport.
This is not a pure refactor, and has the following effects even for the current
v1 transport:
* Checksum calculation now happens in SocketSendData rather than PushMessage.
For non-optimistic-send messages, that means this computation now happens in
the network thread rather than the message handler thread (generally a good
thing, as the message handler thread is more of a computational bottleneck).
* Checksum calculation now happens while holding the cs_vSend lock. This is
technically unnecessary for the v1 transport, as messages are encoded
independent from one another, but is untenable for the v2 transport anyway.
* Statistics updates about per-message sent bytes now happen when those bytes
are actually handed to the OS, rather than at PushMessage time.
2023-08-16 13:31:50 -04:00
|
|
|
/** Messages still to be fed to m_transport->SetMessageToSend. */
|
|
|
|
std::deque<CSerializedNetMsg> vSendMsg GUARDED_BY(cs_vSend);
|
2021-01-06 03:39:04 -03:00
|
|
|
Mutex cs_vSend;
|
2021-04-23 09:30:46 -04:00
|
|
|
Mutex m_sock_mutex;
|
2021-01-06 03:39:04 -03:00
|
|
|
Mutex cs_vRecv;
|
2016-04-17 19:34:32 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
uint64_t nRecvBytes GUARDED_BY(cs_vRecv){0};
|
2020-05-16 21:05:44 -04:00
|
|
|
|
2020-07-10 12:19:11 -04:00
|
|
|
std::atomic<std::chrono::seconds> m_last_send{0s};
|
|
|
|
std::atomic<std::chrono::seconds> m_last_recv{0s};
|
2021-12-13 08:02:10 -03:00
|
|
|
//! Unix epoch time at peer connection
|
2021-12-13 08:32:28 -03:00
|
|
|
const std::chrono::seconds m_connected;
|
2021-01-06 03:39:04 -03:00
|
|
|
std::atomic<int64_t> nTimeOffset{0};
|
|
|
|
// Address of this peer
|
|
|
|
const CAddress addr;
|
|
|
|
// Bind address of our side of the connection
|
|
|
|
const CAddress addrBind;
|
2021-08-24 13:54:13 -04:00
|
|
|
const std::string m_addr_name;
|
2023-08-22 20:42:24 -04:00
|
|
|
/** The pszDest argument provided to ConnectNode(). Only used for reconnections. */
|
|
|
|
const std::string m_dest;
|
2021-01-26 18:08:28 -03:00
|
|
|
//! Whether this peer is an inbound onion, i.e. connected via our Tor onion service.
|
2021-02-12 17:40:19 -03:00
|
|
|
const bool m_inbound_onion;
|
2021-01-06 03:39:04 -03:00
|
|
|
std::atomic<int> nVersion{0};
|
2022-01-15 22:14:12 -03:00
|
|
|
Mutex m_subver_mutex;
|
2020-05-16 21:05:44 -04:00
|
|
|
/**
|
2021-01-06 03:39:04 -03:00
|
|
|
* cleanSubVer is a sanitized string of the user agent byte array we read
|
|
|
|
* from the wire. This cleaned string can safely be logged or displayed.
|
2020-05-16 21:05:44 -04:00
|
|
|
*/
|
2022-01-15 22:11:04 -03:00
|
|
|
std::string cleanSubVer GUARDED_BY(m_subver_mutex){};
|
2022-09-01 04:44:33 -04:00
|
|
|
const bool m_prefer_evict{false}; // This peer is preferred for eviction.
|
2021-01-06 03:39:04 -03:00
|
|
|
bool HasPermission(NetPermissionFlags permission) const {
|
2022-09-01 04:50:26 -04:00
|
|
|
return NetPermissions::HasFlag(m_permission_flags, permission);
|
2021-01-06 03:39:04 -03:00
|
|
|
}
|
2020-12-14 11:51:26 -03:00
|
|
|
/** fSuccessfullyConnected is set to true on receiving VERACK from the peer. */
|
2021-01-06 03:39:04 -03:00
|
|
|
std::atomic_bool fSuccessfullyConnected{false};
|
|
|
|
// Setting fDisconnect to true will cause the node to be disconnected the
|
|
|
|
// next time DisconnectNodes() runs
|
|
|
|
std::atomic_bool fDisconnect{false};
|
|
|
|
CSemaphoreGrant grantOutbound;
|
|
|
|
std::atomic<int> nRefCount{0};
|
2019-03-09 14:55:06 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
const uint64_t nKeyedNetGroup;
|
|
|
|
std::atomic_bool fPauseRecv{false};
|
|
|
|
std::atomic_bool fPauseSend{false};
|
2019-03-09 14:55:06 -03:00
|
|
|
|
2023-03-24 11:29:21 -03:00
|
|
|
const ConnectionType m_conn_type;
|
2023-03-14 13:38:03 -03:00
|
|
|
|
2023-03-14 13:38:46 -03:00
|
|
|
/** Move all messages from the received queue to the processing queue. */
|
2023-03-24 11:45:50 -03:00
|
|
|
void MarkReceivedMsgsForProcessing()
|
2023-03-14 14:24:58 -03:00
|
|
|
EXCLUSIVE_LOCKS_REQUIRED(!m_msg_process_queue_mutex);
|
2023-03-14 13:38:46 -03:00
|
|
|
|
2023-03-14 13:58:59 -03:00
|
|
|
/** Poll the next message from the processing queue of this connection.
|
|
|
|
*
|
|
|
|
* Returns std::nullopt if the processing queue is empty, or a pair
|
|
|
|
* consisting of the message and a bool that indicates if the processing
|
|
|
|
* queue has more entries. */
|
2023-03-24 11:45:50 -03:00
|
|
|
std::optional<std::pair<CNetMessage, bool>> PollMessage()
|
2023-03-14 14:24:58 -03:00
|
|
|
EXCLUSIVE_LOCKS_REQUIRED(!m_msg_process_queue_mutex);
|
2023-03-14 13:58:59 -03:00
|
|
|
|
2023-03-14 13:48:32 -03:00
|
|
|
/** Account for the total size of a sent message in the per msg type connection stats. */
|
|
|
|
void AccountForSentBytes(const std::string& msg_type, size_t sent_bytes)
|
|
|
|
EXCLUSIVE_LOCKS_REQUIRED(cs_vSend)
|
|
|
|
{
|
|
|
|
mapSendBytesPerMsgType[msg_type] += sent_bytes;
|
|
|
|
}
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
bool IsOutboundOrBlockRelayConn() const {
|
|
|
|
switch (m_conn_type) {
|
|
|
|
case ConnectionType::OUTBOUND_FULL_RELAY:
|
|
|
|
case ConnectionType::BLOCK_RELAY:
|
|
|
|
return true;
|
|
|
|
case ConnectionType::INBOUND:
|
|
|
|
case ConnectionType::MANUAL:
|
|
|
|
case ConnectionType::ADDR_FETCH:
|
|
|
|
case ConnectionType::FEELER:
|
|
|
|
return false;
|
|
|
|
} // no default case, so the compiler can warn about missing cases
|
2019-03-09 14:55:06 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
assert(false);
|
|
|
|
}
|
2016-09-09 07:48:10 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
bool IsFullOutboundConn() const {
|
|
|
|
return m_conn_type == ConnectionType::OUTBOUND_FULL_RELAY;
|
|
|
|
}
|
2020-09-12 12:05:54 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
bool IsManualConn() const {
|
|
|
|
return m_conn_type == ConnectionType::MANUAL;
|
|
|
|
}
|
2016-12-27 19:12:44 -03:00
|
|
|
|
2023-06-11 15:26:18 -04:00
|
|
|
bool IsManualOrFullOutboundConn() const
|
|
|
|
{
|
|
|
|
switch (m_conn_type) {
|
|
|
|
case ConnectionType::INBOUND:
|
|
|
|
case ConnectionType::FEELER:
|
|
|
|
case ConnectionType::BLOCK_RELAY:
|
|
|
|
case ConnectionType::ADDR_FETCH:
|
|
|
|
return false;
|
|
|
|
case ConnectionType::OUTBOUND_FULL_RELAY:
|
|
|
|
case ConnectionType::MANUAL:
|
|
|
|
return true;
|
|
|
|
} // no default case, so the compiler can warn about missing cases
|
|
|
|
|
|
|
|
assert(false);
|
|
|
|
}
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
bool IsBlockOnlyConn() const {
|
|
|
|
return m_conn_type == ConnectionType::BLOCK_RELAY;
|
|
|
|
}
|
2016-12-31 04:05:26 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
bool IsFeelerConn() const {
|
|
|
|
return m_conn_type == ConnectionType::FEELER;
|
|
|
|
}
|
2016-12-27 19:12:44 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
bool IsAddrFetchConn() const {
|
|
|
|
return m_conn_type == ConnectionType::ADDR_FETCH;
|
|
|
|
}
|
2016-12-27 19:12:44 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
bool IsInboundConn() const {
|
|
|
|
return m_conn_type == ConnectionType::INBOUND;
|
|
|
|
}
|
2017-10-23 14:36:15 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
bool ExpectServicesFromConn() const {
|
|
|
|
switch (m_conn_type) {
|
|
|
|
case ConnectionType::INBOUND:
|
|
|
|
case ConnectionType::MANUAL:
|
|
|
|
case ConnectionType::FEELER:
|
|
|
|
return false;
|
|
|
|
case ConnectionType::OUTBOUND_FULL_RELAY:
|
|
|
|
case ConnectionType::BLOCK_RELAY:
|
|
|
|
case ConnectionType::ADDR_FETCH:
|
|
|
|
return true;
|
|
|
|
} // no default case, so the compiler can warn about missing cases
|
2020-09-01 17:05:47 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
assert(false);
|
|
|
|
}
|
2018-05-21 15:02:40 -04:00
|
|
|
|
2020-09-30 13:07:36 -03:00
|
|
|
/**
|
2021-01-06 03:39:04 -03:00
|
|
|
* Get network the peer connected through.
|
|
|
|
*
|
|
|
|
* Returns Network::NET_ONION for *inbound* onion connections,
|
|
|
|
* and CNetAddr::GetNetClass() otherwise. The latter cannot be used directly
|
|
|
|
* because it doesn't detect the former, and it's not the responsibility of
|
|
|
|
* the CNetAddr class to know the actual network a peer is connected through.
|
|
|
|
*
|
|
|
|
* @return network the peer connected through.
|
2020-09-30 13:07:36 -03:00
|
|
|
*/
|
2021-01-06 03:39:04 -03:00
|
|
|
Network ConnectedThroughNetwork() const;
|
2020-09-30 13:07:36 -03:00
|
|
|
|
2023-07-19 13:11:06 -04:00
|
|
|
/** Whether this peer connected through a privacy network. */
|
|
|
|
[[nodiscard]] bool IsConnectedThroughPrivacyNet() const;
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
// We selected peer as (compact blocks) high-bandwidth peer (BIP152)
|
|
|
|
std::atomic<bool> m_bip152_highbandwidth_to{false};
|
|
|
|
// Peer selected us as (compact blocks) high-bandwidth peer (BIP152)
|
|
|
|
std::atomic<bool> m_bip152_highbandwidth_from{false};
|
|
|
|
|
2020-07-20 13:46:13 -04:00
|
|
|
/** Whether this peer provides all services that we want. Used for eviction decisions */
|
|
|
|
std::atomic_bool m_has_all_wanted_services{false};
|
|
|
|
|
2022-10-26 13:17:01 -03:00
|
|
|
/** Whether we should relay transactions to this peer. This only changes
|
|
|
|
* from false to true. It will never change back to false. */
|
2021-01-27 17:44:10 -03:00
|
|
|
std::atomic_bool m_relays_txs{false};
|
2013-06-05 23:21:41 -04:00
|
|
|
|
2021-01-27 17:44:10 -03:00
|
|
|
/** Whether this peer has loaded a bloom filter. Used only in inbound
|
|
|
|
* eviction logic. */
|
|
|
|
std::atomic_bool m_bloom_filter_loaded{false};
|
2012-02-19 16:44:35 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
/** UNIX epoch time of the last block received from this peer that we had
|
|
|
|
* not yet seen (e.g. not already received from another peer), that passed
|
|
|
|
* preliminary validity checks and was saved to disk, even if we don't
|
|
|
|
* connect the block or it eventually fails connection. Used as an inbound
|
|
|
|
* peer eviction criterium in CConnman::AttemptToEvictConnection. */
|
2021-12-13 08:32:28 -03:00
|
|
|
std::atomic<std::chrono::seconds> m_last_block_time{0s};
|
2012-02-12 09:45:24 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
/** UNIX epoch time of the last transaction received from this peer that we
|
|
|
|
* had not yet seen (e.g. not already received from another peer) and that
|
|
|
|
* was accepted into our mempool. Used as an inbound peer eviction criterium
|
|
|
|
* in CConnman::AttemptToEvictConnection. */
|
2021-12-13 08:32:28 -03:00
|
|
|
std::atomic<std::chrono::seconds> m_last_tx_time{0s};
|
2019-01-09 21:41:37 -03:00
|
|
|
|
2020-06-21 18:56:19 -04:00
|
|
|
/** Last measured round-trip time. Used only for RPC/GUI stats/debugging.*/
|
2020-09-29 23:11:53 -03:00
|
|
|
std::atomic<std::chrono::microseconds> m_last_ping_time{0us};
|
2020-06-21 18:56:19 -04:00
|
|
|
|
|
|
|
/** Lowest measured round-trip time. Used as an inbound peer eviction
|
|
|
|
* criterium in CConnman::AttemptToEvictConnection. */
|
2020-09-29 23:11:53 -03:00
|
|
|
std::atomic<std::chrono::microseconds> m_min_ping_time{std::chrono::microseconds::max()};
|
2019-01-09 21:41:37 -03:00
|
|
|
|
2022-06-08 11:26:24 -04:00
|
|
|
CNode(NodeId id,
|
|
|
|
std::shared_ptr<Sock> sock,
|
|
|
|
const CAddress& addrIn,
|
|
|
|
uint64_t nKeyedNetGroupIn,
|
|
|
|
uint64_t nLocalHostNonceIn,
|
|
|
|
const CAddress& addrBindIn,
|
|
|
|
const std::string& addrNameIn,
|
|
|
|
ConnectionType conn_type_in,
|
|
|
|
bool inbound_onion,
|
2022-08-31 03:04:13 -04:00
|
|
|
CNodeOptions&& node_opts = {});
|
2021-01-06 03:39:04 -03:00
|
|
|
CNode(const CNode&) = delete;
|
|
|
|
CNode& operator=(const CNode&) = delete;
|
2012-02-12 09:45:24 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
NodeId GetId() const {
|
|
|
|
return id;
|
|
|
|
}
|
2012-06-29 17:24:53 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
uint64_t GetLocalNonce() const {
|
|
|
|
return nLocalHostNonce;
|
|
|
|
}
|
2012-06-29 17:24:53 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
int GetRefCount() const
|
|
|
|
{
|
|
|
|
assert(nRefCount >= 0);
|
|
|
|
return nRefCount;
|
|
|
|
}
|
2019-06-13 04:39:44 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
/**
|
|
|
|
* Receive bytes from the buffer and deserialize them into messages.
|
|
|
|
*
|
|
|
|
* @param[in] msg_bytes The raw data
|
|
|
|
* @param[out] complete Set True if at least one message has been
|
|
|
|
* deserialized and is ready to be processed
|
|
|
|
* @return True if the peer should stay connected,
|
|
|
|
* False if the peer should be disconnected from.
|
|
|
|
*/
|
2022-04-20 02:47:29 -04:00
|
|
|
bool ReceiveMsgBytes(Span<const uint8_t> msg_bytes, bool& complete) EXCLUSIVE_LOCKS_REQUIRED(!cs_vRecv);
|
2019-06-13 04:39:44 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
void SetCommonVersion(int greatest_common_version)
|
2019-06-13 04:39:44 -04:00
|
|
|
{
|
2021-01-06 03:39:04 -03:00
|
|
|
Assume(m_greatest_common_version == INIT_PROTO_VERSION);
|
|
|
|
m_greatest_common_version = greatest_common_version;
|
|
|
|
}
|
|
|
|
int GetCommonVersion() const
|
|
|
|
{
|
|
|
|
return m_greatest_common_version;
|
2019-06-13 04:39:44 -04:00
|
|
|
}
|
|
|
|
|
2022-04-20 02:47:29 -04:00
|
|
|
CService GetAddrLocal() const EXCLUSIVE_LOCKS_REQUIRED(!m_addr_local_mutex);
|
2021-01-06 03:39:04 -03:00
|
|
|
//! May not be called more than once
|
2022-04-20 02:47:29 -04:00
|
|
|
void SetAddrLocal(const CService& addrLocalIn) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_local_mutex);
|
2019-06-13 05:25:54 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
CNode* AddRef()
|
|
|
|
{
|
|
|
|
nRefCount++;
|
|
|
|
return this;
|
|
|
|
}
|
2012-11-15 21:41:12 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
void Release()
|
|
|
|
{
|
|
|
|
nRefCount--;
|
|
|
|
}
|
2014-07-06 10:06:46 -04:00
|
|
|
|
2022-04-20 02:47:29 -04:00
|
|
|
void CloseSocketDisconnect() EXCLUSIVE_LOCKS_REQUIRED(!m_sock_mutex);
|
2021-01-06 03:39:04 -03:00
|
|
|
|
2022-04-20 02:47:29 -04:00
|
|
|
void CopyStats(CNodeStats& stats) EXCLUSIVE_LOCKS_REQUIRED(!m_subver_mutex, !m_addr_local_mutex, !cs_vSend, !cs_vRecv);
|
2021-01-06 03:39:04 -03:00
|
|
|
|
2021-01-02 06:44:03 -03:00
|
|
|
std::string ConnectionTypeAsString() const { return ::ConnectionTypeAsString(m_conn_type); }
|
2021-01-06 03:39:04 -03:00
|
|
|
|
2020-06-21 18:56:19 -04:00
|
|
|
/** A ping-pong round trip has completed successfully. Update latest and minimum ping times. */
|
|
|
|
void PongReceived(std::chrono::microseconds ping_time) {
|
2020-09-29 23:11:53 -03:00
|
|
|
m_last_ping_time = ping_time;
|
|
|
|
m_min_ping_time = std::min(m_min_ping_time.load(), ping_time);
|
2020-06-21 18:56:19 -04:00
|
|
|
}
|
|
|
|
|
2021-01-09 06:06:56 -03:00
|
|
|
private:
|
|
|
|
const NodeId id;
|
|
|
|
const uint64_t nLocalHostNonce;
|
|
|
|
std::atomic<int> m_greatest_common_version{INIT_PROTO_VERSION};
|
|
|
|
|
2023-03-24 11:45:50 -03:00
|
|
|
const size_t m_recv_flood_size;
|
2021-08-24 13:54:13 -04:00
|
|
|
std::list<CNetMessage> vRecvMsg; // Used only by SocketHandler thread
|
2021-01-09 06:06:56 -03:00
|
|
|
|
2023-03-14 14:24:58 -03:00
|
|
|
Mutex m_msg_process_queue_mutex;
|
|
|
|
std::list<CNetMessage> m_msg_process_queue GUARDED_BY(m_msg_process_queue_mutex);
|
|
|
|
size_t m_msg_process_queue_size GUARDED_BY(m_msg_process_queue_mutex){0};
|
2023-03-14 14:02:09 -03:00
|
|
|
|
2021-01-09 06:06:56 -03:00
|
|
|
// Our address, as reported by the peer
|
2022-01-20 05:41:33 -03:00
|
|
|
CService addrLocal GUARDED_BY(m_addr_local_mutex);
|
2022-01-20 17:30:12 -03:00
|
|
|
mutable Mutex m_addr_local_mutex;
|
2021-01-09 06:10:12 -03:00
|
|
|
|
2022-04-07 07:43:52 -04:00
|
|
|
mapMsgTypeSize mapSendBytesPerMsgType GUARDED_BY(cs_vSend);
|
|
|
|
mapMsgTypeSize mapRecvBytesPerMsgType GUARDED_BY(cs_vRecv);
|
2022-06-08 11:26:24 -04:00
|
|
|
|
|
|
|
/**
|
|
|
|
* If an I2P session is created per connection (for outbound transient I2P
|
|
|
|
* connections) then it is stored here so that it can be destroyed when the
|
|
|
|
* socket is closed. I2P sessions involve a data/transport socket (in `m_sock`)
|
|
|
|
* and a control socket (in `m_i2p_sam_session`). For transient sessions, once
|
|
|
|
* the data socket is closed, the control socket is not going to be used anymore
|
|
|
|
* and is just taking up resources. So better close it as soon as `m_sock` is
|
|
|
|
* closed.
|
|
|
|
* Otherwise this unique_ptr is empty.
|
|
|
|
*/
|
|
|
|
std::unique_ptr<i2p::sam::Session> m_i2p_sam_session GUARDED_BY(m_sock_mutex);
|
2019-06-13 04:39:44 -04:00
|
|
|
};
|
2012-11-15 21:41:12 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
/**
|
|
|
|
* Interface for message handling
|
2019-08-07 09:56:24 -04:00
|
|
|
*/
|
2021-01-06 03:39:04 -03:00
|
|
|
class NetEventsInterface
|
|
|
|
{
|
2019-08-07 09:56:24 -04:00
|
|
|
public:
|
2022-09-06 23:57:18 -04:00
|
|
|
/** Mutex for anything that is only accessed via the msg processing thread */
|
|
|
|
static Mutex g_msgproc_mutex;
|
|
|
|
|
2021-01-06 23:27:22 -03:00
|
|
|
/** Initialize a peer (setup state, queue any initial messages) */
|
2020-07-20 09:01:05 -04:00
|
|
|
virtual void InitializeNode(CNode& node, ServiceFlags our_services) = 0;
|
2021-01-06 23:27:22 -03:00
|
|
|
|
|
|
|
/** Handle removal of a peer (clear state) */
|
2020-10-23 06:28:33 -03:00
|
|
|
virtual void FinalizeNode(const CNode& node) = 0;
|
2019-08-07 09:56:24 -04:00
|
|
|
|
2021-01-06 23:27:22 -03:00
|
|
|
/**
|
|
|
|
* Process protocol messages received from a given node
|
|
|
|
*
|
|
|
|
* @param[in] pnode The node which we have received messages from.
|
|
|
|
* @param[in] interrupt Interrupt condition for processing threads
|
|
|
|
* @return True if there is more work to be done
|
|
|
|
*/
|
2022-09-06 23:57:18 -04:00
|
|
|
virtual bool ProcessMessages(CNode* pnode, std::atomic<bool>& interrupt) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) = 0;
|
2021-01-06 23:27:22 -03:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Send queued protocol messages to a given node.
|
|
|
|
*
|
|
|
|
* @param[in] pnode The node which we are sending messages to.
|
|
|
|
* @return True if there is more work to be done
|
|
|
|
*/
|
2022-09-12 23:22:18 -03:00
|
|
|
virtual bool SendMessages(CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) = 0;
|
2021-01-06 23:27:22 -03:00
|
|
|
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
protected:
|
|
|
|
/**
|
|
|
|
* Protected destructor so that instances can only be deleted by derived classes.
|
|
|
|
* If that restriction is no longer desired, this should be made public and virtual.
|
|
|
|
*/
|
|
|
|
~NetEventsInterface() = default;
|
2019-08-07 09:56:24 -04:00
|
|
|
};
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
class CConnman
|
2010-08-29 12:58:15 -04:00
|
|
|
{
|
|
|
|
public:
|
2019-06-13 04:39:44 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
struct Options
|
|
|
|
{
|
|
|
|
ServiceFlags nLocalServices = NODE_NONE;
|
2023-08-30 16:51:09 -04:00
|
|
|
int m_max_automatic_connections = 0;
|
2021-01-06 03:39:04 -03:00
|
|
|
CClientUIInterface* uiInterface = nullptr;
|
|
|
|
NetEventsInterface* m_msgproc = nullptr;
|
|
|
|
BanMan* m_banman = nullptr;
|
|
|
|
unsigned int nSendBufferMaxSize = 0;
|
|
|
|
unsigned int nReceiveFloodSize = 0;
|
|
|
|
uint64_t nMaxOutboundLimit = 0;
|
|
|
|
int64_t m_peer_connect_timeout = DEFAULT_PEER_CONNECT_TIMEOUT;
|
|
|
|
std::vector<std::string> vSeedNodes;
|
|
|
|
std::vector<NetWhitelistPermissions> vWhitelistedRange;
|
|
|
|
std::vector<NetWhitebindPermissions> vWhiteBinds;
|
|
|
|
std::vector<CService> vBinds;
|
|
|
|
std::vector<CService> onion_binds;
|
2020-10-22 15:34:31 -03:00
|
|
|
/// True if the user did not specify -bind= or -whitebind= and thus
|
|
|
|
/// we should bind on `0.0.0.0` (IPv4) and `::` (IPv6).
|
|
|
|
bool bind_on_any;
|
2021-01-06 03:39:04 -03:00
|
|
|
bool m_use_addrman_outgoing = true;
|
|
|
|
std::vector<std::string> m_specified_outgoing;
|
|
|
|
std::vector<std::string> m_added_nodes;
|
2020-12-04 14:03:05 -03:00
|
|
|
bool m_i2p_accept_incoming;
|
2021-01-06 03:39:04 -03:00
|
|
|
};
|
2016-12-31 04:05:28 -03:00
|
|
|
|
2022-04-20 02:47:29 -04:00
|
|
|
void Init(const Options& connOptions) EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex, !m_total_bytes_sent_mutex)
|
2022-01-25 18:18:52 -03:00
|
|
|
{
|
|
|
|
AssertLockNotHeld(m_total_bytes_sent_mutex);
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
nLocalServices = connOptions.nLocalServices;
|
2023-08-30 16:51:09 -04:00
|
|
|
m_max_automatic_connections = connOptions.m_max_automatic_connections;
|
|
|
|
m_max_outbound_full_relay = std::min(MAX_OUTBOUND_FULL_RELAY_CONNECTIONS, m_max_automatic_connections);
|
|
|
|
m_max_outbound_block_relay = std::min(MAX_BLOCK_RELAY_ONLY_CONNECTIONS, m_max_automatic_connections - m_max_outbound_full_relay);
|
|
|
|
m_max_automatic_outbound = m_max_outbound_full_relay + m_max_outbound_block_relay + m_max_feeler;
|
|
|
|
m_max_inbound = std::max(0, m_max_automatic_connections - m_max_automatic_outbound);
|
2021-01-06 03:39:04 -03:00
|
|
|
m_use_addrman_outgoing = connOptions.m_use_addrman_outgoing;
|
2021-08-18 01:41:39 -04:00
|
|
|
m_client_interface = connOptions.uiInterface;
|
2021-01-06 03:39:04 -03:00
|
|
|
m_banman = connOptions.m_banman;
|
|
|
|
m_msgproc = connOptions.m_msgproc;
|
|
|
|
nSendBufferMaxSize = connOptions.nSendBufferMaxSize;
|
|
|
|
nReceiveFloodSize = connOptions.nReceiveFloodSize;
|
2020-07-10 12:19:11 -04:00
|
|
|
m_peer_connect_timeout = std::chrono::seconds{connOptions.m_peer_connect_timeout};
|
2021-01-06 03:39:04 -03:00
|
|
|
{
|
2022-01-25 18:05:04 -03:00
|
|
|
LOCK(m_total_bytes_sent_mutex);
|
2021-01-06 03:39:04 -03:00
|
|
|
nMaxOutboundLimit = connOptions.nMaxOutboundLimit;
|
|
|
|
}
|
|
|
|
vWhitelistedRange = connOptions.vWhitelistedRange;
|
|
|
|
{
|
2021-08-28 14:57:52 -04:00
|
|
|
LOCK(m_added_nodes_mutex);
|
2023-12-11 16:37:56 -03:00
|
|
|
// Attempt v2 connection if we support v2 - we'll reconnect with v1 if our
|
|
|
|
// peer doesn't support it or immediately disconnects us for another reason.
|
|
|
|
const bool use_v2transport(GetLocalServices() & NODE_P2P_V2);
|
2021-12-28 18:26:20 -03:00
|
|
|
for (const std::string& added_node : connOptions.m_added_nodes) {
|
2023-12-11 16:37:56 -03:00
|
|
|
m_added_node_params.push_back({added_node, use_v2transport});
|
2021-12-28 18:26:20 -03:00
|
|
|
}
|
2021-01-06 03:39:04 -03:00
|
|
|
}
|
|
|
|
m_onion_binds = connOptions.onion_binds;
|
|
|
|
}
|
2016-12-24 16:34:20 -03:00
|
|
|
|
2021-08-31 13:40:18 -04:00
|
|
|
CConnman(uint64_t seed0, uint64_t seed1, AddrMan& addrman, const NetGroupManager& netgroupman,
|
2023-09-12 08:42:36 -03:00
|
|
|
const CChainParams& params, bool network_active = true);
|
2021-08-31 13:40:18 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
~CConnman();
|
2022-04-20 02:47:29 -04:00
|
|
|
|
|
|
|
bool Start(CScheduler& scheduler, const Options& options) EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !m_added_nodes_mutex, !m_addr_fetches_mutex, !mutexMsgProc);
|
2012-11-15 21:41:12 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
void StopThreads();
|
|
|
|
void StopNodes();
|
|
|
|
void Stop()
|
|
|
|
{
|
|
|
|
StopThreads();
|
|
|
|
StopNodes();
|
|
|
|
};
|
|
|
|
|
2022-04-20 02:47:29 -04:00
|
|
|
void Interrupt() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc);
|
2021-01-06 03:39:04 -03:00
|
|
|
bool GetNetworkActive() const { return fNetworkActive; };
|
|
|
|
bool GetUseAddrmanOutgoing() const { return m_use_addrman_outgoing; };
|
|
|
|
void SetNetworkActive(bool active);
|
2023-08-21 18:14:52 -04:00
|
|
|
void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant&& grant_outbound, const char* strDest, ConnectionType conn_type, bool use_v2transport) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex);
|
2021-01-06 03:39:04 -03:00
|
|
|
bool CheckIncomingNonce(uint64_t nonce);
|
2023-05-05 05:14:51 -04:00
|
|
|
void ASMapHealthCheck();
|
2021-01-06 03:39:04 -03:00
|
|
|
|
2023-02-07 18:03:32 -03:00
|
|
|
// alias for thread safety annotations only, not defined
|
|
|
|
RecursiveMutex& GetNodesMutex() const LOCK_RETURNED(m_nodes_mutex);
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
bool ForNode(NodeId id, std::function<bool(CNode* pnode)> func);
|
|
|
|
|
2022-01-25 18:18:52 -03:00
|
|
|
void PushMessage(CNode* pnode, CSerializedNetMsg&& msg) EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex);
|
2021-01-06 03:39:04 -03:00
|
|
|
|
|
|
|
using NodeFn = std::function<void(CNode*)>;
|
|
|
|
void ForEachNode(const NodeFn& func)
|
|
|
|
{
|
2021-08-28 14:57:52 -04:00
|
|
|
LOCK(m_nodes_mutex);
|
|
|
|
for (auto&& node : m_nodes) {
|
2021-01-06 03:39:04 -03:00
|
|
|
if (NodeFullyConnected(node))
|
|
|
|
func(node);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
void ForEachNode(const NodeFn& func) const
|
|
|
|
{
|
2021-08-28 14:57:52 -04:00
|
|
|
LOCK(m_nodes_mutex);
|
|
|
|
for (auto&& node : m_nodes) {
|
2021-01-06 03:39:04 -03:00
|
|
|
if (NodeFullyConnected(node))
|
|
|
|
func(node);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Addrman functions
|
2021-05-02 13:05:42 -04:00
|
|
|
/**
|
|
|
|
* Return all or many randomly selected addresses, optionally by network.
|
|
|
|
*
|
|
|
|
* @param[in] max_addresses Maximum number of addresses to return (0 = all).
|
|
|
|
* @param[in] max_pct Maximum percentage of addresses to return (0 = all).
|
|
|
|
* @param[in] network Select only addresses of this network (nullopt = all).
|
2023-09-29 18:23:36 -03:00
|
|
|
* @param[in] filtered Select only addresses that are considered high quality (false = all).
|
2021-05-02 13:05:42 -04:00
|
|
|
*/
|
2023-09-29 18:23:36 -03:00
|
|
|
std::vector<CAddress> GetAddresses(size_t max_addresses, size_t max_pct, std::optional<Network> network, const bool filtered = true) const;
|
2021-01-06 03:39:04 -03:00
|
|
|
/**
|
|
|
|
* Cache is used to minimize topology leaks, so it should
|
|
|
|
* be used for all non-trusted calls, for example, p2p.
|
|
|
|
* A non-malicious call (from RPC or a peer with addr permission) should
|
|
|
|
* call the function without a parameter to avoid using the cache.
|
|
|
|
*/
|
|
|
|
std::vector<CAddress> GetAddresses(CNode& requestor, size_t max_addresses, size_t max_pct);
|
2020-07-20 17:24:48 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
// This allows temporarily exceeding m_max_outbound_full_relay, with the goal of finding
|
|
|
|
// a peer that is better than all our current peers.
|
|
|
|
void SetTryNewOutboundPeer(bool flag);
|
2021-04-17 13:17:40 -04:00
|
|
|
bool GetTryNewOutboundPeer() const;
|
2020-07-20 17:24:48 -04:00
|
|
|
|
2022-04-01 13:22:53 -03:00
|
|
|
void StartExtraBlockRelayPeers();
|
2020-06-03 00:23:44 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
// Return the number of outbound peers we have in excess of our target (eg,
|
|
|
|
// if we previously called SetTryNewOutboundPeer(true), and have since set
|
|
|
|
// to false, we may have extra peers that we wish to disconnect). This may
|
|
|
|
// return a value less than (num_outbound_connections - num_outbound_slots)
|
|
|
|
// in cases where some outbound connections are not yet fully connected, or
|
|
|
|
// not yet fully disconnected.
|
2021-04-17 13:17:40 -04:00
|
|
|
int GetExtraFullOutboundCount() const;
|
2021-01-06 03:39:04 -03:00
|
|
|
// Count the number of block-relay-only peers we have over our limit.
|
2021-04-17 13:17:40 -04:00
|
|
|
int GetExtraBlockRelayCount() const;
|
2020-06-02 11:39:47 -04:00
|
|
|
|
2021-12-28 18:26:20 -03:00
|
|
|
bool AddNode(const AddedNodeParams& add) EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex);
|
2022-04-20 02:47:29 -04:00
|
|
|
bool RemoveAddedNode(const std::string& node) EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex);
|
p2p: do not make automatic outbound connections to addnode peers
to allocate our limited outbound slots correctly, and to ensure addnode
connections benefit from their intended protections.
Our addnode logic usually connects the addnode peers before the automatic
outbound logic does, but not always, as a connection race can occur. If an
addnode peer disconnects us and if it was the only one from its network, there
can be a race between reconnecting to it with the addnode thread, and it being
picked as automatic network-specific outbound peer. Or our internet connection
or router, or the addnode peer, could be temporarily offline, and then return
online during the automatic outbound thread. Or we could add a new manual peer
using the addnode RPC at that time.
The race can be more apparent when our node doesn't know many peers, or with
networks like cjdns that currently have few bitcoin peers.
When an addnode peer is connected as an automatic outbound peer and is the only
connection we have to a network, it can be protected by our new outbound
eviction logic and persist in the "wrong role".
Examples on mainnet using logging added in the same pull request:
2023-08-12T14:51:05.681743Z [opencon] [net.cpp:1949] [ThreadOpenConnections]
[net:debug] Not making automatic network-specific outbound-full-relay connection
to i2p peer selected for manual (addnode) connection: [geh...odq.b32.i2p]:0
2023-08-13T03:59:28.050853Z [opencon] [net.cpp:1949] [ThreadOpenConnections]
[net:debug] Not making automatic block-relay-only connection to onion peer
selected for manual (addnode) connection: kpg...aid.onion:8333
2023-08-13T16:21:26.979052Z [opencon] [net.cpp:1949] [ThreadOpenConnections]
[net:debug] Not making automatic network-specific outbound-full-relay connection
to cjdns peer selected for manual (addnode) connection: [fcc...8ce]:8333
2023-08-14T20:43:53.401271Z [opencon] [net.cpp:1949] [ThreadOpenConnections]
[net:debug] Not making automatic network-specific outbound-full-relay connection
to cjdns peer selected for manual (addnode) connection: [fc7...59e]:8333
2023-08-15T00:10:01.894147Z [opencon] [net.cpp:1949] [ThreadOpenConnections]
[net:debug] Not making automatic feeler connection to i2p peer selected for
manual (addnode) connection: geh...odq.b32.i2p:8333
Finally, there does not seem to be a reason to make block-relay or short-lived
feeler connections to addnode peers, as the addnode logic will ensure we connect
to them if they are up, within the addnode connection limit.
Fix these issues by checking if the address is an addnode peer in our automatic
outbound connection logic.
2023-11-16 12:56:03 -03:00
|
|
|
bool AddedNodesContain(const CAddress& addr) const EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex);
|
2023-07-25 15:47:36 -04:00
|
|
|
std::vector<AddedNodeInfo> GetAddedNodeInfo(bool include_connected) const EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex);
|
2020-06-03 00:23:44 -04:00
|
|
|
|
2020-06-02 12:46:41 -04:00
|
|
|
/**
|
|
|
|
* Attempts to open a connection. Currently only used from tests.
|
|
|
|
*
|
|
|
|
* @param[in] address Address of node to try connecting to
|
2021-08-23 05:42:39 -04:00
|
|
|
* @param[in] conn_type ConnectionType::OUTBOUND, ConnectionType::BLOCK_RELAY,
|
|
|
|
* ConnectionType::ADDR_FETCH or ConnectionType::FEELER
|
2022-02-04 02:35:23 -03:00
|
|
|
* @param[in] use_v2transport Set to true if node attempts to connect using BIP 324 v2 transport protocol.
|
2020-06-02 12:46:41 -04:00
|
|
|
* @return bool Returns false if there are no available
|
|
|
|
* slots for this connection:
|
|
|
|
* - conn_type not a supported ConnectionType
|
|
|
|
* - Max total outbound connection capacity filled
|
|
|
|
* - Max connection capacity for type is filled
|
|
|
|
*/
|
2022-02-04 02:35:23 -03:00
|
|
|
bool AddConnection(const std::string& address, ConnectionType conn_type, bool use_v2transport) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex);
|
2020-06-02 12:46:41 -04:00
|
|
|
|
2021-04-17 13:17:40 -04:00
|
|
|
size_t GetNodeCount(ConnectionDirection) const;
|
2023-04-03 14:42:15 -04:00
|
|
|
uint32_t GetMappedAS(const CNetAddr& addr) const;
|
2021-04-17 13:17:40 -04:00
|
|
|
void GetNodeStats(std::vector<CNodeStats>& vstats) const;
|
2021-01-06 03:39:04 -03:00
|
|
|
bool DisconnectNode(const std::string& node);
|
|
|
|
bool DisconnectNode(const CSubNet& subnet);
|
|
|
|
bool DisconnectNode(const CNetAddr& addr);
|
|
|
|
bool DisconnectNode(NodeId id);
|
2020-05-12 15:58:41 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
//! Used to convey which local services we are offering peers during node
|
|
|
|
//! connection.
|
|
|
|
//!
|
|
|
|
//! The data returned by this is used in CNode construction,
|
|
|
|
//! which is used to advertise which services we are offering
|
|
|
|
//! that peer during `net_processing.cpp:PushNodeVersion()`.
|
|
|
|
ServiceFlags GetLocalServices() const;
|
2020-07-28 16:17:16 -04:00
|
|
|
|
2022-01-25 18:18:52 -03:00
|
|
|
uint64_t GetMaxOutboundTarget() const EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex);
|
2021-04-17 13:17:40 -04:00
|
|
|
std::chrono::seconds GetMaxOutboundTimeframe() const;
|
2020-07-28 16:39:38 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
//! check if the outbound target is reached
|
|
|
|
//! if param historicalBlockServingLimit is set true, the function will
|
|
|
|
//! response true if the limit for serving historical blocks has been reached
|
2022-01-25 18:18:52 -03:00
|
|
|
bool OutboundTargetReached(bool historicalBlockServingLimit) const EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex);
|
2020-08-10 17:48:54 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
//! response the bytes left in the current max outbound cycle
|
|
|
|
//! in case of no limit, it will always response 0
|
2022-01-25 18:18:52 -03:00
|
|
|
uint64_t GetOutboundTargetBytesLeft() const EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex);
|
2020-06-03 00:23:44 -04:00
|
|
|
|
2022-01-25 18:18:52 -03:00
|
|
|
std::chrono::seconds GetMaxOutboundTimeLeftInCycle() const EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex);
|
2020-06-03 00:23:44 -04:00
|
|
|
|
2021-04-17 13:17:40 -04:00
|
|
|
uint64_t GetTotalBytesRecv() const;
|
2022-01-25 18:18:52 -03:00
|
|
|
uint64_t GetTotalBytesSent() const EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex);
|
2020-09-30 13:19:19 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
/** Get a unique deterministic randomizer. */
|
|
|
|
CSipHasher GetDeterministicRandomizer(uint64_t id) const;
|
2015-08-25 11:30:31 -03:00
|
|
|
|
2022-04-20 02:47:29 -04:00
|
|
|
void WakeMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc);
|
2010-08-29 12:58:15 -04:00
|
|
|
|
2021-02-16 12:55:03 -03:00
|
|
|
/** Return true if we should disconnect the peer for failing an inactivity check. */
|
2020-07-10 12:19:11 -04:00
|
|
|
bool ShouldRunInactivityChecks(const CNode& node, std::chrono::seconds now) const;
|
2021-02-12 07:01:55 -03:00
|
|
|
|
2023-02-07 18:03:32 -03:00
|
|
|
bool MultipleManualOrFullOutboundConns(Network net) const EXCLUSIVE_LOCKS_REQUIRED(m_nodes_mutex);
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
private:
|
|
|
|
struct ListenSocket {
|
|
|
|
public:
|
2021-04-23 06:15:15 -04:00
|
|
|
std::shared_ptr<Sock> sock;
|
2021-01-06 03:39:04 -03:00
|
|
|
inline void AddSocketPermissionFlags(NetPermissionFlags& flags) const { NetPermissions::AddFlag(flags, m_permissions); }
|
2021-04-23 06:15:15 -04:00
|
|
|
ListenSocket(std::shared_ptr<Sock> sock_, NetPermissionFlags permissions_)
|
|
|
|
: sock{sock_}, m_permissions{permissions_}
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
private:
|
|
|
|
NetPermissionFlags m_permissions;
|
2019-03-08 16:26:36 -03:00
|
|
|
};
|
|
|
|
|
2022-01-25 18:18:52 -03:00
|
|
|
//! returns the time left in the current max outbound cycle
|
|
|
|
//! in case of no limit, it will always return 0
|
|
|
|
std::chrono::seconds GetMaxOutboundTimeLeftInCycle_() const EXCLUSIVE_LOCKS_REQUIRED(m_total_bytes_sent_mutex);
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
bool BindListenPort(const CService& bindAddr, bilingual_str& strError, NetPermissionFlags permissions);
|
|
|
|
bool Bind(const CService& addr, unsigned int flags, NetPermissionFlags permissions);
|
2020-10-22 15:34:31 -03:00
|
|
|
bool InitBinds(const Options& options);
|
2019-04-05 14:35:15 -03:00
|
|
|
|
2023-08-22 20:42:24 -04:00
|
|
|
void ThreadOpenAddedConnections() EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex, !m_unused_i2p_sessions_mutex, !m_reconnections_mutex);
|
2022-04-20 02:47:29 -04:00
|
|
|
void AddAddrFetch(const std::string& strDest) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex);
|
2023-01-06 07:23:46 -03:00
|
|
|
void ProcessAddrFetch() EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_unused_i2p_sessions_mutex);
|
2023-08-22 20:42:24 -04:00
|
|
|
void ThreadOpenConnections(std::vector<std::string> connect) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_added_nodes_mutex, !m_nodes_mutex, !m_unused_i2p_sessions_mutex, !m_reconnections_mutex);
|
2022-04-20 02:47:29 -04:00
|
|
|
void ThreadMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc);
|
2020-11-24 07:28:52 -03:00
|
|
|
void ThreadI2PAcceptIncoming();
|
2021-01-06 03:39:04 -03:00
|
|
|
void AcceptConnection(const ListenSocket& hListenSocket);
|
2020-11-24 08:40:03 -03:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Create a `CNode` object from a socket that has just been accepted and add the node to
|
2021-08-28 14:57:52 -04:00
|
|
|
* the `m_nodes` member.
|
2021-04-13 06:14:57 -04:00
|
|
|
* @param[in] sock Connected socket to communicate with the peer.
|
2022-09-01 04:50:26 -04:00
|
|
|
* @param[in] permission_flags The peer's permissions.
|
2020-11-24 08:40:03 -03:00
|
|
|
* @param[in] addr_bind The address and port at our side of the connection.
|
|
|
|
* @param[in] addr The address and port at the peer's side of the connection.
|
|
|
|
*/
|
2021-04-13 06:14:57 -04:00
|
|
|
void CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
|
2022-09-01 04:50:26 -04:00
|
|
|
NetPermissionFlags permission_flags,
|
2020-11-24 08:40:03 -03:00
|
|
|
const CAddress& addr_bind,
|
|
|
|
const CAddress& addr);
|
|
|
|
|
2023-08-22 20:42:24 -04:00
|
|
|
void DisconnectNodes() EXCLUSIVE_LOCKS_REQUIRED(!m_reconnections_mutex, !m_nodes_mutex);
|
2021-01-06 03:39:04 -03:00
|
|
|
void NotifyNumConnectionsChanged();
|
2021-01-06 08:12:51 -03:00
|
|
|
/** Return true if the peer is inactive and should be disconnected. */
|
|
|
|
bool InactivityCheck(const CNode& node) const;
|
2021-04-28 12:29:32 -04:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Generate a collection of sockets to check for IO readiness.
|
|
|
|
* @param[in] nodes Select from these nodes' sockets.
|
2021-05-04 12:37:19 -04:00
|
|
|
* @return sockets to check for readiness
|
2021-04-28 12:29:32 -04:00
|
|
|
*/
|
2021-05-04 12:37:19 -04:00
|
|
|
Sock::EventsPerSock GenerateWaitSockets(Span<CNode* const> nodes);
|
2021-04-28 12:29:32 -04:00
|
|
|
|
2021-10-25 06:03:58 -03:00
|
|
|
/**
|
|
|
|
* Check connected and listening sockets for IO readiness and process them accordingly.
|
|
|
|
*/
|
2022-04-20 02:47:29 -04:00
|
|
|
void SocketHandler() EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !mutexMsgProc);
|
2021-10-25 06:03:58 -03:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Do the read/write for connected sockets that are ready for IO.
|
2021-05-04 12:37:19 -04:00
|
|
|
* @param[in] nodes Nodes to process. The socket of each node is checked against `what`.
|
|
|
|
* @param[in] events_per_sock Sockets that are ready for IO.
|
2021-10-25 06:03:58 -03:00
|
|
|
*/
|
|
|
|
void SocketHandlerConnected(const std::vector<CNode*>& nodes,
|
2021-05-04 12:37:19 -04:00
|
|
|
const Sock::EventsPerSock& events_per_sock)
|
2022-04-20 02:47:29 -04:00
|
|
|
EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !mutexMsgProc);
|
2021-10-25 06:03:58 -03:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Accept incoming connections, one from each read-ready listening socket.
|
2021-05-04 12:37:19 -04:00
|
|
|
* @param[in] events_per_sock Sockets that are ready for IO.
|
2021-10-25 06:03:58 -03:00
|
|
|
*/
|
2021-05-04 12:37:19 -04:00
|
|
|
void SocketHandlerListening(const Sock::EventsPerSock& events_per_sock);
|
2021-10-25 06:03:58 -03:00
|
|
|
|
2023-08-22 20:42:24 -04:00
|
|
|
void ThreadSocketHandler() EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !mutexMsgProc, !m_nodes_mutex, !m_reconnections_mutex);
|
2022-04-20 02:47:29 -04:00
|
|
|
void ThreadDNSAddressSeed() EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_nodes_mutex);
|
2020-09-01 11:40:32 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
uint64_t CalculateKeyedNetGroup(const CAddress& ad) const;
|
2016-05-22 01:55:15 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
CNode* FindNode(const CNetAddr& ip);
|
|
|
|
CNode* FindNode(const std::string& addrName);
|
|
|
|
CNode* FindNode(const CService& addr);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Determine whether we're already connected to a given address, in order to
|
|
|
|
* avoid initiating duplicate connections.
|
|
|
|
*/
|
|
|
|
bool AlreadyConnectedToAddress(const CAddress& addr);
|
|
|
|
|
|
|
|
bool AttemptToEvictConnection();
|
2023-08-21 16:55:47 -04:00
|
|
|
CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type, bool use_v2transport) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex);
|
2021-01-06 03:39:04 -03:00
|
|
|
void AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr) const;
|
|
|
|
|
|
|
|
void DeleteNode(CNode* pnode);
|
2014-01-11 14:14:29 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
NodeId GetNewNodeId();
|
2010-08-29 12:58:15 -04:00
|
|
|
|
2023-06-13 14:20:13 -04:00
|
|
|
/** (Try to) send data from node's vSendMsg. Returns (bytes_sent, data_left). */
|
|
|
|
std::pair<size_t, bool> SocketSendData(CNode& node) const EXCLUSIVE_LOCKS_REQUIRED(node.cs_vSend);
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
void DumpAddresses();
|
2019-09-10 14:09:12 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
// Network stats
|
|
|
|
void RecordBytesRecv(uint64_t bytes);
|
2022-01-25 18:18:52 -03:00
|
|
|
void RecordBytesSent(uint64_t bytes) EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex);
|
2019-09-10 14:09:12 -03:00
|
|
|
|
2022-11-30 17:55:22 -03:00
|
|
|
/**
|
|
|
|
Return reachable networks for which we have no addresses in addrman and therefore
|
|
|
|
may require loading fixed seeds.
|
|
|
|
*/
|
|
|
|
std::unordered_set<Network> GetReachableEmptyNetworks() const;
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
/**
|
|
|
|
* Return vector of current BLOCK_RELAY peers.
|
|
|
|
*/
|
|
|
|
std::vector<CAddress> GetCurrentBlockRelayOnlyConns() const;
|
2017-02-06 14:04:34 -03:00
|
|
|
|
2023-02-14 19:40:14 -03:00
|
|
|
/**
|
|
|
|
* Search for a "preferred" network, a reachable network to which we
|
|
|
|
* currently don't have any OUTBOUND_FULL_RELAY or MANUAL connections.
|
|
|
|
* There needs to be at least one address in AddrMan for a preferred
|
|
|
|
* network to be picked.
|
|
|
|
*
|
|
|
|
* @param[out] network Preferred network, if found.
|
|
|
|
*
|
|
|
|
* @return bool Whether a preferred network was found.
|
|
|
|
*/
|
|
|
|
bool MaybePickPreferredNetwork(std::optional<Network>& network);
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
// Whether the node should be passed out in ForEach* callbacks
|
|
|
|
static bool NodeFullyConnected(const CNode* pnode);
|
2017-02-06 14:18:51 -03:00
|
|
|
|
2023-09-12 08:42:52 -03:00
|
|
|
uint16_t GetDefaultPort(Network net) const;
|
|
|
|
uint16_t GetDefaultPort(const std::string& addr) const;
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
// Network usage totals
|
2022-01-25 18:20:13 -03:00
|
|
|
mutable Mutex m_total_bytes_sent_mutex;
|
2021-08-28 13:51:08 -04:00
|
|
|
std::atomic<uint64_t> nTotalBytesRecv{0};
|
2022-01-25 18:05:04 -03:00
|
|
|
uint64_t nTotalBytesSent GUARDED_BY(m_total_bytes_sent_mutex) {0};
|
2020-09-30 13:07:36 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
// outbound limit & stats
|
2022-01-25 18:05:04 -03:00
|
|
|
uint64_t nMaxOutboundTotalBytesSentInCycle GUARDED_BY(m_total_bytes_sent_mutex) {0};
|
|
|
|
std::chrono::seconds nMaxOutboundCycleStartTime GUARDED_BY(m_total_bytes_sent_mutex) {0};
|
|
|
|
uint64_t nMaxOutboundLimit GUARDED_BY(m_total_bytes_sent_mutex);
|
2020-09-30 13:07:36 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
// P2P timeout in seconds
|
2020-07-10 12:19:11 -04:00
|
|
|
std::chrono::seconds m_peer_connect_timeout;
|
2010-08-29 12:58:15 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
// Whitelisted ranges. Any node connecting from these is automatically
|
|
|
|
// whitelisted (as well as those connecting to whitelisted binds).
|
|
|
|
std::vector<NetWhitelistPermissions> vWhitelistedRange;
|
2010-08-29 12:58:15 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
unsigned int nSendBufferMaxSize{0};
|
|
|
|
unsigned int nReceiveFloodSize{0};
|
2016-04-17 21:21:58 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
std::vector<ListenSocket> vhListenSocket;
|
|
|
|
std::atomic<bool> fNetworkActive{true};
|
|
|
|
bool fAddressesInitialized{false};
|
2021-09-10 21:16:37 -03:00
|
|
|
AddrMan& addrman;
|
2021-08-31 13:40:18 -04:00
|
|
|
const NetGroupManager& m_netgroupman;
|
2021-01-06 03:39:04 -03:00
|
|
|
std::deque<std::string> m_addr_fetches GUARDED_BY(m_addr_fetches_mutex);
|
2021-08-28 13:59:41 -04:00
|
|
|
Mutex m_addr_fetches_mutex;
|
2021-12-28 18:26:20 -03:00
|
|
|
|
|
|
|
// connection string and whether to use v2 p2p
|
|
|
|
std::vector<AddedNodeParams> m_added_node_params GUARDED_BY(m_added_nodes_mutex);
|
|
|
|
|
2021-08-28 15:02:28 -04:00
|
|
|
mutable Mutex m_added_nodes_mutex;
|
2021-08-28 14:57:52 -04:00
|
|
|
std::vector<CNode*> m_nodes GUARDED_BY(m_nodes_mutex);
|
|
|
|
std::list<CNode*> m_nodes_disconnected;
|
|
|
|
mutable RecursiveMutex m_nodes_mutex;
|
2021-01-06 03:39:04 -03:00
|
|
|
std::atomic<NodeId> nLastNodeId{0};
|
|
|
|
unsigned int nPrevNodeCount{0};
|
2010-08-29 12:58:15 -04:00
|
|
|
|
2023-06-11 15:26:18 -04:00
|
|
|
// Stores number of full-tx connections (outbound and manual) per network
|
|
|
|
std::array<unsigned int, Network::NET_MAX> m_network_conn_counts GUARDED_BY(m_nodes_mutex) = {};
|
|
|
|
|
2020-09-30 12:08:26 -03:00
|
|
|
/**
|
2021-01-06 03:39:04 -03:00
|
|
|
* Cache responses to addr requests to minimize privacy leak.
|
|
|
|
* Attack example: scraping addrs in real-time may allow an attacker
|
|
|
|
* to infer new connections of the victim by detecting new records
|
|
|
|
* with fresh timestamps (per self-announcement).
|
2020-09-30 12:08:26 -03:00
|
|
|
*/
|
2021-01-06 03:39:04 -03:00
|
|
|
struct CachedAddrResponse {
|
|
|
|
std::vector<CAddress> m_addrs_response_cache;
|
|
|
|
std::chrono::microseconds m_cache_entry_expiration{0};
|
|
|
|
};
|
2012-11-15 21:41:12 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
/**
|
|
|
|
* Addr responses stored in different caches
|
|
|
|
* per (network, local socket) prevent cross-network node identification.
|
|
|
|
* If a node for example is multi-homed under Tor and IPv6,
|
|
|
|
* a single cache (or no cache at all) would let an attacker
|
|
|
|
* to easily detect that it is the same node by comparing responses.
|
|
|
|
* Indexing by local socket prevents leakage when a node has multiple
|
|
|
|
* listening addresses on the same network.
|
|
|
|
*
|
|
|
|
* The used memory equals to 1000 CAddress records (or around 40 bytes) per
|
|
|
|
* distinct Network (up to 5) we have/had an inbound peer from,
|
|
|
|
* resulting in at most ~196 KB. Every separate local socket may
|
|
|
|
* add up to ~196 KB extra.
|
|
|
|
*/
|
|
|
|
std::map<uint64_t, CachedAddrResponse> m_addr_response_caches;
|
2017-02-06 14:18:51 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
/**
|
2020-07-20 15:28:37 -04:00
|
|
|
* Services this node offers.
|
2021-01-06 03:39:04 -03:00
|
|
|
*
|
2020-07-20 15:28:37 -04:00
|
|
|
* This data is replicated in each Peer instance we create.
|
2021-01-06 03:39:04 -03:00
|
|
|
*
|
|
|
|
* This data is not marked const, but after being set it should not
|
2020-07-20 15:28:37 -04:00
|
|
|
* change.
|
2021-01-06 03:39:04 -03:00
|
|
|
*
|
2020-07-20 15:28:37 -04:00
|
|
|
* \sa Peer::our_services
|
2021-01-06 03:39:04 -03:00
|
|
|
*/
|
|
|
|
ServiceFlags nLocalServices;
|
2010-08-29 12:58:15 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
std::unique_ptr<CSemaphore> semOutbound;
|
|
|
|
std::unique_ptr<CSemaphore> semAddnode;
|
2023-08-30 17:38:47 -04:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Maximum number of automatic connections permitted, excluding manual
|
|
|
|
* connections but including inbounds. May be changed by the user and is
|
|
|
|
* potentially limited by the operating system (number of file descriptors).
|
|
|
|
*/
|
2023-08-30 16:51:09 -04:00
|
|
|
int m_max_automatic_connections;
|
2010-08-29 12:58:15 -04:00
|
|
|
|
2023-08-30 17:38:47 -04:00
|
|
|
/*
|
|
|
|
* Maximum number of peers by connection type. Might vary from defaults
|
|
|
|
* based on -maxconnections init value.
|
|
|
|
*/
|
2010-08-29 12:58:15 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
// How many full-relay (tx, block, addr) outbound peers we want
|
|
|
|
int m_max_outbound_full_relay;
|
2010-08-29 12:58:15 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
// How many block-relay only outbound peers we want
|
|
|
|
// We do not relay tx or addr messages with these peers
|
|
|
|
int m_max_outbound_block_relay;
|
2010-08-29 12:58:15 -04:00
|
|
|
|
2023-08-30 16:51:09 -04:00
|
|
|
int m_max_addnode{MAX_ADDNODE_CONNECTIONS};
|
|
|
|
int m_max_feeler{MAX_FEELER_CONNECTIONS};
|
|
|
|
int m_max_automatic_outbound;
|
2023-08-31 16:41:30 -04:00
|
|
|
int m_max_inbound;
|
2023-08-30 17:38:47 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
bool m_use_addrman_outgoing;
|
2021-08-18 01:41:39 -04:00
|
|
|
CClientUIInterface* m_client_interface;
|
2021-01-06 03:39:04 -03:00
|
|
|
NetEventsInterface* m_msgproc;
|
|
|
|
/** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */
|
|
|
|
BanMan* m_banman;
|
2010-08-29 12:58:15 -04:00
|
|
|
|
2020-12-15 14:56:53 -03:00
|
|
|
/**
|
2021-01-06 03:39:04 -03:00
|
|
|
* Addresses that were saved during the previous clean shutdown. We'll
|
|
|
|
* attempt to make block-relay-only connections to them.
|
2020-12-15 14:56:53 -03:00
|
|
|
*/
|
2021-01-06 03:39:04 -03:00
|
|
|
std::vector<CAddress> m_anchors;
|
2020-05-20 06:05:18 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
/** SipHasher seeds for deterministic randomness */
|
|
|
|
const uint64_t nSeed0, nSeed1;
|
2010-08-29 12:58:15 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
/** flag for waking the message processor. */
|
|
|
|
bool fMsgProcWake GUARDED_BY(mutexMsgProc);
|
2010-08-29 12:58:15 -04:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
std::condition_variable condMsgProc;
|
|
|
|
Mutex mutexMsgProc;
|
|
|
|
std::atomic<bool> flagInterruptMsgProc{false};
|
2010-08-29 12:58:15 -04:00
|
|
|
|
2020-12-04 14:03:05 -03:00
|
|
|
/**
|
|
|
|
* This is signaled when network activity should cease.
|
|
|
|
* A pointer to it is saved in `m_i2p_sam_session`, so make sure that
|
|
|
|
* the lifetime of `interruptNet` is not shorter than
|
|
|
|
* the lifetime of `m_i2p_sam_session`.
|
|
|
|
*/
|
2021-01-06 03:39:04 -03:00
|
|
|
CThreadInterrupt interruptNet;
|
2010-08-29 12:58:15 -04:00
|
|
|
|
2020-12-04 14:03:05 -03:00
|
|
|
/**
|
|
|
|
* I2P SAM session.
|
2022-06-08 11:59:32 -04:00
|
|
|
* Used to accept incoming and make outgoing I2P connections from a persistent
|
|
|
|
* address.
|
2020-12-04 14:03:05 -03:00
|
|
|
*/
|
|
|
|
std::unique_ptr<i2p::sam::Session> m_i2p_sam_session;
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
std::thread threadDNSAddressSeed;
|
|
|
|
std::thread threadSocketHandler;
|
|
|
|
std::thread threadOpenAddedConnections;
|
|
|
|
std::thread threadOpenConnections;
|
|
|
|
std::thread threadMessageHandler;
|
2020-11-24 07:28:52 -03:00
|
|
|
std::thread threadI2PAcceptIncoming;
|
2011-09-06 17:09:04 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
/** flag for deciding to connect to an extra outbound peer,
|
|
|
|
* in excess of m_max_outbound_full_relay
|
|
|
|
* This takes the place of a feeler connection */
|
|
|
|
std::atomic_bool m_try_another_outbound_peer;
|
2016-04-19 01:04:58 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
/** flag for initiating extra block-relay-only peer connections.
|
|
|
|
* this should only be enabled after initial chain sync has occurred,
|
|
|
|
* as these connections are intended to be short-lived and low-bandwidth.
|
|
|
|
*/
|
|
|
|
std::atomic_bool m_start_extra_block_relay_peers{false};
|
2017-02-06 14:04:34 -03:00
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
/**
|
|
|
|
* A vector of -bind=<address>:<port>=onion arguments each of which is
|
|
|
|
* an address and port that are designated for incoming Tor connections.
|
|
|
|
*/
|
|
|
|
std::vector<CService> m_onion_binds;
|
2020-10-21 06:53:38 -03:00
|
|
|
|
2023-01-06 07:23:46 -03:00
|
|
|
/**
|
|
|
|
* Mutex protecting m_i2p_sam_sessions.
|
|
|
|
*/
|
|
|
|
Mutex m_unused_i2p_sessions_mutex;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* A pool of created I2P SAM transient sessions that should be used instead
|
|
|
|
* of creating new ones in order to reduce the load on the I2P network.
|
|
|
|
* Creating a session in I2P is not cheap, thus if this is not empty, then
|
|
|
|
* pick an entry from it instead of creating a new session. If connecting to
|
|
|
|
* a host fails, then the created session is put to this pool for reuse.
|
|
|
|
*/
|
|
|
|
std::queue<std::unique_ptr<i2p::sam::Session>> m_unused_i2p_sessions GUARDED_BY(m_unused_i2p_sessions_mutex);
|
|
|
|
|
2023-08-22 20:42:24 -04:00
|
|
|
/**
|
|
|
|
* Mutex protecting m_reconnections.
|
|
|
|
*/
|
|
|
|
Mutex m_reconnections_mutex;
|
|
|
|
|
|
|
|
/** Struct for entries in m_reconnections. */
|
|
|
|
struct ReconnectionInfo
|
|
|
|
{
|
|
|
|
CAddress addr_connect;
|
|
|
|
CSemaphoreGrant grant;
|
|
|
|
std::string destination;
|
|
|
|
ConnectionType conn_type;
|
|
|
|
bool use_v2transport;
|
|
|
|
};
|
|
|
|
|
|
|
|
/**
|
|
|
|
* List of reconnections we have to make.
|
|
|
|
*/
|
|
|
|
std::list<ReconnectionInfo> m_reconnections GUARDED_BY(m_reconnections_mutex);
|
|
|
|
|
|
|
|
/** Attempt reconnections, if m_reconnections non-empty. */
|
|
|
|
void PerformReconnections() EXCLUSIVE_LOCKS_REQUIRED(!m_reconnections_mutex, !m_unused_i2p_sessions_mutex);
|
|
|
|
|
2023-01-06 07:23:46 -03:00
|
|
|
/**
|
|
|
|
* Cap on the size of `m_unused_i2p_sessions`, to ensure it does not
|
|
|
|
* unexpectedly use too much memory.
|
|
|
|
*/
|
|
|
|
static constexpr size_t MAX_UNUSED_I2P_SESSIONS_SIZE{10};
|
|
|
|
|
2021-04-26 10:22:07 -04:00
|
|
|
/**
|
2021-08-28 14:57:52 -04:00
|
|
|
* RAII helper to atomically create a copy of `m_nodes` and add a reference
|
2021-04-26 10:22:07 -04:00
|
|
|
* to each of the nodes. The nodes are released when this object is destroyed.
|
|
|
|
*/
|
|
|
|
class NodesSnapshot
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
explicit NodesSnapshot(const CConnman& connman, bool shuffle)
|
|
|
|
{
|
|
|
|
{
|
2021-08-28 14:57:52 -04:00
|
|
|
LOCK(connman.m_nodes_mutex);
|
|
|
|
m_nodes_copy = connman.m_nodes;
|
2021-04-26 10:22:07 -04:00
|
|
|
for (auto& node : m_nodes_copy) {
|
|
|
|
node->AddRef();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (shuffle) {
|
|
|
|
Shuffle(m_nodes_copy.begin(), m_nodes_copy.end(), FastRandomContext{});
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
~NodesSnapshot()
|
|
|
|
{
|
|
|
|
for (auto& node : m_nodes_copy) {
|
|
|
|
node->Release();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
const std::vector<CNode*>& Nodes() const
|
|
|
|
{
|
|
|
|
return m_nodes_copy;
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
std::vector<CNode*> m_nodes_copy;
|
|
|
|
};
|
|
|
|
|
2023-09-12 08:42:36 -03:00
|
|
|
const CChainParams& m_params;
|
|
|
|
|
2021-01-06 03:39:04 -03:00
|
|
|
friend struct ConnmanTestMsg;
|
2010-08-29 12:58:15 -04:00
|
|
|
};
|
|
|
|
|
2021-07-22 12:23:21 -04:00
|
|
|
/** Defaults to `CaptureMessageToFile()`, but can be overridden by unit tests. */
|
|
|
|
extern std::function<void(const CAddress& addr,
|
|
|
|
const std::string& msg_type,
|
2021-07-29 11:47:15 -04:00
|
|
|
Span<const unsigned char> data,
|
2021-07-22 12:23:21 -04:00
|
|
|
bool is_incoming)>
|
|
|
|
CaptureMessage;
|
2020-07-13 13:20:47 -04:00
|
|
|
|
2014-08-28 16:21:03 -04:00
|
|
|
#endif // BITCOIN_NET_H
|