Merge bitcoin/bitcoin#29006: test: fix v2 transport intermittent test failure (#29002)
00e0658e77
test: fix v2 transport intermittent test failure (#29002) (Sebastian Falbesoner)

Pull request description:

This PR improves the following fragile construct for detecting a new connection to the node under test in `p2p_v2_transport.py` (6d5790956f/test/functional/p2p_v2_transport.py, L154-L156).

Relying only on the number of peers for that suffers from race conditions, as unrelated previous peers could disconnect at any time in between. In the test run in #29002, the following happens:

- `getpeerinfo()` is called the first time -> assigned to `num_peers`
- **previous peer disconnects**, the node's peer count is now `num_peers - 1` (in most test runs, this happens before the first `getpeerinfo` call)
- new peer connects, the node's peer count is now `num_peers`
- the condition that the node's peer count is `num_peers + 1` is never true, and the test fails

Use the more robust approach of watching for an increased highest peer id instead (again using the `getpeerinfo` RPC call), via a newly introduced context manager method `TestNode.wait_for_new_peer()`. Note that for the opposite case of a disconnect, no new method is introduced, since this is currently used only once in the test and is also simpler. Still happy to take suggestions for alternative solutions.

Fixes #29002.

ACKs for top commit:
  kevkevinpal: Concept ACK 00e0658
  maflcko: Ok, lgtm ACK 00e0658e77
  stratospher: ACK 00e0658.

Tree-SHA512: 0118b87f54ea5e6e080ff44f29d6af6674c757a588534b3add040da435f4359e71bf85bc0a5eb7170f99cc9956e1a03c35cce653d642d31eed41bbed1f94f44f
commit 03042fb6bb

2 changed files with 26 additions and 8 deletions
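To make the failure mode concrete before the diff, here is a minimal sketch of both waiting strategies; `node`, `s`, and `addr` are hypothetical stand-ins for the framework objects, not code from this patch:

```python
# Hypothetical sketch of the race described above (illustrative names only).

# Fragile: wait on the peer *count*.
num_peers = len(node.getpeerinfo())   # e.g. 3 peers at snapshot time
# ... an unrelated peer disconnects here: count drops to 2 ...
s.connect(addr)                       # count climbs back to 3, never 4
node.wait_until(lambda: len(node.getpeerinfo()) == num_peers + 1)  # times out

# Robust: wait on the *highest peer id*. Ids are handed out monotonically,
# so an unrelated disconnect cannot mask the new connection.
highest_id = node.getpeerinfo()[-1]["id"]
s.connect(addr)
node.wait_until(lambda: node.getpeerinfo()[-1]["id"] > highest_id)
```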
test/functional/p2p_v2_transport.py

```diff
@@ -133,9 +133,8 @@ class V2TransportTest(BitcoinTestFramework):
         V1_PREFIX = MAGIC_BYTES["regtest"] + b"version\x00\x00\x00\x00\x00"
         assert_equal(len(V1_PREFIX), 16)
         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-            num_peers = len(self.nodes[0].getpeerinfo())
-            s.connect(("127.0.0.1", p2p_port(0)))
-            self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == num_peers + 1)
+            with self.nodes[0].wait_for_new_peer():
+                s.connect(("127.0.0.1", p2p_port(0)))
             s.sendall(V1_PREFIX[:-1])
             assert_equal(self.nodes[0].getpeerinfo()[-1]["transport_protocol_type"], "detecting")
             s.sendall(bytes([V1_PREFIX[-1]]))  # send out last prefix byte
```
```diff
@@ -144,22 +143,23 @@ class V2TransportTest(BitcoinTestFramework):
         # Check wrong network prefix detection (hits if the next 12 bytes correspond to a v1 version message)
         wrong_network_magic_prefix = MAGIC_BYTES["signet"] + V1_PREFIX[4:]
         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-            s.connect(("127.0.0.1", p2p_port(0)))
+            with self.nodes[0].wait_for_new_peer():
+                s.connect(("127.0.0.1", p2p_port(0)))
             with self.nodes[0].assert_debug_log(["V2 transport error: V1 peer with wrong MessageStart"]):
                 s.sendall(wrong_network_magic_prefix + b"somepayload")
 
         # Check detection of missing garbage terminator (hits after fixed amount of data if terminator never matches garbage)
         MAX_KEY_GARB_AND_GARBTERM_LEN = 64 + 4095 + 16
         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-            num_peers = len(self.nodes[0].getpeerinfo())
-            s.connect(("127.0.0.1", p2p_port(0)))
-            self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == num_peers + 1)
+            with self.nodes[0].wait_for_new_peer():
+                s.connect(("127.0.0.1", p2p_port(0)))
             s.sendall(b'\x00' * (MAX_KEY_GARB_AND_GARBTERM_LEN - 1))
             self.wait_until(lambda: self.nodes[0].getpeerinfo()[-1]["bytesrecv"] == MAX_KEY_GARB_AND_GARBTERM_LEN - 1)
             with self.nodes[0].assert_debug_log(["V2 transport error: missing garbage terminator"]):
+                peer_id = self.nodes[0].getpeerinfo()[-1]["id"]
                 s.sendall(b'\x00')  # send out last byte
                 # should disconnect immediately
-                self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == num_peers)
+                self.wait_until(lambda: not peer_id in [p["id"] for p in self.nodes[0].getpeerinfo()])
 
 
 if __name__ == '__main__':
```
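An aside on the constant used above: `MAX_KEY_GARB_AND_GARBTERM_LEN = 64 + 4095 + 16` matches the BIP324 handshake layout as I read it (a 64-byte ElligatorSwift-encoded public key, at most 4095 bytes of garbage, and a 16-byte garbage terminator), so 4175 bytes is the last point at which a terminator could still appear:

```python
# Derivation of the constant (per BIP324; a reading aid, not code from the patch).
ELLSWIFT_PUBKEY_LEN = 64      # ElligatorSwift-encoded ephemeral public key
MAX_GARBAGE_LEN = 4095        # maximum garbage bytes allowed after the key
GARBAGE_TERMINATOR_LEN = 16   # fixed-size terminator ending the garbage

assert ELLSWIFT_PUBKEY_LEN + MAX_GARBAGE_LEN + GARBAGE_TERMINATOR_LEN == 4175
# The test first sends 4175 - 1 zero bytes, which is still (barely) valid;
# the final byte proves no terminator can follow, so the node must disconnect.
```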
test/functional/test_node.py

```diff
@@ -519,6 +519,24 @@ class TestNode():
                 'Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(
                     str(expected_msgs), print_log))
 
+    @contextlib.contextmanager
+    def wait_for_new_peer(self, timeout=5):
+        """
+        Wait until the node is connected to at least one new peer. We detect this
+        by watching for an increased highest peer id, using the `getpeerinfo` RPC call.
+        Note that the simpler approach of only accounting for the number of peers
+        suffers from race conditions, as disconnects from unrelated previous peers
+        could happen anytime in-between.
+        """
+        def get_highest_peer_id():
+            peer_info = self.getpeerinfo()
+            return peer_info[-1]["id"] if peer_info else -1
+
+        initial_peer_id = get_highest_peer_id()
+        yield
+        wait_until_helper_internal(lambda: get_highest_peer_id() > initial_peer_id,
+                                   timeout=timeout, timeout_factor=self.timeout_factor)
+
     @contextlib.contextmanager
     def profile_with_perf(self, profile_name: str):
         """
```
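A self-contained toy showing why the snapshot-on-entry / check-on-exit ordering of this context manager matters; `FakeNode` is invented for illustration and asserts once instead of polling with a timeout like the real helper:

```python
import contextlib

class FakeNode:
    """Toy stand-in for TestNode: peer ids are handed out monotonically."""
    def __init__(self):
        self.peers = [{"id": 0}, {"id": 1}]
        self.next_id = 2

    def getpeerinfo(self):
        return self.peers

    def connect(self):
        self.peers.append({"id": self.next_id})
        self.next_id += 1

    def disconnect_oldest(self):
        self.peers.pop(0)

    @contextlib.contextmanager
    def wait_for_new_peer(self):
        # Snapshot the highest peer id on entry ...
        initial = self.peers[-1]["id"] if self.peers else -1
        yield
        # ... and verify it increased on exit (the real implementation
        # polls with a timeout instead of asserting once).
        assert self.peers[-1]["id"] > initial, "no new peer detected"

node = FakeNode()
with node.wait_for_new_peer():
    node.disconnect_oldest()  # unrelated disconnect: peer count unchanged ...
    node.connect()            # ... but the highest id still increased
# A plain count-based check would be fooled by this interleaving: the count
# returns to its initial value, so waiting for count + 1 would time out.
```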