Commit be04eb440f: Merge b7ff6a611a into 51d76634fb
2 changed files with 27 additions and 28 deletions
src/net_processing.cpp
@@ -1397,9 +1397,7 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co
     // Bootstrap quickly by guessing a parent of our best tip is the forking point.
     // Guessing wrong in either direction is not a problem.
-    // Also reset pindexLastCommonBlock after a snapshot was loaded, so that blocks after the snapshot will be prioritised for download.
-    if (state->pindexLastCommonBlock == nullptr ||
-        (snap_base && state->pindexLastCommonBlock->nHeight < snap_base->nHeight)) {
+    if (state->pindexLastCommonBlock == nullptr) {
         state->pindexLastCommonBlock = m_chainman.ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight, m_chainman.ActiveChain().Height())];
     }

@@ -1409,6 +1407,12 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co
     if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
         return;

+    // If our tip has advanced beyond pindexLastCommonBlock, move it ahead to the tip. We don't need to download any blocks in between, and skipping ahead here
+    // allows us to determine nWindowEnd better.
+    if (m_chainman.ActiveHeight() > state->pindexLastCommonBlock->nHeight && state->pindexBestKnownBlock->GetAncestor(m_chainman.ActiveHeight()) == m_chainman.ActiveTip()) {
+        state->pindexLastCommonBlock = m_chainman.ActiveTip();
+    }
+
     const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
     // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
     // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
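This skip-ahead supersedes the snapshot-specific reset removed in the previous hunk: whenever the active tip has advanced past pindexLastCommonBlock and the peer's best chain provably contains our tip (the GetAncestor check), nothing in between needs to be fetched, so the marker can jump straight to the tip. A minimal Python model of the rule and of why it matters for the download window (all names below are illustrative, not from the source):

```python
# Illustrative model only; the real logic operates on CBlockIndex pointers in
# net_processing.cpp. A chain is modelled as a list of block hashes by height.
BLOCK_DOWNLOAD_WINDOW = 1024  # matches the constant in net_processing.cpp

def advance_last_common(last_common_height, active_chain, peer_best_chain):
    """Jump the last-common marker to our tip when the peer's chain includes it."""
    tip_height = len(active_chain) - 1
    if tip_height > last_common_height and peer_best_chain[tip_height] == active_chain[tip_height]:
        return tip_height  # every block up to the tip is already stored locally
    return last_common_height

def window_end(last_common_height):
    # Blocks are only requested up to BLOCK_DOWNLOAD_WINDOW + 1 past the last
    # common block; the +1 exists so stalling can be detected.
    return last_common_height + BLOCK_DOWNLOAD_WINDOW + 1

active = list(range(701))    # our tip at height 700
peer = list(range(2001))     # peer's chain extends ours to height 2000
last_common = advance_last_common(100, active, peer)
assert last_common == 700
assert window_end(last_common) == 1725  # window now reaches much further ahead
```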
test/functional/p2p_ibd_stalling.py
@@ -29,15 +29,15 @@ from test_framework.util import (


 class P2PStaller(P2PDataStore):
-    def __init__(self, stall_block):
-        self.stall_block = stall_block
+    def __init__(self, stall_blocks):
+        self.stall_blocks = stall_blocks
         super().__init__()

     def on_getdata(self, message):
         for inv in message.inv:
             self.getdata_requests.append(inv.hash)
             if (inv.type & MSG_TYPE_MASK) == MSG_BLOCK:
-                if (inv.hash != self.stall_block):
+                if (inv.hash not in self.stall_blocks):
                     self.send_without_ping(msg_block(self.block_store[inv.hash]))

     def on_getheaders(self, message):
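Generalizing the single hash to a collection means one P2PStaller can withhold any number of blocks; the rest of the handler is unchanged. A toy sketch of the membership test (the hashes are placeholders, not real block hashes):

```python
# Placeholder hashes standing in for the test's real block hashes.
withheld = [0xAA, 0xBB]            # plays the role of stall_blocks
requested = [0xAA, 0xCC, 0xDD]     # hashes arriving via getdata

served = [h for h in requested if h not in withheld]
assert served == [0xCC, 0xDD]      # only non-withheld blocks get a msg_block reply
```

With only two withheld blocks a list membership test is fine; a set would make the lookup O(1) if the test ever withheld many.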
@@ -51,7 +51,7 @@ class P2PIBDStallingTest(BitcoinTestFramework):

     def run_test(self):
         NUM_BLOCKS = 1025
-        NUM_PEERS = 4
+        NUM_PEERS = 5
         node = self.nodes[0]
         tip = int(node.getbestblockhash(), 16)
         blocks = []
@@ -66,8 +66,10 @@ class P2PIBDStallingTest(BitcoinTestFramework):
             block_time += 1
             height += 1
             block_dict[blocks[-1].sha256] = blocks[-1]
-        stall_block = blocks[0].sha256

+        stall_index = 0
+        second_stall_index = 500
+        stall_blocks = [blocks[stall_index].sha256, blocks[second_stall_index].sha256]
         headers_message = msg_headers()
         headers_message.headers = [CBlockHeader(b) for b in blocks[:NUM_BLOCKS-1]]
         peers = []
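Indices 0 and 500 put both withheld blocks inside the node's initial download window (BLOCK_DOWNLOAD_WINDOW is 1024 in net_processing.cpp), so the node can have both in flight before either stall resolves. A quick sanity sketch of that claim (assuming, as in the test, that blocks[0] is mined at height 1 on top of the genesis tip):

```python
# Sanity check (not part of the test): both stall blocks fit in the initial
# download window that starts right above the genesis tip.
BLOCK_DOWNLOAD_WINDOW = 1024
for index in (0, 500):                   # stall_index, second_stall_index
    height = index + 1                   # blocks[0] sits at height 1
    assert height <= BLOCK_DOWNLOAD_WINDOW + 1
```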
@@ -76,14 +78,12 @@ class P2PIBDStallingTest(BitcoinTestFramework):
         self.mocktime = int(time.time()) + 1
         node.setmocktime(self.mocktime)
         for id in range(NUM_PEERS):
-            peers.append(node.add_outbound_p2p_connection(P2PStaller(stall_block), p2p_idx=id, connection_type="outbound-full-relay"))
+            peers.append(node.add_outbound_p2p_connection(P2PStaller(stall_blocks), p2p_idx=id, connection_type="outbound-full-relay"))
             peers[-1].block_store = block_dict
             peers[-1].send_and_ping(headers_message)

-        # Need to wait until 1023 blocks are received - the magic total bytes number is a workaround in lack of an rpc
-        # returning the number of downloaded (but not connected) blocks.
-        bytes_recv = 172761 if not self.options.v2transport else 169692
-        self.wait_until(lambda: self.total_bytes_recv_for_blocks() == bytes_recv)
+        # Wait until all blocks are received (except for the stall blocks), so that no other blocks are in flight.
+        self.wait_until(lambda: sum(len(peer['inflight']) for peer in node.getpeerinfo()) == len(stall_blocks))

         self.all_sync_send_with_ping(peers)
         # If there was a peer marked for stalling, it would get disconnected
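The hard-coded byte totals, which differed per transport and broke whenever block serialization changed, give way to polling getpeerinfo, whose per-peer inflight field lists the blocks currently requested from that peer. The download is quiescent exactly when the only in-flight requests are the withheld blocks. A standalone sketch of the predicate (assuming `node` is the test's TestNode handle):

```python
# Sketch of the readiness predicate used above.
def only_stall_blocks_in_flight(node, stall_blocks):
    inflight = sum(len(peer['inflight']) for peer in node.getpeerinfo())
    return inflight == len(stall_blocks)
```

Unlike the byte-count workaround, this needs no per-transport magic numbers and keeps working if block sizes change.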
@@ -104,7 +104,7 @@ class P2PIBDStallingTest(BitcoinTestFramework):
         node.setmocktime(self.mocktime)
         peers[0].wait_for_disconnect()
         assert_equal(node.num_test_p2p_connections(), NUM_PEERS - 1)
-        self.wait_until(lambda: self.is_block_requested(peers, stall_block))
+        self.wait_until(lambda: self.is_block_requested(peers, stall_blocks[0]))
         # Make sure that SendMessages() is invoked, which assigns the missing block
         # to another peer and starts the stalling logic for them
         self.all_sync_send_with_ping(peers)
@@ -119,7 +119,7 @@ class P2PIBDStallingTest(BitcoinTestFramework):
         self.mocktime += 2
         node.setmocktime(self.mocktime)
         self.wait_until(lambda: sum(x.is_connected for x in node.p2ps) == NUM_PEERS - 2)
-        self.wait_until(lambda: self.is_block_requested(peers, stall_block))
+        self.wait_until(lambda: self.is_block_requested(peers, stall_blocks[0]))
         self.all_sync_send_with_ping(peers)

         self.log.info("Check that the stalling timeout gets doubled to 8 seconds for the next staller")
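This hunk and the previous one repeat one deterministic escalation step: advance mock time past the current stalling timeout, wait for the node to drop the peer it marked as the staller, confirm the first withheld block is re-requested from a survivor, and sync so SendMessages() arms the stalling logic for the new peer. A sketch of that step as a helper (hypothetical method; the real test inlines these lines, bumping mock time inside a loop where the timeout is longer):

```python
# Hypothetical helper condensing the escalation step the test repeats.
def escalate_one_staller(self, node, peers, stall_blocks, want_connected):
    # Step mock time in 2s increments until the stalling timeout fires and
    # the node disconnects the current staller.
    while sum(x.is_connected for x in node.p2ps) > want_connected:
        self.mocktime += 2
        node.setmocktime(self.mocktime)
    # The withheld block must be reassigned to a surviving peer ...
    self.wait_until(lambda: self.is_block_requested(peers, stall_blocks[0]))
    # ... which requires SendMessages() to run for each of them.
    self.all_sync_send_with_ping(peers)
```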
@@ -132,24 +132,19 @@ class P2PIBDStallingTest(BitcoinTestFramework):
         self.mocktime += 2
         node.setmocktime(self.mocktime)
         self.wait_until(lambda: sum(x.is_connected for x in node.p2ps) == NUM_PEERS - 3)
-        self.wait_until(lambda: self.is_block_requested(peers, stall_block))
+        self.wait_until(lambda: self.is_block_requested(peers, stall_blocks[0]))
         self.all_sync_send_with_ping(peers)

-        self.log.info("Provide the withheld block and check that stalling timeout gets reduced back to 2 seconds")
-        with node.assert_debug_log(expected_msgs=['Decreased stalling timeout to 2 seconds']):
+        self.log.info("Provide the first withheld block and check that stalling timeout gets reduced back to 2 seconds")
+        with node.assert_debug_log(expected_msgs=['Decreased stalling timeout to 2 seconds'], unexpected_msgs=['Stall started']):
             for p in peers:
-                if p.is_connected and (stall_block in p.getdata_requests):
-                    p.send_without_ping(msg_block(block_dict[stall_block]))
+                if p.is_connected and (stall_blocks[0] in p.getdata_requests):
+                    p.send_without_ping(msg_block(block_dict[stall_blocks[0]]))
+        self.all_sync_send_with_ping(peers)

-        self.log.info("Check that all outstanding blocks get connected")
-        self.wait_until(lambda: node.getblockcount() == NUM_BLOCKS)
+        self.log.info("Check that all outstanding blocks up to the second stall block get connected")
+        self.wait_until(lambda: node.getblockcount() == second_stall_index)

-    def total_bytes_recv_for_blocks(self):
-        total = 0
-        for info in self.nodes[0].getpeerinfo():
-            if ("block" in info["bytesrecv_per_msg"].keys()):
-                total += info["bytesrecv_per_msg"]["block"]
-        return total

     def all_sync_send_with_ping(self, peers):
         for p in peers: