mirror of https://github.com/bitcoin/bitcoin.git
rewrite DisconnectedBlockTransactions as a list + map
And encapsulate the underlying data structures to avoid misuse. It's better to use the stdlib instead of boost when we can achieve the same thing.

Behavior change: the number returned by DynamicMemoryUsage for the same transactions is higher (due to a change in usage or more accurate accounting), which effectively decreases the maximum number of transactions kept for resubmission in a reorg.

Co-authored-by: Cory Fields <cory-nospam-@coryfields.com>
parent 79ce9f0aa4
commit 2765d6f343
4 changed files with 83 additions and 68 deletions
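The heart of the change is replacing the boost::multi_index_container with a std::list of CTransactionRef plus a std::unordered_map from txid to list iterator: the list preserves confirmation order, the map gives O(1) lookup and erasure, and std::list iterators stay valid across other insertions and erasures. A minimal, self-contained sketch of that pattern (illustrative only; OrderedQueue and the std::string keys are placeholders, not the actual class):

    // Sketch of the list + iterator-map pattern; not the real DisconnectedBlockTransactions.
    #include <cassert>
    #include <list>
    #include <string>
    #include <unordered_map>

    class OrderedQueue {
        std::list<std::string> queued;  // preserves insertion order; iterators stay valid across erasures
        std::unordered_map<std::string, std::list<std::string>::iterator> by_key;  // O(1) lookup by key
    public:
        // Assumes unique keys, mirroring the "no duplicate txids" assumption in the diff below.
        void push_back(const std::string& key) {
            auto it = queued.insert(queued.end(), key);
            by_key.emplace(key, it);
        }
        bool erase(const std::string& key) {
            auto map_it = by_key.find(key);
            if (map_it == by_key.end()) return false;
            queued.erase(map_it->second);  // O(1) erase through the stored list iterator
            by_key.erase(map_it);
            return true;
        }
        std::size_t size() const { return queued.size(); }
    };

    int main() {
        OrderedQueue q;
        q.push_back("a"); q.push_back("b"); q.push_back("c");
        q.erase("b");
        assert(q.size() == 2);  // "a" and "c" remain, still in insertion order
    }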
@@ -82,7 +82,7 @@ static void Reorg(const ReorgTxns& reorg)
     disconnectpool.removeForBlock(reorg.connected_txns_2);

     // Sanity Check
-    assert(disconnectpool.queuedTx.size() == BLOCK_VTX_COUNT - reorg.num_shared);
+    assert(disconnectpool.size() == BLOCK_VTX_COUNT - reorg.num_shared);

     disconnectpool.clear();
 }

src/txmempool.h
@@ -850,29 +850,23 @@ public:
  * Instead, store the disconnected transactions (in order!) as we go, remove any
  * that are included in blocks in the new chain, and then process the remaining
  * still-unconfirmed transactions at the end.
+ *
+ * Order of queuedTx:
+ * The front of the list should be the most recently-confirmed transactions (transactions at the
+ * end of vtx of blocks closer to the tip). If memory usage grows too large, we trim from the front
+ * of the list. After trimming, transactions can be re-added to the mempool from the back of the
+ * list to the front without running into missing inputs.
  */
-
-// multi_index tag names
-struct txid_index {};
-struct insertion_order {};
-
-struct DisconnectedBlockTransactions {
-    typedef boost::multi_index_container<
-        CTransactionRef,
-        boost::multi_index::indexed_by<
-            // sorted by txid
-            boost::multi_index::hashed_unique<
-                boost::multi_index::tag<txid_index>,
-                mempoolentry_txid,
-                SaltedTxidHasher
-            >,
-            // sorted by order in the blockchain
-            boost::multi_index::sequenced<
-                boost::multi_index::tag<insertion_order>
-            >
-        >
-    > indexed_disconnected_transactions;
+class DisconnectedBlockTransactions {
+private:
+    /** Cached dynamic memory usage for the CTransactions (memory for the shared pointers is
+     * included in the container calculations). */
+    uint64_t cachedInnerUsage = 0;
+    std::list<CTransactionRef> queuedTx;
+    using TxList = decltype(queuedTx);
+    std::unordered_map<uint256, TxList::iterator, SaltedTxidHasher> iters_by_txid;

+public:
     // It's almost certainly a logic bug if we don't clear out queuedTx before
     // destruction, as we add to it while disconnecting blocks, and then we
     // need to re-process remaining transactions to ensure mempool consistency.
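A small illustration of the ordering invariant described in the comment above (hypothetical transaction ids, not taken from the diff): blocks are disconnected tip-first and each block's vtx is walked in reverse, so appending to the back of a list leaves the most recently confirmed transaction at the front.

    // Illustration only: shows why the front ends up holding the newest confirmations.
    #include <cassert>
    #include <list>
    #include <string>
    #include <vector>

    int main() {
        std::list<std::string> queued;
        const std::vector<std::string> tip_block{"tx_b", "tx_c"};  // disconnected first (closest to the tip)
        const std::vector<std::string> parent_block{"tx_a"};       // disconnected second
        for (auto it = tip_block.rbegin(); it != tip_block.rend(); ++it) queued.push_back(*it);
        for (auto it = parent_block.rbegin(); it != parent_block.rend(); ++it) queued.push_back(*it);
        assert((queued == std::list<std::string>{"tx_c", "tx_b", "tx_a"}));
        // Trimming drops tx_c from the front; re-adding to the mempool walks from the
        // back (tx_a first), so parents are processed before their children.
    }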
@@ -881,18 +875,17 @@ struct DisconnectedBlockTransactions {
     // to be refactored such that this assumption is no longer true (for
     // instance if there was some other way we cleaned up the mempool after a
     // reorg, besides draining this object).
-    ~DisconnectedBlockTransactions() { assert(queuedTx.empty()); }
-
-    indexed_disconnected_transactions queuedTx;
-    uint64_t cachedInnerUsage = 0;
-
-    // Estimate the overhead of queuedTx to be 6 pointers + an allocation, as
-    // no exact formula for boost::multi_index_contained is implemented.
-    size_t DynamicMemoryUsage() const {
-        return memusage::MallocUsage(sizeof(CTransactionRef) + 6 * sizeof(void*)) * queuedTx.size() + cachedInnerUsage;
+    ~DisconnectedBlockTransactions() {
+        assert(queuedTx.empty());
+        assert(iters_by_txid.empty());
+        assert(cachedInnerUsage == 0);
     }

-    /** Add transactions from the block, iterating through vtx in reverse order. Callers should call
+    size_t DynamicMemoryUsage() const {
+        return cachedInnerUsage + memusage::DynamicUsage(iters_by_txid) + memusage::DynamicUsage(queuedTx);
+    }
+
+    /** Add transactions from the block, iterating through vtx in reverse order. Callers should call
      * this function for blocks in descending order by block height.
      * We assume that callers never pass multiple transactions with the same txid, otherwise things
      * can go very wrong in removeForBlock due to queuedTx containing an item without a
@@ -900,40 +893,61 @@ struct DisconnectedBlockTransactions {
      */
     void AddTransactionsFromBlock(const std::vector<CTransactionRef>& vtx)
     {
+        iters_by_txid.reserve(iters_by_txid.size() + vtx.size());
         for (auto block_it = vtx.rbegin(); block_it != vtx.rend(); ++block_it) {
-            queuedTx.insert(*block_it);
-            cachedInnerUsage += RecursiveDynamicUsage(*block_it);
+            auto it = queuedTx.insert(queuedTx.end(), *block_it);
+            iters_by_txid.emplace((*block_it)->GetHash(), it);
+            cachedInnerUsage += RecursiveDynamicUsage(**block_it);
         }
     }

-    // Remove entries based on txid_index, and update memory usage.
+    /** Remove any entries that are in this block. */
     void removeForBlock(const std::vector<CTransactionRef>& vtx)
     {
         // Short-circuit in the common case of a block being added to the tip
         if (queuedTx.empty()) {
             return;
         }
-        for (auto const &tx : vtx) {
-            auto it = queuedTx.find(tx->GetHash());
-            if (it != queuedTx.end()) {
-                cachedInnerUsage -= RecursiveDynamicUsage(*it);
-                queuedTx.erase(it);
+        for (const auto& tx : vtx) {
+            auto iter = iters_by_txid.find(tx->GetHash());
+            if (iter != iters_by_txid.end()) {
+                auto list_iter = iter->second;
+                iters_by_txid.erase(iter);
+                cachedInnerUsage -= RecursiveDynamicUsage(**list_iter);
+                queuedTx.erase(list_iter);
             }
         }
     }

-    // Remove an entry by insertion_order index, and update memory usage.
-    void removeEntry(indexed_disconnected_transactions::index<insertion_order>::type::iterator entry)
+    /** Remove the first entry and update memory usage. */
+    CTransactionRef take_first()
     {
-        cachedInnerUsage -= RecursiveDynamicUsage(*entry);
-        queuedTx.get<insertion_order>().erase(entry);
+        CTransactionRef first_tx;
+        if (!queuedTx.empty()) {
+            first_tx = queuedTx.front();
+            cachedInnerUsage -= RecursiveDynamicUsage(*queuedTx.front());
+            iters_by_txid.erase(queuedTx.front()->GetHash());
+            queuedTx.pop_front();
+        }
+        return first_tx;
     }

+    size_t size() const { return queuedTx.size(); }
+
     void clear()
     {
         cachedInnerUsage = 0;
+        iters_by_txid.clear();
         queuedTx.clear();
     }
+
+    /** Clear all data structures and return the list of transactions. */
+    std::list<CTransactionRef> take()
+    {
+        std::list<CTransactionRef> ret = std::move(queuedTx);
+        clear();
+        return ret;
+    }
 };

 #endif // BITCOIN_TXMEMPOOL_H
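Taken together, the new interface is used roughly as follows across a reorg, mirroring the validation changes further down. This is a simplified sketch, not code from the PR: ReorgSketch, max_mem_bytes, and the per-block vtx parameters are hypothetical, and the mempool bookkeeping done by the real callers is reduced to comments.

    // Hypothetical usage sketch; assumes txmempool.h is included.
    void ReorgSketch(DisconnectedBlockTransactions& disconnectpool,
                     const std::vector<CTransactionRef>& disconnected_block_vtx,  // one disconnected block
                     const std::vector<CTransactionRef>& connected_block_vtx,     // one newly connected block
                     size_t max_mem_bytes)
    {
        // 1. Called once per disconnected block, tip first: queue its transactions and
        //    trim from the front of the list whenever the memory budget is exceeded.
        disconnectpool.AddTransactionsFromBlock(disconnected_block_vtx);
        while (disconnectpool.DynamicMemoryUsage() > max_mem_bytes) {
            const CTransactionRef dropped = disconnectpool.take_first();
            // the real caller also evicts descendants of `dropped` from the mempool
            (void)dropped;
        }

        // 2. Called once per block connected on the new chain: drop anything it confirms.
        disconnectpool.removeForBlock(connected_block_vtx);

        // 3. When the reorg is done, drain the pool and resubmit from back to front so
        //    that parents are seen before children.
        const auto queued = disconnectpool.take();
        for (auto it = queued.rbegin(); it != queued.rend(); ++it) {
            // try to re-accept *it into the mempool (AcceptToMemoryPool in validation.cpp)
        }
    }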
@@ -295,28 +295,30 @@ void Chainstate::MaybeUpdateMempoolForReorg(
     AssertLockHeld(cs_main);
     AssertLockHeld(m_mempool->cs);
     std::vector<uint256> vHashUpdate;
-    // disconnectpool's insertion_order index sorts the entries from
-    // oldest to newest, but the oldest entry will be the last tx from the
-    // latest mined block that was disconnected.
-    // Iterate disconnectpool in reverse, so that we add transactions
-    // back to the mempool starting with the earliest transaction that had
-    // been previously seen in a block.
-    auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin();
-    while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
-        // ignore validation errors in resurrected transactions
-        if (!fAddToMempool || (*it)->IsCoinBase() ||
-            AcceptToMemoryPool(*this, *it, GetTime(),
-                /*bypass_limits=*/true, /*test_accept=*/false).m_result_type !=
-                    MempoolAcceptResult::ResultType::VALID) {
-            // If the transaction doesn't make it in to the mempool, remove any
-            // transactions that depend on it (which would now be orphans).
-            m_mempool->removeRecursive(**it, MemPoolRemovalReason::REORG);
-        } else if (m_mempool->exists(GenTxid::Txid((*it)->GetHash()))) {
-            vHashUpdate.push_back((*it)->GetHash());
+    {
+        // disconnectpool is ordered so that the front is the most recently-confirmed
+        // transaction (the last tx of the block at the tip) in the disconnected chain.
+        // Iterate disconnectpool in reverse, so that we add transactions
+        // back to the mempool starting with the earliest transaction that had
+        // been previously seen in a block.
+        const auto queuedTx = disconnectpool.take();
+        auto it = queuedTx.rbegin();
+        while (it != queuedTx.rend()) {
+            // ignore validation errors in resurrected transactions
+            if (!fAddToMempool || (*it)->IsCoinBase() ||
+                AcceptToMemoryPool(*this, *it, GetTime(),
+                    /*bypass_limits=*/true, /*test_accept=*/false).m_result_type !=
+                        MempoolAcceptResult::ResultType::VALID) {
+                // If the transaction doesn't make it in to the mempool, remove any
+                // transactions that depend on it (which would now be orphans).
+                m_mempool->removeRecursive(**it, MemPoolRemovalReason::REORG);
+            } else if (m_mempool->exists(GenTxid::Txid((*it)->GetHash()))) {
+                vHashUpdate.push_back((*it)->GetHash());
+            }
+            ++it;
         }
-        ++it;
     }
-    disconnectpool.queuedTx.clear();

     // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
     // no in-mempool children, which is generally not true when adding
     // previously-confirmed transactions back to the mempool.
@@ -2724,9 +2726,8 @@ bool Chainstate::DisconnectTip(BlockValidationState& state, DisconnectedBlockTra
         disconnectpool->AddTransactionsFromBlock(block.vtx);
         while (disconnectpool->DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE * 1000) {
             // Drop the earliest entry, and remove its children from the mempool.
-            auto it = disconnectpool->queuedTx.get<insertion_order>().begin();
-            m_mempool->removeRecursive(**it, MemPoolRemovalReason::REORG);
-            disconnectpool->removeEntry(it);
+            auto ptx = disconnectpool->take_first();
+            m_mempool->removeRecursive(*ptx, MemPoolRemovalReason::REORG);
         }
     }

@@ -51,7 +51,7 @@ class CBlockTreeDB;
 class CTxMemPool;
 class ChainstateManager;
 struct ChainTxData;
-struct DisconnectedBlockTransactions;
+class DisconnectedBlockTransactions;
 struct PrecomputedTransactionData;
 struct LockPoints;
 struct AssumeutxoData;