mirror of
https://github.com/bitcoin/bitcoin.git
synced 2025-01-10 20:03:34 -03:00
Limit the number of orphan blocks
In case the total number of orphan blocks in memory exceeds a limit (currently set to 750), a random orphan block (one that no other orphan block depends on) is dropped. This means it will need to be downloaded again, but it will not consume memory until then.
This commit is contained in:
parent
15ec451554
commit
bbde1e99c8
2 changed files with 29 additions and 1 deletions
28
src/main.cpp
28
src/main.cpp
|
@@ -1054,6 +1054,31 @@ uint256 static GetOrphanRoot(const uint256& hash)
|
||||||
} while(true);
|
} while(true);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Remove a random orphan block (which does not have any dependent orphans).
|
||||||
|
void static PruneOrphanBlocks()
|
||||||
|
{
|
||||||
|
if (mapOrphanBlocksByPrev.size() <= MAX_ORPHAN_BLOCKS)
|
||||||
|
return;
|
||||||
|
|
||||||
|
// Pick a random orphan block.
|
||||||
|
int pos = insecure_rand() % mapOrphanBlocksByPrev.size();
|
||||||
|
std::multimap<uint256, COrphanBlock*>::iterator it = mapOrphanBlocksByPrev.begin();
|
||||||
|
while (pos--) it++;
|
||||||
|
|
||||||
|
// As long as this block has other orphans depending on it, move to one of those successors.
|
||||||
|
do {
|
||||||
|
std::multimap<uint256, COrphanBlock*>::iterator it2 = mapOrphanBlocksByPrev.find(it->second->hashBlock);
|
||||||
|
if (it2 == mapOrphanBlocksByPrev.end())
|
||||||
|
break;
|
||||||
|
it = it2;
|
||||||
|
} while(1);
|
||||||
|
|
||||||
|
uint256 hash = it->second->hashBlock;
|
||||||
|
delete it->second;
|
||||||
|
mapOrphanBlocksByPrev.erase(it);
|
||||||
|
mapOrphanBlocks.erase(hash);
|
||||||
|
}
|
||||||
|
|
||||||
int64_t GetBlockValue(int nHeight, int64_t nFees)
|
int64_t GetBlockValue(int nHeight, int64_t nFees)
|
||||||
{
|
{
|
||||||
int64_t nSubsidy = 50 * COIN;
|
int64_t nSubsidy = 50 * COIN;
|
||||||
|
@@ -2373,10 +2398,11 @@ bool ProcessBlock(CValidationState &state, CNode* pfrom, CBlock* pblock, CDiskBl
|
||||||
// If we don't already have its previous block, shunt it off to holding area until we get it
|
// If we don't already have its previous block, shunt it off to holding area until we get it
|
||||||
if (pblock->hashPrevBlock != 0 && !mapBlockIndex.count(pblock->hashPrevBlock))
|
if (pblock->hashPrevBlock != 0 && !mapBlockIndex.count(pblock->hashPrevBlock))
|
||||||
{
|
{
|
||||||
LogPrintf("ProcessBlock: ORPHAN BLOCK, prev=%s\n", pblock->hashPrevBlock.ToString());
|
LogPrintf("ProcessBlock: ORPHAN BLOCK %lu, prev=%s\n", (unsigned long)mapOrphanBlocks.size(), pblock->hashPrevBlock.ToString());
|
||||||
|
|
||||||
// Accept orphans as long as there is a node to request its parents from
|
// Accept orphans as long as there is a node to request its parents from
|
||||||
if (pfrom) {
|
if (pfrom) {
|
||||||
|
PruneOrphanBlocks();
|
||||||
COrphanBlock* pblock2 = new COrphanBlock();
|
COrphanBlock* pblock2 = new COrphanBlock();
|
||||||
{
|
{
|
||||||
CDataStream ss(SER_DISK, CLIENT_VERSION);
|
CDataStream ss(SER_DISK, CLIENT_VERSION);
|
||||||
|
|
|
@@ -45,6 +45,8 @@ static const unsigned int MAX_STANDARD_TX_SIZE = 100000;
|
||||||
/** The maximum number of sigops allowed in a block (derived from MAX_BLOCK_SIZE) */
static const unsigned int MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE/50;
/** The maximum number of orphan transactions kept in memory */
static const unsigned int MAX_ORPHAN_TRANSACTIONS = MAX_BLOCK_SIZE/100;
/** The maximum number of orphan blocks kept in memory */
static const unsigned int MAX_ORPHAN_BLOCKS = 750;
/** The maximum size of a blk?????.dat file (since 0.8) */
static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB
|
||||||
/** The pre-allocation chunk size for blk?????.dat files (since 0.8) */
|
/** The pre-allocation chunk size for blk?????.dat files (since 0.8) */
|
||||||
|
|
Loading…
Reference in a new issue