// Copyright (c) 2011-2021 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <node/blockstorage.h>

#include <chain.h>
#include <chainparams.h>
#include <clientversion.h>
#include <consensus/validation.h>
#include <flatfile.h>
#include <fs.h>
#include <hash.h>
#include <pow.h>
#include <reverse_iterator.h>
#include <shutdown.h>
#include <signet.h>
#include <streams.h>
#include <undo.h>
#include <util/syscall_sandbox.h>
#include <util/system.h>
#include <validation.h>

namespace node {
std::atomic_bool fImporting(false);
std::atomic_bool fReindex(false);
bool fHavePruned = false;
bool fPruneMode = false;
uint64_t nPruneTarget = 0;

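// Strict weak ordering over block index entries: an entry with more
// accumulated work sorts later, ties are broken by earliest receipt (lowest
// nSequenceId), and finally by pointer address. Validation keeps its set of
// chain-tip candidates ordered with this comparator, so the best candidate is
// the last element of that set.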
bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
{
    // First sort by most total work, ...
    if (pa->nChainWork > pb->nChainWork) return false;
    if (pa->nChainWork < pb->nChainWork) return true;

    // ... then by earliest time received, ...
    if (pa->nSequenceId < pb->nSequenceId) return false;
    if (pa->nSequenceId > pb->nSequenceId) return true;

    // Use pointer address as tie breaker (should only happen with blocks
    // loaded from disk, as those all have id 0).
    if (pa < pb) return false;
    if (pa > pb) return true;

    // Identical blocks.
    return false;
}

static FILE* OpenUndoFile(const FlatFilePos& pos, bool fReadOnly = false);
static FlatFileSeq BlockFileSeq();
static FlatFileSeq UndoFileSeq();

CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash)
{
    AssertLockHeld(cs_main);
    BlockMap::iterator it = m_block_index.find(hash);
    return it == m_block_index.end() ? nullptr : &it->second;
}

const CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
{
    AssertLockHeld(cs_main);
    BlockMap::const_iterator it = m_block_index.find(hash);
    return it == m_block_index.end() ? nullptr : &it->second;
}

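// Add a header to the block index, or return the entry that already exists.
// A new entry is linked to its parent, its height, nTimeMax and cumulative
// chain work are filled in, and it is added to m_dirty_blockindex so it is
// written to the block tree DB on the next flush.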
CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block)
{
    AssertLockHeld(cs_main);

    auto [mi, inserted] = m_block_index.try_emplace(block.GetHash(), block);
    if (!inserted) {
        return &mi->second;
    }
    CBlockIndex* pindexNew = &(*mi).second;

    // We assign the sequence id to blocks only when the full data is available,
    // to avoid miners withholding blocks but broadcasting headers, to get a
    // competitive advantage.
    pindexNew->nSequenceId = 0;

    pindexNew->phashBlock = &((*mi).first);
    BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
    if (miPrev != m_block_index.end()) {
        pindexNew->pprev = &(*miPrev).second;
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
        pindexNew->BuildSkip();
    }
    pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
    if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
        pindexBestHeader = pindexNew;

    m_dirty_blockindex.insert(pindexNew);

    return pindexNew;
}

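// Mark everything stored in blk/rev file `fileNumber` as pruned: clear the
// HAVE_DATA/HAVE_UNDO status bits and file positions of the affected index
// entries and reset that file's statistics. The files themselves are removed
// separately (see UnlinkPrunedFiles()).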
void BlockManager::PruneOneBlockFile(const int fileNumber)
{
    AssertLockHeld(cs_main);
    LOCK(cs_LastBlockFile);

    for (auto& entry : m_block_index) {
        CBlockIndex* pindex = &entry.second;
        if (pindex->nFile == fileNumber) {
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
            pindex->nFile = 0;
            pindex->nDataPos = 0;
            pindex->nUndoPos = 0;
            m_dirty_blockindex.insert(pindex);

            // Prune from m_blocks_unlinked -- any block we prune would have
            // to be downloaded again in order to consider its chain, at which
            // point it would be considered as a candidate for
            // m_blocks_unlinked or setBlockIndexCandidates.
            auto range = m_blocks_unlinked.equal_range(pindex->pprev);
            while (range.first != range.second) {
                std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first;
                range.first++;
                if (_it->second == pindex) {
                    m_blocks_unlinked.erase(_it);
                }
            }
        }
    }

    m_blockfile_info[fileNumber].SetNull();
    m_dirty_fileinfo.insert(fileNumber);
}

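// Select files for manually requested pruning (e.g. the pruneblockchain RPC):
// every block file whose highest block is at or below the requested height is
// pruned, while always keeping at least MIN_BLOCKS_TO_KEEP blocks below the tip.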
void BlockManager::FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height)
{
    assert(fPruneMode && nManualPruneHeight > 0);

    LOCK2(cs_main, cs_LastBlockFile);
    if (chain_tip_height < 0) {
        return;
    }

    // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
    unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP);
    int count = 0;
    for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) {
        if (m_blockfile_info[fileNumber].nSize == 0 || m_blockfile_info[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
            continue;
        }
        PruneOneBlockFile(fileNumber);
        setFilesToPrune.insert(fileNumber);
        count++;
    }
    LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count);
}

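// Automatic pruning: once total blk/rev usage plus an allocation buffer is at
// or above the -prune target, walk the block files from oldest to newest and
// prune them until usage drops back under the target, skipping files that may
// still hold blocks within MIN_BLOCKS_TO_KEEP of the tip.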
void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd)
{
    LOCK2(cs_main, cs_LastBlockFile);
    if (chain_tip_height < 0 || nPruneTarget == 0) {
        return;
    }
    if ((uint64_t)chain_tip_height <= nPruneAfterHeight) {
        return;
    }

    unsigned int nLastBlockWeCanPrune{(unsigned)std::min(prune_height, chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP))};
    uint64_t nCurrentUsage = CalculateCurrentUsage();
    // We don't check for pruning until after we've allocated new space for files,
    // so we should leave a buffer under our target to account for another allocation
    // before the next pruning.
    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
    uint64_t nBytesToPrune;
    int count = 0;

    if (nCurrentUsage + nBuffer >= nPruneTarget) {
        // On a prune event, the chainstate DB is flushed.
        // To avoid excessive prune events negating the benefit of high dbcache
        // values, we should not prune too rapidly.
        // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
        if (is_ibd) {
            // Since this is only relevant during IBD, we use a fixed 10%
            nBuffer += nPruneTarget / 10;
        }

        for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) {
            nBytesToPrune = m_blockfile_info[fileNumber].nSize + m_blockfile_info[fileNumber].nUndoSize;

            if (m_blockfile_info[fileNumber].nSize == 0) {
                continue;
            }

            if (nCurrentUsage + nBuffer < nPruneTarget) { // are we below our target?
                break;
            }

            // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip, but keep scanning
            if (m_blockfile_info[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
                continue;
            }

            PruneOneBlockFile(fileNumber);
            // Queue up the files for removal
            setFilesToPrune.insert(fileNumber);
            nCurrentUsage -= nBytesToPrune;
            count++;
        }
    }

    LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
             nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
             ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
             nLastBlockWeCanPrune, count);
}

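// Return the block index entry for `hash`, inserting a default-constructed
// entry if none exists yet. Used below as the callback for
// LoadBlockIndexGuts() when rebuilding the in-memory index from the DB.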
CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash)
{
    AssertLockHeld(cs_main);

    if (hash.IsNull()) {
        return nullptr;
    }

    const auto [mi, inserted]{m_block_index.try_emplace(hash)};
    CBlockIndex* pindex = &(*mi).second;
    if (inserted) {
        pindex->phashBlock = &((*mi).first);
    }
    return pindex;
}

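// Rebuild the in-memory block index from the block tree DB, then walk the
// entries in height order to derive the fields that are not stored on disk:
// cumulative chain work, nTimeMax, nChainTx (or an m_blocks_unlinked entry
// when the parent's data is missing), failure flags inherited from failed
// parents, skip-list pointers, and the best known header.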
bool BlockManager::LoadBlockIndex(const Consensus::Params& consensus_params)
{
    if (!m_block_tree_db->LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); })) {
        return false;
    }

    // Calculate nChainWork
    std::vector<CBlockIndex*> vSortedByHeight;
    vSortedByHeight.reserve(m_block_index.size());
    for (auto& [_, block_index] : m_block_index) {
        vSortedByHeight.push_back(&block_index);
    }
    sort(vSortedByHeight.begin(), vSortedByHeight.end(),
         [](const CBlockIndex* pa, const CBlockIndex* pb) {
             return pa->nHeight < pb->nHeight;
         });

    for (CBlockIndex* pindex : vSortedByHeight) {
        if (ShutdownRequested()) return false;
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
        pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);

        // We can link the chain of blocks for which we've received transactions at some point, or
        // blocks that are assumed-valid on the basis of snapshot load (see
        // PopulateAndValidateSnapshot()).
        // Pruned nodes may have deleted the block.
        if (pindex->nTx > 0) {
            if (pindex->pprev) {
                if (pindex->pprev->nChainTx > 0) {
                    pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
                } else {
                    pindex->nChainTx = 0;
                    m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
                }
            } else {
                pindex->nChainTx = pindex->nTx;
            }
        }
        if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
            pindex->nStatus |= BLOCK_FAILED_CHILD;
            m_dirty_blockindex.insert(pindex);
        }
        if (pindex->pprev) {
            pindex->BuildSkip();
        }
        if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
            pindexBestHeader = pindex;
    }

    return true;
}

void BlockManager::Unload()
{
    m_blocks_unlinked.clear();

    m_block_index.clear();

    m_blockfile_info.clear();
    m_last_blockfile = 0;
    m_dirty_blockindex.clear();
    m_dirty_fileinfo.clear();
}

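// Flush all dirty block file info and dirty block index entries to the block
// tree DB in a single batch write, clearing the dirty sets as they are consumed.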
bool BlockManager::WriteBlockIndexDB()
{
    AssertLockHeld(::cs_main);
    std::vector<std::pair<int, const CBlockFileInfo*>> vFiles;
    vFiles.reserve(m_dirty_fileinfo.size());
    for (std::set<int>::iterator it = m_dirty_fileinfo.begin(); it != m_dirty_fileinfo.end();) {
        vFiles.push_back(std::make_pair(*it, &m_blockfile_info[*it]));
        m_dirty_fileinfo.erase(it++);
    }
    std::vector<const CBlockIndex*> vBlocks;
    vBlocks.reserve(m_dirty_blockindex.size());
    for (std::set<CBlockIndex*>::iterator it = m_dirty_blockindex.begin(); it != m_dirty_blockindex.end();) {
        vBlocks.push_back(*it);
        m_dirty_blockindex.erase(it++);
    }
    if (!m_block_tree_db->WriteBatchSync(vFiles, m_last_blockfile, vBlocks)) {
        return false;
    }
    return true;
}

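// Load the block index from disk and restore block file metadata: the last
// block file number, per-file statistics, whether any pruning has ever taken
// place, and whether a reindex was still in progress at the last shutdown.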
bool BlockManager::LoadBlockIndexDB()
{
    if (!LoadBlockIndex(::Params().GetConsensus())) {
        return false;
    }

    // Load block file info
    m_block_tree_db->ReadLastBlockFile(m_last_blockfile);
    m_blockfile_info.resize(m_last_blockfile + 1);
    LogPrintf("%s: last block file = %i\n", __func__, m_last_blockfile);
    for (int nFile = 0; nFile <= m_last_blockfile; nFile++) {
        m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]);
    }
    LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[m_last_blockfile].ToString());
    for (int nFile = m_last_blockfile + 1; true; nFile++) {
        CBlockFileInfo info;
        if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
            m_blockfile_info.push_back(info);
        } else {
            break;
        }
    }

    // Check presence of blk files
    LogPrintf("Checking all blk files are present...\n");
    std::set<int> setBlkDataFiles;
    for (const auto& [_, block_index] : m_block_index) {
        if (block_index.nStatus & BLOCK_HAVE_DATA) {
            setBlkDataFiles.insert(block_index.nFile);
        }
    }
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
        FlatFilePos pos(*it, 0);
        if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
            return false;
        }
    }

    // Check whether we have ever pruned block & undo files
    m_block_tree_db->ReadFlag("prunedblockfiles", fHavePruned);
    if (fHavePruned) {
        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
    }

    // Check whether we need to continue reindexing
    bool fReindexing = false;
    m_block_tree_db->ReadReindexing(fReindexing);
    if (fReindexing) fReindex = true;

    return true;
}

const CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
{
    const MapCheckpoints& checkpoints = data.mapCheckpoints;

    for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints)) {
        const uint256& hash = i.second;
        const CBlockIndex* pindex = LookupBlockIndex(hash);
        if (pindex) {
            return pindex;
        }
    }
    return nullptr;
}

bool IsBlockPruned(const CBlockIndex* pblockindex)
{
    AssertLockHeld(::cs_main);
    return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
}

// If we're using -prune with -reindex, then delete block files that will be ignored by the
// reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
// is missing, do the same here to delete any later block files after a gap. Also delete all
// rev files since they'll be rewritten by the reindex anyway. This ensures that m_blockfile_info
// is in sync with what's actually on disk by the time we start downloading, so that pruning
// works correctly.
void CleanupBlockRevFiles()
{
    std::map<std::string, fs::path> mapBlockFiles;

    // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
    // Remove the rev files immediately and insert the blk file paths into an
    // ordered map keyed by block file index.
    LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
    fs::path blocksdir = gArgs.GetBlocksDirPath();
    for (fs::directory_iterator it(blocksdir); it != fs::directory_iterator(); it++) {
        const std::string path = fs::PathToString(it->path().filename());
        if (fs::is_regular_file(*it) &&
            path.length() == 12 &&
            path.substr(8,4) == ".dat")
        {
            if (path.substr(0, 3) == "blk") {
                mapBlockFiles[path.substr(3, 5)] = it->path();
            } else if (path.substr(0, 3) == "rev") {
                remove(it->path());
            }
        }
    }

    // Remove all block files that aren't part of a contiguous set starting at
    // zero by walking the ordered map (keys are block file indices) and
    // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
    // start removing block files.
    int nContigCounter = 0;
    for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
        if (LocaleIndependentAtoi<int>(item.first) == nContigCounter) {
            nContigCounter++;
            continue;
        }
        remove(item.second);
    }
}

CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n)
{
    LOCK(cs_LastBlockFile);

    return &m_blockfile_info.at(n);
}

static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
{
    // Open history file to append
    CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull()) {
        return error("%s: OpenUndoFile failed", __func__);
    }

    // Write index header
    unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
    fileout << messageStart << nSize;

    // Write undo data
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0) {
        return error("%s: ftell failed", __func__);
    }
    pos.nPos = (unsigned int)fileOutPos;
    fileout << blockundo;

    // calculate & write checksum
    CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
    hasher << hashBlock;
    hasher << blockundo;
    fileout << hasher.GetHash();

    return true;
}

bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
{
    const FlatFilePos pos{WITH_LOCK(::cs_main, return pindex->GetUndoPos())};

    if (pos.IsNull()) {
        return error("%s: no undo data available", __func__);
    }

    // Open history file to read
    CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull()) {
        return error("%s: OpenUndoFile failed", __func__);
    }

    // Read block
    uint256 hashChecksum;
    CHashVerifier<CAutoFile> verifier(&filein); // We need a CHashVerifier as reserializing may lose data
    try {
        verifier << pindex->pprev->GetBlockHash();
        verifier >> blockundo;
        filein >> hashChecksum;
    } catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s", __func__, e.what());
    }

    // Verify checksum
    if (hashChecksum != verifier.GetHash()) {
        return error("%s: Checksum mismatch", __func__);
    }

    return true;
}

void BlockManager::FlushUndoFile(int block_file, bool finalize)
{
    FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize);
    if (!UndoFileSeq().Flush(undo_pos_old, finalize)) {
        AbortNode("Flushing undo file to disk failed. This is likely the result of an I/O error.");
    }
}

void BlockManager::FlushBlockFile(bool fFinalize, bool finalize_undo)
{
    LOCK(cs_LastBlockFile);
    FlatFilePos block_pos_old(m_last_blockfile, m_blockfile_info[m_last_blockfile].nSize);
    if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
        AbortNode("Flushing block file to disk failed. This is likely the result of an I/O error.");
    }
    // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
    // e.g. during IBD or a sync after a node going offline
    if (!fFinalize || finalize_undo) FlushUndoFile(m_last_blockfile, finalize_undo);
}

uint64_t BlockManager::CalculateCurrentUsage()
{
    LOCK(cs_LastBlockFile);

    uint64_t retval = 0;
    for (const CBlockFileInfo& file : m_blockfile_info) {
        retval += file.nSize + file.nUndoSize;
    }
    return retval;
}

void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune)
{
    for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
        FlatFilePos pos(*it, 0);
        fs::remove(BlockFileSeq().FileName(pos));
        fs::remove(UndoFileSeq().FileName(pos));
        LogPrint(BCLog::BLOCKSTORE, "Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
    }
}

static FlatFileSeq BlockFileSeq()
{
    return FlatFileSeq(gArgs.GetBlocksDirPath(), "blk", gArgs.GetBoolArg("-fastprune", false) ? 0x4000 /* 16kb */ : BLOCKFILE_CHUNK_SIZE);
}

static FlatFileSeq UndoFileSeq()
{
    return FlatFileSeq(gArgs.GetBlocksDirPath(), "rev", UNDOFILE_CHUNK_SIZE);
}

FILE* OpenBlockFile(const FlatFilePos& pos, bool fReadOnly)
{
    return BlockFileSeq().Open(pos, fReadOnly);
}

/** Open an undo file (rev?????.dat) */
static FILE* OpenUndoFile(const FlatFilePos& pos, bool fReadOnly)
{
    return UndoFileSeq().Open(pos, fReadOnly);
}

fs::path GetBlockPosFilename(const FlatFilePos& pos)
{
    return BlockFileSeq().FileName(pos);
}

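// Choose the on-disk position for a block of nAddSize bytes. For new blocks
// this appends to the current block file, rolling over to a new file (and
// flushing/finalizing the old one) when it would exceed MAX_BLOCKFILE_SIZE;
// for blocks whose position is already known (fKnown) only the bookkeeping is
// updated. Space is pre-allocated, and m_check_for_pruning may be set when
// running in prune mode.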
bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, CChain& active_chain, uint64_t nTime, bool fKnown)
{
    LOCK(cs_LastBlockFile);

    unsigned int nFile = fKnown ? pos.nFile : m_last_blockfile;
    if (m_blockfile_info.size() <= nFile) {
        m_blockfile_info.resize(nFile + 1);
    }

    bool finalize_undo = false;
    if (!fKnown) {
        while (m_blockfile_info[nFile].nSize + nAddSize >= (gArgs.GetBoolArg("-fastprune", false) ? 0x10000 /* 64kb */ : MAX_BLOCKFILE_SIZE)) {
            // when the undo file is keeping up with the block file, we want to flush it explicitly;
            // when it is lagging behind (more blocks arrive than are being connected), we let the
            // undo block write case handle it
            finalize_undo = (m_blockfile_info[nFile].nHeightLast == (unsigned int)active_chain.Tip()->nHeight);
            nFile++;
            if (m_blockfile_info.size() <= nFile) {
                m_blockfile_info.resize(nFile + 1);
            }
        }
        pos.nFile = nFile;
        pos.nPos = m_blockfile_info[nFile].nSize;
    }

    if ((int)nFile != m_last_blockfile) {
        if (!fKnown) {
            LogPrint(BCLog::BLOCKSTORE, "Leaving block file %i: %s\n", m_last_blockfile, m_blockfile_info[m_last_blockfile].ToString());
        }
        FlushBlockFile(!fKnown, finalize_undo);
        m_last_blockfile = nFile;
    }

    m_blockfile_info[nFile].AddBlock(nHeight, nTime);
    if (fKnown) {
        m_blockfile_info[nFile].nSize = std::max(pos.nPos + nAddSize, m_blockfile_info[nFile].nSize);
    } else {
        m_blockfile_info[nFile].nSize += nAddSize;
    }

    if (!fKnown) {
        bool out_of_space;
        size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
        if (out_of_space) {
            return AbortNode("Disk space is too low!", _("Disk space is too low!"));
        }
        if (bytes_allocated != 0 && fPruneMode) {
            m_check_for_pruning = true;
        }
    }

    m_dirty_fileinfo.insert(nFile);
    return true;
}

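// Reserve nAddSize bytes at the end of the undo (rev) file that belongs to
// block file nFile, pre-allocating disk space and marking the file info dirty.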
bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
{
    pos.nFile = nFile;

    LOCK(cs_LastBlockFile);

    pos.nPos = m_blockfile_info[nFile].nUndoSize;
    m_blockfile_info[nFile].nUndoSize += nAddSize;
    m_dirty_fileinfo.insert(nFile);

    bool out_of_space;
    size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
    if (out_of_space) {
        return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
    }
    if (bytes_allocated != 0 && fPruneMode) {
        m_check_for_pruning = true;
    }

    return true;
}

static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessageHeader::MessageStartChars& messageStart)
{
    // Open history file to append
    CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull()) {
        return error("WriteBlockToDisk: OpenBlockFile failed");
    }

    // Write index header
    unsigned int nSize = GetSerializeSize(block, fileout.GetVersion());
    fileout << messageStart << nSize;

    // Write block
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0) {
        return error("WriteBlockToDisk: ftell failed");
    }
    pos.nPos = (unsigned int)fileOutPos;
    fileout << block;

    return true;
}

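// Make sure the undo (rev) data for pindex is on disk: if it has not been
// written yet, find a position in the matching rev file, write it, flush that
// rev file when this was the last block of an older block file, and record the
// undo position and status in the block index.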
bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
{
    AssertLockHeld(::cs_main);
    // Write undo information to disk
    if (pindex->GetUndoPos().IsNull()) {
        FlatFilePos _pos;
        if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40)) {
            return error("ConnectBlock(): FindUndoPos failed");
        }
        if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart())) {
            return AbortNode(state, "Failed to write undo data");
        }
        // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order).
        // We want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
        // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
        // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
        // the FindBlockPos function.
        if (_pos.nFile < m_last_blockfile && static_cast<uint32_t>(pindex->nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) {
            FlushUndoFile(_pos.nFile, true);
        }

        // update nUndoPos in block index
        pindex->nUndoPos = _pos.nPos;
        pindex->nStatus |= BLOCK_HAVE_UNDO;
        m_dirty_blockindex.insert(pindex);
    }

    return true;
}

bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos, const Consensus::Params& consensusParams)
{
    block.SetNull();

    // Open history file to read
    CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull()) {
        return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString());
    }

    // Read block
    try {
        filein >> block;
    } catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
    }

    // Check the header
    if (!CheckProofOfWork(block.GetHash(), block.nBits, consensusParams)) {
        return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());
    }

    // Signet only: check block solution
    if (consensusParams.signet_blocks && !CheckSignetBlockSolution(block, consensusParams)) {
        return error("ReadBlockFromDisk: Errors in block solution at %s", pos.ToString());
    }

    return true;
}

bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
    const FlatFilePos block_pos{WITH_LOCK(cs_main, return pindex->GetBlockPos())};

    if (!ReadBlockFromDisk(block, block_pos, consensusParams)) {
        return false;
    }
    if (block.GetHash() != pindex->GetBlockHash()) {
        return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
                     pindex->ToString(), block_pos.ToString());
    }
    return true;
}

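// Read a block's raw serialized bytes without deserializing it. The caller
// passes the position of the block payload; this seeks back over the 8-byte
// header (4 bytes of network magic plus a 4-byte length) written by
// WriteBlockToDisk, validates both fields, and then reads exactly that many bytes.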
bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, const CMessageHeader::MessageStartChars& message_start)
{
    FlatFilePos hpos = pos;
    hpos.nPos -= 8; // Seek back 8 bytes for meta header
    CAutoFile filein(OpenBlockFile(hpos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull()) {
        return error("%s: OpenBlockFile failed for %s", __func__, pos.ToString());
    }

    try {
        CMessageHeader::MessageStartChars blk_start;
        unsigned int blk_size;

        filein >> blk_start >> blk_size;

        if (memcmp(blk_start, message_start, CMessageHeader::MESSAGE_START_SIZE)) {
            return error("%s: Block magic mismatch for %s: %s versus expected %s", __func__, pos.ToString(),
                         HexStr(blk_start),
                         HexStr(message_start));
        }

        if (blk_size > MAX_SIZE) {
            return error("%s: Block data is larger than maximum deserialization size for %s: %s versus %s", __func__, pos.ToString(),
                         blk_size, MAX_SIZE);
        }

        block.resize(blk_size); // Zeroing of memory is intentional here
        filein.read(MakeWritableByteSpan(block));
    } catch (const std::exception& e) {
        return error("%s: Read from block file failed: %s for %s", __func__, e.what(), pos.ToString());
    }

    return true;
}

/** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight, CChain& active_chain, const CChainParams& chainparams, const FlatFilePos* dbp)
{
    unsigned int nBlockSize = ::GetSerializeSize(block, CLIENT_VERSION);
    FlatFilePos blockPos;
    if (dbp != nullptr) {
        blockPos = *dbp;
    }
    if (!FindBlockPos(blockPos, nBlockSize + 8, nHeight, active_chain, block.GetBlockTime(), dbp != nullptr)) {
        error("%s: FindBlockPos failed", __func__);
        return FlatFilePos();
    }
    if (dbp == nullptr) {
        if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) {
            AbortNode("Failed to write block");
            return FlatFilePos();
        }
    }
    return blockPos;
}

struct CImportingNow {
    CImportingNow()
    {
        assert(fImporting == false);
        fImporting = true;
    }

    ~CImportingNow()
    {
        assert(fImporting == true);
        fImporting = false;
    }
};

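// Start-up import thread: replay the existing block files when -reindex is
// set, import any -loadblock= files, activate the best chain for every
// chainstate, and finally load the mempool (unless -stopafterblockimport
// triggers a shutdown first).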
void ThreadImport(ChainstateManager& chainman, std::vector<fs::path> vImportFiles, const ArgsManager& args)
{
    SetSyscallSandboxPolicy(SyscallSandboxPolicy::INITIALIZATION_LOAD_BLOCKS);
    ScheduleBatchPriority();

    {
        CImportingNow imp;

        // -reindex
        if (fReindex) {
            int nFile = 0;
            while (true) {
                FlatFilePos pos(nFile, 0);
                if (!fs::exists(GetBlockPosFilename(pos))) {
                    break; // No block files left to reindex
                }
                FILE* file = OpenBlockFile(pos, true);
                if (!file) {
                    break; // This error is logged in OpenBlockFile
                }
                LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile);
                chainman.ActiveChainstate().LoadExternalBlockFile(file, &pos);
                if (ShutdownRequested()) {
                    LogPrintf("Shutdown requested. Exit %s\n", __func__);
                    return;
                }
                nFile++;
            }
            WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false));
            fReindex = false;
            LogPrintf("Reindexing finished\n");
            // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked):
            chainman.ActiveChainstate().LoadGenesisBlock();
        }

        // -loadblock=
        for (const fs::path& path : vImportFiles) {
            FILE* file = fsbridge::fopen(path, "rb");
            if (file) {
                LogPrintf("Importing blocks file %s...\n", fs::PathToString(path));
                chainman.ActiveChainstate().LoadExternalBlockFile(file);
                if (ShutdownRequested()) {
                    LogPrintf("Shutdown requested. Exit %s\n", __func__);
                    return;
                }
            } else {
                LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path));
            }
        }

        // scan for better chains in the block chain database that are not yet connected in the active best chain

        // We can't hold cs_main during ActivateBestChain even though we're accessing
        // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve
        // the relevant pointers before the ABC call.
        for (CChainState* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) {
            BlockValidationState state;
            if (!chainstate->ActivateBestChain(state, nullptr)) {
                LogPrintf("Failed to connect best block (%s)\n", state.ToString());
                StartShutdown();
                return;
            }
        }

        if (args.GetBoolArg("-stopafterblockimport", DEFAULT_STOPAFTERBLOCKIMPORT)) {
            LogPrintf("Stopping after block import\n");
            StartShutdown();
            return;
        }
    } // End scope of CImportingNow
    chainman.ActiveChainstate().LoadMempool(args);
}
} // namespace node