blockstorage: segment normal/assumedvalid blockfiles

When using an assumedvalid (snapshot) chainstate along with a background
chainstate, we are syncing two very different regions of the chain
simultaneously. If we use the same blockfile space for both of these
syncs, blocks at wildly different heights will be stored alongside one
another, making pruning ineffective.

This change implements a separate blockfile cursor for the assumedvalid
chainstate when one is in use.
Author: James O'Beirne
Date:   2023-05-03 14:55:03 -04:00
parent 4c3b8ca35c
commit 7fcd21544a
3 changed files with 179 additions and 47 deletions
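For illustration only, here is a minimal standalone sketch of the cursor-segmentation idea described above. The type names loosely mirror the patch (BlockfileType, BlockfileCursor), but the surrounding structure (BlockfileCursors, TypeForHeight, CursorFor) is simplified and hypothetical, not the actual BlockManager code:

// Illustrative sketch (not part of the patch): route block writes to one of two
// blockfile cursors depending on whether a block falls in the assumedvalid
// (snapshot) region of the chain.
#include <algorithm>
#include <array>
#include <optional>

enum BlockfileType { NORMAL = 0, ASSUMED = 1, NUM_TYPES = 2 };

struct BlockfileCursor {
    int file_num{0};    // blockfile currently being appended to
    int undo_height{0}; // highest height whose undo data is written to this file
};

struct BlockfileCursors {
    // One cursor per region; the ASSUMED cursor only exists once a snapshot is in use.
    std::array<std::optional<BlockfileCursor>, NUM_TYPES> cursors{BlockfileCursor{}, std::nullopt};
    std::optional<int> snapshot_height;

    BlockfileType TypeForHeight(int height) const
    {
        if (!snapshot_height) return NORMAL;
        return height >= *snapshot_height ? ASSUMED : NORMAL;
    }

    int MaxBlockfileNum() const
    {
        int max_num{0};
        for (const auto& c : cursors) {
            if (c) max_num = std::max(max_num, c->file_num);
        }
        return max_num;
    }

    // Pick (and lazily create) the cursor for a block at the given height, so the
    // normal and assumedvalid regions never share a blockfile.
    BlockfileCursor& CursorFor(int height)
    {
        const BlockfileType type = TypeForHeight(height);
        if (!cursors[type]) {
            cursors[type] = BlockfileCursor{MaxBlockfileNum() + 1};
        }
        return *cursors[type];
    }
};

With a layout like this, each region fills its own run of blockfiles, so pruning one region never has to keep a file alive merely because it also contains blocks from a far-away height range.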

src/node/blockstorage.cpp

@@ -10,6 +10,7 @@
 #include <dbwrapper.h>
 #include <flatfile.h>
 #include <hash.h>
+#include <kernel/chain.h>
 #include <kernel/chainparams.h>
 #include <kernel/messagestartchars.h>
 #include <logging.h>
@@ -273,7 +274,7 @@ void BlockManager::FindFilesToPruneManual(
     const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, nManualPruneHeight);
 
     int count = 0;
-    for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) {
+    for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
         const auto& fileinfo = m_blockfile_info[fileNumber];
         if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
             continue;
@@ -325,7 +326,7 @@ void BlockManager::FindFilesToPrune(
         nBuffer += target / 10;
     }
 
-    for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) {
+    for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
         const auto& fileinfo = m_blockfile_info[fileNumber];
         nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize;
@@ -385,19 +386,25 @@ bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockha
         return false;
     }
 
-    int snapshot_height = -1;
     if (snapshot_blockhash) {
         const AssumeutxoData au_data = *Assert(GetParams().AssumeutxoForBlockhash(*snapshot_blockhash));
-        snapshot_height = au_data.height;
+        m_snapshot_height = au_data.height;
         CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)};
 
-        // Since nChainTx (responsible for estiamted progress) isn't persisted
+        // Since nChainTx (responsible for estimated progress) isn't persisted
         // to disk, we must bootstrap the value for assumedvalid chainstates
         // from the hardcoded assumeutxo chainparams.
         base->nChainTx = au_data.nChainTx;
         LogPrintf("[snapshot] set nChainTx=%d for %s\n", au_data.nChainTx, snapshot_blockhash->ToString());
+    } else {
+        // If this isn't called with a snapshot blockhash, make sure the cached snapshot height
+        // is null. This is relevant during snapshot completion, when the blockman may be loaded
+        // with a height that then needs to be cleared after the snapshot is fully validated.
+        m_snapshot_height.reset();
     }
 
+    Assert(m_snapshot_height.has_value() == snapshot_blockhash.has_value());
+
     // Calculate nChainWork
     std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()};
     std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
@@ -414,7 +421,7 @@ bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockha
         // Pruned nodes may have deleted the block.
         if (pindex->nTx > 0) {
             if (pindex->pprev) {
-                if (snapshot_blockhash && pindex->nHeight == snapshot_height &&
+                if (m_snapshot_height && pindex->nHeight == *m_snapshot_height &&
                     pindex->GetBlockHash() == *snapshot_blockhash) {
                     // Should have been set above; don't disturb it with code below.
                     Assert(pindex->nChainTx > 0);
@@ -455,7 +462,8 @@ bool BlockManager::WriteBlockIndexDB()
         vBlocks.push_back(*it);
         m_dirty_blockindex.erase(it++);
     }
-    if (!m_block_tree_db->WriteBatchSync(vFiles, m_last_blockfile, vBlocks)) {
+    int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
+    if (!m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks)) {
         return false;
     }
     return true;
@@ -466,16 +474,17 @@ bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_block
     if (!LoadBlockIndex(snapshot_blockhash)) {
         return false;
     }
+    int max_blockfile_num{0};
 
     // Load block file info
-    m_block_tree_db->ReadLastBlockFile(m_last_blockfile);
-    m_blockfile_info.resize(m_last_blockfile + 1);
-    LogPrintf("%s: last block file = %i\n", __func__, m_last_blockfile);
-    for (int nFile = 0; nFile <= m_last_blockfile; nFile++) {
+    m_block_tree_db->ReadLastBlockFile(max_blockfile_num);
+    m_blockfile_info.resize(max_blockfile_num + 1);
+    LogPrintf("%s: last block file = %i\n", __func__, max_blockfile_num);
+    for (int nFile = 0; nFile <= max_blockfile_num; nFile++) {
         m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]);
     }
-    LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[m_last_blockfile].ToString());
-    for (int nFile = m_last_blockfile + 1; true; nFile++) {
+    LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[max_blockfile_num].ToString());
+    for (int nFile = max_blockfile_num + 1; true; nFile++) {
         CBlockFileInfo info;
         if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
             m_blockfile_info.push_back(info);
@@ -499,6 +508,15 @@ bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_block
         }
     }
 
+    {
+        // Initialize the blockfile cursors.
+        LOCK(cs_LastBlockFile);
+        for (size_t i = 0; i < m_blockfile_info.size(); ++i) {
+            const auto last_height_in_file = m_blockfile_info[i].nHeightLast;
+            m_blockfile_cursors[BlockfileTypeForHeight(last_height_in_file)] = {static_cast<int>(i), 0};
+        }
+    }
+
     // Check whether we have ever pruned block & undo files
     m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned);
     if (m_have_pruned) {
@@ -516,12 +534,13 @@ bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_block
 void BlockManager::ScanAndUnlinkAlreadyPrunedFiles()
 {
     AssertLockHeld(::cs_main);
+    int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
     if (!m_have_pruned) {
         return;
     }
 
     std::set<int> block_files_to_prune;
-    for (int file_number = 0; file_number < m_last_blockfile; file_number++) {
+    for (int file_number = 0; file_number < max_blockfile; file_number++) {
         if (m_blockfile_info[file_number].nSize == 0) {
             block_files_to_prune.insert(file_number);
         }
@@ -696,7 +715,7 @@ bool BlockManager::FlushUndoFile(int block_file, bool finalize)
     return true;
 }
 
-bool BlockManager::FlushBlockFile(bool fFinalize, bool finalize_undo)
+bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo)
 {
     bool success = true;
     LOCK(cs_LastBlockFile);
@@ -708,9 +727,9 @@ bool BlockManager::FlushBlockFile(bool fFinalize, bool finalize_undo)
         // have populated `m_blockfile_info` via LoadBlockIndexDB().
         return true;
     }
-    assert(static_cast<int>(m_blockfile_info.size()) > m_last_blockfile);
+    assert(static_cast<int>(m_blockfile_info.size()) > blockfile_num);
 
-    FlatFilePos block_pos_old(m_last_blockfile, m_blockfile_info[m_last_blockfile].nSize);
+    FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize);
     if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
         m_opts.notifications.flushError("Flushing block file to disk failed. This is likely the result of an I/O error.");
         success = false;
@@ -718,13 +737,33 @@ bool BlockManager::FlushBlockFile(bool fFinalize, bool finalize_undo)
     // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
     // e.g. during IBD or a sync after a node going offline
     if (!fFinalize || finalize_undo) {
-        if (!FlushUndoFile(m_last_blockfile, finalize_undo)) {
+        if (!FlushUndoFile(blockfile_num, finalize_undo)) {
             success = false;
         }
     }
     return success;
 }
 
+BlockfileType BlockManager::BlockfileTypeForHeight(int height)
+{
+    if (!m_snapshot_height) {
+        return BlockfileType::NORMAL;
+    }
+    return (height >= *m_snapshot_height) ? BlockfileType::ASSUMED : BlockfileType::NORMAL;
+}
+
+bool BlockManager::FlushChainstateBlockFile(int tip_height)
+{
+    LOCK(cs_LastBlockFile);
+    auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)];
+    if (cursor) {
+        // The cursor may not exist after a snapshot has been loaded but before any
+        // blocks have been downloaded.
+        return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false);
+    }
+    return false;
+}
+
 uint64_t BlockManager::CalculateCurrentUsage()
 {
     LOCK(cs_LastBlockFile);
@@ -779,8 +818,19 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne
 {
     LOCK(cs_LastBlockFile);
 
-    unsigned int nFile = fKnown ? pos.nFile : m_last_blockfile;
-    if (m_blockfile_info.size() <= nFile) {
+    const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);
+
+    if (!m_blockfile_cursors[chain_type]) {
+        // If a snapshot is loaded during runtime, we may not have initialized this cursor yet.
+        assert(chain_type == BlockfileType::ASSUMED);
+        const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1};
+        m_blockfile_cursors[chain_type] = new_cursor;
+        LogPrint(BCLog::BLOCKSTORAGE, "[%s] initializing blockfile cursor to %s\n", chain_type, new_cursor);
+    }
+    const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;
+
+    int nFile = fKnown ? pos.nFile : last_blockfile;
+    if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
         m_blockfile_info.resize(nFile + 1);
     }
@@ -797,13 +847,20 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne
             }
         }
         assert(nAddSize < max_blockfile_size);
 
         while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
             // when the undo file is keeping up with the block file, we want to flush it explicitly
             // when it is lagging behind (more blocks arrive than are being connected), we let the
             // undo block write case handle it
-            finalize_undo = (m_blockfile_info[nFile].nHeightLast == m_undo_height_in_last_blockfile);
-            nFile++;
-            if (m_blockfile_info.size() <= nFile) {
+            finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) ==
+                             Assert(m_blockfile_cursors[chain_type])->undo_height);
+
+            // Try the next unclaimed blockfile number
+            nFile = this->MaxBlockfileNum() + 1;
+            // Set to increment MaxBlockfileNum() for next iteration
+            m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
+
+            if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
                 m_blockfile_info.resize(nFile + 1);
             }
         }
@@ -811,9 +868,10 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne
         pos.nPos = m_blockfile_info[nFile].nSize;
     }
 
-    if ((int)nFile != m_last_blockfile) {
+    if (nFile != last_blockfile) {
         if (!fKnown) {
-            LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s\n", m_last_blockfile, m_blockfile_info[m_last_blockfile].ToString());
+            LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
+                     last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);
         }
 
         // Do not propagate the return code. The flush concerns a previous block
@@ -823,13 +881,13 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne
         // data may be inconsistent after a crash if the flush is called during
         // a reindex. A flush error might also leave some of the data files
        // untrimmed.
-        if (!FlushBlockFile(!fKnown, finalize_undo)) {
+        if (!FlushBlockFile(last_blockfile, !fKnown, finalize_undo)) {
             LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning,
                           "Failed to flush previous block file %05i (finalize=%i, finalize_undo=%i) before opening new block file %05i\n",
-                          m_last_blockfile, !fKnown, finalize_undo, nFile);
+                          last_blockfile, !fKnown, finalize_undo, nFile);
         }
-        m_last_blockfile = nFile;
-        m_undo_height_in_last_blockfile = 0; // No undo data yet in the new file, so reset our undo-height tracking.
+        // No undo data yet in the new file, so reset our undo-height tracking.
+        m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
     }
 
     m_blockfile_info[nFile].AddBlock(nHeight, nTime);
@@ -903,6 +961,9 @@ bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const
 bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block)
 {
     AssertLockHeld(::cs_main);
+    const BlockfileType type = BlockfileTypeForHeight(block.nHeight);
+    auto& cursor = *Assert(WITH_LOCK(cs_LastBlockFile, return m_blockfile_cursors[type]));
+
     // Write undo information to disk
     if (block.GetUndoPos().IsNull()) {
         FlatFilePos _pos;
@@ -917,7 +978,7 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid
         // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
         // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
         // the FindBlockPos function
-        if (_pos.nFile < m_last_blockfile && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) {
+        if (_pos.nFile < cursor.file_num && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) {
             // Do not propagate the return code, a failed flush here should not
             // be an indication for a failed write. If it were propagated here,
             // the caller would assume the undo data not to be written, when in
@@ -926,8 +987,8 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid
             if (!FlushUndoFile(_pos.nFile, true)) {
                 LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", _pos.nFile);
             }
-        } else if (_pos.nFile == m_last_blockfile && static_cast<uint32_t>(block.nHeight) > m_undo_height_in_last_blockfile) {
-            m_undo_height_in_last_blockfile = block.nHeight;
+        } else if (_pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) {
+            cursor.undo_height = block.nHeight;
         }
         // update nUndoPos in block index
         block.nUndoPos = _pos.nPos;
@@ -1126,4 +1187,18 @@ void ImportBlocks(ChainstateManager& chainman, std::vector<fs::path> vImportFile
         }
     } // End scope of ImportingNow
 }
+
+std::ostream& operator<<(std::ostream& os, const BlockfileType& type) {
+    switch(type) {
+        case BlockfileType::NORMAL: os << "normal"; break;
+        case BlockfileType::ASSUMED: os << "assumed"; break;
+        default: os.setstate(std::ios_base::failbit);
+    }
+    return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor) {
+    os << strprintf("BlockfileCursor(file_num=%d, undo_height=%d)", cursor.file_num, cursor.undo_height);
+    return os;
+}
 } // namespace node

src/node/blockstorage.h

@@ -9,6 +9,7 @@
 #include <chain.h>
 #include <dbwrapper.h>
 #include <kernel/blockmanager_opts.h>
+#include <kernel/chain.h>
 #include <kernel/chainparams.h>
 #include <kernel/cs_main.h>
 #include <kernel/messagestartchars.h>
@@ -36,7 +37,6 @@ class CBlockUndo;
 class CChainParams;
 class Chainstate;
 class ChainstateManager;
-enum class ChainstateRole;
 struct CCheckpointData;
 struct FlatFilePos;
 namespace Consensus {
@@ -98,6 +98,35 @@ struct PruneLockInfo {
     int height_first{std::numeric_limits<int>::max()}; //! Height of earliest block that should be kept and not pruned
 };
 
+enum BlockfileType {
+    // Values used as array indexes - do not change carelessly.
+    NORMAL = 0,
+    ASSUMED = 1,
+    NUM_TYPES = 2,
+};
+
+std::ostream& operator<<(std::ostream& os, const BlockfileType& type);
+
+struct BlockfileCursor {
+    // The latest blockfile number.
+    int file_num{0};
+
+    // Track the height of the highest block in file_num whose undo
+    // data has been written. Block data is written to block files in download
+    // order, but is written to undo files in validation order, which is
+    // usually in order by height. To avoid wasting disk space, undo files will
+    // be trimmed whenever the corresponding block file is finalized and
+    // the height of the highest block written to the block file equals the
+    // height of the highest block written to the undo file. This is a
+    // heuristic and can sometimes preemptively trim undo files that will write
+    // more data later, and sometimes fail to trim undo files that can't have
+    // more data written later.
+    int undo_height{0};
+};
+
+std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor);
+
 /**
  * Maintains a tree of blocks (stored in `m_block_index`) which is consulted
  * to determine where the most-work tip is.
@@ -122,12 +151,13 @@
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /** Return false if block file or undo file flushing fails. */
-    [[nodiscard]] bool FlushBlockFile(bool fFinalize = false, bool finalize_undo = false);
+    [[nodiscard]] bool FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo);
 
     /** Return false if undo file flushing fails. */
     [[nodiscard]] bool FlushUndoFile(int block_file, bool finalize = false);
 
     [[nodiscard]] bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown);
+    [[nodiscard]] bool FlushChainstateBlockFile(int tip_height);
     bool FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize);
 
     FlatFileSeq BlockFileSeq() const;
@@ -169,19 +199,29 @@
     RecursiveMutex cs_LastBlockFile;
     std::vector<CBlockFileInfo> m_blockfile_info;
-    int m_last_blockfile = 0;
 
-    // Track the height of the highest block in m_last_blockfile whose undo
-    // data has been written. Block data is written to block files in download
-    // order, but is written to undo files in validation order, which is
-    // usually in order by height. To avoid wasting disk space, undo files will
-    // be trimmed whenever the corresponding block file is finalized and
-    // the height of the highest block written to the block file equals the
-    // height of the highest block written to the undo file. This is a
-    // heuristic and can sometimes preemptively trim undo files that will write
-    // more data later, and sometimes fail to trim undo files that can't have
-    // more data written later.
-    unsigned int m_undo_height_in_last_blockfile = 0;
+    //! Since assumedvalid chainstates may be syncing a range of the chain that is very
+    //! far away from the normal/background validation process, we should segment blockfiles
+    //! for assumed chainstates. Otherwise, we might have wildly different height ranges
+    //! mixed into the same block files, which would impair our ability to prune
+    //! effectively.
+    //!
+    //! This data structure maintains separate blockfile number cursors for each
+    //! BlockfileType. The ASSUMED state is initialized, when necessary, in FindBlockPos().
+    //!
+    //! The first element is the NORMAL cursor, second is ASSUMED.
+    std::array<std::optional<BlockfileCursor>, BlockfileType::NUM_TYPES>
+        m_blockfile_cursors GUARDED_BY(cs_LastBlockFile) = {
+            BlockfileCursor{},
+            std::nullopt,
+    };
+
+    int MaxBlockfileNum() const EXCLUSIVE_LOCKS_REQUIRED(cs_LastBlockFile)
+    {
+        static const BlockfileCursor empty_cursor;
+        const auto& normal = m_blockfile_cursors[BlockfileType::NORMAL].value_or(empty_cursor);
+        const auto& assumed = m_blockfile_cursors[BlockfileType::ASSUMED].value_or(empty_cursor);
+        return std::max(normal.file_num, assumed.file_num);
+    }
 
     /** Global flag to indicate we should check to see if there are
      * block/undo files that should be deleted. Set on startup
@@ -205,6 +245,8 @@
      */
     std::unordered_map<std::string, PruneLockInfo> m_prune_locks GUARDED_BY(::cs_main);
 
+    BlockfileType BlockfileTypeForHeight(int height);
+
     const kernel::BlockManagerOpts m_opts;
 
 public:
@@ -220,6 +262,20 @@
     BlockMap m_block_index GUARDED_BY(cs_main);
 
+    /**
+     * The height of the base block of an assumeutxo snapshot, if one is in use.
+     *
+     * This controls how blockfiles are segmented by chainstate type to avoid
+     * comingling different height regions of the chain when an assumedvalid chainstate
+     * is in use. If heights are drastically different in the same blockfile, pruning
+     * suffers.
+     *
+     * This is set during ActivateSnapshot() or upon LoadBlockIndex() if a snapshot
+     * had been previously loaded. After the snapshot is validated, this is unset to
+     * restore normal LoadBlockIndex behavior.
+     */
+    std::optional<int> m_snapshot_height;
+
     std::vector<CBlockIndex*> GetAllBlockIndices() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     /**

src/validation.cpp

@@ -2601,7 +2601,7 @@ bool Chainstate::FlushStateToDisk(
                 // First make sure all block and undo data is flushed to disk.
                 // TODO: Handle return error, or add detailed comment why it is
                 // safe to not return an error upon failure.
-                if (!m_blockman.FlushBlockFile()) {
+                if (!m_blockman.FlushChainstateBlockFile(m_chain.Height())) {
                     LogPrintLevel(BCLog::VALIDATION, BCLog::Level::Warning, "%s: Failed to flush block file.\n", __func__);
                 }
             }
@@ -5269,6 +5269,7 @@ bool ChainstateManager::ActivateSnapshot(
     assert(chaintip_loaded);
 
     m_active_chainstate = m_snapshot_chainstate.get();
+    m_blockman.m_snapshot_height = this->GetSnapshotBaseHeight();
 
     LogPrintf("[snapshot] successfully activated snapshot %s\n", base_blockhash.ToString());
     LogPrintf("[snapshot] (%.2f MB)\n",