2020-06-15 14:29:29 -04:00
// Copyright (c) 2009-2010 Satoshi Nakamoto
2021-12-30 19:36:57 +02:00
// Copyright (c) 2009-2021 The Bitcoin Core developers
2020-06-15 14:29:29 -04:00
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
2022-06-28 16:54:38 +01:00
# include <compat/compat.h>
2020-06-11 08:58:46 +02:00
# include <fs.h>
2020-06-15 14:29:29 -04:00
# include <wallet/bdb.h>
# include <wallet/db.h>
# include <util/strencodings.h>
# include <util/translation.h>
# include <stdint.h>
# ifndef WIN32
# include <sys/stat.h>
# endif
2021-11-12 11:13:29 -05:00
namespace wallet {
2020-06-15 14:29:29 -04:00
namespace {
//! Make sure database has a unique fileid within the environment. If it
//! doesn't, throw an error. BDB caches do not work properly when more than one
//! open database has the same fileid (values written to one database may show
//! up in reads to other databases).
//!
//! BerkeleyDB generates unique fileids by default
//! (https://docs.oracle.com/cd/E17275_01/html/programmer_reference/program_copy.html),
//! so bitcoin should never create different databases with the same fileid, but
//! this error can be triggered if users manually copy database files.
void CheckUniqueFileid ( const BerkeleyEnvironment & env , const std : : string & filename , Db & db , WalletDatabaseFileId & fileid )
{
if ( env . IsMock ( ) ) return ;
int ret = db . get_mpf ( ) - > get_fileid ( fileid . value ) ;
if ( ret ! = 0 ) {
2020-07-16 14:24:34 -04:00
throw std : : runtime_error ( strprintf ( " BerkeleyDatabase: Can't open database %s (get_fileid failed with %d) " , filename , ret ) ) ;
2020-06-15 14:29:29 -04:00
}
for ( const auto & item : env . m_fileids ) {
if ( fileid = = item . second & & & fileid ! = & item . second ) {
2020-07-16 14:24:34 -04:00
throw std : : runtime_error ( strprintf ( " BerkeleyDatabase: Can't open database %s (duplicates fileid %s from %s) " , filename ,
2020-06-24 17:26:47 +02:00
HexStr ( item . second . value ) , item . first ) ) ;
2020-06-15 14:29:29 -04:00
}
}
}
RecursiveMutex cs_db ;
std : : map < std : : string , std : : weak_ptr < BerkeleyEnvironment > > g_dbenvs GUARDED_BY ( cs_db ) ; //!< Map from directory name to db environment.
} // namespace
bool WalletDatabaseFileId : : operator = = ( const WalletDatabaseFileId & rhs ) const
{
return memcmp ( value , & rhs . value , sizeof ( value ) ) = = 0 ;
}
/**
2020-10-30 16:41:23 -04:00
* @ param [ in ] env_directory Path to environment directory
2020-06-15 14:29:29 -04:00
* @ return A shared pointer to the BerkeleyEnvironment object for the wallet directory , never empty because ~ BerkeleyEnvironment
* erases the weak pointer from the g_dbenvs map .
* @ post A new BerkeleyEnvironment weak pointer is inserted into g_dbenvs if the directory path key was not already in the map .
*/
2021-12-02 21:21:05 +01:00
std : : shared_ptr < BerkeleyEnvironment > GetBerkeleyEnv ( const fs : : path & env_directory , bool use_shared_memory )
2020-06-15 14:29:29 -04:00
{
LOCK ( cs_db ) ;
2021-09-10 00:17:20 -04:00
auto inserted = g_dbenvs . emplace ( fs : : PathToString ( env_directory ) , std : : weak_ptr < BerkeleyEnvironment > ( ) ) ;
2020-06-15 14:29:29 -04:00
if ( inserted . second ) {
2021-12-02 21:21:05 +01:00
auto env = std : : make_shared < BerkeleyEnvironment > ( env_directory , use_shared_memory ) ;
2020-06-15 14:29:29 -04:00
inserted . first - > second = env ;
return env ;
}
return inserted . first - > second . lock ( ) ;
}
//
// BerkeleyBatch
//
void BerkeleyEnvironment : : Close ( )
{
if ( ! fDbEnvInit )
return ;
fDbEnvInit = false ;
for ( auto & db : m_databases ) {
BerkeleyDatabase & database = db . second . get ( ) ;
2020-05-13 14:09:26 -04:00
assert ( database . m_refcount < = 0 ) ;
2020-06-15 14:29:29 -04:00
if ( database . m_db ) {
database . m_db - > close ( 0 ) ;
database . m_db . reset ( ) ;
}
}
FILE * error_file = nullptr ;
dbenv - > get_errfile ( & error_file ) ;
int ret = dbenv - > close ( 0 ) ;
if ( ret ! = 0 )
LogPrintf ( " BerkeleyEnvironment::Close: Error %d closing database environment: %s \n " , ret , DbEnv : : strerror ( ret ) ) ;
if ( ! fMockDb )
2022-05-12 15:44:24 +02:00
DbEnv ( ( uint32_t ) 0 ) . remove ( strPath . c_str ( ) , 0 ) ;
2020-06-15 14:29:29 -04:00
if ( error_file ) fclose ( error_file ) ;
2021-09-10 00:17:20 -04:00
UnlockDirectory ( fs : : PathFromString ( strPath ) , " .walletlock " ) ;
2020-06-15 14:29:29 -04:00
}
void BerkeleyEnvironment : : Reset ( )
{
dbenv . reset ( new DbEnv ( DB_CXX_NO_EXCEPTIONS ) ) ;
fDbEnvInit = false ;
fMockDb = false ;
}
2021-12-02 21:21:05 +01:00
// Construct an on-disk environment rooted at dir_path. The environment is
// not opened here; Open() must be called before the first database use.
BerkeleyEnvironment::BerkeleyEnvironment(const fs::path& dir_path, bool use_shared_memory) : strPath(fs::PathToString(dir_path)), m_use_shared_memory(use_shared_memory)
{
    Reset();
}
BerkeleyEnvironment::~BerkeleyEnvironment()
{
    // Deregister from the global map under cs_db before closing, so
    // GetBerkeleyEnv() can never hand out a pointer to a dying environment.
    LOCK(cs_db);
    g_dbenvs.erase(strPath);
    Close();
}
2020-05-07 14:30:04 -04:00
// Open the BDB environment on disk: take the directory lock, create the
// log subdirectory, configure the DbEnv and open it with recovery enabled.
// Returns true on success (or if already open); on failure sets `err` to a
// user-facing message and returns false.
bool BerkeleyEnvironment::Open(bilingual_str& err)
{
    if (fDbEnvInit) {
        return true;
    }

    fs::path pathIn = fs::PathFromString(strPath);
    TryCreateDirectories(pathIn);
    // One process at a time: the .walletlock file guards the whole directory.
    if (!LockDirectory(pathIn, ".walletlock")) {
        LogPrintf("Cannot obtain a lock on wallet directory %s. Another instance may be using it.\n", strPath);
        err = strprintf(_("Error initializing wallet database environment %s!"), fs::quoted(fs::PathToString(Directory())));
        return false;
    }

    fs::path pathLogDir = pathIn / "database";
    TryCreateDirectories(pathLogDir);
    fs::path pathErrorFile = pathIn / "db.log";
    LogPrintf("BerkeleyEnvironment::Open: LogDir=%s ErrorFile=%s\n", fs::PathToString(pathLogDir), fs::PathToString(pathErrorFile));

    unsigned int nEnvFlags = 0;
    // Without shared memory, keep the environment private to this process.
    if (!m_use_shared_memory) {
        nEnvFlags |= DB_PRIVATE;
    }

    dbenv->set_lg_dir(fs::PathToString(pathLogDir).c_str());
    dbenv->set_cachesize(0, 0x100000, 1); // 1 MiB should be enough for just the wallet
    dbenv->set_lg_bsize(0x10000);
    dbenv->set_lg_max(1048576);
    dbenv->set_lk_max_locks(40000);
    dbenv->set_lk_max_objects(40000);
    dbenv->set_errfile(fsbridge::fopen(pathErrorFile, "a")); /// debug
    dbenv->set_flags(DB_AUTO_COMMIT, 1);
    dbenv->set_flags(DB_TXN_WRITE_NOSYNC, 1);
    dbenv->log_set_config(DB_LOG_AUTO_REMOVE, 1);
    // DB_RECOVER runs normal recovery on open in case of an unclean shutdown.
    int ret = dbenv->open(strPath.c_str(),
                          DB_CREATE |
                              DB_INIT_LOCK |
                              DB_INIT_LOG |
                              DB_INIT_MPOOL |
                              DB_INIT_TXN |
                              DB_THREAD |
                              DB_RECOVER |
                              nEnvFlags,
                          S_IRUSR | S_IWUSR);
    if (ret != 0) {
        LogPrintf("BerkeleyEnvironment::Open: Error %d opening database environment: %s\n", ret, DbEnv::strerror(ret));
        int ret2 = dbenv->close(0);
        if (ret2 != 0) {
            LogPrintf("BerkeleyEnvironment::Open: Error %d closing failed database environment: %s\n", ret2, DbEnv::strerror(ret2));
        }
        Reset();
        err = strprintf(_("Error initializing wallet database environment %s!"), fs::quoted(fs::PathToString(Directory())));
        // DB_RUNRECOVERY usually indicates a BDB version mismatch.
        if (ret == DB_RUNRECOVERY) {
            err += Untranslated(" ") + _("This error could occur if this wallet was not shutdown cleanly and was last loaded using a build with a newer version of Berkeley DB. If so, please use the software that last loaded this wallet");
        }
        return false;
    }

    fDbEnvInit = true;
    fMockDb = false;
    return true;
}
//! Construct an in-memory mock Berkeley environment for testing
BerkeleyEnvironment::BerkeleyEnvironment() : m_use_shared_memory(false)
{
    Reset();

    LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::MakeMock\n");

    // All state is kept in memory: in-memory logging, DB_PRIVATE, and no
    // on-disk environment directory (open with a null home path).
    dbenv->set_cachesize(1, 0, 1);
    dbenv->set_lg_bsize(10485760 * 4);
    dbenv->set_lg_max(10485760);
    dbenv->set_lk_max_locks(10000);
    dbenv->set_lk_max_objects(10000);
    dbenv->set_flags(DB_AUTO_COMMIT, 1);
    dbenv->log_set_config(DB_LOG_IN_MEMORY, 1);
    int ret = dbenv->open(nullptr,
                          DB_CREATE |
                              DB_INIT_LOCK |
                              DB_INIT_LOG |
                              DB_INIT_MPOOL |
                              DB_INIT_TXN |
                              DB_THREAD |
                              DB_PRIVATE,
                          S_IRUSR | S_IWUSR);
    if (ret > 0) {
        throw std::runtime_error(strprintf("BerkeleyEnvironment::MakeMock: Error %d opening database environment.", ret));
    }

    fDbEnvInit = true;
    fMockDb = true;
}
// Default-constructed SafeDbt: BDB malloc's the data buffer on reads
// (DB_DBT_MALLOC); the destructor below cleanses and frees it.
BerkeleyBatch::SafeDbt::SafeDbt()
{
    m_dbt.set_flags(DB_DBT_MALLOC);
}

// Wrap caller-owned memory; no flags are set, so the destructor only
// cleanses the buffer and does not free it.
BerkeleyBatch::SafeDbt::SafeDbt(void* data, size_t size)
    : m_dbt(data, size)
{
}

BerkeleyBatch::SafeDbt::~SafeDbt()
{
    if (m_dbt.get_data() != nullptr) {
        // Clear memory, e.g. in case it was a private key
        memory_cleanse(m_dbt.get_data(), m_dbt.get_size());
        // under DB_DBT_MALLOC, data is malloced by the Dbt, but must be
        // freed by the caller.
        // https://docs.oracle.com/cd/E17275_01/html/api_reference/C/dbt.html
        if (m_dbt.get_flags() & DB_DBT_MALLOC) {
            free(m_dbt.get_data());
        }
    }
}

const void* BerkeleyBatch::SafeDbt::get_data() const
{
    return m_dbt.get_data();
}

uint32_t BerkeleyBatch::SafeDbt::get_size() const
{
    return m_dbt.get_size();
}

// Implicit conversion so a SafeDbt can be passed straight to BDB APIs.
BerkeleyBatch::SafeDbt::operator Dbt*()
{
    return &m_dbt;
}
2020-06-15 14:37:29 -04:00
// Run BDB's integrity check (Db::verify) on the wallet data file. Opens the
// environment first; a missing data file counts as success. On failure
// `errorStr` is set to a user-facing message.
bool BerkeleyDatabase::Verify(bilingual_str& errorStr)
{
    fs::path walletDir = env->Directory();
    fs::path file_path = walletDir / m_filename;

    LogPrintf("Using BerkeleyDB version %s\n", BerkeleyDatabaseVersion());
    LogPrintf("Using wallet %s\n", fs::PathToString(file_path));

    if (!env->Open(errorStr)) {
        return false;
    }

    if (fs::exists(file_path))
    {
        // Verify must run before any batch has opened the database.
        assert(m_refcount == 0);
        Db db(env->dbenv.get(), 0);
        const std::string strFile = fs::PathToString(m_filename);
        int result = db.verify(strFile.c_str(), nullptr, nullptr, 0);
        if (result != 0) {
            errorStr = strprintf(_("%s corrupt. Try using the wallet tool bitcoin-wallet to salvage or restoring a backup."), fs::quoted(fs::PathToString(file_path)));
            return false;
        }
    }
    // Also return true if the file does not exist.
    return true;
}
void BerkeleyEnvironment : : CheckpointLSN ( const std : : string & strFile )
{
dbenv - > txn_checkpoint ( 0 , 0 , 0 ) ;
if ( fMockDb )
return ;
dbenv - > lsn_reset ( strFile . c_str ( ) , 0 ) ;
}
2020-06-15 17:59:24 -04:00
BerkeleyDatabase::~BerkeleyDatabase()
{
    // env is null for dummy databases; nothing to unregister then.
    if (env) {
        LOCK(cs_db);
        env->CloseDb(m_filename);
        // CloseDb must have released the handle.
        assert(!m_db);
        size_t erased = env->m_databases.erase(m_filename);
        assert(erased == 1);
        env->m_fileids.erase(fs::PathToString(m_filename));
    }
}
2020-06-15 14:29:29 -04:00
2020-10-12 17:10:10 +07:00
// Open a batch (read/write handle) on `database`. Takes a refcount first so
// environment flushes see the database as in use, then ensures the Db
// handle exists via Open() and caches raw pointers for the batch's lifetime.
BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase& database, const bool read_only, bool fFlushOnCloseIn) : pdb(nullptr), activeTxn(nullptr), m_cursor(nullptr), m_database(database)
{
    database.AddRef();
    database.Open();
    fReadOnly = read_only;
    fFlushOnClose = fFlushOnCloseIn;
    env = database.env.get();
    pdb = database.m_db.get();
    strFile = fs::PathToString(database.m_filename);
}
2020-10-12 17:10:10 +07:00
// Ensure the underlying Db handle is open, opening the environment first if
// needed. Idempotent: if m_db is already set, nothing happens. Throws
// std::runtime_error on any open/configuration failure.
void BerkeleyDatabase::Open()
{
    unsigned int nFlags = DB_THREAD | DB_CREATE;
    {
        LOCK(cs_db);
        bilingual_str open_err;
        if (!env->Open(open_err))
            throw std::runtime_error("BerkeleyDatabase: Failed to open database environment.");

        if (m_db == nullptr) {
            int ret;
            std::unique_ptr<Db> pdb_temp = std::make_unique<Db>(env->dbenv.get(), 0);
            const std::string strFile = fs::PathToString(m_filename);

            bool fMockDb = env->IsMock();
            if (fMockDb) {
                // Mock databases are purely in-memory: no file backing.
                DbMpoolFile* mpf = pdb_temp->get_mpf();
                ret = mpf->set_flags(DB_MPOOL_NOFILE, 1);
                if (ret != 0) {
                    throw std::runtime_error(strprintf("BerkeleyDatabase: Failed to configure for no temp file backing for database %s", strFile));
                }
            }

            // Mock: anonymous file, database name = strFile.
            // On-disk: file = strFile, single logical database "main".
            ret = pdb_temp->open(nullptr,                             // Txn pointer
                                 fMockDb ? nullptr : strFile.c_str(), // Filename
                                 fMockDb ? strFile.c_str() : "main",  // Logical db name
                                 DB_BTREE,                            // Database type
                                 nFlags,                              // Flags
                                 0);
            if (ret != 0) {
                throw std::runtime_error(strprintf("BerkeleyDatabase: Error %d, can't open database %s", ret, strFile));
            }

            // Call CheckUniqueFileid on the containing BDB environment to
            // avoid BDB data consistency bugs that happen when different data
            // files in the same environment have the same fileid.
            CheckUniqueFileid(*env, strFile, *pdb_temp, this->env->m_fileids[strFile]);

            // Only publish the handle once everything above succeeded.
            m_db.reset(pdb_temp.release());
        }
    }
}
void BerkeleyBatch : : Flush ( )
{
if ( activeTxn )
return ;
// Flush database activity from memory pool to disk log
unsigned int nMinutes = 0 ;
if ( fReadOnly )
nMinutes = 1 ;
if ( env ) { // env is nullptr for dummy databases (i.e. in tests). Don't actually flush if env is nullptr so we don't segfault
2021-12-02 21:21:05 +01:00
env - > dbenv - > txn_checkpoint ( nMinutes ? m_database . m_max_log_mb * 1024 : 0 , nMinutes , 0 ) ;
2020-06-15 14:29:29 -04:00
}
}
// Record that a write happened; periodic-flush logic compares this counter
// against the last flushed value.
void BerkeleyDatabase::IncrementUpdateCounter()
{
    ++nUpdateCounter;
}
2020-05-10 22:41:34 -04:00
BerkeleyBatch::~BerkeleyBatch()
{
    // Close first (aborts any active txn, closes cursor, optional flush),
    // then drop the refcount taken in the constructor.
    Close();
    m_database.RemoveRef();
}
2020-06-15 14:29:29 -04:00
// Release this batch's handle on the database: abort any in-flight
// transaction, close the cursor and optionally flush. Idempotent.
void BerkeleyBatch::Close()
{
    if (!pdb)
        return;
    // An uncommitted transaction is rolled back, never committed implicitly.
    if (activeTxn)
        activeTxn->abort();
    activeTxn = nullptr;
    pdb = nullptr;
    CloseCursor();

    if (fFlushOnClose)
        Flush();
}
2022-03-03 14:40:18 -05:00
void BerkeleyEnvironment : : CloseDb ( const fs : : path & filename )
2020-06-15 14:29:29 -04:00
{
{
LOCK ( cs_db ) ;
2022-03-03 14:40:18 -05:00
auto it = m_databases . find ( filename ) ;
2020-06-15 14:29:29 -04:00
assert ( it ! = m_databases . end ( ) ) ;
BerkeleyDatabase & database = it - > second . get ( ) ;
if ( database . m_db ) {
// Close the database handle
database . m_db - > close ( 0 ) ;
database . m_db . reset ( ) ;
}
}
}
// Tear the whole environment down and open it again. Blocks until no
// database in the environment has live batch references.
void BerkeleyEnvironment::ReloadDbEnv()
{
    // Make sure that no Db's are in use
    AssertLockNotHeld(cs_db);
    std::unique_lock<RecursiveMutex> lock(cs_db);
    m_db_in_use.wait(lock, [this](){
        for (auto& db : m_databases) {
            if (db.second.get().m_refcount > 0) return false;
        }
        return true;
    });

    // Snapshot the filenames first; CloseDb takes cs_db itself.
    std::vector<fs::path> filenames;
    for (auto it : m_databases) {
        filenames.push_back(it.first);
    }
    // Close the individual Db's
    for (const fs::path& filename : filenames) {
        CloseDb(filename);
    }
    // Reset the environment
    Flush(true); // This will flush and close the environment
    Reset();
    bilingual_str open_err;
    Open(open_err);
}
2020-06-15 15:31:02 -04:00
// Rewrite the wallet data file by copying every record into a fresh
// "<file>.rewrite" database, then atomically replacing the original. Skips
// records whose key starts with pszSkip (if given) and refreshes the stored
// "version" record. Waits (polling every 100ms) until no batch holds a
// reference. Returns true on success.
bool BerkeleyDatabase::Rewrite(const char* pszSkip)
{
    while (true) {
        {
            LOCK(cs_db);
            const std::string strFile = fs::PathToString(m_filename);
            if (m_refcount <= 0) {
                // Flush log data to the dat file
                env->CloseDb(m_filename);
                env->CheckpointLSN(strFile);
                // Mark as flushed/not-in-use while we rewrite.
                m_refcount = -1;

                bool fSuccess = true;
                LogPrintf("BerkeleyBatch::Rewrite: Rewriting %s...\n", strFile);
                std::string strFileRes = strFile + ".rewrite";
                { // surround usage of db with extra {}
                    BerkeleyBatch db(*this, true);
                    std::unique_ptr<Db> pdbCopy = std::make_unique<Db>(env->dbenv.get(), 0);

                    int ret = pdbCopy->open(nullptr,            // Txn pointer
                                            strFileRes.c_str(), // Filename
                                            "main",             // Logical db name
                                            DB_BTREE,           // Database type
                                            DB_CREATE,          // Flags
                                            0);
                    if (ret > 0) {
                        LogPrintf("BerkeleyBatch::Rewrite: Can't create database file %s\n", strFileRes);
                        fSuccess = false;
                    }

                    // Copy every record from the old database to the new one.
                    if (db.StartCursor()) {
                        while (fSuccess) {
                            CDataStream ssKey(SER_DISK, CLIENT_VERSION);
                            CDataStream ssValue(SER_DISK, CLIENT_VERSION);
                            bool complete;
                            bool ret1 = db.ReadAtCursor(ssKey, ssValue, complete);
                            if (complete) {
                                break;
                            } else if (!ret1) {
                                fSuccess = false;
                                break;
                            }
                            // Optionally drop records with the given key prefix.
                            if (pszSkip &&
                                strncmp((const char*)ssKey.data(), pszSkip, std::min(ssKey.size(), strlen(pszSkip))) == 0)
                                continue;
                            if (strncmp((const char*)ssKey.data(), "\x07version", 8) == 0) {
                                // Update version:
                                ssValue.clear();
                                ssValue << CLIENT_VERSION;
                            }
                            Dbt datKey(ssKey.data(), ssKey.size());
                            Dbt datValue(ssValue.data(), ssValue.size());
                            int ret2 = pdbCopy->put(nullptr, &datKey, &datValue, DB_NOOVERWRITE);
                            if (ret2 > 0)
                                fSuccess = false;
                        }
                        db.CloseCursor();
                    }
                    if (fSuccess) {
                        db.Close();
                        env->CloseDb(m_filename);
                        if (pdbCopy->close(0))
                            fSuccess = false;
                    } else {
                        pdbCopy->close(0);
                    }
                }
                if (fSuccess) {
                    // Swap the rewritten file into place via BDB remove+rename.
                    Db dbA(env->dbenv.get(), 0);
                    if (dbA.remove(strFile.c_str(), nullptr, 0))
                        fSuccess = false;
                    Db dbB(env->dbenv.get(), 0);
                    if (dbB.rename(strFileRes.c_str(), nullptr, strFile.c_str(), 0))
                        fSuccess = false;
                }
                if (!fSuccess)
                    LogPrintf("BerkeleyBatch::Rewrite: Failed to rewrite database file %s\n", strFileRes);
                return fSuccess;
            }
        }
        UninterruptibleSleep(std::chrono::milliseconds{100});
    }
}
// Flush log data to the data files for every database that is not currently
// referenced by a batch. On shutdown, if no database was in use, also
// archive the logs, close the environment and delete the "database" log dir.
void BerkeleyEnvironment::Flush(bool fShutdown)
{
    int64_t nStart = GetTimeMillis();
    // Flush log data to the actual data file on all files that are not in use
    LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: [%s] Flush(%s)%s\n", strPath, fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started");
    if (!fDbEnvInit)
        return;
    {
        LOCK(cs_db);
        bool no_dbs_accessed = true;
        for (auto& db_it : m_databases) {
            const fs::path& filename = db_it.first;
            int nRefCount = db_it.second.get().m_refcount;
            // Negative refcount means already flushed; skip.
            if (nRefCount < 0) continue;
            const std::string strFile = fs::PathToString(filename);
            LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: Flushing %s (refcount = %d)...\n", strFile, nRefCount);
            if (nRefCount == 0) {
                // Move log data to the dat file
                CloseDb(filename);
                LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s checkpoint\n", strFile);
                dbenv->txn_checkpoint(0, 0, 0);
                LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s detach\n", strFile);
                if (!fMockDb)
                    dbenv->lsn_reset(strFile.c_str(), 0);
                LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s closed\n", strFile);
                // NOTE(review): this assigns the local copy only; the
                // database's m_refcount member is not updated here — confirm
                // whether that is intended.
                nRefCount = -1;
            } else {
                no_dbs_accessed = false;
            }
        }
        LogPrint(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: Flush(%s)%s took %15dms\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started", GetTimeMillis() - nStart);
        if (fShutdown) {
            char** listp;
            if (no_dbs_accessed) {
                dbenv->log_archive(&listp, DB_ARCH_REMOVE);
                Close();
                if (!fMockDb) {
                    fs::remove_all(fs::PathFromString(strPath) / "database");
                }
            }
        }
    }
}
2020-06-15 14:39:26 -04:00
// Best-effort background flush of this database. Returns true only when a
// flush actually happened; returns false without blocking if the lock is
// contended, any database is in use, or nothing was written since the last
// flush.
bool BerkeleyDatabase::PeriodicFlush()
{
    // Don't flush if we can't acquire the lock.
    TRY_LOCK(cs_db, lockDb);
    if (!lockDb) return false;

    // Don't flush if any databases are in use
    for (auto& it : env->m_databases) {
        if (it.second.get().m_refcount > 0) return false;
    }

    // Don't flush if there haven't been any batch writes for this database.
    if (m_refcount < 0) return false;

    const std::string strFile = fs::PathToString(m_filename);
    LogPrint(BCLog::WALLETDB, "Flushing %s\n", strFile);
    int64_t nStart = GetTimeMillis();

    // Flush wallet file so it's self contained
    env->CloseDb(m_filename);
    env->CheckpointLSN(strFile);
    // Mark as flushed until the next batch opens it.
    m_refcount = -1;

    LogPrint(BCLog::WALLETDB, "Flushed %s %dms\n", strFile, GetTimeMillis() - nStart);
    return true;
}
// Copy the wallet data file to strDest (a file path, or a directory to copy
// into). Waits (polling every 100ms) until no batch holds a reference, then
// flushes and copies. Refuses to copy the file onto itself.
bool BerkeleyDatabase::Backup(const std::string& strDest) const
{
    const std::string strFile = fs::PathToString(m_filename);
    while (true)
    {
        {
            LOCK(cs_db);
            if (m_refcount <= 0)
            {
                // Flush log data to the dat file
                env->CloseDb(m_filename);
                env->CheckpointLSN(strFile);

                // Copy wallet file
                fs::path pathSrc = env->Directory() / m_filename;
                fs::path pathDest(fs::PathFromString(strDest));
                if (fs::is_directory(pathDest))
                    pathDest /= m_filename;

                try {
                    // Backing up onto the source file would truncate it.
                    if (fs::exists(pathDest) && fs::equivalent(pathSrc, pathDest)) {
                        LogPrintf("cannot backup to wallet source file %s\n", fs::PathToString(pathDest));
                        return false;
                    }

                    fs::copy_file(pathSrc, pathDest, fs::copy_options::overwrite_existing);
                    LogPrintf("copied %s to %s\n", strFile, fs::PathToString(pathDest));
                    return true;
                } catch (const fs::filesystem_error& e) {
                    LogPrintf("error copying %s to %s - %s\n", strFile, fs::PathToString(pathDest), fsbridge::get_filesystem_error_message(e));
                    return false;
                }
            }
        }
        UninterruptibleSleep(std::chrono::milliseconds{100});
    }
}
2020-06-15 17:59:24 -04:00
// Flush this database's environment without closing it.
void BerkeleyDatabase::Flush()
{
    env->Flush(false);
}

// Flush and shut down this database's environment.
void BerkeleyDatabase::Close()
{
    env->Flush(true);
}

// Close and reopen the whole environment this database lives in.
void BerkeleyDatabase::ReloadDbEnv()
{
    env->ReloadDbEnv();
}
2020-05-14 21:17:01 -04:00
bool BerkeleyBatch : : StartCursor ( )
2020-06-15 14:29:29 -04:00
{
2020-05-14 21:17:01 -04:00
assert ( ! m_cursor ) ;
2020-06-15 14:29:29 -04:00
if ( ! pdb )
2020-05-14 21:17:01 -04:00
return false ;
int ret = pdb - > cursor ( nullptr , & m_cursor , 0 ) ;
return ret = = 0 ;
2020-06-15 14:29:29 -04:00
}
2020-05-14 21:17:01 -04:00
// Read the next record at the cursor into ssKey/ssValue. Sets `complete` to
// true (and returns false) when the end of the database is reached; returns
// false on any other error.
bool BerkeleyBatch::ReadAtCursor(CDataStream& ssKey, CDataStream& ssValue, bool& complete)
{
    complete = false;
    if (m_cursor == nullptr) return false;
    // Read at cursor
    SafeDbt datKey;
    SafeDbt datValue;
    int ret = m_cursor->get(datKey, datValue, DB_NEXT);
    if (ret == DB_NOTFOUND) {
        complete = true;
    }
    if (ret != 0)
        return false;
    else if (datKey.get_data() == nullptr || datValue.get_data() == nullptr)
        return false;

    // Convert to streams
    ssKey.SetType(SER_DISK);
    ssKey.clear();
    ssKey.write({AsBytePtr(datKey.get_data()), datKey.get_size()});
    ssValue.SetType(SER_DISK);
    ssValue.clear();
    ssValue.write({AsBytePtr(datValue.get_data()), datValue.get_size()});
    return true;
}
void BerkeleyBatch : : CloseCursor ( )
{
if ( ! m_cursor ) return ;
m_cursor - > close ( ) ;
m_cursor = nullptr ;
2020-06-15 14:29:29 -04:00
}
bool BerkeleyBatch : : TxnBegin ( )
{
if ( ! pdb | | activeTxn )
return false ;
DbTxn * ptxn = env - > TxnBegin ( ) ;
if ( ! ptxn )
return false ;
activeTxn = ptxn ;
return true ;
}
bool BerkeleyBatch : : TxnCommit ( )
{
if ( ! pdb | | ! activeTxn )
return false ;
int ret = activeTxn - > commit ( 0 ) ;
activeTxn = nullptr ;
return ( ret = = 0 ) ;
}
bool BerkeleyBatch : : TxnAbort ( )
{
if ( ! pdb | | ! activeTxn )
return false ;
int ret = activeTxn - > abort ( ) ;
activeTxn = nullptr ;
return ( ret = = 0 ) ;
}
2021-01-17 17:21:21 +01:00
bool BerkeleyDatabaseSanityCheck ( )
{
int major , minor ;
DbEnv : : version ( & major , & minor , nullptr ) ;
/* If the major version differs, or the minor version of library is *older*
* than the header that was compiled against , flag an error .
*/
if ( major ! = DB_VERSION_MAJOR | | minor < DB_VERSION_MINOR ) {
LogPrintf ( " BerkeleyDB database version conflict: header version is %d.%d, library version is %d.%d \n " ,
DB_VERSION_MAJOR , DB_VERSION_MINOR , major , minor ) ;
return false ;
}
return true ;
}
2020-06-15 14:29:29 -04:00
// Human-readable version string of the linked BDB library (for logging).
std::string BerkeleyDatabaseVersion()
{
    return DbEnv::version(nullptr, nullptr, nullptr);
}
2020-06-15 15:42:53 -04:00
2020-06-20 08:55:07 -04:00
bool BerkeleyBatch : : ReadKey ( CDataStream & & key , CDataStream & value )
2020-06-15 15:42:53 -04:00
{
if ( ! pdb )
return false ;
SafeDbt datKey ( key . data ( ) , key . size ( ) ) ;
SafeDbt datValue ;
int ret = pdb - > get ( activeTxn , datKey , datValue , 0 ) ;
if ( ret = = 0 & & datValue . get_data ( ) ! = nullptr ) {
2022-04-26 09:41:00 +01:00
value . write ( { AsBytePtr ( datValue . get_data ( ) ) , datValue . get_size ( ) } ) ;
2020-06-15 15:42:53 -04:00
return true ;
}
return false ;
}
2020-06-20 08:55:07 -04:00
bool BerkeleyBatch : : WriteKey ( CDataStream & & key , CDataStream & & value , bool overwrite )
2020-06-15 15:42:53 -04:00
{
if ( ! pdb )
2020-06-18 11:28:39 -04:00
return false ;
2020-06-15 15:42:53 -04:00
if ( fReadOnly )
assert ( ! " Write called on database in read-only mode " ) ;
SafeDbt datKey ( key . data ( ) , key . size ( ) ) ;
SafeDbt datValue ( value . data ( ) , value . size ( ) ) ;
int ret = pdb - > put ( activeTxn , datKey , datValue , ( overwrite ? 0 : DB_NOOVERWRITE ) ) ;
return ( ret = = 0 ) ;
}
2020-06-20 08:55:07 -04:00
bool BerkeleyBatch : : EraseKey ( CDataStream & & key )
2020-06-15 15:42:53 -04:00
{
if ( ! pdb )
return false ;
if ( fReadOnly )
assert ( ! " Erase called on database in read-only mode " ) ;
SafeDbt datKey ( key . data ( ) , key . size ( ) ) ;
int ret = pdb - > del ( activeTxn , datKey , 0 ) ;
return ( ret = = 0 | | ret = = DB_NOTFOUND ) ;
}
2020-06-20 08:55:07 -04:00
bool BerkeleyBatch : : HasKey ( CDataStream & & key )
2020-06-15 15:42:53 -04:00
{
if ( ! pdb )
return false ;
SafeDbt datKey ( key . data ( ) , key . size ( ) ) ;
int ret = pdb - > exists ( activeTxn , datKey , 0 ) ;
return ret = = 0 ;
}
2020-06-15 16:54:58 -04:00
2020-06-19 20:51:07 -04:00
void BerkeleyDatabase : : AddRef ( )
{
LOCK ( cs_db ) ;
2020-05-13 14:09:26 -04:00
if ( m_refcount < 0 ) {
m_refcount = 1 ;
} else {
m_refcount + + ;
}
2020-06-19 20:51:07 -04:00
}
void BerkeleyDatabase : : RemoveRef ( )
{
2020-05-13 14:09:26 -04:00
LOCK ( cs_db ) ;
m_refcount - - ;
2020-05-10 22:41:34 -04:00
if ( env ) env - > m_db_in_use . notify_all ( ) ;
2020-06-19 20:51:07 -04:00
}
2020-10-12 17:10:10 +07:00
// Create a read-write batch handle on this database (read_only=false).
std::unique_ptr<DatabaseBatch> BerkeleyDatabase::MakeBatch(bool flush_on_close)
{
    return std::make_unique<BerkeleyBatch>(*this, false, flush_on_close);
}
2020-08-04 16:40:31 -04:00
// Create (and optionally verify) a BerkeleyDatabase for the wallet at
// `path`. Refuses to load a data file that is already registered in its
// environment. On failure returns nullptr and sets `status`/`error`.
std::unique_ptr<BerkeleyDatabase> MakeBerkeleyDatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error)
{
    fs::path data_file = BDBDataFile(path);
    std::unique_ptr<BerkeleyDatabase> db;
    {
        LOCK(cs_db); // Lock env.m_databases until insert in BerkeleyDatabase constructor
        fs::path data_filename = data_file.filename();
        std::shared_ptr<BerkeleyEnvironment> env = GetBerkeleyEnv(data_file.parent_path(), options.use_shared_memory);
        if (env->m_databases.count(data_filename)) {
            error = Untranslated(strprintf("Refusing to load database. Data file '%s' is already loaded.", fs::PathToString(env->Directory() / data_filename)));
            status = DatabaseStatus::FAILED_ALREADY_LOADED;
            return nullptr;
        }
        db = std::make_unique<BerkeleyDatabase>(std::move(env), std::move(data_filename), options);
    }
    // Verification must happen outside cs_db (it opens the environment).
    if (options.verify && !db->Verify(error)) {
        status = DatabaseStatus::FAILED_VERIFY;
        return nullptr;
    }
    status = DatabaseStatus::SUCCESS;
    return db;
}
2021-11-12 11:13:29 -05:00
} // namespace wallet