From 416dcacb332297ac9a832cdad6bfa1e6739a52f7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 12 Oct 2016 22:18:07 +0100 Subject: [PATCH] Fix testbackupstorepatch for BackupFileSystem. * Make testbackupstorepatch do housekeeping in-process to make debugging easier. * Check return value of housekeeping to ensure that no errors were detected. * Initialise and update file size in blocks in patch-combining transactions. * Take locks in testbackupstorepatch to hopefully prevent race conditions leading to store corruption and unreliable tests when using housekeeping out-of-process. * Eliminate some copy-pasted duplicate code in BackupFileSystem and in housekeeping. * Delay RemoveASAP deletions to avoid false positive mismatches of block and reference counts being reported as errors during housekeeping. --- lib/backupstore/BackupFileSystem.cpp | 70 +++-- lib/backupstore/BackupFileSystem.h | 2 + lib/backupstore/HousekeepStoreAccount.cpp | 204 +++++++------- lib/backupstore/HousekeepStoreAccount.h | 3 + .../backupstorepatch/testbackupstorepatch.cpp | 265 ++++++++++++++---- 5 files changed, 354 insertions(+), 190 deletions(-) diff --git a/lib/backupstore/BackupFileSystem.cpp b/lib/backupstore/BackupFileSystem.cpp index 04cba3748..68416d40c 100644 --- a/lib/backupstore/BackupFileSystem.cpp +++ b/lib/backupstore/BackupFileSystem.cpp @@ -95,8 +95,8 @@ BackupStoreInfo& BackupFileSystem::GetBackupStoreInfo(bool ReadOnly, bool Refres void BackupFileSystem::RefCountDatabaseBeforeCommit(BackupStoreRefCountDatabase& refcount_db) { ASSERT(&refcount_db == mapTemporaryRefCountDatabase.get()); - // This is the temporary database, so it is about to be commithas already been - // committed, so we need to close the permanent database first. + // This is the temporary database, so it is about to be committed and become the permanent + // database, so we need to close the current permanent database (if any) first. 
mapPermanentRefCountDatabase.reset(); } @@ -436,11 +436,16 @@ class RaidPutFileCompleteTransaction : public BackupFileSystem::Transaction : mStoreFile(StoreDiscSet, filename, refcount), mFileName(filename), mDiscSet(StoreDiscSet), - mCommitted(false) + mCommitted(false), + mNumBlocks(-1) { } ~RaidPutFileCompleteTransaction(); virtual void Commit(); - virtual int64_t GetNumBlocks() { return mNumBlocks; } + virtual int64_t GetNumBlocks() + { + ASSERT(mNumBlocks != -1); + return mNumBlocks; + } RaidFileWrite& GetRaidFile() { return mStoreFile; } // It doesn't matter what we return here, because this should never be called @@ -762,20 +767,21 @@ void RaidBackupFileSystem::DeleteObjectUnknown(int64_t ObjectID) std::auto_ptr -RaidBackupFileSystem::CombineFile(int64_t OlderPatchID, int64_t NewerFileID) +RaidBackupFileSystem::CombineFileOrDiff(int64_t OlderPatchID, int64_t NewerObjectID, bool NewerIsPatch) { // This normally only happens during housekeeping, which is using a temporary // refcount database, so insist on that for now. 
BackupStoreRefCountDatabase* pRefCount = mapTemporaryRefCountDatabase.get(); ASSERT(pRefCount != NULL); - ASSERT(mapPermanentRefCountDatabase.get() == NULL); + ASSERT(mapPermanentRefCountDatabase.get() == NULL || + mapPermanentRefCountDatabase->IsReadOnly()); // Open the older object twice (the patch) std::auto_ptr pdiff = GetFile(OlderPatchID); std::auto_ptr pdiff2 = GetFile(OlderPatchID); // Open the newer object (the file to be deleted) - std::auto_ptr pobjectBeingDeleted = GetFile(NewerFileID); + std::auto_ptr pobjectBeingDeleted = GetFile(NewerObjectID); // And open a write file to overwrite the older object (the patch) std::string older_filename = GetObjectFileName(OlderPatchID, @@ -788,8 +794,19 @@ RaidBackupFileSystem::CombineFile(int64_t OlderPatchID, int64_t NewerFileID) RaidFileWrite& overwrite_older(ap_overwrite_older->GetRaidFile()); overwrite_older.Open(true /* allow overwriting */); - // There exists an older version which depends on this one. Need to combine the two over that one. - BackupStoreFile::CombineFile(*pdiff, *pdiff2, *pobjectBeingDeleted, overwrite_older); + if(NewerIsPatch) + { + // Combine two adjacent patches (reverse diffs) into a single object. + BackupStoreFile::CombineDiffs(*pobjectBeingDeleted, *pdiff, *pdiff2, overwrite_older); + } + else + { + // Combine an older patch (reverse diff) with the subsequent complete file. + BackupStoreFile::CombineFile(*pdiff, *pdiff2, *pobjectBeingDeleted, overwrite_older); + } + + // Need to do this before committing the RaidFile, can't do it after. + ap_overwrite_older->mNumBlocks = overwrite_older.GetDiscUsageInBlocks(); // The file will be committed later when the directory is safely commited. 
return static_cast >(ap_overwrite_older); @@ -797,37 +814,16 @@ RaidBackupFileSystem::CombineFile(int64_t OlderPatchID, int64_t NewerFileID) std::auto_ptr -RaidBackupFileSystem::CombineDiffs(int64_t OlderPatchID, int64_t NewerPatchID) +RaidBackupFileSystem::CombineFile(int64_t OlderPatchID, int64_t NewerFileID) { - // This normally only happens during housekeeping, which is using a temporary - // refcount database, so insist on that for now. - BackupStoreRefCountDatabase* pRefCount = mapTemporaryRefCountDatabase.get(); - ASSERT(pRefCount != NULL); - ASSERT(mapPermanentRefCountDatabase.get() == NULL); - - // Open the older object twice (the patch) - std::auto_ptr pdiff = GetFile(OlderPatchID); - std::auto_ptr pdiff2 = GetFile(OlderPatchID); - - // Open the newer object (the file to be deleted) - std::auto_ptr pobjectBeingDeleted = GetFile(NewerPatchID); - - // And open a write file to overwrite the older object (the patch) - std::string older_filename = GetObjectFileName(OlderPatchID, - false); // no need to make sure the directory it's in exists. - - std::auto_ptr - ap_overwrite_older(new RaidPutFileCompleteTransaction( - mStoreDiscSet, older_filename, - pRefCount->GetRefCount(OlderPatchID))); - RaidFileWrite& overwrite_older(ap_overwrite_older->GetRaidFile()); - overwrite_older.Open(true /* allow overwriting */); + return CombineFileOrDiff(OlderPatchID, NewerFileID, false); // !NewerIsPatch +} - // This entry is in the middle of a chain, and two patches need combining. - BackupStoreFile::CombineDiffs(*pobjectBeingDeleted, *pdiff, *pdiff2, overwrite_older); - // The file will be committed later when the directory is safely commited. 
- return static_cast >(ap_overwrite_older); +std::auto_ptr +RaidBackupFileSystem::CombineDiffs(int64_t OlderPatchID, int64_t NewerPatchID) +{ + return CombineFileOrDiff(OlderPatchID, NewerPatchID, true); // NewerIsPatch } diff --git a/lib/backupstore/BackupFileSystem.h b/lib/backupstore/BackupFileSystem.h index 531db2c2c..6397629fd 100644 --- a/lib/backupstore/BackupFileSystem.h +++ b/lib/backupstore/BackupFileSystem.h @@ -280,6 +280,8 @@ class RaidBackupFileSystem : public BackupFileSystem protected: virtual std::auto_ptr GetBackupStoreInfoInternal(bool ReadOnly); + std::auto_ptr + CombineFileOrDiff(int64_t OlderPatchID, int64_t NewerObjectID, bool NewerIsPatch); private: void CheckObjectsScanDir(int64_t StartID, int Level, const std::string &rDirName, diff --git a/lib/backupstore/HousekeepStoreAccount.cpp b/lib/backupstore/HousekeepStoreAccount.cpp index e35ace692..4984aa901 100644 --- a/lib/backupstore/HousekeepStoreAccount.cpp +++ b/lib/backupstore/HousekeepStoreAccount.cpp @@ -158,7 +158,7 @@ bool HousekeepStoreAccount::DoHousekeeping(bool KeepTryingForever) mpNewRefs = &mrFileSystem.GetTemporaryRefCountDatabase(); // Scan the directory for potential things to delete - // This will also remove eligible items marked with RemoveASAP + // This will also find and enqueue eligible items marked with RemoveASAP bool continueHousekeeping = ScanDirectory(BACKUPSTORE_ROOT_DIRECTORY_ID, *pInfo); if(!continueHousekeeping) @@ -177,19 +177,12 @@ bool HousekeepStoreAccount::DoHousekeeping(bool KeepTryingForever) "directory scan was interrupted"); } - // If housekeeping made any changes, such as deleting RemoveASAP files, - // the differences in block counts will be recorded in the deltas. - pInfo->ChangeBlocksUsed(mBlocksUsedDelta); - pInfo->ChangeBlocksInOldFiles(mBlocksInOldFilesDelta); - pInfo->ChangeBlocksInDeletedFiles(mBlocksInDeletedFilesDelta); - - // Reset the delta counts for files, as they will include - // RemoveASAP flagged files deleted during the initial scan. 
- // keep removeASAPBlocksUsedDelta for reporting - int64_t removeASAPBlocksUsedDelta = mBlocksUsedDelta; - mBlocksUsedDelta = 0; - mBlocksInOldFilesDelta = 0; - mBlocksInDeletedFilesDelta = 0; + if(!continueHousekeeping) + { + // Report any UNexpected changes, and consider them to be errors. + // Do this before applying the expected changes below. + mErrorCount += pInfo->ReportChangesTo(*apOldInfo); + } // If scan directory stopped for some reason, probably parent // instructed to terminate, stop now. @@ -207,10 +200,6 @@ bool HousekeepStoreAccount::DoHousekeeping(bool KeepTryingForever) return false; } - // Report any UNexpected changes, and consider them to be errors. - // Do this before applying the expected changes below. - mErrorCount += pInfo->ReportChangesTo(*apOldInfo); - // Try to load the old reference count database and check whether // any counts have changed. We want to compare the mpNewRefs to // apOldRefs before we delete any files, because that will also change @@ -243,11 +232,9 @@ bool HousekeepStoreAccount::DoHousekeeping(bool KeepTryingForever) // Log deletion if anything was deleted if(mFilesDeleted > 0 || mEmptyDirectoriesDeleted > 0) { - BOX_INFO("Housekeeping on account " << - mrFileSystem.GetAccountIdentifier() << " removed " << - (0 - (mBlocksUsedDelta + removeASAPBlocksUsedDelta)) << - " blocks (" << mFilesDeleted << " files, " << - mEmptyDirectoriesDeleted << " dirs)" << + BOX_INFO("Housekeeping on account " << mrFileSystem.GetAccountIdentifier() << " " + "removed " << -mBlocksUsedDelta << " blocks (" << mFilesDeleted << " " + "files, " << mEmptyDirectoriesDeleted << " dirs)" << (deleteInterrupted?" and was interrupted":"")); } @@ -358,47 +345,49 @@ bool HousekeepStoreAccount::ScanDirectory(int64_t ObjectID, // BLOCK { - // Remove any files which are marked for removal as soon - // as they become old or deleted. 
- bool deletedSomething = false; - do + // Add to mDefiniteDeletions any files which are marked for removal as soon as + // they become old or deleted. + + // Iterate through the directory + BackupStoreDirectory::Iterator i(dir); + BackupStoreDirectory::Entry *en = 0; + while((en = i.Next(BackupStoreDirectory::Entry::Flags_File)) != 0) { - // Iterate through the directory - deletedSomething = false; - BackupStoreDirectory::Iterator i(dir); - BackupStoreDirectory::Entry *en = 0; - while((en = i.Next(BackupStoreDirectory::Entry::Flags_File)) != 0) + int16_t enFlags = en->GetFlags(); + if((enFlags & BackupStoreDirectory::Entry::Flags_RemoveASAP) != 0 + && (en->IsDeleted() || en->IsOld())) { - int16_t enFlags = en->GetFlags(); - if((enFlags & BackupStoreDirectory::Entry::Flags_RemoveASAP) != 0 - && (en->IsDeleted() || en->IsOld())) + if(!mrFileSystem.CanMergePatches() && + en->GetDependsNewer() != 0) { - if(!mrFileSystem.CanMergePatches() && - en->GetDependsNewer() != 0) - { - BOX_ERROR("Cannot delete file " << - BOX_FORMAT_OBJECTID(en->GetObjectID()) << - " flagged as RemoveASAP because " - "another file depends on it (" << - BOX_FORMAT_OBJECTID(en->GetDependsNewer()) << - " and the filesystem does not " - "support merging patches"); - continue; - } - - // Delete this immediately. - DeleteFile(ObjectID, en->GetObjectID(), dir, - rBackupStoreInfo); + BOX_ERROR("Cannot delete file " << + BOX_FORMAT_OBJECTID(en->GetObjectID()) << + " flagged as RemoveASAP because " + "another file depends on it (" << + BOX_FORMAT_OBJECTID(en->GetDependsNewer()) << + " and the filesystem does not " + "support merging patches"); + continue; + } - // flag as having done something - deletedSomething = true; + mDefiniteDeletions.push_back( + std::pair(en->GetObjectID(), + ObjectID)); // of the directory - // Must start the loop from the beginning again, as iterator is now - // probably invalid. 
- break; + // Because we are definitely deleting this file, we don't need + // housekeeping to delete potential files to free up the space + // that it occupies, so reduce the deletion target by this file's + // size. + if(mDeletionSizeTarget > 0) + { + mDeletionSizeTarget -= en->GetSizeInBlocks(); + if(mDeletionSizeTarget < 0) + { + mDeletionSizeTarget = 0; + } } } - } while(deletedSomething); + } } // BLOCK @@ -417,7 +406,6 @@ bool HousekeepStoreAccount::ScanDirectory(int64_t ObjectID, while((en = i.Next(BackupStoreDirectory::Entry::Flags_File)) != 0) { // Update recalculated usage sizes - int16_t enFlags = en->GetFlags(); int64_t enSizeInBlocks = en->GetSizeInBlocks(); mBlocksUsed += enSizeInBlocks; if(en->IsOld()) mBlocksInOldFiles += enSizeInBlocks; @@ -440,19 +428,19 @@ bool HousekeepStoreAccount::ScanDirectory(int64_t ObjectID, } // enVersionAge is now the age of this version. - // Potentially add it to the list if it's deleted, if it's an old version or deleted + // Add it to the list of potential files to remove, if it's an old version + // or deleted: if(en->IsOld() || en->IsDeleted()) { if(!mrFileSystem.CanMergePatches() && en->GetDependsNewer() != 0) { - BOX_TRACE("Cannot delete file " << + BOX_TRACE("Cannot remove old/deleted file " << BOX_FORMAT_OBJECTID(en->GetObjectID()) << - " flagged as RemoveASAP because " - "another file depends on it (" << + " now, because another file depends on it (" << BOX_FORMAT_OBJECTID(en->GetDependsNewer()) << - " and the filesystem does not " - "support merging patches"); + " and the filesystem does not support merging " + "patches"); continue; } @@ -602,7 +590,19 @@ bool HousekeepStoreAccount::DelEnCompare::operator()(const HousekeepStoreAccount // -------------------------------------------------------------------------- bool HousekeepStoreAccount::DeleteFiles(BackupStoreInfo& rBackupStoreInfo) { - // Only delete files if the deletion target is greater than zero + // Delete all the definite deletions first, because we 
promised that we would, and because + // the deletion target might only be zero because we are definitely deleting enough files + // to free up all required space. So if we didn't delete them, the store would remain over + // its target size. + for(std::vector >::iterator i = mDefiniteDeletions.begin(); + i != mDefiniteDeletions.end(); i++) + { + int64_t FileID = i->first; + int64_t DirID = i->second; + RemoveReferenceAndMaybeDeleteFile(FileID, DirID, "RemoveASAP", rBackupStoreInfo); + } + + // Only delete potentially deletable files if the deletion target is greater than zero // (otherwise we delete one file each time round, which gradually deletes the old versions) if(mDeletionSizeTarget <= 0) { @@ -624,35 +624,14 @@ bool HousekeepStoreAccount::DeleteFiles(BackupStoreInfo& rBackupStoreInfo) // include account ID here as the specified account is now locked mpHousekeepingCallback->CheckForInterProcessMsg(account_id)) { - // Need to abort now + // Need to abort now. Return true to signal that we were interrupted. return true; } } #endif - // Load up the directory it's in - // Get the filename - BackupStoreDirectory dir; - mrFileSystem.GetDirectory(i->mInDirectory, dir); - - // Delete the file - BackupStoreRefCountDatabase::refcount_t refs = - DeleteFile(i->mInDirectory, i->mObjectID, dir, rBackupStoreInfo); - if(refs == 0) - { - BOX_INFO("Housekeeping removed " << - (i->mIsFlagDeleted ? "deleted" : "old") << - " file " << BOX_FORMAT_OBJECTID(i->mObjectID) << - " from dir " << BOX_FORMAT_OBJECTID(i->mInDirectory)); - } - else - { - BOX_TRACE("Housekeeping preserved " << - (i->mIsFlagDeleted ? "deleted" : "old") << - " file " << BOX_FORMAT_OBJECTID(i->mObjectID) << - " in dir " << BOX_FORMAT_OBJECTID(i->mInDirectory) << - " with " << refs << " references"); - } + RemoveReferenceAndMaybeDeleteFile(i->mObjectID, i->mInDirectory, + (i->mIsFlagDeleted ? 
"deleted" : "old"), rBackupStoreInfo); // Stop if the deletion target has been matched or exceeded // (checking here rather than at the beginning will tend to reduce the @@ -667,6 +646,30 @@ bool HousekeepStoreAccount::DeleteFiles(BackupStoreInfo& rBackupStoreInfo) return false; } +void HousekeepStoreAccount::RemoveReferenceAndMaybeDeleteFile(int64_t FileID, int64_t DirID, + const std::string& reason, BackupStoreInfo& rBackupStoreInfo) +{ + // Load up the directory it's in + // Get the filename + BackupStoreDirectory dir; + mrFileSystem.GetDirectory(DirID, dir); + + // Delete the file + BackupStoreRefCountDatabase::refcount_t refs = + DeleteFile(DirID, FileID, dir, rBackupStoreInfo); + if(refs == 0) + { + BOX_INFO("Housekeeping removed " << reason << " file " << + BOX_FORMAT_OBJECTID(FileID) << " from dir " << BOX_FORMAT_OBJECTID(DirID)); + } + else + { + BOX_TRACE("Housekeeping preserved " << reason << " file " << + BOX_FORMAT_OBJECTID(FileID) << " in dir " << BOX_FORMAT_OBJECTID(DirID) << + " with " << refs << " references"); + } +} + // -------------------------------------------------------------------------- // @@ -747,23 +750,8 @@ BackupStoreRefCountDatabase::refcount_t HousekeepStoreAccount::DeleteFile( return refs - 1; } - // If the entry is involved in a chain of patches, it needs to be handled - // a bit more carefully. - if(!mrFileSystem.CanMergePatches()) - { - // In this case, we can only remove objects that nothing - // depends on, i.e. with no DependsNewer. - - if(pentry->GetDependsNewer() != 0) - { - THROW_EXCEPTION_MESSAGE(BackupStoreException, - FileSystemCannotMerge, "Unable to delete file " << - BOX_FORMAT_OBJECTID(ObjectID) << " on which " - "newer object " << - BOX_FORMAT_OBJECTID(pentry->GetDependsNewer()) << - " depends"); - } - } + // If the entry is involved in a chain of patches, it needs to be handled a bit + // more carefully. 
if(pentry->GetDependsNewer() != 0 && pentry->GetDependsOlder() == 0) { @@ -778,6 +766,10 @@ BackupStoreRefCountDatabase::refcount_t HousekeepStoreAccount::DeleteFile( } else if(pentry->GetDependsOlder() != 0) { + // We should have checked whether the BackupFileSystem can merge patches + // before this point: + ASSERT(mrFileSystem.CanMergePatches()); + BackupStoreDirectory::Entry *polder = rDirectory.FindEntryByID(pentry->GetDependsOlder()); if(pentry->GetDependsNewer() == 0) { diff --git a/lib/backupstore/HousekeepStoreAccount.h b/lib/backupstore/HousekeepStoreAccount.h index 64db1cad1..588db1038 100644 --- a/lib/backupstore/HousekeepStoreAccount.h +++ b/lib/backupstore/HousekeepStoreAccount.h @@ -48,6 +48,8 @@ class HousekeepStoreAccount // utility functions bool ScanDirectory(int64_t ObjectID, BackupStoreInfo& rBackupStoreInfo); bool DeleteFiles(BackupStoreInfo& rBackupStoreInfo); + void RemoveReferenceAndMaybeDeleteFile(int64_t FileID, int64_t DirID, + const std::string& reason, BackupStoreInfo& rBackupStoreInfo); bool DeleteEmptyDirectories(BackupStoreInfo& rBackupStoreInfo); void DeleteEmptyDirectory(int64_t dirId, std::vector& rToExamine, BackupStoreInfo& rBackupStoreInfo); @@ -79,6 +81,7 @@ class HousekeepStoreAccount int64_t mDeletionSizeTarget; + std::vector > mDefiniteDeletions; std::set mPotentialDeletions; int64_t mPotentialDeletionsTotalSize; int64_t mMaxSizeInPotentialDeletions; diff --git a/test/backupstorepatch/testbackupstorepatch.cpp b/test/backupstorepatch/testbackupstorepatch.cpp index 1a9e0739d..7aa02058e 100644 --- a/test/backupstorepatch/testbackupstorepatch.cpp +++ b/test/backupstorepatch/testbackupstorepatch.cpp @@ -13,11 +13,13 @@ #include #include -#include "autogen_BackupProtocol.h" +#include "BackupAccountControl.h" #include "BackupClientCryptoKeys.h" #include "BackupClientFileAttributes.h" +#include "BackupProtocol.h" #include "BackupStoreAccountDatabase.h" #include "BackupStoreAccounts.h" +#include "BackupStoreConfigVerify.h" #include 
"BackupStoreConstants.h" #include "BackupStoreDirectory.h" #include "BackupStoreException.h" @@ -39,6 +41,7 @@ #include "Socket.h" #include "SocketStreamTLS.h" #include "StoreStructure.h" +#include "StoreTestUtils.h" #include "TLSContext.h" #include "Test.h" @@ -51,19 +54,20 @@ typedef struct bool IsCompletelyDifferent; bool HasBeenDeleted; int64_t DepNewer, DepOlder; + int64_t CurrentSizeInBlocks; } file_info; file_info test_files[] = { -// ChPnt, Insert, Delete, ID, IsCDf, BeenDel - {0, 0, 0, 0, false, false}, // 0 dummy first entry - {32000, 2087, 0, 0, false, false}, // 1 +// ChPnt, Insert, Delete, ID, IsCDf, BeenDel + {0, 0, 0, 0, false, false}, // 0 dummy first entry + {32000, 2087, 0, 0, false, false}, // 1 {1000, 1998, 2976, 0, false, false}, // 2 - {27800, 0, 288, 0, false, false}, // 3 - {3208, 1087, 98, 0, false, false}, // 4 - {56000, 23087, 98, 0, false, false}, // 5 - {0, 98765, 9999999,0, false, false}, // 6 completely different, make a break in the storage - {9899, 9887, 2, 0, false, false}, // 7 + {27800, 0, 288, 0, false, false}, // 3 + {3208, 1087, 98, 0, false, false}, // 4 - this entry is deleted from middle of patch chain on r=1 + {56000, 23087, 98, 0, false, false}, // 5 + {0, 98765, 9999999,0, false, false}, // 6 completely different, make a break in the storage + {9899, 9887, 2, 0, false, false}, // 7 {12984, 12345, 1234, 0, false, false}, // 8 {1209, 29885, 3498, 0, false, false} // 9 }; @@ -75,6 +79,7 @@ int test_file_remove_order[] = {0, 2, 3, 5, 8, 1, 4, -1}; #define FIRST_FILE_SIZE (64*1024+3) #define BUFFER_SIZE (256*1024) #define SHORT_TIMEOUT 5000 +#define HOUSEKEEPING_IN_PROCESS // Chunk of memory to use for copying files, etc static void *buffer = 0; @@ -171,8 +176,6 @@ bool files_identical(const char *file1, const char *file2) return true; } - - void create_test_files() { // Create first file @@ -337,7 +340,16 @@ int test(int argc, const char *argv[]) } RaidFileDiscSet rfd(rcontroller.GetDiscSet(discSet)); - int pid = 
LaunchServer(BBSTORED " testfiles/bbstored.conf", + std::string errs; + std::auto_ptr config( + Configuration::LoadAndVerify("testfiles/bbstored.conf", + &BackupConfigFileVerify, errs)); + TEST_EQUAL(0, errs.size()); + + BackupStoreAccountControl control(*config, 0x01234567); + BackupFileSystem& filesystem(control.GetFileSystem()); + + int pid = LaunchServer(BBSTORED " -c testfiles/bbstored.conf" + bbstored_args, "testfiles/bbstored.pid"); TEST_THAT(pid != -1 && pid != 0); if(pid > 0) @@ -451,6 +463,21 @@ int test(int argc, const char *argv[]) { TEST_THAT(en->GetDependsNewer() == 0); TEST_THAT(en->GetDependsOlder() == 0); + bool found = false; + + for(int tfi = 0; tfi < NUMBER_FILES; tfi++) + { + if(test_files[tfi].IDOnServer == en->GetObjectID()) + { + found = true; + test_files[tfi].CurrentSizeInBlocks = + en->GetSizeInBlocks(); + break; + } + } + + TEST_LINE(found, "Unexpected file found on server: " << + en->GetObjectID()); } } @@ -475,6 +502,19 @@ int test(int argc, const char *argv[]) test_files[f].DepOlder = older; } +#ifdef HOUSEKEEPING_IN_PROCESS + // Kill store server + TEST_THAT(KillServer(pid)); + TEST_THAT(!ServerIsAlive(pid)); + + #ifndef WIN32 + TestRemoteProcessMemLeaks("bbstored.memleaks"); + #endif +#else + // We need to leave the bbstored process running, so that we can connect to it + // and retrieve directory listings from it. +#endif // HOUSEKEEPING_IN_PROCESS + // Check the stuff on the server int deleteIndex = 0; while(true) @@ -482,18 +522,34 @@ int test(int argc, const char *argv[]) // Load up the root directory BackupStoreDirectory dir; { + // Take a lock before actually reading files from disk, + // to avoid them changing under our feet. + filesystem.TryGetLock(); + std::auto_ptr dirStream(RaidFileRead::Open(0, "backup/01234567/o01")); dir.ReadFromStream(*dirStream, SHORT_TIMEOUT); dir.Dump(0, true); + + // Find the test_files entry for the file that was just deleted: + int just_deleted = deleteIndex == 0 ? 
-1 : test_file_remove_order[deleteIndex - 1]; + file_info* p_just_deleted; + if(just_deleted == 0 || just_deleted == -1) + { + p_just_deleted = NULL; + } + else + { + p_just_deleted = test_files + just_deleted; + } // Check that dependency info is correct - for(unsigned int f = 0; f < NUMBER_FILES; ++f) + for(unsigned int f = 1; f < NUMBER_FILES; ++f) { //TRACE1("t f = %d\n", f); BackupStoreDirectory::Entry *en = dir.FindEntryByID(test_files[f].IDOnServer); if(en == 0) { - TEST_THAT_LINE(test_files[f].HasBeenDeleted, + TEST_LINE(test_files[f].HasBeenDeleted, "Test file " << f << " (id " << BOX_FORMAT_OBJECTID(test_files[f].IDOnServer) << ") was unexpectedly deleted by housekeeping"); @@ -501,7 +557,6 @@ int test(int argc, const char *argv[]) // object was removed by // housekeeping std::string filenameOut; - int startDisc = 0; StoreStructure::MakeObjectFilename( test_files[f].IDOnServer, storeRootDir, discSet, @@ -517,7 +572,10 @@ int test(int argc, const char *argv[]) } else { - TEST_THAT(!test_files[f].HasBeenDeleted); + TEST_LINE(!test_files[f].HasBeenDeleted, + "Test file " << f << " (id " << + BOX_FORMAT_OBJECTID(test_files[f].IDOnServer) << + ") was unexpectedly not deleted by housekeeping"); TEST_THAT(en->GetDependsNewer() == test_files[f].DepNewer); TEST_EQUAL_LINE(test_files[f].DepOlder, en->GetDependsOlder(), "Test file " << f << " (id " << @@ -528,18 +586,70 @@ int test(int argc, const char *argv[]) if(en->GetDependsNewer() == 0) { // Should be a full file - TEST_THAT(en->GetSizeInBlocks() > 40); + TEST_LINE(en->GetSizeInBlocks() > 40, + "Test file " << f << " (id " << + BOX_FORMAT_OBJECTID(test_files[f].IDOnServer) << + ") was smaller than expected: " + "wanted a full file with >40 blocks, " + "but found " << en->GetSizeInBlocks()); } else { // Should be a patch - TEST_THAT(en->GetSizeInBlocks() < 40); + TEST_LINE(en->GetSizeInBlocks() < 40, + "Test file " << f << " (id " << + BOX_FORMAT_OBJECTID(test_files[f].IDOnServer) << + ") was larger than 
expected: " + "wanted a patch file with <40 blocks, " + "but found " << en->GetSizeInBlocks()); } } + + // All the files that we've deleted so far should have had + // HasBeenDeleted set to true. + if(test_files[f].HasBeenDeleted) + { + TEST_LINE(en == NULL, "File " << f << " should have been " + "deleted by this point") + } + else if(en == 0) + { + TEST_FAIL_WITH_MESSAGE("File " << f << " has been unexpectedly " + "deleted, cannot check its size"); + } + // If the file that was just deleted was a patch that this file depended on, + // then it should have been merged with this file, which should have made this + // file larger. But that might not translate to a larger number of blocks. + else if(test_files[just_deleted].DepOlder == test_files[f].IDOnServer) + { + TEST_LINE(en->GetSizeInBlocks() >= test_files[f].CurrentSizeInBlocks, + "File " << f << " has been merged with an older patch, " + "so it should be larger than its previous size of " << + test_files[f].CurrentSizeInBlocks << " blocks, but it is " << + en->GetSizeInBlocks() << " blocks now"); + } + else + { + // This file should not have changed in size. + TEST_EQUAL_LINE(test_files[f].CurrentSizeInBlocks, en->GetSizeInBlocks(), + "File " << f << " unexpectedly changed size"); + } + + if(en != 0) + { + // Update test_files to record new size for next pass: + test_files[f].CurrentSizeInBlocks = en->GetSizeInBlocks(); + } } + + filesystem.ReleaseLock(); } - // Open a connection to the server (need to do this each time, otherwise housekeeping won't delete files) +#ifdef HOUSEKEEPING_IN_PROCESS + BackupProtocolLocal2 protocol(0x01234567, "test", "backup/01234567/", 0, true); +#else + // Open a connection to the server (need to do this each time, otherwise + // housekeeping won't run on Windows, and thus won't delete any files. 
SocketStreamTLS *pConn = new SocketStreamTLS; std::auto_ptr apConn(pConn); pConn->Open(context, Socket::TypeINET, "localhost", @@ -550,11 +660,16 @@ int test(int argc, const char *argv[]) TEST_THAT(serverVersion->GetVersion() == BACKUP_STORE_SERVER_VERSION); protocol.QueryLogin(0x01234567, 0); } +#endif - // Pull all the files down, and check that they match the files on disc + // Pull all the files down, and check that they (still) match the files + // that we uploaded earlier. for(unsigned int f = 0; f < NUMBER_FILES; ++f) { - ::printf("r=%d, f=%d\n", deleteIndex, f); + ::printf("r=%d, f=%d, id=%08llx, blocks=%d, deleted=%s\n", deleteIndex, f, + (long long)test_files[f].IDOnServer, + test_files[f].CurrentSizeInBlocks, + test_files[f].HasBeenDeleted ? "true" : "false"); // Might have been deleted if(test_files[f].HasBeenDeleted) @@ -569,19 +684,29 @@ int test(int argc, const char *argv[]) ::unlink(filename_fetched); // Fetch the file + try { std::auto_ptr getobj(protocol.QueryGetFile( BackupProtocolListDirectory::RootDirectory, test_files[f].IDOnServer)); TEST_THAT(getobj->GetObjectID() == test_files[f].IDOnServer); - // BLOCK - { - // Get stream - std::auto_ptr filestream(protocol.ReceiveStream()); - // Get and decode - BackupStoreFile::DecodeFile(*filestream, filename_fetched, SHORT_TIMEOUT); - } } + catch(ConnectionException &e) + { + TEST_FAIL_WITH_MESSAGE("Failed to get test file " << f << + " (id " << BOX_FORMAT_OBJECTID(test_files[f].IDOnServer) << + ") from server: " << e.what()); + continue; + } + + // BLOCK + { + // Get stream + std::auto_ptr filestream(protocol.ReceiveStream()); + // Get and decode + BackupStoreFile::DecodeFile(*filestream, filename_fetched, SHORT_TIMEOUT); + } + // Test for identicalness TEST_THAT(files_identical(filename_fetched, filename)); @@ -594,9 +719,13 @@ int test(int argc, const char *argv[]) } } - // Close the connection + // Close the connection protocol.QueryFinished(); + // Take a lock before modifying the directory + 
filesystem.GetLock(); + filesystem.GetDirectory(BackupProtocolListDirectory::RootDirectory, dir); + // Mark one of the elements as deleted if(test_file_remove_order[deleteIndex] == -1) { @@ -607,30 +736,45 @@ int test(int argc, const char *argv[]) // Modify the entry BackupStoreDirectory::Entry *pentry = dir.FindEntryByID(test_files[todel].IDOnServer); - TEST_THAT(pentry != 0); + TEST_LINE_OR(pentry != 0, "Cannot delete test file " << todel << " (id " << + BOX_FORMAT_OBJECTID(test_files[todel].IDOnServer) << "): not found on server", + break); + pentry->AddFlags(BackupStoreDirectory::Entry::Flags_RemoveASAP); - // Save it back - { - RaidFileWrite writedir(0, "backup/01234567/o01"); - writedir.Open(true /* overwrite */); - dir.WriteToStream(writedir); - writedir.Commit(true); - } + filesystem.PutDirectory(dir); - // Get the revision number of the root directory, before housekeeping makes any changes. + // Get the revision number of the root directory, before we release + // the lock (and therefore before housekeeping makes any changes). int64_t first_revision = 0; - RaidFileRead::FileExists(0, "backup/01234567/o01", &first_revision); + TEST_THAT(filesystem.ObjectExists(BackupProtocolListDirectory::RootDirectory, + &first_revision)); + filesystem.ReleaseLock(); + +#ifdef HOUSEKEEPING_IN_PROCESS + // Housekeeping wants to open both a temporary and a permanent refcount DB, + // and after committing the temporary one, it becomes the permanent one and + // not ReadOnly, and the BackupFileSystem does not allow opening another + // temporary refcount DB if the permanent one is open for writing (with good + // reason), so we need to close it here so that housekeeping can open it + // again, read-only, on the second and subsequent passes. 
+ if(filesystem.GetCurrentRefCountDatabase() != NULL) + { + filesystem.CloseRefCountDatabase(filesystem.GetCurrentRefCountDatabase()); + } -#ifdef WIN32 + TEST_EQUAL_LINE(0, run_housekeeping(filesystem), + "Housekeeping detected errors in account"); +#else +# ifdef WIN32 // Cannot signal bbstored to do housekeeping now, and we don't need to, as we will // wait up to 32 seconds and detect automatically when it has finished. -#else +# else // Send the server a restart signal, so it does // housekeeping immediately, and wait for it to happen // Wait for old connections to terminate ::sleep(1); ::kill(pid, SIGHUP); -#endif +# endif // WIN32 // Wait for changes to be written back to the root directory. for(int secs_remaining = 32; secs_remaining >= 0; secs_remaining--) @@ -641,19 +785,42 @@ int test(int argc, const char *argv[]) ::fflush(stdout); // Early end? - if(!TestFileExists("testfiles/0_0/backup/01234567/write.lock")) + try { - int64_t revid = 0; - RaidFileRead::FileExists(0, "backup/01234567/o01", &revid); - if(revid != first_revision) + filesystem.TryGetLock(); + int64_t current_revision = 0; + TEST_THAT(filesystem.ObjectExists(BackupProtocolListDirectory::RootDirectory, + ¤t_revision)); + filesystem.ReleaseLock(); + + if(current_revision != first_revision) { + // Root directory has changed, and housekeeping is + // not running right now (as we have a lock), so it + // must have run already. break; } } + catch(BackupStoreException &e) + { + if(EXCEPTION_IS_TYPE(e, BackupStoreException, + CouldNotLockStoreAccount)) + { + // Housekeeping must still be running. Do nothing, + // hopefully after another few seconds it will have + // finished. 
+ } + else + { + // Unexpected exception + throw; + } + } TEST_LINE(secs_remaining != 0, "No changes detected to root directory after 32 seconds"); } ::printf("\n"); +#endif // HOUSEKEEPING_IN_PROCESS // Flag for test test_files[todel].HasBeenDeleted = true; @@ -671,7 +838,10 @@ int test(int argc, const char *argv[]) } if(z < (int)NUMBER_FILES) test_files[z].DepOlder = test_files[todel].DepOlder; } - + +#ifdef HOUSEKEEPING_IN_PROCESS + // We already killed the bbstored process earlier +#else // Kill store server TEST_THAT(KillServer(pid)); TEST_THAT(!ServerIsAlive(pid)); @@ -679,6 +849,7 @@ int test(int argc, const char *argv[]) #ifndef WIN32 TestRemoteProcessMemLeaks("bbstored.memleaks"); #endif +#endif // HOUSEKEEPING_IN_PROCESS } ::free(buffer);