diff --git a/lib/backupstore/BackupCommands.cpp b/lib/backupstore/BackupCommands.cpp
index 07c3a1308..0c14b33a4 100644
--- a/lib/backupstore/BackupCommands.cpp
+++ b/lib/backupstore/BackupCommands.cpp
@@ -67,7 +67,9 @@ std::auto_ptr BackupProtocolReplyable::HandleException(Bo
 	}
 	else if (e.GetType() == BackupStoreException::ExceptionType)
 	{
-		if(e.GetSubType() == BackupStoreException::AddedFileDoesNotVerify)
+		// Slightly broken or really broken, both thrown by VerifyStream:
+		if(e.GetSubType() == BackupStoreException::AddedFileDoesNotVerify ||
+			e.GetSubType() == BackupStoreException::BadBackupStoreFile)
 		{
 			return PROTOCOL_ERROR(Err_FileDoesNotVerify);
 		}
diff --git a/lib/backupstore/StoreTestUtils.cpp b/lib/backupstore/StoreTestUtils.cpp
index 706710040..6309b2f97 100644
--- a/lib/backupstore/StoreTestUtils.cpp
+++ b/lib/backupstore/StoreTestUtils.cpp
@@ -76,6 +76,10 @@ void set_refcount(int64_t ObjectID, uint32_t RefCount)
 		{
 			ExpectedRefCounts.resize(i);
 		}
+		else
+		{
+			break;
+		}
 	}
 }
 
@@ -295,6 +299,10 @@ bool check_reference_counts(BackupStoreRefCountDatabase& references)
 {
 	bool counts_ok = true;
 
+	TEST_EQUAL_OR(ExpectedRefCounts.size(),
+		references.GetLastObjectIDUsed() + 1,
+		counts_ok = false);
+
 	for (unsigned int i = BackupProtocolListDirectory::RootDirectory;
 		i < ExpectedRefCounts.size(); i++)
 	{
diff --git a/lib/httpserver/S3Client.cpp b/lib/httpserver/S3Client.cpp
index accfb5c8f..8b663f380 100644
--- a/lib/httpserver/S3Client.cpp
+++ b/lib/httpserver/S3Client.cpp
@@ -443,8 +443,19 @@ HTTPResponse S3Client::SendRequest(HTTPRequest& rRequest,
 
 	if (pStreamToSend)
 	{
-		rRequest.SendWithStream(*mapClientSocket, mNetworkTimeout,
-			pStreamToSend, response);
+		try
+		{
+			rRequest.SendWithStream(*mapClientSocket, mNetworkTimeout,
+				pStreamToSend, response);
+		}
+		catch(BoxException &e)
+		{
+			// If we encounter a read error from the stream while sending, then
+			// the client socket is unsafe (because we have sent a request, and
+			// possibly some data) so we need to close it.
+			mapClientSocket.reset();
+			throw;
+		}
 	}
 	else
 	{
diff --git a/test/backupstore/testbackupstore.cpp b/test/backupstore/testbackupstore.cpp
index 7cfef7928..663026c45 100644
--- a/test/backupstore/testbackupstore.cpp
+++ b/test/backupstore/testbackupstore.cpp
@@ -1269,43 +1269,14 @@ bool test_server_housekeeping()
 	TEST_THAT(change_account_limits("0B", "2000B"));
 	TEST_THAT(run_housekeeping_and_check_account());
 	protocol.Reopen();
+	set_refcount(store1objid, 0);
 
 	TEST_THAT(check_num_files(fs, 0, 0, 0, 1));
 	TEST_THAT(check_num_blocks(protocol, 0, 0, 0, root_dir_blocks,
 		root_dir_blocks));
 
-	// Used to not consume the stream
-	std::auto_ptr<IOStream> upload(new ZeroStream(1000));
-	TEST_COMMAND_RETURNS_ERROR(protocol, QueryStoreFile(
-		BACKUPSTORE_ROOT_DIRECTORY_ID,
-		0,
-		0, /* use for attr hash too */
-		99999, /* diff from ID */
-		uploads[0].name,
-		upload),
-		Err_DiffFromFileDoesNotExist);
-
-	// TODO FIXME These tests should not be here, but in
-	// test_server_commands. But make sure you use a network protocol,
-	// not a local one, when you move them.
-
-	// Try using GetFile on a directory
-	{
-		int64_t subdirid = create_directory(protocol);
-		TEST_COMMAND_RETURNS_ERROR(protocol,
-			QueryGetFile(BACKUPSTORE_ROOT_DIRECTORY_ID, subdirid),
-			Err_FileDoesNotVerify);
-	}
-
-	// Try retrieving an object that doesn't exist. That used to return
-	// BackupProtocolSuccess(NoObject) for no apparent reason.
-	TEST_COMMAND_RETURNS_ERROR(protocol, QueryGetObject(store1objid + 1),
-		Err_DoesNotExist);
-
 	// Close the protocol, so we can housekeep the account
 	protocol.QueryFinished();
-	TEST_THAT(run_housekeeping_and_check_account());
-
-	ExpectedRefCounts.resize(3); // stop test failure in teardown_test_backupstore()
 
 	TEARDOWN_TEST_BACKUPSTORE();
 }
@@ -1676,7 +1647,14 @@ bool test_server_commands(const std::string& specialisation_name,
 		new BackupProtocolLocal2(rwContext, 0x01234567, false)); // !ReadOnly
 	BackupProtocolLocal2 protocolReadOnly(roContext, 0x01234567, true); // ReadOnly
 
-	// Try using GetFile on an object ID that doesn't exist in the directory
+	// Try retrieving an object that doesn't exist. That used to return
+	// BackupProtocolSuccess(NoObject) for no apparent reason.
+	{
+		TEST_COMMAND_RETURNS_ERROR(*apProtocol, QueryGetObject(2),
+			Err_DoesNotExist);
+	}
+
+	// Try using GetFile on an object ID that doesn't exist in the directory.
 	{
 		TEST_COMMAND_RETURNS_ERROR(*apProtocol,
 			QueryGetFile(BACKUPSTORE_ROOT_DIRECTORY_ID,
@@ -1684,6 +1662,43 @@ bool test_server_commands(const std::string& specialisation_name,
 			Err_DoesNotExistInDirectory);
 	}
 
+	// Try uploading a file that doesn't verify.
+	{
+		std::auto_ptr<IOStream> upload(new ZeroStream(1000));
+		TEST_COMMAND_RETURNS_ERROR(*apProtocol, QueryStoreFile(
+			BACKUPSTORE_ROOT_DIRECTORY_ID,
+			0,
+			0, /* use for attr hash too */
+			0, /* diff from ID */
+			uploads[0].name,
+			upload),
+			Err_FileDoesNotVerify);
+	}
+
+	// TODO FIXME: in the case of S3 stores, we will have sent the request (but no data) before
+	// the client realises that the stream is invalid, and aborts. The S3 server will receive a
+	// PUT request for a zero-byte file, and have no idea that it's not a valid file, so it will
+	// store it. We should send a checksum (if possible) and a content-length (at least) to
+	// prevent this, and test that no file is stored instead of unlinking it here.
+	if(specialisation_name == "s3")
+	{
+		TEST_EQUAL(0, EMU_UNLINK("testfiles/store/subdir/0x2.file"));
+	}
+
+	// Try uploading a file referencing another file which doesn't exist.
+	// This used to not consume the stream, leaving it unusable.
+	{
+		std::auto_ptr<IOStream> upload(new ZeroStream(1000));
+		TEST_COMMAND_RETURNS_ERROR(*apProtocol, QueryStoreFile(
+			BACKUPSTORE_ROOT_DIRECTORY_ID,
+			0,
+			0, /* use for attr hash too */
+			99999, /* diff from ID */
+			uploads[0].name,
+			upload),
+			Err_DiffFromFileDoesNotExist);
+	}
+
 	// BLOCK
 	// TODO FIXME dedent this block.
 	{