Skip to content

Detached199 #2

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 28 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
3ef1e2f
refactor: rename leftover WriteBlockBench
l0rinc Mar 14, 2025
1155549
refactor: collect block read operations into try block
l0rinc Mar 14, 2025
a37dcd5
refactor: shorten BLOCK_SERIALIZATION_HEADER_SIZE
l0rinc Mar 14, 2025
86260bc
log: unify error messages for (read/write)[undo]block
l0rinc Jan 24, 2025
1076917
refactor: replace raw values in ReadRawBlock with BLOCK_HEADER_BYTES
l0rinc Mar 26, 2025
0de00d5
optimization: Bulk serialization reads in `UndoRead`, `ReadBlock`
l0rinc Mar 26, 2025
fe6b0e4
optimization: Bulk serialization writes in `WriteBlockUndo` and `WriteBlock`
l0rinc Mar 26, 2025
2a5ae3c
timeout-minutes: 200
l0rinc Apr 4, 2025
9eb96cc
not --quiet
l0rinc Apr 5, 2025
a033f27
1 << 20
l0rinc Apr 4, 2025
58c00b7
remove most tests to pinpoint the failure
l0rinc Apr 4, 2025
53184de
Revert "remove most tests to pinpoint the failure"
l0rinc Apr 5, 2025
debba98
1<<17
l0rinc Apr 5, 2025
b8989c0
no --ci, --jobs 1
l0rinc Apr 6, 2025
b96ab39
1<<18
l0rinc Apr 6, 2025
d8ff54f
1<<19
l0rinc Apr 6, 2025
3a51e8e
1<<20
l0rinc Apr 6, 2025
f5fefe1
no --jobs 1
l0rinc Apr 6, 2025
1f6a9f3
`1<<20` with `--ci "${MAKEJOBS}"` and parallelism
l0rinc Apr 6, 2025
7f69d9b
1 << 19
l0rinc Apr 8, 2025
7b61be8
1 << 18
l0rinc Apr 8, 2025
9636baa
1 << 17
l0rinc Apr 8, 2025
6b923e1
1 << 16
l0rinc Apr 8, 2025
db75f48
1<<20, reduce test count
l0rinc Apr 9, 2025
d17e392
skip more tests
l0rinc Apr 9, 2025
2789009
skip more test_runner tests
l0rinc Apr 9, 2025
756bdd8
Revert "skip more test_runner tests"
l0rinc Apr 9, 2025
2e399fc
skip fewer test_runner tests
l0rinc Apr 9, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
350 changes: 1 addition & 349 deletions .github/workflows/ci.yml

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion ci/test/03_test_script.sh
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ fi
if [ "$RUN_FUNCTIONAL_TESTS" = "true" ]; then
# parses TEST_RUNNER_EXTRA as an array which allows for multiple arguments such as TEST_RUNNER_EXTRA='--exclude "rpc_bind.py --ipv6"'
eval "TEST_RUNNER_EXTRA=($TEST_RUNNER_EXTRA)"
LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" test/functional/test_runner.py --ci "${MAKEJOBS}" --tmpdirprefix "${BASE_SCRATCH_DIR}"/test_runner/ --ansi --combinedlogslen=99999999 --timeout-factor="${TEST_RUNNER_TIMEOUT_FACTOR}" "${TEST_RUNNER_EXTRA[@]}" --quiet --failfast
LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" test/functional/test_runner.py --ci "${MAKEJOBS}" --tmpdirprefix "${BASE_SCRATCH_DIR}"/test_runner/ --ansi --combinedlogslen=99999999 --timeout-factor="${TEST_RUNNER_TIMEOUT_FACTOR}" "${TEST_RUNNER_EXTRA[@]}" --failfast
fi

if [ "${RUN_TIDY}" = "true" ]; then
Expand Down
4 changes: 2 additions & 2 deletions src/bench/readwriteblock.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ static CBlock CreateTestBlock()
return block;
}

static void SaveBlockBench(benchmark::Bench& bench)
static void WriteBlockBench(benchmark::Bench& bench)
{
const auto testing_setup{MakeNoLogFileContext<const TestingSetup>(ChainType::MAIN)};
auto& blockman{testing_setup->m_node.chainman->m_blockman};
Expand Down Expand Up @@ -63,6 +63,6 @@ static void ReadRawBlockBench(benchmark::Bench& bench)
});
}

BENCHMARK(SaveBlockBench, benchmark::PriorityLevel::HIGH);
BENCHMARK(WriteBlockBench, benchmark::PriorityLevel::HIGH);
BENCHMARK(ReadBlockBench, benchmark::PriorityLevel::HIGH);
BENCHMARK(ReadRawBlockBench, benchmark::PriorityLevel::HIGH);
104 changes: 53 additions & 51 deletions src/node/blockstorage.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -660,27 +660,30 @@ bool BlockManager::ReadBlockUndo(CBlockUndo& blockundo, const CBlockIndex& index
const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())};

// Open history file to read
AutoFile filein{OpenUndoFile(pos, true)};
if (filein.IsNull()) {
LogError("OpenUndoFile failed for %s", pos.ToString());
AutoFile file{OpenUndoFile(pos, true)};
if (file.IsNull()) {
LogError("OpenUndoFile failed for %s while reading", pos.ToString());
return false;
}
BufferedReader filein{std::move(file)};

// Read block
uint256 hashChecksum;
HashVerifier verifier{filein}; // Use HashVerifier as reserializing may lose data, c.f. commit d342424301013ec47dc146a4beb49d5c9319d80a
try {
// Read block
HashVerifier verifier{filein}; // Use HashVerifier, as reserializing may lose data, c.f. commit d3424243

verifier << index.pprev->GetBlockHash();
verifier >> blockundo;

uint256 hashChecksum;
filein >> hashChecksum;
} catch (const std::exception& e) {
LogError("%s: Deserialize or I/O error - %s at %s\n", __func__, e.what(), pos.ToString());
return false;
}

// Verify checksum
if (hashChecksum != verifier.GetHash()) {
LogError("%s: Checksum mismatch at %s\n", __func__, pos.ToString());
// Verify checksum
if (hashChecksum != verifier.GetHash()) {
LogError("%s: Checksum mismatch at %s", __func__, pos.ToString());
return false;
}
} catch (const std::exception& e) {
LogError("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
return false;
}

Expand Down Expand Up @@ -933,27 +936,27 @@ bool BlockManager::WriteBlockUndo(const CBlockUndo& blockundo, BlockValidationSt
FlatFilePos pos;
const unsigned int blockundo_size{static_cast<unsigned int>(GetSerializeSize(blockundo))};
if (!FindUndoPos(state, block.nFile, pos, blockundo_size + UNDO_DATA_DISK_OVERHEAD)) {
LogError("FindUndoPos failed");
LogError("FindUndoPos failed for %s while writing", pos.ToString());
return false;
}
// Open history file to append
AutoFile fileout{OpenUndoFile(pos)};
if (fileout.IsNull()) {
LogError("OpenUndoFile failed");
AutoFile file{OpenUndoFile(pos)};
if (file.IsNull()) {
LogError("OpenUndoFile failed for %s while writing", pos.ToString());
return FatalError(m_opts.notifications, state, _("Failed to write undo data."));
}
BufferedWriter fileout{file};

// Write index header
fileout << GetParams().MessageStart() << blockundo_size;
// Write undo data
pos.nPos += BLOCK_SERIALIZATION_HEADER_SIZE;
fileout << blockundo;

// Calculate & write checksum
HashWriter hasher{};
hasher << block.pprev->GetBlockHash();
hasher << blockundo;
fileout << hasher.GetHash();
pos.nPos += BLOCK_HEADER_BYTES;
{
// Calculate checksum
HashWriter hasher{};
hasher << block.pprev->GetBlockHash() << blockundo;
// Write undo data & checksum
fileout << blockundo << hasher.GetHash();
}

// rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
// we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
Expand Down Expand Up @@ -986,29 +989,30 @@ bool BlockManager::ReadBlock(CBlock& block, const FlatFilePos& pos) const
block.SetNull();

// Open history file to read
AutoFile filein{OpenBlockFile(pos, true)};
if (filein.IsNull()) {
LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
AutoFile file{OpenBlockFile(pos, true)};
if (file.IsNull()) {
LogError("%s: OpenBlockFile failed for %s", __func__, pos.ToString());
return false;
}
BufferedReader filein{std::move(file)};

// Read block
try {
// Read block
filein >> TX_WITH_WITNESS(block);
} catch (const std::exception& e) {
LogError("%s: Deserialize or I/O error - %s at %s\n", __func__, e.what(), pos.ToString());
LogError("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
return false;
}

// Check the header
if (!CheckProofOfWork(block.GetHash(), block.nBits, GetConsensus())) {
LogError("%s: Errors in block header at %s\n", __func__, pos.ToString());
LogError("%s: Errors in block header at %s", __func__, pos.ToString());
return false;
}

// Signet only: check block solution
if (GetConsensus().signet_blocks && !CheckSignetBlockSolution(block, GetConsensus())) {
LogError("%s: Errors in block solution at %s\n", __func__, pos.ToString());
LogError("%s: Errors in block solution at %s", __func__, pos.ToString());
return false;
}

Expand All @@ -1023,25 +1027,22 @@ bool BlockManager::ReadBlock(CBlock& block, const CBlockIndex& index) const
return false;
}
if (block.GetHash() != index.GetBlockHash()) {
LogError("%s: GetHash() doesn't match index for %s at %s\n", __func__, index.ToString(), block_pos.ToString());
LogError("%s: GetHash() doesn't match index for %s at %s", __func__, index.ToString(), block_pos.ToString());
return false;
}
return true;
}

bool BlockManager::ReadRawBlock(std::vector<uint8_t>& block, const FlatFilePos& pos) const
{
FlatFilePos hpos = pos;
// If nPos is less than 8 the pos is null and we don't have the block data
// Return early to prevent undefined behavior of unsigned int underflow
if (hpos.nPos < 8) {
LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
if (pos.nPos < BLOCK_HEADER_BYTES) {
// Return early to prevent undefined behavior of unsigned int underflow
LogError("%s failed for %s", __func__, pos.ToString());
return false;
}
hpos.nPos -= 8; // Seek back 8 bytes for meta header
AutoFile filein{OpenBlockFile(hpos, true)};
AutoFile filein{OpenBlockFile({pos.nFile, static_cast<unsigned int>(pos.nPos - BLOCK_HEADER_BYTES)}, true)};
if (filein.IsNull()) {
LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
LogError("%s: OpenBlockFile failed for %s", __func__, pos.ToString());
return false;
}

Expand All @@ -1052,22 +1053,22 @@ bool BlockManager::ReadRawBlock(std::vector<uint8_t>& block, const FlatFilePos&
filein >> blk_start >> blk_size;

if (blk_start != GetParams().MessageStart()) {
LogError("%s: Block magic mismatch for %s: %s versus expected %s\n", __func__, pos.ToString(),
LogError("%s: Block magic mismatch for %s: %s versus expected %s", __func__, pos.ToString(),
HexStr(blk_start),
HexStr(GetParams().MessageStart()));
return false;
}

if (blk_size > MAX_SIZE) {
LogError("%s: Block data is larger than maximum deserialization size for %s: %s versus %s\n", __func__, pos.ToString(),
LogError("%s: Block data is larger than maximum deserialization size for %s: %s versus %s", __func__, pos.ToString(),
blk_size, MAX_SIZE);
return false;
}

block.resize(blk_size); // Zeroing of memory is intentional here
filein.read(MakeWritableByteSpan(block));
} catch (const std::exception& e) {
LogError("%s: Read from block file failed: %s for %s\n", __func__, e.what(), pos.ToString());
LogError("%s: Read from block file failed: %s for %s", __func__, e.what(), pos.ToString());
return false;
}

Expand All @@ -1077,22 +1078,23 @@ bool BlockManager::ReadRawBlock(std::vector<uint8_t>& block, const FlatFilePos&
FlatFilePos BlockManager::WriteBlock(const CBlock& block, int nHeight)
{
const unsigned int block_size{static_cast<unsigned int>(GetSerializeSize(TX_WITH_WITNESS(block)))};
FlatFilePos pos{FindNextBlockPos(block_size + BLOCK_SERIALIZATION_HEADER_SIZE, nHeight, block.GetBlockTime())};
FlatFilePos pos{FindNextBlockPos(block_size + BLOCK_HEADER_BYTES, nHeight, block.GetBlockTime())};
if (pos.IsNull()) {
LogError("FindNextBlockPos failed");
LogError("FindNextBlockPos failed for %s while writing", pos.ToString());
return FlatFilePos();
}
AutoFile fileout{OpenBlockFile(pos)};
if (fileout.IsNull()) {
LogError("OpenBlockFile failed");
AutoFile file{OpenBlockFile(pos)};
if (file.IsNull()) {
LogError("OpenBlockFile failed for %s while writing", pos.ToString());
m_opts.notifications.fatalError(_("Failed to write block."));
return FlatFilePos();
}
BufferedWriter fileout{file};

// Write index header
fileout << GetParams().MessageStart() << block_size;
pos.nPos += BLOCK_HEADER_BYTES;
// Write block
pos.nPos += BLOCK_SERIALIZATION_HEADER_SIZE;
fileout << TX_WITH_WITNESS(block);
return pos;
}
Expand Down
6 changes: 3 additions & 3 deletions src/node/blockstorage.h
Original file line number Diff line number Diff line change
Expand Up @@ -75,10 +75,10 @@ static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB
static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB

/** Size of header written by WriteBlock before a serialized CBlock (8 bytes) */
static constexpr size_t BLOCK_SERIALIZATION_HEADER_SIZE{std::tuple_size_v<MessageStartChars> + sizeof(unsigned int)};
static constexpr size_t BLOCK_HEADER_BYTES{std::tuple_size_v<MessageStartChars> + sizeof(unsigned int)};

/** Total overhead when writing undo data: header (8 bytes) plus checksum (32 bytes) */
static constexpr size_t UNDO_DATA_DISK_OVERHEAD{BLOCK_SERIALIZATION_HEADER_SIZE + uint256::size()};
static constexpr size_t UNDO_DATA_DISK_OVERHEAD{BLOCK_HEADER_BYTES + uint256::size()};

// Because validation code takes pointers to the map's CBlockIndex objects, if
// we ever switch to another associative container, we need to either use a
Expand Down Expand Up @@ -164,7 +164,7 @@ class BlockManager
* blockfile info, and checks if there is enough disk space to save the block.
*
* The nAddSize argument passed to this function should include not just the size of the serialized CBlock, but also the size of
* separator fields (BLOCK_SERIALIZATION_HEADER_SIZE).
* separator fields (BLOCK_HEADER_BYTES).
*/
[[nodiscard]] FlatFilePos FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime);
[[nodiscard]] bool FlushChainstateBlockFile(int tip_height);
Expand Down
18 changes: 12 additions & 6 deletions src/streams.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -91,17 +91,23 @@ void AutoFile::write(std::span<const std::byte> src)
std::array<std::byte, 4096> buf;
while (src.size() > 0) {
auto buf_now{std::span{buf}.first(std::min<size_t>(src.size(), buf.size()))};
std::copy(src.begin(), src.begin() + buf_now.size(), buf_now.begin());
util::Xor(buf_now, m_xor, *m_position);
if (std::fwrite(buf_now.data(), 1, buf_now.size(), m_file) != buf_now.size()) {
throw std::ios_base::failure{"XorFile::write: failed"};
}
std::copy_n(src.begin(), buf_now.size(), buf_now.begin());
write_buffer(buf_now);
src = src.subspan(buf_now.size());
*m_position += buf_now.size();
}
}
}

void AutoFile::write_buffer(std::span<std::byte> src)
{
    if (!m_file) throw std::ios_base::failure("AutoFile::write_buffer: file handle is nullptr");
    if (m_xor.size()) {
        // Only dereference the stream position when we actually need it for
        // obfuscation: the trailing `if (m_position)` below shows the position
        // may be unset, and unconditionally evaluating *m_position (as the
        // previous code did) is undefined behavior in that case.
        if (!m_position) throw std::ios_base::failure("AutoFile::write_buffer: obfuscation position unknown");
        util::Xor(src, m_xor, *m_position); // obfuscate in-place
    }
    if (std::fwrite(src.data(), 1, src.size(), m_file) != src.size()) {
        throw std::ios_base::failure("AutoFile::write_buffer: write failed");
    }
    if (m_position) *m_position += src.size();
}

bool AutoFile::Commit()
{
return ::FileCommit(m_file);
Expand Down
89 changes: 88 additions & 1 deletion src/streams.h
Original file line number Diff line number Diff line change
Expand Up @@ -465,8 +465,13 @@ class AutoFile
::Unserialize(*this, obj);
return *this;
}

//! Write a mutable buffer more efficiently than write(), obfuscating the buffer in-place.
void write_buffer(std::span<std::byte> src);
};

using BufferData = std::vector<std::byte>;

/** Wrapper around an AutoFile& that implements a ring buffer to
* deserialize from. It guarantees the ability to rewind a given number of bytes.
*
Expand All @@ -481,7 +486,7 @@ class BufferedFile
uint64_t m_read_pos{0}; //!< how many bytes have been read from this
uint64_t nReadLimit; //!< up to which position we're allowed to read
uint64_t nRewind; //!< how many bytes we guarantee to rewind
std::vector<std::byte> vchBuf; //!< the buffer
BufferData vchBuf;

//! read data from the source to fill the buffer
bool Fill() {
Expand Down Expand Up @@ -614,4 +619,86 @@ class BufferedFile
}
};

/**
 * Wrapper that buffers reads from an underlying stream.
 *
 * Requires the underlying stream to provide read() (fixed-size reads that
 * must be fully satisfied) and detail_fread() (variable-sized reads that
 * return how many bytes were actually obtained).
 *
 * NOTE(review): m_src is a reference bound to the stream passed into the
 * constructor, so the caller's stream object must outlive this reader.
 */
template <typename S>
class BufferedReader
{
    S& m_src;         //!< Underlying stream (not owned)
    BufferData m_buf; //!< Prefetch buffer
    size_t m_buf_pos; //!< Read offset into m_buf; == m_buf.size() when the buffer is exhausted

public:
    //! The requires-clause only accepts rvalue arguments, so class template
    //! argument deduction cannot deduce S as an lvalue-reference type.
    //! m_buf_pos starts at `size` so the buffer counts as exhausted and the
    //! first read() triggers a fill from the source.
    explicit BufferedReader(S&& stream, size_t size = 1 << 16)
        requires std::is_rvalue_reference_v<S&&>
        : m_src{stream}, m_buf(size), m_buf_pos{size} {}

    //! Fill dst completely, serving bytes from the internal buffer first and
    //! reading the remainder directly from the underlying stream.
    void read(std::span<std::byte> dst)
    {
        // Serve as much as possible from what is already buffered.
        if (const auto available{std::min(dst.size(), m_buf.size() - m_buf_pos)}) {
            std::copy_n(m_buf.begin() + m_buf_pos, available, dst.begin());
            m_buf_pos += available;
            dst = dst.subspan(available);
        }
        if (dst.size()) {
            // Anything left over means the copy above drained the buffer.
            assert(m_buf_pos == m_buf.size());
            // Read the remainder directly, bypassing the buffer.
            m_src.read(dst);

            // Refill; a short fill shrinks the buffer to the bytes obtained.
            // NOTE(review): the buffer never grows back after a short fill —
            // presumably fine since fills only come up short near end-of-file,
            // but worth confirming against detail_fread()'s contract.
            m_buf_pos = 0;
            m_buf.resize(m_src.detail_fread(m_buf));
        }
    }

    //! Unserialize an object through the buffered stream.
    template <typename T>
    BufferedReader& operator>>(T&& obj)
    {
        Unserialize(*this, obj);
        return *this;
    }
};

/**
 * Buffering layer in front of an output stream.
 *
 * Serialized bytes accumulate in an internal buffer and are handed to the
 * wrapped stream's write_buffer() method one full buffer at a time (plus a
 * final partial flush from the destructor), allowing the underlying stream
 * to obfuscate and write each chunk in place.
 */
template <typename S>
class BufferedWriter
{
    S& m_dst;            //!< Wrapped output stream; must outlive this writer
    BufferData m_buf;    //!< Staging buffer for not-yet-flushed bytes
    size_t m_buf_pos{0}; //!< Number of valid bytes currently staged in m_buf

public:
    explicit BufferedWriter(S& stream, size_t size = 1 << 20) : m_dst{stream}, m_buf(size) {}

    //! Pushes any remaining staged bytes to the wrapped stream.
    ~BufferedWriter() { flush(); }

    //! Hand all staged bytes to the underlying stream and reset the buffer.
    void flush()
    {
        if (m_buf_pos) {
            m_dst.write_buffer(std::span{m_buf}.first(m_buf_pos));
        }
        m_buf_pos = 0;
    }

    //! Copy src into the staging buffer, flushing each time it fills up.
    void write(std::span<const std::byte> src)
    {
        for (;;) {
            const auto chunk{std::min(src.size(), m_buf.size() - m_buf_pos)};
            if (chunk == 0) break;
            std::copy_n(src.begin(), chunk, m_buf.begin() + m_buf_pos);
            m_buf_pos += chunk;
            if (m_buf_pos == m_buf.size()) flush();
            src = src.subspan(chunk);
        }
    }

    //! Serialize an object through the buffered stream.
    template <typename T>
    BufferedWriter& operator<<(const T& obj)
    {
        Serialize(*this, obj);
        return *this;
    }
};

#endif // BITCOIN_STREAMS_H
Loading
Loading