Properly fail build on fatal IO error. Remove hardcoded IO indirection.
connormanning committed Feb 29, 2024
1 parent 5ca3f4c commit d6b787d
Showing 18 changed files with 246 additions and 176 deletions.
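
A note on the shape of the change: per the commit message, output IO failures now properly fail the build, and the free functions io::read/io::write, which were dispatched on metadata.dataType at every call site, are replaced by a single Io object created via Io::create(metadata, endpoints) and passed by const reference down through ChunkCache into Chunk. The Io interface itself is not part of this diff; what follows is a minimal sketch inferred from its call sites below (io->read(stem, table), m_io.write(filename, table, bounds)) — the real names and signatures in entwine/io/io.hpp may differ.

// Hypothetical sketch only, inferred from call sites in this commit;
// not the actual contents of entwine/io/io.hpp.
#include <memory>
#include <string>

struct Metadata;
struct Endpoints;
struct Bounds;
class VectorPointTable;

class Io
{
public:
    virtual ~Io() = default;

    // Factory: picks a concrete reader/writer from metadata.dataType and
    // captures metadata/endpoints so callers stop passing them per call.
    static std::unique_ptr<Io> create(
        const Metadata& metadata,
        const Endpoints& endpoints);

    virtual void read(const std::string& filename, VectorPointTable& table)
        const = 0;

    virtual void write(
        const std::string& filename,
        VectorPointTable& table,
        const Bounds& bounds) const = 0;
};
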
1 change: 0 additions & 1 deletion CMakeLists.txt
@@ -50,7 +50,6 @@ include(${CMAKE_DIR}/pdal.cmake)
include(${CMAKE_DIR}/system.cmake)

set(THIRD_ROOT $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/entwine/third>)
message(WARNING "INCLUDING" ${THIRD_ROOT})
include_directories(SYSTEM PUBLIC ${THIRD_ROOT})

add_subdirectory(entwine)
13 changes: 0 additions & 13 deletions config/cesium-truncated.json

This file was deleted.

12 changes: 0 additions & 12 deletions config/cesium.json

This file was deleted.

58 changes: 53 additions & 5 deletions entwine/builder/builder.cpp
@@ -52,6 +52,7 @@ Builder::Builder(
bool verbose)
: endpoints(endpoints)
, metadata(metadata)
, io(Io::create(this->metadata, this->endpoints))
, manifest(manifest)
, hierarchy(hierarchy)
, verbose(verbose)
@@ -64,13 +65,34 @@ uint64_t Builder::run(
{
Pool pool(2);

std::string fatalError;

std::atomic_uint64_t counter(0);
std::atomic_bool done(false);
pool.add([&]() { monitor(progressInterval, counter, done); });
pool.add([&]() { runInserts(threads, limit, counter); done = true; });
pool.add([&]()
{
try
{
runInserts(threads, limit, counter);
done = true;
}
catch (std::exception& e)
{
fatalError = e.what();
done = true;
}
catch (...)
{
fatalError = "Fatal error: unknown error";
done = true;
}
});

pool.join();

if (fatalError.size()) throw std::runtime_error(fatalError);

return counter;
}
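
Builder::run's new structure — catch on the worker, stash the message, rethrow on the calling thread after the pool joins — is what turns a worker-thread failure into an ordinary build failure; an exception escaping the task would otherwise never reach the caller (for a raw std::thread it would call std::terminate). A self-contained sketch of the same pattern, with std::thread standing in for entwine's Pool:

#include <iostream>
#include <stdexcept>
#include <string>
#include <thread>

int main()
{
    std::string fatalError;

    std::thread worker([&]()
    {
        try
        {
            throw std::runtime_error("simulated fatal IO failure");
        }
        catch (const std::exception& e)
        {
            fatalError = e.what();  // capture on the worker thread
        }
        catch (...)
        {
            fatalError = "unknown error";
        }
    });

    // join() provides the happens-before edge, so fatalError is safe to
    // read here without further synchronization.
    worker.join();

    try
    {
        // Rethrow by value so upstream catch (const std::exception&) matches.
        if (fatalError.size()) throw std::runtime_error(fatalError);
        std::cout << "build succeeded\n";
        return 0;
    }
    catch (const std::exception& e)
    {
        std::cout << "build failed: " << e.what() << "\n";
        return 1;
    }
}

If preserving the original exception type matters, std::exception_ptr (std::current_exception / std::rethrow_exception) does the same job without flattening everything to a string.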

@@ -91,7 +113,7 @@ void Builder::runInserts(
const uint64_t stolenThreads = threads.work - actualWorkThreads;
const uint64_t actualClipThreads = threads.clip + stolenThreads;

ChunkCache cache(endpoints, metadata, hierarchy, actualClipThreads);
ChunkCache cache(endpoints, metadata, *io, hierarchy, actualClipThreads);
Pool pool(std::min<uint64_t>(actualWorkThreads, manifest.size()));

uint64_t filesInserted = 0;
@@ -101,6 +123,8 @@
origin < manifest.size() && (!limit || filesInserted < limit);
++origin)
{
if (cache.fatalErrors().size()) break;

const auto& item = manifest.at(origin);
const auto& info = item.source.info;
if (!item.inserted && info.points && active.overlaps(info.bounds))
@@ -126,6 +150,24 @@
pool.join();
cache.join();

// While pool errors from *input* are not fatal and just get stored and
// logged as errors to note that an input file failed to be inserted,
// errors reading/writing from the *output* are irrecoverably fatal. In
// this case, log the error and throw, no need to save metadata since
// the build is busted.
const auto errors = cache.fatalErrors();
if (errors.size())
{
if (verbose)
{
std::cout << "Fatal error: failed to read or write output data\n";
for (const auto& e : errors) std::cout << "\t" << e << std::endl;
std::cout << "Terminating fatally corrupted build..." << std::endl;
}

throw std::runtime_error(errors.front());
}

save(getTotal(threads));
}
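
The comment in the hunk above draws the line between the two failure classes: a bad input file is recorded and the build moves on, while a failed read or write of the build's own output makes the result unusable, so nothing is saved and the error is thrown. A hedged sketch of how a driver might consume that contract — per-file problems reported from statuses, output problems arriving as an exception (FileStatus here is illustrative, not entwine's manifest API):

#include <exception>
#include <iostream>
#include <string>
#include <vector>

// Illustrative stand-in for a per-input status entry; not entwine's API.
struct FileStatus
{
    std::string path;
    bool inserted;
    std::string error;
};

int main()
{
    std::vector<FileStatus> statuses = {
        { "a.laz", true, "" },
        { "b.laz", false, "invalid header" }  // input failure: non-fatal
    };

    try
    {
        // A real build would run here and throw only on fatal output IO.
        for (const auto& s : statuses)
        {
            if (!s.inserted)
                std::cout << "skipped " << s.path << ": " << s.error << "\n";
        }
        return 0;
    }
    catch (const std::exception& e)
    {
        // Output read/write failure: the build is unusable, nothing is saved.
        std::cout << "fatal: " << e.what() << "\n";
        return 1;
    }
}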

@@ -616,7 +658,12 @@ void merge(
Manifest manifest = base.manifest;

Builder builder(endpoints, metadata, manifest, Hierarchy(), verbose);
ChunkCache cache(endpoints, builder.metadata, builder.hierarchy, threads);
ChunkCache cache(
endpoints,
builder.metadata,
*builder.io,
builder.hierarchy,
threads);

if (verbose) std::cout << "Merging" << std::endl;

@@ -670,8 +717,9 @@ void mergeOne(Builder& dst, const Builder& src, ChunkCache& cache)
{
// TODO: Should make sure that the src/dst metadata match. For now we're
// relying on the user not to have done anything weird.
const auto& endpoints = dst.endpoints;
const auto& metadata = dst.metadata;
const auto& endpoints = dst.endpoints;
auto io = Io::create(metadata, endpoints);

Clipper clipper(cache);
const auto sharedDepth = getSharedDepth(src.metadata);
@@ -710,7 +758,7 @@ void mergeOne(Builder& dst, const Builder& src, ChunkCache& cache)
});

const auto stem = key.toString() + getPostfix(src.metadata);
io::read(metadata.dataType, metadata, endpoints, stem, table);
io->read(stem, table);
}
}
}
2 changes: 2 additions & 0 deletions entwine/builder/builder.hpp
@@ -63,6 +63,8 @@ struct Builder

Endpoints endpoints;
Metadata metadata;
std::unique_ptr<Io> io;

Manifest manifest;
Hierarchy hierarchy;
bool verbose = true;
19 changes: 16 additions & 3 deletions entwine/builder/chunk-cache.cpp
@@ -33,10 +33,12 @@ ChunkCache::Info ChunkCache::latchInfo()
ChunkCache::ChunkCache(
const Endpoints& endpoints,
const Metadata& metadata,
const Io& io,
Hierarchy& hierarchy,
const uint64_t threads)
: m_endpoints(endpoints)
, m_metadata(metadata)
, m_io(io)
, m_hierarchy(hierarchy)
, m_pool(threads)
{ }
@@ -113,7 +115,7 @@ Chunk& ChunkCache::addRef(const ChunkKey& ck, Clipper& clipper)
// case, we'll need to reinitialize the resident chunk from its
// remote source. Our newly added reference will keep it from
// being erased.
ref.assign(m_metadata, ck, m_hierarchy);
ref.assign(m_metadata, m_io, ck, m_hierarchy);
assert(ref.exists());

{
@@ -152,7 +154,7 @@ Chunk& ChunkCache::addRef(const ChunkKey& ck, Clipper& clipper)
auto insertion = slice.emplace(
std::piecewise_construct,
std::forward_as_tuple(ck.position()),
std::forward_as_tuple(m_metadata, ck, m_hierarchy));
std::forward_as_tuple(m_metadata, m_io, ck, m_hierarchy));

{
SpinGuard lock(infoSpin);
@@ -265,7 +267,18 @@ void ChunkCache::maybePurge(const uint64_t maxCacheSize)
// Don't hold any locks while we do this, since it may block. We
// only want to block the calling thread in this case, not the
// whole system.
m_pool.add([this, dxyz]() { maybeSerialize(dxyz); });
m_pool.add([this, dxyz]()
{
try
{
maybeSerialize(dxyz);
}
catch (std::exception& e)
{
std::lock_guard<std::mutex> lock(m_errorsMutex);
m_errors.push_back(e.what());
}
});

ownedLock.lock();
}
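
Serialization tasks queued on the cache's pool now catch their own exceptions and append the message under m_errorsMutex rather than letting anything escape the worker; the errors are surfaced later through fatalErrors(). A standalone sketch of that collect-now, surface-later idiom (std::async stands in for entwine's Pool; the mutable mutex is what lets a const accessor lock):

#include <future>
#include <iostream>
#include <mutex>
#include <stdexcept>
#include <string>
#include <vector>

class ErrorSink
{
public:
    void record(std::string message)
    {
        std::lock_guard<std::mutex> lock(m_mutex);
        m_errors.push_back(std::move(message));
    }

    // Copy out under the lock so callers can iterate without racing writers.
    std::vector<std::string> snapshot() const
    {
        std::lock_guard<std::mutex> lock(m_mutex);
        return m_errors;
    }

private:
    mutable std::mutex m_mutex;  // mutable so snapshot() can stay const
    std::vector<std::string> m_errors;
};

int main()
{
    ErrorSink errors;

    auto task = std::async(std::launch::async, [&errors]()
    {
        try
        {
            throw std::runtime_error("simulated chunk write failure");
        }
        catch (const std::exception& e)
        {
            errors.record(e.what());  // never let it escape the worker
        }
    });
    task.wait();

    for (const auto& e : errors.snapshot()) std::cout << e << "\n";
    return errors.snapshot().empty() ? 0 : 1;
}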
30 changes: 26 additions & 4 deletions entwine/builder/chunk-cache.hpp
@@ -11,6 +11,9 @@
#pragma once

#include <array>
#include <mutex>
#include <string>
#include <vector>

#include <entwine/builder/chunk.hpp>
#include <entwine/builder/hierarchy.hpp>
@@ -26,8 +29,12 @@ class Clipper;
class ReffedChunk
{
public:
ReffedChunk(const Metadata& m, const ChunkKey& ck, const Hierarchy& h)
: m_chunk(makeUnique<Chunk>(m, ck, h))
ReffedChunk(
const Metadata& m,
const Io& io,
const ChunkKey& ck,
const Hierarchy& h)
: m_chunk(makeUnique<Chunk>(m, io, ck, h))
{ }

SpinLock& spin() { return m_spin; }
@@ -48,10 +55,14 @@ class ReffedChunk

void reset() { m_chunk.reset(); }
bool exists() { return !!m_chunk; }
void assign(const Metadata& m, const ChunkKey& ck, const Hierarchy& h)
void assign(
const Metadata& m,
const Io& io,
const ChunkKey& ck,
const Hierarchy& h)
{
assert(!exists());
m_chunk = makeUnique<Chunk>(m, ck, h);
m_chunk = makeUnique<Chunk>(m, io, ck, h);
}

private:
@@ -66,6 +77,7 @@ class ChunkCache
ChunkCache(
const Endpoints& endpoints,
const Metadata& metadata,
const Io& io,
Hierarchy& hierarchy,
uint64_t threads);

@@ -85,6 +97,12 @@

static Info latchInfo();

std::vector<std::string> fatalErrors() const
{
std::lock_guard<std::mutex> lock(m_errorsMutex);
return m_errors;
}

private:
Chunk& addRef(const ChunkKey& ck, Clipper& clipper);
void maybeSerialize(const Dxyz& dxyz);
@@ -93,13 +111,17 @@

const Endpoints& m_endpoints;
const Metadata& m_metadata;
const Io& m_io;
Hierarchy& m_hierarchy;
Pool m_pool;
const uint64_t m_cacheSize = 64;

std::array<SpinLock, maxDepth> m_spins;
std::array<std::map<Xyz, ReffedChunk>, maxDepth> m_slices;

mutable std::mutex m_errorsMutex;
std::vector<std::string> m_errors;

SpinLock m_ownedSpin;
std::set<Dxyz> m_owned;
};
17 changes: 8 additions & 9 deletions entwine/builder/chunk.cpp
@@ -19,8 +19,13 @@
namespace entwine
{

Chunk::Chunk(const Metadata& m, const ChunkKey& ck, const Hierarchy& hierarchy)
Chunk::Chunk(
const Metadata& m,
const Io& io,
const ChunkKey& ck,
const Hierarchy& hierarchy)
: m_metadata(m)
, m_io(io)
, m_span(m_metadata.span)
, m_pointSize(getPointSize(m_metadata.absoluteSchema))
, m_chunkKey(ck)
@@ -173,13 +178,7 @@ uint64_t Chunk::save(const Endpoints& endpoints) const
const auto filename =
m_chunkKey.toString() + getPostfix(m_metadata, m_chunkKey.depth());

io::write(
m_metadata.dataType,
m_metadata,
endpoints,
filename,
table,
m_chunkKey.bounds());
m_io.write(filename, table, m_chunkKey.bounds());

return np;
}
@@ -208,7 +207,7 @@ void Chunk::load(
const auto filename =
m_chunkKey.toString() + getPostfix(m_metadata, m_chunkKey.depth());

io::read(m_metadata.dataType, m_metadata, endpoints, filename, table);
m_io.read(filename, table);
}

} // namespace entwine
8 changes: 7 additions & 1 deletion entwine/builder/chunk.hpp
@@ -16,6 +16,7 @@

#include <entwine/builder/hierarchy.hpp>
#include <entwine/builder/overflow.hpp>
#include <entwine/io/io.hpp>
#include <entwine/third/arbiter/arbiter.hpp>
#include <entwine/types/endpoints.hpp>
#include <entwine/types/vector-point-table.hpp>
@@ -36,7 +37,11 @@ struct VoxelTube
class Chunk
{
public:
Chunk(const Metadata& m, const ChunkKey& ck, const Hierarchy& hierarchy);
Chunk(
const Metadata& m,
const Io& io,
const ChunkKey& ck,
const Hierarchy& hierarchy);

bool insert(ChunkCache& cache, Clipper& clipper, Voxel& voxel, Key& key);
uint64_t save(const Endpoints& endpoints) const;
@@ -65,6 +70,7 @@ class Chunk
void doOverflow(ChunkCache& cache, Clipper& clipper, uint64_t dir);

const Metadata& m_metadata;
const Io& m_io;
const uint64_t m_span;
const uint64_t m_pointSize;
const ChunkKey m_chunkKey;
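
Taken together, the header changes make the IO path explicit: Builder owns the Io instance (a std::unique_ptr<Io> created from the metadata in its constructor), lends it by const reference to ChunkCache, and ChunkCache hands that same reference to every Chunk it constructs, where it backs save() and load(). An abbreviated sketch of that ownership chain (types reduced to stubs; the real declarations are in the headers above):

#include <memory>

class Io { public: virtual ~Io() = default; };  // stub
struct Metadata {};                              // stub
struct Endpoints {};                             // stub

struct Builder
{
    Endpoints endpoints;
    Metadata metadata;
    std::unique_ptr<Io> io;  // owner: created once, e.g. via Io::create(...)
};

class ChunkCache
{
public:
    explicit ChunkCache(const Io& io) : m_io(io) { }  // borrower
private:
    const Io& m_io;  // forwarded into each Chunk(m, m_io, ck, h)
};

class Chunk
{
public:
    explicit Chunk(const Io& io) : m_io(io) { }       // borrower
private:
    const Io& m_io;  // used by save()/load(): m_io.write(...) / m_io.read(...)
};

int main()
{
    Builder b;  // b.io would be populated by Builder's real constructor
    (void)b;
    return 0;
}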
