Remove TokenServer in preparation of making NAR copying O(1) memory
edolstra committed Jul 27, 2020
1 parent e5f6fc2 commit cbcf635
Showing 6 changed files with 1 addition and 146 deletions.
2 changes: 1 addition & 1 deletion src/hydra-queue-runner/Makefile.am
@@ -2,6 +2,6 @@ bin_PROGRAMS = hydra-queue-runner

hydra_queue_runner_SOURCES = hydra-queue-runner.cc queue-monitor.cc dispatcher.cc \
builder.cc build-result.cc build-remote.cc \
-build-result.hh counter.hh token-server.hh state.hh db.hh
+build-result.hh counter.hh state.hh db.hh
hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx
hydra_queue_runner_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations
20 changes: 0 additions & 20 deletions src/hydra-queue-runner/build-remote.cc
@@ -426,31 +426,11 @@ void State::buildRemote(ref<Store> destStore,
printMsg(lvlDebug, "copying outputs of ‘%s’ from ‘%s’ (%d bytes)",
localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize);

-/* Block until we have the required amount of memory
-available, which is twice the NAR size (namely the
-uncompressed and worst-case compressed NAR), plus 150
-MB for xz compression overhead. (The xz manpage claims
-~94 MiB, but that's not what I'm seeing.) */
-auto resStart = std::chrono::steady_clock::now();
-size_t compressionCost = totalNarSize + 150 * 1024 * 1024;
-result.tokens = std::make_unique<nix::TokenServer::Token>(memoryTokens.get(totalNarSize + compressionCost));
-auto resStop = std::chrono::steady_clock::now();
-
-auto resMs = std::chrono::duration_cast<std::chrono::milliseconds>(resStop - resStart).count();
-if (resMs >= 1000)
-printMsg(lvlError, "warning: had to wait %d ms for %d memory tokens for %s",
-resMs, totalNarSize, localStore->printStorePath(step->drvPath));
-
to << cmdExportPaths << 0;
writeStorePaths(*localStore, to, outputs);
to.flush();
destStore->importPaths(from, /* result.accessor, */ NoCheckSigs);

-/* Release the tokens pertaining to NAR
-compression. After this we only have the uncompressed
-NAR in memory. */
-result.tokens->give_back(compressionCost);
-
auto now2 = std::chrono::steady_clock::now();

result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
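
For reference, the reservation deleted above amounted to roughly twice the NAR size plus a fixed allowance for xz overhead. A minimal arithmetic sketch of that accounting, using a purely illustrative 1 GiB NAR (the figure is not from the commit):

    #include <cstddef>
    #include <cstdio>

    int main()
    {
        // Illustrative NAR size; in the removed code this was totalNarSize,
        // the total size of the outputs to be copied from the build machine.
        size_t totalNarSize = 1ULL << 30;                     // 1 GiB

        // The removed code budgeted a worst-case compressed copy of the NAR
        // plus ~150 MiB of xz compression overhead...
        size_t compressionCost = totalNarSize + 150 * 1024 * 1024;

        // ...and acquired tokens for the uncompressed NAR on top of that,
        // i.e. roughly 2 * totalNarSize + 150 MiB in total.
        size_t tokensRequested = totalNarSize + compressionCost;

        printf("tokens requested: %zu bytes (~%zu MiB)\n",
               tokensRequested, tokensRequested >> 20);
        return 0;
    }

After importPaths() finished, only the compressionCost portion was given back, leaving the uncompressed NAR's worth of tokens held until the step completed.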
3 changes: 0 additions & 3 deletions src/hydra-queue-runner/builder.cc
@@ -204,8 +204,6 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
try {
/* FIXME: referring builds may have conflicting timeouts. */
buildRemote(destStore, machine, step, maxSilentTime, buildTimeout, repeats, result, activeStep, updateStep);
-} catch (NoTokens & e) {
-result.stepStatus = bsNarSizeLimitExceeded;
} catch (Error & e) {
if (activeStep->state_.lock()->cancelled) {
printInfo("marking step %d of build %d as cancelled", stepNr, buildId);
@@ -224,7 +222,6 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
}

result.accessor = 0;
-result.tokens = 0;
}

time_t stepStopTime = time(0);
4 changes: 0 additions & 4 deletions src/hydra-queue-runner/hydra-queue-runner.cc
@@ -49,14 +49,11 @@ State::State()
: config(std::make_unique<HydraConfig>())
, maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0))
, dbPool(config->getIntOption("max_db_connections", 128))
-, memoryTokens(config->getIntOption("nar_buffer_size", getMemSize() / 2))
, maxOutputSize(config->getIntOption("max_output_size", 2ULL << 30))
, maxLogSize(config->getIntOption("max_log_size", 64ULL << 20))
, uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false))
, rootsDir(config->getStrOption("gc_roots_dir", fmt("%s/gcroots/per-user/%s/hydra-roots", settings.nixStateDir, getEnvOrDie("LOGNAME"))))
{
debug("using %d bytes for the NAR buffer", memoryTokens.capacity());

hydraData = getEnvOrDie("HYDRA_DATA");

logDir = canonPath(hydraData + "/build-logs");
@@ -544,7 +541,6 @@ void State::dumpStatus(Connection & conn)
root.attr("dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups);
root.attr("nrDbConnections", dbPool.count());
root.attr("nrActiveDbUpdates", nrActiveDbUpdates);
root.attr("memoryTokensInUse", memoryTokens.currentUse());

{
auto nested = root.object("machines");
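
For context, the initializer removed above sized the token pool from the nar_buffer_size option, defaulting to half of physical memory, and dumpStatus exposed the pool's current usage as memoryTokensInUse. A small sketch of that default sizing, with a stand-in for getMemSize() and a purely illustrative 32 GiB machine:

    #include <cstddef>
    #include <cstdio>

    // Stand-in for the getMemSize() helper used in the removed initializer;
    // the 32 GiB figure is illustrative only.
    static size_t getMemSize() { return 32ULL << 30; }

    int main()
    {
        // Default capacity of the removed NAR buffer: half of physical memory,
        // unless overridden by nar_buffer_size in the Hydra configuration.
        size_t narBufferSize = getMemSize() / 2;
        printf("NAR buffer capacity: %zu GiB\n", narBufferSize >> 30);
        return 0;
    }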
9 changes: 0 additions & 9 deletions src/hydra-queue-runner/state.hh
@@ -8,7 +8,6 @@
#include <queue>

#include "db.hh"
#include "token-server.hh"

#include "parsed-derivations.hh"
#include "pathlocks.hh"
@@ -65,7 +64,6 @@ struct RemoteResult
time_t startTime = 0, stopTime = 0;
unsigned int overhead = 0;
nix::Path logFile;
-std::unique_ptr<nix::TokenServer::Token> tokens;
std::shared_ptr<nix::FSAccessor> accessor;

BuildStatus buildStatus() const
@@ -410,13 +408,6 @@
std::shared_ptr<nix::Store> localStore;
std::shared_ptr<nix::Store> _destStore;

-/* Token server to prevent threads from allocating too many big
-strings concurrently while importing NARs from the build
-machines. When a thread imports a NAR of size N, it will first
-acquire N memory tokens, causing it to block until that many
-tokens are available. */
-nix::TokenServer memoryTokens;
-
size_t maxOutputSize;
size_t maxLogSize;

109 changes: 0 additions & 109 deletions src/hydra-queue-runner/token-server.hh

This file was deleted.
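
The 109 deleted lines of token-server.hh are not reproduced on this page. As a rough illustration of the mechanism described by the comment removed from state.hh above (a fixed pool of memory tokens that importing threads acquire, blocking until enough tokens are free, and later release), a counting-semaphore-style token server might look like the sketch below. All names and details are illustrative assumptions, not the deleted implementation:

    #include <condition_variable>
    #include <cstddef>
    #include <mutex>

    /* Illustrative sketch only: a pool of `capacity` tokens. get(n) blocks
       until n tokens are free; the returned RAII Token gives them back on
       destruction, and give_back() can return part of them early, as the
       removed build-remote.cc code did once compression was done. */
    class TokenServer
    {
        size_t capacity;
        size_t inUse = 0;
        std::mutex mutex;
        std::condition_variable wakeup;

    public:
        TokenServer(size_t capacity) : capacity(capacity) { }

        class Token
        {
            TokenServer & ts;
            size_t held;

        public:
            Token(TokenServer & ts, size_t held) : ts(ts), held(held) { }
            Token(const Token &) = delete;
            ~Token() { give_back(held); }

            void give_back(size_t n)
            {
                if (n == 0) return;
                std::lock_guard<std::mutex> lock(ts.mutex);
                ts.inUse -= n;
                held -= n;
                ts.wakeup.notify_all();
            }
        };

        Token get(size_t n)
        {
            std::unique_lock<std::mutex> lock(mutex);
            wakeup.wait(lock, [&] { return inUse + n <= capacity; });
            inUse += n;
            return Token(*this, n);   // relies on C++17 guaranteed copy elision
        }

        size_t currentUse()
        {
            std::lock_guard<std::mutex> lock(mutex);
            return inUse;
        }
    };

Under this sketch, a thread importing a NAR of size N would hold the Token returned by get(N) for the duration of the import, which matches the contract described in the removed state.hh comment.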
