From 729280432f9ef5cefd68933df303c201e411bbf0 Mon Sep 17 00:00:00 2001 From: be-marc Date: Fri, 31 Oct 2025 09:03:06 +0100 Subject: [PATCH 1/5] feat: add hyperqueue cluster functions --- .Rbuildignore | 1 + .lintr | 19 +++++ DESCRIPTION | 2 +- NAMESPACE | 1 + R/clusterFunctionsHyperQueue.R | 80 +++++++++++++++++++ man/makeClusterFunctions.Rd | 1 + man/makeClusterFunctionsDocker.Rd | 1 + man/makeClusterFunctionsHyperQueue.Rd | 45 +++++++++++ man/makeClusterFunctionsInteractive.Rd | 1 + man/makeClusterFunctionsLSF.Rd | 1 + man/makeClusterFunctionsMulticore.Rd | 1 + man/makeClusterFunctionsOpenLava.Rd | 1 + man/makeClusterFunctionsSGE.Rd | 1 + man/makeClusterFunctionsSSH.Rd | 1 + man/makeClusterFunctionsSlurm.Rd | 1 + man/makeClusterFunctionsSocket.Rd | 1 + man/makeClusterFunctionsTORQUE.Rd | 1 + .../testthat/test_ClusterFunctionHyperQueue.R | 34 ++++++++ 18 files changed, 192 insertions(+), 1 deletion(-) create mode 100644 .lintr create mode 100644 R/clusterFunctionsHyperQueue.R create mode 100644 man/makeClusterFunctionsHyperQueue.Rd create mode 100644 tests/testthat/test_ClusterFunctionHyperQueue.R diff --git a/.Rbuildignore b/.Rbuildignore index 7ab339c5..6b2327f3 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -12,3 +12,4 @@ ^_pkgdown\.yml$ ^README.RMD$ ^.github$ +registry/ diff --git a/.lintr b/.lintr new file mode 100644 index 00000000..b3e9a225 --- /dev/null +++ b/.lintr @@ -0,0 +1,19 @@ +linters: linters_with_defaults( + # lintr defaults: https://lintr.r-lib.org/reference/default_linters.html + # the following setup changes/removes certain linters + assignment_linter = NULL, # do not force using <- for assignments + object_name_linter(c("snake_case", "CamelCase")), # only allow snake case and camel case object names + commented_code_linter = NULL, # allow code in comments + line_length_linter(200L), + object_length_linter(40L), + undesirable_function_linter(fun = c( + # base messaging + cat = "use catf()", + stop = "use stopf()", + warning = "use warningf()", + message = "use messagef()", + # perf + ifelse = "use fifelse()", + rank = "use frank()" + )) + ) diff --git a/DESCRIPTION b/DESCRIPTION index 63043bd7..05b5253b 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -66,4 +66,4 @@ ByteCompile: yes Encoding: UTF-8 NeedsCompilation: yes Roxygen: list(r6 = FALSE) -RoxygenNote: 7.3.2 +RoxygenNote: 7.3.3 diff --git a/NAMESPACE b/NAMESPACE index e77c7b0a..0d4f87da 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -83,6 +83,7 @@ export(loadResult) export(lpt) export(makeClusterFunctions) export(makeClusterFunctionsDocker) +export(makeClusterFunctionsHyperQueue) export(makeClusterFunctionsInteractive) export(makeClusterFunctionsLSF) export(makeClusterFunctionsMulticore) diff --git a/R/clusterFunctionsHyperQueue.R b/R/clusterFunctionsHyperQueue.R new file mode 100644 index 00000000..2ad54047 --- /dev/null +++ b/R/clusterFunctionsHyperQueue.R @@ -0,0 +1,80 @@ +#' @title ClusterFunctions for HyperQueue +#' +#' @description +#' Cluster functions for HyperQueue (\url{https://it4innovations.github.io/hyperqueue/stable/}). +#' +#' Jobs are submitted via the HyperQueue CLI using \code{hq submit} and executed by calling \code{Rscript -e "batchtools::doJobCollection(...)"}. +#' The job name is set to the job hash and logs are handled internally by batchtools. +#' Listing jobs uses \code{hq job list} and cancelling jobs uses \code{hq job cancel}. +#' A running HyperQueue server and workers are required. +#' +#' @inheritParams makeClusterFunctions +#' @return [ClusterFunctions]. 
+#' @family ClusterFunctions +#' @export +makeClusterFunctionsHyperQueue = function(scheduler.latency = 1, fs.latency = 65) { + submitJob = function(reg, jc) { + assertRegistry(reg, writeable = TRUE) + assertClass(jc, "JobCollection") + + args = c( + "submit", + sprintf("--name=%s", jc$job.hash), + "--stdout=none", + "--stderr=none", + "--", + "Rscript", "-e", + shQuote(sprintf("batchtools::doJobCollection('%s', '%s')", jc$uri, jc$log.file)) + ) + res = runOSCommand("hq", args) + if (res$exit.code > 0L) { + return(cfHandleUnknownSubmitError("hq", res$exit.code, res$output)) + } + batch_ids = sub(".*job ID: ([0-9]+).*", "\\1", res$output) + makeSubmitJobResult(status = 0L, batch.id = batch_ids) + } + + killJob = function(reg, batch.id) { + assertRegistry(reg, writeable = TRUE) + assertString(batch.id) + args = c("job", "cancel", batch.id) + res = runOSCommand("hq", args) + if (res$exit.code > 0L) { + OSError("Killing of job failed", res) + } + makeSubmitJobResult(status = 0L, batch.id = batch.id) + } + + + listJobsQueued = function(reg) { + assertRegistry(reg, writeable = FALSE) + args = c("job", "list", "--filter", "waiting", "--output-mode", "json") + res = runOSCommand("hq", args) + if (res$exit.code > 0L) { + OSError("Listing of jobs failed", res) + } + jobs = jsonlite::fromJSON(res$output) + as.character(jobs$id) + } + + listJobsRunning = function(reg) { + assertRegistry(reg, writeable = FALSE) + args = c("job", "list", "--filter", "running", "--output-mode", "json") + res = runOSCommand("hq", args) + if (res$exit.code > 0L) { + OSError("Listing of jobs failed", res) + } + jobs = jsonlite::fromJSON(res$output) + as.character(jobs$id) + } + + makeClusterFunctions( + name = "HyperQueue", + submitJob = submitJob, + killJob = killJob, + listJobsRunning = listJobsRunning, + listJobsQueued = listJobsQueued, + store.job.collection = TRUE, + scheduler.latency = scheduler.latency, + fs.latency = fs.latency) +} diff --git a/man/makeClusterFunctions.Rd b/man/makeClusterFunctions.Rd index 5df5dff2..4fd9334a 100644 --- a/man/makeClusterFunctions.Rd +++ b/man/makeClusterFunctions.Rd @@ -83,6 +83,7 @@ with the package. \seealso{ Other ClusterFunctions: \code{\link{makeClusterFunctionsDocker}()}, +\code{\link{makeClusterFunctionsHyperQueue}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, diff --git a/man/makeClusterFunctionsDocker.Rd b/man/makeClusterFunctionsDocker.Rd index 35e70811..a46367eb 100644 --- a/man/makeClusterFunctionsDocker.Rd +++ b/man/makeClusterFunctionsDocker.Rd @@ -61,6 +61,7 @@ containers manually (or usa a cron job). 
\seealso{ Other ClusterFunctions: \code{\link{makeClusterFunctions}()}, +\code{\link{makeClusterFunctionsHyperQueue}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, diff --git a/man/makeClusterFunctionsHyperQueue.Rd b/man/makeClusterFunctionsHyperQueue.Rd new file mode 100644 index 00000000..c9dc83cf --- /dev/null +++ b/man/makeClusterFunctionsHyperQueue.Rd @@ -0,0 +1,45 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusterFunctionsHyperQueue.R +\name{makeClusterFunctionsHyperQueue} +\alias{makeClusterFunctionsHyperQueue} +\title{ClusterFunctions for HyperQueue} +\usage{ +makeClusterFunctionsHyperQueue(scheduler.latency = 1, fs.latency = 65) +} +\arguments{ +\item{scheduler.latency}{[\code{numeric(1)}]\cr +Time to sleep after important interactions with the scheduler to ensure a sane state. +Currently only triggered after calling \code{\link{submitJobs}}.} + +\item{fs.latency}{[\code{numeric(1)}]\cr +Expected maximum latency of the file system, in seconds. +Set to a positive number for network file systems like NFS which enables more robust (but also more expensive) mechanisms to +access files and directories. +Usually safe to set to \code{0} to disable the heuristic, e.g. if you are working on a local file system.} +} +\value{ +[ClusterFunctions]. +} +\description{ +Cluster functions for HyperQueue (\url{https://it4innovations.github.io/hyperqueue/stable/}). + +Jobs are submitted via the HyperQueue CLI using \code{hq submit} and executed by calling \code{Rscript -e "batchtools::doJobCollection(...)"}. +The job name is set to the job hash and logs are handled internally by batchtools. +Listing jobs uses \code{hq job list} and cancelling jobs uses \code{hq job cancel}. +A running HyperQueue server and workers are required. +} +\seealso{ +Other ClusterFunctions: +\code{\link{makeClusterFunctions}()}, +\code{\link{makeClusterFunctionsDocker}()}, +\code{\link{makeClusterFunctionsInteractive}()}, +\code{\link{makeClusterFunctionsLSF}()}, +\code{\link{makeClusterFunctionsMulticore}()}, +\code{\link{makeClusterFunctionsOpenLava}()}, +\code{\link{makeClusterFunctionsSGE}()}, +\code{\link{makeClusterFunctionsSSH}()}, +\code{\link{makeClusterFunctionsSlurm}()}, +\code{\link{makeClusterFunctionsSocket}()}, +\code{\link{makeClusterFunctionsTORQUE}()} +} +\concept{ClusterFunctions} diff --git a/man/makeClusterFunctionsInteractive.Rd b/man/makeClusterFunctionsInteractive.Rd index 275e45e8..7a8ecf9d 100644 --- a/man/makeClusterFunctionsInteractive.Rd +++ b/man/makeClusterFunctionsInteractive.Rd @@ -42,6 +42,7 @@ and \code{killJob} is not implemented for the same reasons. Other ClusterFunctions: \code{\link{makeClusterFunctions}()}, \code{\link{makeClusterFunctionsDocker}()}, +\code{\link{makeClusterFunctionsHyperQueue}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, \code{\link{makeClusterFunctionsOpenLava}()}, diff --git a/man/makeClusterFunctionsLSF.Rd b/man/makeClusterFunctionsLSF.Rd index 577b0bf5..75289bbb 100644 --- a/man/makeClusterFunctionsLSF.Rd +++ b/man/makeClusterFunctionsLSF.Rd @@ -56,6 +56,7 @@ Array jobs are currently not supported. 
Other ClusterFunctions: \code{\link{makeClusterFunctions}()}, \code{\link{makeClusterFunctionsDocker}()}, +\code{\link{makeClusterFunctionsHyperQueue}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsMulticore}()}, \code{\link{makeClusterFunctionsOpenLava}()}, diff --git a/man/makeClusterFunctionsMulticore.Rd b/man/makeClusterFunctionsMulticore.Rd index 8101b27a..60b27e65 100644 --- a/man/makeClusterFunctionsMulticore.Rd +++ b/man/makeClusterFunctionsMulticore.Rd @@ -29,6 +29,7 @@ Does not work on Windows, use \code{\link{makeClusterFunctionsSocket}} instead. Other ClusterFunctions: \code{\link{makeClusterFunctions}()}, \code{\link{makeClusterFunctionsDocker}()}, +\code{\link{makeClusterFunctionsHyperQueue}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsOpenLava}()}, diff --git a/man/makeClusterFunctionsOpenLava.Rd b/man/makeClusterFunctionsOpenLava.Rd index c08b6e36..90f1d33c 100644 --- a/man/makeClusterFunctionsOpenLava.Rd +++ b/man/makeClusterFunctionsOpenLava.Rd @@ -56,6 +56,7 @@ Array jobs are currently not supported. Other ClusterFunctions: \code{\link{makeClusterFunctions}()}, \code{\link{makeClusterFunctionsDocker}()}, +\code{\link{makeClusterFunctionsHyperQueue}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, diff --git a/man/makeClusterFunctionsSGE.Rd b/man/makeClusterFunctionsSGE.Rd index ddf8f925..0863eb72 100644 --- a/man/makeClusterFunctionsSGE.Rd +++ b/man/makeClusterFunctionsSGE.Rd @@ -66,6 +66,7 @@ Array jobs are currently not supported. Other ClusterFunctions: \code{\link{makeClusterFunctions}()}, \code{\link{makeClusterFunctionsDocker}()}, +\code{\link{makeClusterFunctionsHyperQueue}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, diff --git a/man/makeClusterFunctionsSSH.Rd b/man/makeClusterFunctionsSSH.Rd index ef8dfe87..1d13a7a2 100644 --- a/man/makeClusterFunctionsSSH.Rd +++ b/man/makeClusterFunctionsSSH.Rd @@ -40,6 +40,7 @@ makeClusterFunctionsSSH(list(Worker$new("localhost", ncpus = 2))) Other ClusterFunctions: \code{\link{makeClusterFunctions}()}, \code{\link{makeClusterFunctionsDocker}()}, +\code{\link{makeClusterFunctionsHyperQueue}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, diff --git a/man/makeClusterFunctionsSlurm.Rd b/man/makeClusterFunctionsSlurm.Rd index 9448b474..6e3cd88b 100644 --- a/man/makeClusterFunctionsSlurm.Rd +++ b/man/makeClusterFunctionsSlurm.Rd @@ -69,6 +69,7 @@ otherwise the commands for listing and killing jobs will not work. Other ClusterFunctions: \code{\link{makeClusterFunctions}()}, \code{\link{makeClusterFunctionsDocker}()}, +\code{\link{makeClusterFunctionsHyperQueue}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, diff --git a/man/makeClusterFunctionsSocket.Rd b/man/makeClusterFunctionsSocket.Rd index 88f98011..3d3dd3bd 100644 --- a/man/makeClusterFunctionsSocket.Rd +++ b/man/makeClusterFunctionsSocket.Rd @@ -28,6 +28,7 @@ Jobs are spawned asynchronously using the package \pkg{snow}. 
Other ClusterFunctions: \code{\link{makeClusterFunctions}()}, \code{\link{makeClusterFunctionsDocker}()}, +\code{\link{makeClusterFunctionsHyperQueue}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, diff --git a/man/makeClusterFunctionsTORQUE.Rd b/man/makeClusterFunctionsTORQUE.Rd index da4b9a9c..e5bfe8a5 100644 --- a/man/makeClusterFunctionsTORQUE.Rd +++ b/man/makeClusterFunctionsTORQUE.Rd @@ -52,6 +52,7 @@ allocations. Other ClusterFunctions: \code{\link{makeClusterFunctions}()}, \code{\link{makeClusterFunctionsDocker}()}, +\code{\link{makeClusterFunctionsHyperQueue}()}, \code{\link{makeClusterFunctionsInteractive}()}, \code{\link{makeClusterFunctionsLSF}()}, \code{\link{makeClusterFunctionsMulticore}()}, diff --git a/tests/testthat/test_ClusterFunctionHyperQueue.R b/tests/testthat/test_ClusterFunctionHyperQueue.R new file mode 100644 index 00000000..6bc61c9a --- /dev/null +++ b/tests/testthat/test_ClusterFunctionHyperQueue.R @@ -0,0 +1,34 @@ +test_that("clusterFunctionsHyperQueue", { + skip_if(TRUE) + skip_on_ci() + skip_on_cran() + + reg = makeTestRegistry() + reg$cluster.functions = makeClusterFunctionsHyperQueue() + saveRegistry(reg) + fun = function(x) { + Sys.sleep(5) + TRUE + } + ids = batchMap(fun, x = c(5, 5), reg = reg) + submitJobs(1:2, reg = reg) + waitForJobs(ids = ids, reg = reg) + + expect_data_table(findJobs(ids = ids, reg = reg), nrow = 2) + expect_data_table(findRunning(reg = reg), nrow = 0L) +}) + +test_that("clusterFunctionsHyperQueue: killJob", { + skip_if(TRUE) + skip_on_ci() + skip_on_cran() + + reg = makeTestRegistry() + reg$cluster.functions = makeClusterFunctionsHyperQueue() + saveRegistry(reg) + fun = function(x) { Sys.sleep(5); TRUE } + ids = batchMap(fun, x = c(5, 5), reg = reg) + submitJobs(1:2, reg = reg) + Sys.sleep(1) + expect_data_table(killJobs(1, reg = reg), nrow = 1) +}) From 94ed5ca06921620f5808dbe2d39cfa8216cfe90a Mon Sep 17 00:00:00 2001 From: be-marc Date: Sat, 1 Nov 2025 20:40:13 +0100 Subject: [PATCH 2/5] cpu --- R/clusterFunctionsHyperQueue.R | 3 +++ .../testthat/test_ClusterFunctionHyperQueue.R | 20 +++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/R/clusterFunctionsHyperQueue.R b/R/clusterFunctionsHyperQueue.R index 2ad54047..6c723cea 100644 --- a/R/clusterFunctionsHyperQueue.R +++ b/R/clusterFunctionsHyperQueue.R @@ -17,11 +17,14 @@ makeClusterFunctionsHyperQueue = function(scheduler.latency = 1, fs.latency = 65 assertRegistry(reg, writeable = TRUE) assertClass(jc, "JobCollection") + ncpus = jc$resources$ncpus %??% 1L + args = c( "submit", sprintf("--name=%s", jc$job.hash), "--stdout=none", "--stderr=none", + sprintf("--cpus=%i", ncpus), "--", "Rscript", "-e", shQuote(sprintf("batchtools::doJobCollection('%s', '%s')", jc$uri, jc$log.file)) diff --git a/tests/testthat/test_ClusterFunctionHyperQueue.R b/tests/testthat/test_ClusterFunctionHyperQueue.R index 6bc61c9a..2929e0e9 100644 --- a/tests/testthat/test_ClusterFunctionHyperQueue.R +++ b/tests/testthat/test_ClusterFunctionHyperQueue.R @@ -32,3 +32,23 @@ test_that("clusterFunctionsHyperQueue: killJob", { Sys.sleep(1) expect_data_table(killJobs(1, reg = reg), nrow = 1) }) + +test_that("clusterFunctionsHyperQueue with resources", { + skip_if(TRUE) + skip_on_ci() + skip_on_cran() + + reg = makeTestRegistry() + reg$cluster.functions = makeClusterFunctionsHyperQueue() + saveRegistry(reg) + fun = function(x) { + Sys.sleep(5) + TRUE + } + ids = batchMap(fun, x = c(5, 5), reg = reg) + 
submitJobs(1:2, reg = reg, resources = list(ncpus = 2)) + waitForJobs(ids = ids, reg = reg) + + expect_data_table(findJobs(ids = ids, reg = reg), nrow = 2) + expect_data_table(findRunning(reg = reg), nrow = 0L) +}) From 51065e07f9e3c4e396ad0cf4f584c9c268a4da7e Mon Sep 17 00:00:00 2001 From: be-marc Date: Sun, 2 Nov 2025 14:52:17 +0100 Subject: [PATCH 3/5] memory and walltime --- R/clusterFunctionsHyperQueue.R | 9 +++++++-- tests/testthat/test_ClusterFunctionHyperQueue.R | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/R/clusterFunctionsHyperQueue.R b/R/clusterFunctionsHyperQueue.R index 6c723cea..e5fa8169 100644 --- a/R/clusterFunctionsHyperQueue.R +++ b/R/clusterFunctionsHyperQueue.R @@ -17,14 +17,19 @@ makeClusterFunctionsHyperQueue = function(scheduler.latency = 1, fs.latency = 65 assertRegistry(reg, writeable = TRUE) assertClass(jc, "JobCollection") - ncpus = jc$resources$ncpus %??% 1L + ncpus = if (!is.null(jc$resources$ncpus)) sprintf("--cpus=%i", jc$resources$ncpus) + memory = if (!is.null(jc$resources$memory)) sprintf("--resource mem=%iMiB", jc$resources$memory) + walltime = if (!is.null(jc$resources$walltime)) sprintf("--time-limit=%is", jc$resources$walltime) args = c( "submit", sprintf("--name=%s", jc$job.hash), + # hyperqueue cannot write stdout and stderr to the same file "--stdout=none", "--stderr=none", - sprintf("--cpus=%i", ncpus), + ncpus, + memory, + walltime, "--", "Rscript", "-e", shQuote(sprintf("batchtools::doJobCollection('%s', '%s')", jc$uri, jc$log.file)) diff --git a/tests/testthat/test_ClusterFunctionHyperQueue.R b/tests/testthat/test_ClusterFunctionHyperQueue.R index 2929e0e9..c0a13509 100644 --- a/tests/testthat/test_ClusterFunctionHyperQueue.R +++ b/tests/testthat/test_ClusterFunctionHyperQueue.R @@ -46,7 +46,7 @@ test_that("clusterFunctionsHyperQueue with resources", { TRUE } ids = batchMap(fun, x = c(5, 5), reg = reg) - submitJobs(1:2, reg = reg, resources = list(ncpus = 2)) + submitJobs(1:2, reg = reg, resources = list(ncpus = 2, walltime = 10, memory = 5)) waitForJobs(ids = ids, reg = reg) expect_data_table(findJobs(ids = ids, reg = reg), nrow = 2) From 1a0cf333b78b68b0755b49252f7a9ce2ebec25b1 Mon Sep 17 00:00:00 2001 From: be-marc Date: Sun, 2 Nov 2025 14:57:08 +0100 Subject: [PATCH 4/5] ... 
--- .Rbuildignore | 1 + DESCRIPTION | 1 + R/clusterFunctionsHyperQueue.R | 2 ++ 3 files changed, 4 insertions(+) diff --git a/.Rbuildignore b/.Rbuildignore index 6b2327f3..1e3ae1ff 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -13,3 +13,4 @@ ^README.RMD$ ^.github$ registry/ +^\.lintr$ diff --git a/DESCRIPTION b/DESCRIPTION index 05b5253b..b07baafb 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -52,6 +52,7 @@ Suggests: foreach, future, future.batchtools, + jsonlite, knitr, parallelMap, ranger, diff --git a/R/clusterFunctionsHyperQueue.R b/R/clusterFunctionsHyperQueue.R index e5fa8169..4bd119c2 100644 --- a/R/clusterFunctionsHyperQueue.R +++ b/R/clusterFunctionsHyperQueue.R @@ -55,6 +55,7 @@ makeClusterFunctionsHyperQueue = function(scheduler.latency = 1, fs.latency = 65 listJobsQueued = function(reg) { + requireNamespace("jsonlite") assertRegistry(reg, writeable = FALSE) args = c("job", "list", "--filter", "waiting", "--output-mode", "json") res = runOSCommand("hq", args) @@ -66,6 +67,7 @@ makeClusterFunctionsHyperQueue = function(scheduler.latency = 1, fs.latency = 65 } listJobsRunning = function(reg) { + requireNamespace("jsonlite") assertRegistry(reg, writeable = FALSE) args = c("job", "list", "--filter", "running", "--output-mode", "json") res = runOSCommand("hq", args) From 2f4c75a4077c660ae668c633e0344382559c2151 Mon Sep 17 00:00:00 2001 From: be-marc Date: Fri, 21 Nov 2025 12:31:01 +0100 Subject: [PATCH 5/5] ... --- R/clusterFunctionsHyperQueue.R | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/R/clusterFunctionsHyperQueue.R b/R/clusterFunctionsHyperQueue.R index 4bd119c2..eb93ceb7 100644 --- a/R/clusterFunctionsHyperQueue.R +++ b/R/clusterFunctionsHyperQueue.R @@ -8,6 +8,7 @@ #' Listing jobs uses \code{hq job list} and cancelling jobs uses \code{hq job cancel}. #' A running HyperQueue server and workers are required. #' +#' #' @inheritParams makeClusterFunctions #' @return [ClusterFunctions]. #' @family ClusterFunctions @@ -19,7 +20,9 @@ makeClusterFunctionsHyperQueue = function(scheduler.latency = 1, fs.latency = 65 ncpus = if (!is.null(jc$resources$ncpus)) sprintf("--cpus=%i", jc$resources$ncpus) memory = if (!is.null(jc$resources$memory)) sprintf("--resource mem=%iMiB", jc$resources$memory) - walltime = if (!is.null(jc$resources$walltime)) sprintf("--time-limit=%is", jc$resources$walltime) + # time-limit is the maximum time the job can run, time-request is the minimum remaining lifetime a worker must have + walltime = if (!is.null(jc$resources$walltime)) sprintf("--time-limit=%is --time-request=%is", jc$resources$walltime, jc$resources$walltime) + args = c( "submit",
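
Usage sketch (illustrative only, not part of the patch series): assuming a running HyperQueue server with at least one registered worker, the hq CLI on the PATH, and the jsonlite package installed, the new backend can be wired into a registry the same way as the cluster functions that already ship with batchtools. The resource names below follow the mapping introduced in these patches (ncpus -> --cpus, memory in MiB -> --resource mem=..., walltime in seconds -> --time-limit / --time-request); everything else is standard batchtools API.

    # sketch: use the HyperQueue backend added in this PR
    library(batchtools)

    reg = makeRegistry(file.dir = NA)               # temporary test registry
    reg$cluster.functions = makeClusterFunctionsHyperQueue()
    saveRegistry(reg)

    fun = function(x) { Sys.sleep(1); x^2 }
    ids = batchMap(fun, x = 1:4, reg = reg)

    # resources are translated to hq submit flags by the new cluster functions
    submitJobs(ids, resources = list(ncpus = 1, memory = 256, walltime = 60), reg = reg)
    waitForJobs(ids = ids, reg = reg)
    reduceResultsList(ids = ids, reg = reg)

Alternatively, the same call to makeClusterFunctionsHyperQueue() can be placed in a batchtools.conf.R so that every new registry picks up the HyperQueue backend automatically.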