Commit
Fix multiple input sets support in debug mode
* Add error for variable batch_size

Signed-off-by: ksztenderski <ksztenderski@nvidia.com>
ksztenderski committed Mar 30, 2022
1 parent 2da6e95 commit 92bfc65
Showing 5 changed files with 226 additions and 100 deletions.
49 changes: 32 additions & 17 deletions dali/pipeline/pipeline_debug.h
@@ -42,7 +42,20 @@ class DLL_PUBLIC PipelineDebug {
}
}

DLL_PUBLIC void AddOperator(OpSpec &spec, int logical_id);
DLL_PUBLIC void AddOperator(OpSpec &spec, int logical_id) {
FillOpSpec(spec);
AddOperatorImpl(spec, logical_id);
}

/**
* @brief Adds many identical operators (used for input sets).
*/
DLL_PUBLIC void AddMultipleOperators(OpSpec &spec, std::vector<int> &logical_ids) {
FillOpSpec(spec);
for (int logical_id : logical_ids) {
AddOperatorImpl(spec, logical_id);
}
}

template <typename InBackend, typename OutBackend>
DLL_PUBLIC std::vector<std::shared_ptr<TensorList<OutBackend>>> RunOperator(
@@ -52,6 +65,24 @@ class DLL_PUBLIC PipelineDebug {
}

private:
void FillOpSpec(OpSpec &spec) {
spec.AddArg("max_batch_size", max_batch_size_);
spec.AddArg("device_id", device_id_);
spec.AddArg("num_threads", num_threads_);
}

void AddOperatorImpl(OpSpec &spec, int logical_id) {
std::string device = spec.GetArgument<std::string>("device");

if (device == "gpu") {
gpu_operators_.insert({logical_id, EagerOperator<GPUBackend>(spec)});
} else if (device == "cpu") {
cpu_operators_.insert({logical_id, EagerOperator<CPUBackend>(spec)});
} else if (device == "mixed") {
mixed_operators_.insert({logical_id, EagerOperator<MixedBackend>(spec)});
}
}

int max_batch_size_;
int device_id_;
int num_threads_;
@@ -62,22 +93,6 @@ class DLL_PUBLIC PipelineDebug {
std::unordered_map<int, EagerOperator<MixedBackend>> mixed_operators_;
};

void PipelineDebug::AddOperator(OpSpec &spec, int logical_id) {
spec.AddArg("max_batch_size", max_batch_size_);
spec.AddArg("device_id", device_id_);
spec.AddArg("num_threads", num_threads_);

std::string device = spec.GetArgument<std::string>("device");

if (device == "gpu") {
gpu_operators_.insert({logical_id, EagerOperator<GPUBackend>(spec)});
} else if (device == "cpu") {
cpu_operators_.insert({logical_id, EagerOperator<CPUBackend>(spec)});
} else if (device == "mixed") {
mixed_operators_.insert({logical_id, EagerOperator<MixedBackend>(spec)});
}
}

template <>
std::vector<std::shared_ptr<TensorList<CPUBackend>>> PipelineDebug::RunOperator(
int logical_id, const std::vector<std::shared_ptr<TensorList<CPUBackend>>> &inputs,
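A minimal registration sketch for the new entry point (an illustration, not part of this commit). Only AddMultipleOperators, AddArg, and the four-argument PipelineDebug constructor visible in this diff are taken as given; the dali namespace, the include path, the "Resize" operator, and its resize_x argument are assumptions made for the example.

// Illustrative sketch only. Assumes OpSpec is constructible from a schema
// name and that a "Resize" operator with a "resize_x" argument exists.
#include <string>
#include <vector>

#include "dali/pipeline/pipeline_debug.h"

void RegisterInputSet() {
  // Argument order mirrors the py::init lambda in backend_impl.cc below:
  // (batch_size, num_threads, device_id, set_affinity).
  dali::PipelineDebug pipe(8, 4, 0, false);

  // One spec describes the operator. FillOpSpec adds max_batch_size,
  // device_id and num_threads; AddOperatorImpl then routes the spec to the
  // cpu/gpu/mixed map based on its "device" argument.
  dali::OpSpec spec("Resize");                // hypothetical operator name
  spec.AddArg("device", std::string("cpu"));  // lands in cpu_operators_
  spec.AddArg("resize_x", 224.0f);            // hypothetical operator argument

  // An input set of three tensor lists maps to three identical eager
  // operators, one per logical id; each is later run via RunOperator.
  std::vector<int> logical_ids = {0, 1, 2};
  pipe.AddMultipleOperators(spec, logical_ids);
}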
1 change: 1 addition & 0 deletions dali/python/backend_impl.cc
@@ -624,6 +624,7 @@ void ExposePipelineDebug(py::module &m) {
return std::make_unique<PipelineDebug>(batch_size, num_threads, device_id, set_affinity);
}))
.def("AddOperator", &PipelineDebug::AddOperator)
.def("AddMultipleOperators", &PipelineDebug::AddMultipleOperators)
.def("RunOperatorCPU", &PipelineDebug::RunOperator<CPUBackend, CPUBackend>)
.def("RunOperatorGPU", &PipelineDebug::RunOperator<GPUBackend, GPUBackend>)
.def("RunOperatorMixed", &PipelineDebug::RunOperator<CPUBackend, GPUBackend>);
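The three RunOperator bindings above follow the usual pybind11 pattern for a member function template: pybind11 cannot bind a template as such, so each instantiation is exposed under its own Python-visible name. A self-contained toy illustration of that pattern follows; the class, backend, and method names are stand-ins, not DALI code.

// Toy pybind11 module showing one templated member bound under several names.
#include <pybind11/pybind11.h>

namespace py = pybind11;

struct CPU {};
struct GPU {};

struct Runner {
  template <typename In, typename Out>
  int Run(int logical_id) { return logical_id; }  // placeholder body
};

PYBIND11_MODULE(example, m) {
  py::class_<Runner>(m, "Runner")
      .def(py::init<>())
      // Each instantiation needs its own def, as with RunOperatorCPU/GPU/Mixed.
      .def("RunCPU", &Runner::Run<CPU, CPU>)
      .def("RunGPU", &Runner::Run<GPU, GPU>)
      .def("RunMixed", &Runner::Run<CPU, GPU>);
}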