Add direct operator calls in debug mode #3734

Merged on Apr 5, 2022 (24 commits)

Commits
1f31170  Add direct operators prototype (ksztenderski, Mar 8, 2022)
94c4978  Add PipelineDebug class in the backend and general cleanup (ksztenderski, Mar 9, 2022)
c58d62c  Add template of cuda stream support in PipelineDebug (ksztenderski, Mar 10, 2022)
83fe0a0  Remove copy of outputs for TL (ksztenderski, Mar 15, 2022)
6405be9  Remove experimental ops exposure and direct_operator_call_test (ksztenderski, Mar 15, 2022)
aef8a73  Typo fix and cuda_stream fix (ksztenderski, Mar 15, 2022)
da04779  Add aritm_op support in direct operator for debug mode (ksztenderski, Mar 16, 2022)
596bafe  Add default layout support in direct op (ksztenderski, Mar 16, 2022)
cc7a4dc  Clean up (ksztenderski, Mar 16, 2022)
e843e85  Remove python eager operator exposure add operator device check (ksztenderski, Mar 22, 2022)
554c698  Add SetDefaultLayout usage in eager operator (ksztenderski, Mar 22, 2022)
23d4795  Clean up (ksztenderski, Mar 22, 2022)
d0091be  Add layout set fix (ksztenderski, Mar 23, 2022)
c1a8edc  Revert PresentAsTensorList (ksztenderski, Mar 23, 2022)
aecbc75  Merge branch 'NVIDIA:main' into eager_operator_calls (ksztenderski, Mar 23, 2022)
da27a7e  Change default device for shared thread pool in eager operator (ksztenderski, Mar 23, 2022)
b134aaa  Fix operator exposure in python (ksztenderski, Mar 23, 2022)
8148a56  Add OperatorManager util class for debug mode (ksztenderski, Mar 29, 2022)
2da6e95  Rename back to _debug_mode (ksztenderski, Mar 29, 2022)
92bfc65  Fix multiple input sets support in debug mode (ksztenderski, Mar 30, 2022)
aa0da7f  Revert to fn stype names for operators in debug mode (ksztenderski, Mar 30, 2022)
ea94e0e  Clean up (ksztenderski, Mar 30, 2022)
8ff9509  Fix input sets len check and adding inputs to OpSpec (ksztenderski, Apr 1, 2022)
b3846c4  Clean up (ksztenderski, Apr 1, 2022)
Changes below are from 1 commit.
49 changes: 32 additions & 17 deletions dali/pipeline/pipeline_debug.h
@@ -42,7 +42,20 @@ class DLL_PUBLIC PipelineDebug {
     }
   }
 
-  DLL_PUBLIC void AddOperator(OpSpec &spec, int logical_id);
+  DLL_PUBLIC void AddOperator(OpSpec &spec, int logical_id) {
+    FillOpSpec(spec);
+    AddOperatorImpl(spec, logical_id);
+  }
+
+  /**
+   * @brief Adds many identical operators (used for input sets).
+   */
+  DLL_PUBLIC void AddMultipleOperators(OpSpec &spec, std::vector<int> &logical_ids) {
+    FillOpSpec(spec);
+    for (int logical_id : logical_ids) {
+      AddOperatorImpl(spec, logical_id);
+    }
+  }
 
   template <typename InBackend, typename OutBackend>
   DLL_PUBLIC std::vector<std::shared_ptr<TensorList<OutBackend>>> RunOperator(
@@ -52,6 +65,24 @@ class DLL_PUBLIC PipelineDebug {
   }
 
  private:
+  void FillOpSpec(OpSpec &spec) {
+    spec.AddArg("max_batch_size", max_batch_size_);
+    spec.AddArg("device_id", device_id_);
+    spec.AddArg("num_threads", num_threads_);
+  }
+
+  void AddOperatorImpl(OpSpec &spec, int logical_id) {
+    std::string device = spec.GetArgument<std::string>("device");
+
+    if (device == "gpu") {
+      gpu_operators_.insert({logical_id, EagerOperator<GPUBackend>(spec)});
+    } else if (device == "cpu") {
+      cpu_operators_.insert({logical_id, EagerOperator<CPUBackend>(spec)});
+    } else if (device == "mixed") {
+      mixed_operators_.insert({logical_id, EagerOperator<MixedBackend>(spec)});
+    }
+  }
+
   int max_batch_size_;
   int device_id_;
   int num_threads_;
@@ -62,22 +93,6 @@ class DLL_PUBLIC PipelineDebug {
   std::unordered_map<int, EagerOperator<MixedBackend>> mixed_operators_;
 };
 
-void PipelineDebug::AddOperator(OpSpec &spec, int logical_id) {
-  spec.AddArg("max_batch_size", max_batch_size_);
-  spec.AddArg("device_id", device_id_);
-  spec.AddArg("num_threads", num_threads_);
-
-  std::string device = spec.GetArgument<std::string>("device");
-
-  if (device == "gpu") {
-    gpu_operators_.insert({logical_id, EagerOperator<GPUBackend>(spec)});
-  } else if (device == "cpu") {
-    cpu_operators_.insert({logical_id, EagerOperator<CPUBackend>(spec)});
-  } else if (device == "mixed") {
-    mixed_operators_.insert({logical_id, EagerOperator<MixedBackend>(spec)});
-  }
-}
-
 template <>
 std::vector<std::shared_ptr<TensorList<CPUBackend>>> PipelineDebug::RunOperator(
     int logical_id, const std::vector<std::shared_ptr<TensorList<CPUBackend>>> &inputs,
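
For orientation, here is a minimal usage sketch of the new registration entry points. It is not part of this PR: the operator name, argument values, and constructor arguments are placeholders; only the AddOperator / AddMultipleOperators signatures and the constructor parameter order (mirroring the py::init lambda in backend_impl.cc below) come from the diffs.

// Hypothetical registration sketch; operator name and argument values are
// placeholders, not taken from this PR.
#include <string>
#include <vector>

#include "dali/pipeline/operator/op_spec.h"
#include "dali/pipeline/pipeline_debug.h"

void RegisterDebugOperators() {
  // (max_batch_size, num_threads, device_id, set_affinity)
  dali::PipelineDebug debug(8, 4, 0, false);

  dali::OpSpec spec("Copy");                  // placeholder operator
  spec.AddArg("device", std::string("cpu"));  // routes to cpu_operators_ in AddOperatorImpl

  // Single operator: FillOpSpec injects max_batch_size / device_id / num_threads
  // before the EagerOperator is constructed.
  debug.AddOperator(spec, 0);

  // Input sets: the same spec is registered once per logical id.
  std::vector<int> logical_ids = {1, 2, 3};
  debug.AddMultipleOperators(spec, logical_ids);
}
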
1 change: 1 addition & 0 deletions dali/python/backend_impl.cc
@@ -624,6 +624,7 @@ void ExposePipelineDebug(py::module &m) {
         return std::make_unique<PipelineDebug>(batch_size, num_threads, device_id, set_affinity);
       }))
       .def("AddOperator", &PipelineDebug::AddOperator)
+      .def("AddMultipleOperators", &PipelineDebug::AddMultipleOperators)
       .def("RunOperatorCPU", &PipelineDebug::RunOperator<CPUBackend, CPUBackend>)
       .def("RunOperatorGPU", &PipelineDebug::RunOperator<GPUBackend, GPUBackend>)
       .def("RunOperatorMixed", &PipelineDebug::RunOperator<CPUBackend, GPUBackend>);
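
The three Run* bindings pick backend pairs for the templated RunOperator: CPU operators map CPUBackend to CPUBackend, GPU operators GPUBackend to GPUBackend, and mixed operators consume CPU input and produce GPU output, hence RunOperator<CPUBackend, GPUBackend> for RunOperatorMixed. Below is a small sketch of the input container these entry points accept; the remaining RunOperator parameters are truncated in the diff above, so the call itself is not shown.

// Illustrative only: builds the vector-of-TensorList inputs visible in the
// RunOperator signature; the truncated trailing parameters are omitted.
#include <memory>
#include <vector>

#include "dali/pipeline/data/tensor_list.h"

std::vector<std::shared_ptr<dali::TensorList<dali::CPUBackend>>> MakeEmptyCpuInputs(int num_inputs) {
  std::vector<std::shared_ptr<dali::TensorList<dali::CPUBackend>>> inputs;
  for (int i = 0; i < num_inputs; i++) {
    // One TensorList per operator input; CPU-backed inputs feed both
    // RunOperatorCPU and RunOperatorMixed.
    inputs.push_back(std::make_shared<dali::TensorList<dali::CPUBackend>>());
  }
  return inputs;
}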