Address review comments
- Rearrange order of comments
- Remove unnecessary string construction
- Reorder function declarations
- Add brief documentation for testing `SMARTREDIS_TEST_DEVICE`
- Check for allocated status before deallocating
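
The "Remove unnecessary string construction" item refers to the c_client.cpp hunks below, where an explicit std::string(...) wrapper around a string literal is dropped. A minimal standalone sketch of why the shorter form is equivalent, with std::invalid_argument standing in for SmartRedis's SRParameterException so the snippet compiles on its own:

    #include <cstddef>
    #include <stdexcept>
    #include <string>

    // Illustrative only -- not the repository's code.
    void check_inputs(const char** inputs, const std::size_t* input_lengths,
                      std::size_t n_inputs)
    {
        for (std::size_t i = 0; i < n_inputs; i++) {
            if (inputs[i] == NULL || input_lengths[i] == 0) {
                // operator+ on a string literal and the std::string returned by
                // std::to_string already yields a std::string, so wrapping the
                // literal in std::string(...) adds nothing.
                throw std::invalid_argument(
                    "inputs[" + std::to_string(i) + "] is NULL or empty");
            }
        }
    }
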
ashao committed May 16, 2022
1 parent 2d48896 commit 8a34c26
Showing 7 changed files with 117 additions and 95 deletions.
19 changes: 18 additions & 1 deletion doc/developer/testing.rst
@@ -1,7 +1,24 @@
*******************
Unit Test Framework
Testing Description
*******************

##################
Quick instructions
##################

To run the tests, assuming that all requirements have been installed:
1. Activate your environment with SmartSim and SmartRedis installed
2. Modify `SMARTREDIS_TEST_CLUSTER` (`True` or `False`) and
   `SMARTREDIS_TEST_DEVICE` (`gpu` or `cpu`) as necessary in
   `setup_test_env.sh`
3. `source setup_test_env.sh`
4. `pushd utils/create_cluster; python local_cluster.py; popd`
5. `export SSDB="127.0.0.1:6379,127.0.0.1:6380,127.0.0.1:6381"`
6. Run the desired tests (see `make help` for more details)
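
The sketch below is a hypothetical illustration (not part of the test suite) of how
a test program might consume the variables exported by `setup_test_env.sh`; only the
variable names come from the steps above::

    #include <cstdio>
    #include <cstdlib>
    #include <string>

    // Illustrative helper, not part of SmartRedis: read the test-environment
    // variables and fall back to defaults if they are unset.
    static std::string env_or(const char* name, const std::string& fallback)
    {
        const char* value = std::getenv(name);
        return (value != nullptr) ? std::string(value) : fallback;
    }

    int main()
    {
        const bool use_cluster = (env_or("SMARTREDIS_TEST_CLUSTER", "False") == "True");
        const std::string device = env_or("SMARTREDIS_TEST_DEVICE", "cpu");
        const std::string ssdb = env_or("SSDB", "127.0.0.1:6379");
        std::printf("cluster=%d device=%s SSDB=%s\n",
                    use_cluster ? 1 : 0, device.c_str(), ssdb.c_str());
        return 0;
    }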

###################
Unit Test Framework
###################
All unit tests for the C++ client are located at ``tests/cpp/unit-tests/`` and use the Catch2
test framework. The unit tests mostly follow a Behavior Driven Development (BDD) style by
using Catch2's ``SCENARIO``, ``GIVEN``, ``WHEN``, and ``THEN`` syntax.
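
For readers new to this style, a minimal illustrative Catch2 scenario is sketched
below; it is not taken from the SmartRedis test suite, but the ``SCENARIO``,
``GIVEN``, ``WHEN``, ``THEN``, and ``REQUIRE`` macros are standard Catch2::

    // Illustrative Catch2 (v2, single-header) BDD-style test.
    #define CATCH_CONFIG_MAIN  // let Catch2 generate main() for this standalone example
    #include "catch.hpp"
    #include <vector>

    SCENARIO("a vector grows when elements are appended", "[example]")
    {
        GIVEN("an empty vector") {
            std::vector<int> v;
            REQUIRE(v.empty());

            WHEN("one element is pushed back") {
                v.push_back(42);

                THEN("the size is one and the element is retrievable") {
                    REQUIRE(v.size() == 1);
                    REQUIRE(v[0] == 42);
                }
            }
        }
    }
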
65 changes: 32 additions & 33 deletions src/c/c_client.cpp
@@ -490,15 +490,15 @@ void _check_params_set_model(void* c_client,
for (size_t i = 0; i < n_inputs; i++){
if (inputs[i] == NULL || input_lengths[i] == 0) {
throw SRParameterException(
std::string("inputs[") + std::to_string(i) + "] is NULL or empty");
"inputs[" + std::to_string(i) + "] is NULL or empty");
}
}
}
if (n_outputs != 1 && output_lengths[0] != 0) {
for (size_t i = 0; i < n_outputs; i++) {
if (outputs[i] == NULL || output_lengths[i] == 0) {
throw SRParameterException(
std::string("outputs[") + std::to_string(i) + "] is NULL or empty");
"outputs[" + std::to_string(i) + "] is NULL or empty");
}
}
}
@@ -522,10 +522,10 @@ SRError set_model_from_file(void* c_client,
SRError result = SRNoError;
try
{
_check_params_set_model(c_client, name, backend, inputs, input_lengths, n_inputs,
outputs, output_lengths, n_outputs);
// Sanity check params. Tag is strictly optional, and inputs/outputs are
// mandatory IFF backend is TensorFlow (TF or TFLITE)
_check_params_set_model(c_client, name, backend, inputs, input_lengths, n_inputs,
outputs, output_lengths, n_outputs);
SR_CHECK_PARAMS(model_file != NULL && device != NULL);

Client* s = reinterpret_cast<Client*>(c_client);
@@ -584,10 +584,10 @@ SRError set_model_from_file_multigpu(void* c_client,
SRError result = SRNoError;
try
{
_check_params_set_model(c_client, name, backend, inputs, input_lengths, n_inputs,
outputs, output_lengths, n_outputs);
// Sanity check params. Tag is strictly optional, and inputs/outputs are
// mandatory IFF backend is TensorFlow (TF or TFLITE)
_check_params_set_model(c_client, name, backend, inputs, input_lengths, n_inputs,
outputs, output_lengths, n_outputs);
SR_CHECK_PARAMS(model_file != NULL);

Client* s = reinterpret_cast<Client*>(c_client);
@@ -795,27 +795,29 @@ SRError get_model(void* c_client,
return result;
}

// Put a script in the database that is stored in a file in a multi-GPU system
// Put a script in the database that is stored in a file.
extern "C"
SRError set_script_from_file_multigpu(void* c_client,
const char* name,
const size_t name_length,
const char* script_file,
const size_t script_file_length,
const int first_gpu,
const int num_gpus)
SRError set_script_from_file(void* c_client,
const char* name,
const size_t name_length,
const char* device,
const size_t device_length,
const char* script_file,
const size_t script_file_length)
{
SRError result = SRNoError;
try
{
// Sanity check params
SR_CHECK_PARAMS(c_client != NULL && name != NULL && script_file != NULL);
SR_CHECK_PARAMS(c_client != NULL && name != NULL && device != NULL &&
script_file != NULL);

Client* s = reinterpret_cast<Client*>(c_client);
std::string name_str(name, name_length);
std::string device_str(device, device_length);
std::string script_file_str(script_file, script_file_length);

s->set_script_from_file_multigpu(name_str, script_file_str, first_gpu, num_gpus);
s->set_script_from_file(name_str, device_str, script_file_str);
}
catch (const Exception& e) {
SRSetLastError(e);
@@ -829,30 +829,27 @@ SRError set_script_from_file_multigpu(void* c_client,
return result;
}


// Put a script in the database that is stored in a file.
// Put a script in the database that is stored in a file in a multi-GPU system
extern "C"
SRError set_script_from_file(void* c_client,
const char* name,
const size_t name_length,
const char* device,
const size_t device_length,
const char* script_file,
const size_t script_file_length)
SRError set_script_from_file_multigpu(void* c_client,
const char* name,
const size_t name_length,
const char* script_file,
const size_t script_file_length,
const int first_gpu,
const int num_gpus)
{
SRError result = SRNoError;
try
{
// Sanity check params
SR_CHECK_PARAMS(c_client != NULL && name != NULL && device != NULL &&
script_file != NULL);
SR_CHECK_PARAMS(c_client != NULL && name != NULL && script_file != NULL);

Client* s = reinterpret_cast<Client*>(c_client);
std::string name_str(name, name_length);
std::string device_str(device, device_length);
std::string script_file_str(script_file, script_file_length);

s->set_script_from_file(name_str, device_str, script_file_str);
s->set_script_from_file_multigpu(name_str, script_file_str, first_gpu, num_gpus);
}
catch (const Exception& e) {
SRSetLastError(e);
@@ -993,13 +992,13 @@ void _check_params_run_script(void* c_client,
for (size_t i = 0; i < n_inputs; i++){
if (inputs[i] == NULL || input_lengths[i] == 0) {
throw SRParameterException(
std::string("inputs[") + std::to_string(i) + "] is NULL or empty");
"inputs[" + std::to_string(i) + "] is NULL or empty");
}
}
for (size_t i = 0; i < n_outputs; i++) {
if (outputs[i] == NULL || output_lengths[i] == 0) {
throw SRParameterException(
std::string("outputs[") + std::to_string(i) + "] is NULL or empty");
"outputs[" + std::to_string(i) + "] is NULL or empty");
}
}
}
@@ -1130,13 +1129,13 @@ void _check_params_run_model(void* c_client,
for (size_t i = 0; i < n_inputs; i++){
if (inputs[i] == NULL || input_lengths[i] == 0) {
throw SRParameterException(
std::string("inputs[") + std::to_string(i) + "] is NULL or empty");
"inputs[" + std::to_string(i) + "] is NULL or empty");
}
}
for (size_t i = 0; i < n_outputs; i++) {
if (outputs[i] == NULL || output_lengths[i] == 0) {
throw SRParameterException(
std::string("outputs[") + std::to_string(i) + "] is NULL or empty");
"outputs[" + std::to_string(i) + "] is NULL or empty");
}
}
}
105 changes: 55 additions & 50 deletions src/fortran/client.F90
@@ -684,13 +684,13 @@ function set_model_from_file(self, name, model_file, backend, device, batch_size
c_backend, backend_length, c_device, device_length, c_batch_size, c_min_batch_size, &
c_tag, tag_length, inputs_ptr, input_lengths_ptr, n_inputs, outputs_ptr, &
output_lengths_ptr, n_outputs)
deallocate(c_inputs)
deallocate(input_lengths)
deallocate(ptrs_to_inputs)
deallocate(c_outputs)
deallocate(output_lengths)
deallocate(ptrs_to_outputs)
deallocate(c_tag)
if (allocated(c_inputs)) deallocate(c_inputs)
if (allocated(input_lengths)) deallocate(input_lengths)
if (allocated(ptrs_to_inputs)) deallocate(ptrs_to_inputs)
if (allocated(c_outputs)) deallocate(c_outputs)
if (allocated(output_lengths)) deallocate(output_lengths)
if (allocated(ptrs_to_outputs)) deallocate(ptrs_to_outputs)
if (allocated(c_tag)) deallocate(c_tag)
end function set_model_from_file

!> Load the machine learning model from a file and set the configuration for use in multi-GPU systems
@@ -780,13 +780,14 @@ function set_model_from_file_multigpu(self, name, model_file, backend, first_gpu
c_backend, backend_length, c_first_gpu, c_num_gpus, c_batch_size, c_min_batch_size, &
c_tag, tag_length, inputs_ptr, input_lengths_ptr, n_inputs, outputs_ptr, &
output_lengths_ptr, n_outputs)
deallocate(c_inputs)
deallocate(input_lengths)
deallocate(ptrs_to_inputs)
deallocate(c_outputs)
deallocate(output_lengths)
deallocate(ptrs_to_outputs)
deallocate(c_tag)

if (allocated(c_inputs)) deallocate(c_inputs)
if (allocated(input_lengths)) deallocate(input_lengths)
if (allocated(ptrs_to_inputs)) deallocate(ptrs_to_inputs)
if (allocated(c_outputs)) deallocate(c_outputs)
if (allocated(output_lengths)) deallocate(output_lengths)
if (allocated(ptrs_to_outputs)) deallocate(ptrs_to_outputs)
if (allocated(c_tag)) deallocate(c_tag)
end function set_model_from_file_multigpu

!> Establish a model to run
@@ -849,12 +850,13 @@ function set_model(self, name, model, backend, device, batch_size, min_batch_siz
c_device, device_length, batch_size, min_batch_size, c_tag, tag_length, &
inputs_ptr, input_lengths_ptr, n_inputs, outputs_ptr, output_lengths_ptr, n_outputs)

deallocate(c_inputs)
deallocate(input_lengths)
deallocate(ptrs_to_inputs)
deallocate(c_outputs)
deallocate(output_lengths)
deallocate(ptrs_to_outputs)
if (allocated(c_inputs)) deallocate(c_inputs)
if (allocated(input_lengths)) deallocate(input_lengths)
if (allocated(ptrs_to_inputs)) deallocate(ptrs_to_inputs)
if (allocated(c_outputs)) deallocate(c_outputs)
if (allocated(output_lengths)) deallocate(output_lengths)
if (allocated(ptrs_to_outputs)) deallocate(ptrs_to_outputs)
if (allocated(c_tag)) deallocate(c_tag)
end function set_model

!> Set a model from a byte string to run on a system with multiple GPUs
@@ -916,12 +918,13 @@ function set_model_multigpu(self, name, model, backend, first_gpu, num_gpus, bat
c_first_gpu, c_num_gpus, c_batch_size, c_min_batch_size, c_tag, tag_length, &
inputs_ptr, input_lengths_ptr, n_inputs, outputs_ptr, output_lengths_ptr, n_outputs)

deallocate(c_inputs)
deallocate(input_lengths)
deallocate(ptrs_to_inputs)
deallocate(c_outputs)
deallocate(output_lengths)
deallocate(ptrs_to_outputs)
if (allocated(c_inputs)) deallocate(c_inputs)
if (allocated(input_lengths)) deallocate(input_lengths)
if (allocated(ptrs_to_inputs)) deallocate(ptrs_to_inputs)
if (allocated(c_outputs)) deallocate(c_outputs)
if (allocated(output_lengths)) deallocate(output_lengths)
if (allocated(ptrs_to_outputs)) deallocate(ptrs_to_outputs)
if (allocated(c_tag)) deallocate(c_tag)
end function set_model_multigpu

!> Run a model in the database using the specified input and output tensors
@@ -955,12 +958,13 @@ function run_model(self, name, inputs, outputs) result(code)
code = run_model_c(self%client_ptr, c_name, name_length, inputs_ptr, input_lengths_ptr, n_inputs, outputs_ptr, &
output_lengths_ptr, n_outputs)

deallocate(c_inputs)
deallocate(input_lengths)
deallocate(ptrs_to_inputs)
deallocate(c_outputs)
deallocate(output_lengths)
deallocate(ptrs_to_outputs)
if (allocated(c_inputs)) deallocate(c_inputs)
if (allocated(input_lengths)) deallocate(input_lengths)
if (allocated(ptrs_to_inputs)) deallocate(ptrs_to_inputs)
if (allocated(c_outputs)) deallocate(c_outputs)
if (allocated(output_lengths)) deallocate(output_lengths)
if (allocated(ptrs_to_outputs)) deallocate(ptrs_to_outputs)
end function run_model

!> Run a model in the database using the specified input and output tensors in a multi-GPU system
@@ -1003,12 +1007,13 @@ function run_model_multigpu(self, name, inputs, outputs, offset, first_gpu, num_
code = run_model_multigpu_c(self%client_ptr, c_name, name_length, inputs_ptr, input_lengths_ptr, n_inputs, &
outputs_ptr, output_lengths_ptr, n_outputs, c_offset, c_first_gpu, c_num_gpus)

deallocate(c_inputs)
deallocate(input_lengths)
deallocate(ptrs_to_inputs)
deallocate(c_outputs)
deallocate(output_lengths)
deallocate(ptrs_to_outputs)
if (allocated(c_inputs)) deallocate(c_inputs)
if (allocated(input_lengths)) deallocate(input_lengths)
if (allocated(ptrs_to_inputs)) deallocate(ptrs_to_inputs)
if (allocated(c_outputs)) deallocate(c_outputs)
if (allocated(output_lengths)) deallocate(output_lengths)
if (allocated(ptrs_to_outputs)) deallocate(ptrs_to_outputs)
end function run_model_multigpu

!> Remove a model from the database
@@ -1227,12 +1232,12 @@ function run_script(self, name, func, inputs, outputs) result(code)
code = run_script_c(self%client_ptr, c_name, name_length, c_func, func_length, inputs_ptr, input_lengths_ptr, &
n_inputs, outputs_ptr, output_lengths_ptr, n_outputs)

deallocate(c_inputs)
deallocate(input_lengths)
deallocate(ptrs_to_inputs)
deallocate(c_outputs)
deallocate(output_lengths)
deallocate(ptrs_to_outputs)
if (allocated(c_inputs)) deallocate(c_inputs)
if (allocated(input_lengths)) deallocate(input_lengths)
if (allocated(ptrs_to_inputs)) deallocate(ptrs_to_inputs)
if (allocated(c_outputs)) deallocate(c_outputs)
if (allocated(output_lengths)) deallocate(output_lengths)
if (allocated(ptrs_to_outputs)) deallocate(ptrs_to_outputs)
end function run_script

function run_script_multigpu(self, name, func, inputs, outputs, offset, first_gpu, num_gpus) result(code)
@@ -1279,12 +1284,12 @@ function run_script_multigpu(self, name, func, inputs, outputs, offset, first_gp
code = run_script_multigpu_c(self%client_ptr, c_name, name_length, c_func, func_length, inputs_ptr, input_lengths_ptr, &
n_inputs, outputs_ptr, output_lengths_ptr, n_outputs, c_offset, c_first_gpu, c_num_gpus)

deallocate(c_inputs)
deallocate(input_lengths)
deallocate(ptrs_to_inputs)
deallocate(c_outputs)
deallocate(output_lengths)
deallocate(ptrs_to_outputs)
if (allocated(c_inputs)) deallocate(c_inputs)
if (allocated(input_lengths)) deallocate(input_lengths)
if (allocated(ptrs_to_inputs)) deallocate(ptrs_to_inputs)
if (allocated(c_outputs)) deallocate(c_outputs)
if (allocated(output_lengths)) deallocate(output_lengths)
if (allocated(ptrs_to_outputs)) deallocate(ptrs_to_outputs)
end function run_script_multigpu

!> Remove a script from the database
4 changes: 2 additions & 2 deletions src/fortran/client/script_interfaces.inc
@@ -99,8 +99,8 @@ interface
type(c_ptr), value, intent(in) :: c_client !< Initialized SmartRedis client
character(kind=c_char), intent(in) :: key(*) !< The key to use to place the script
integer(kind=c_size_t), value, intent(in) :: key_length !< The length of the key c-string, excluding null
character(kind=c_char), intent(in) :: script(*) !< The file storing the script
integer(kind=c_size_t), value, intent(in) :: script_length !< The length of the file name c-string, excluding
character(kind=c_char), intent(in) :: script(*) !< The file storing the script
integer(kind=c_size_t), value, intent(in) :: script_length !< The length of the file name c-string, excluding
!! null terminating character
integer(kind=c_int), value, intent(in) :: first_gpu !< ID of the first GPU to set the model on
integer(kind=c_int), value, intent(in) :: num_gpus !< How many GPUs to use in the orchestrator
