3 changes: 2 additions & 1 deletion .circleci/config.yml
@@ -86,6 +86,7 @@ commands:
- run:
name: Setup build system
command: |
clang-format --version
./opt/system-setup.py

checkout-all:
@@ -220,7 +221,7 @@ commands:
jobs:
lint:
docker:
- image: redislabsmodules/llvm-toolset:latest
- image: redisfab/rmbuilder:6.2.5-x64-buster
steps:
- abort_for_docs
- abort_for_noci
2 changes: 2 additions & 0 deletions opt/build/docker/dockerfile-gpu-test.tmpl
@@ -36,6 +36,8 @@ COPY ./tests/flow/tests_setup/test_requirements.txt tests/flow/tests_setup/
COPY ./tests/flow/tests_setup/Install_RedisGears.sh tests/flow/tests_setup/
COPY ./get_deps.sh .

RUN apt-get -q install -y git

RUN VENV=venv FORCE=1 ./opt/readies/bin/getpy3

RUN set -e ;\
4 changes: 3 additions & 1 deletion opt/build/docker/dockerfile.tmpl
@@ -30,8 +30,10 @@ COPY --from=redis /usr/local/ /usr/local/
COPY ./opt/ opt/
ADD ./tests/flow/ tests/flow/

RUN FORCE=1 ./opt/readies/bin/getpy3
RUN ./opt/readies/bin/getupdates
RUN if [ ! -z $(command -v apt-get) ]; then apt-get -qq update; apt-get -q install -y git; fi
RUN if [ ! -z $(command -v yum) ]; then yum install -y git; fi
RUN FORCE=1 ./opt/readies/bin/getpy3
RUN ./opt/system-setup.py

ARG DEPS_ARGS=""
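Note on the two RUN lines added above: command -v prints a tool's path only when that tool is installed, so exactly one of the two checks fires and git gets installed whether the base image uses apt-get or yum. A standalone sketch of the same detection pattern, assuming nothing beyond POSIX shell and the two package managers named in the diff (the template itself keeps each check on its own RUN line):

    #!/bin/sh
    # Install git with whichever package manager the base image provides.
    if command -v apt-get >/dev/null 2>&1; then
        apt-get -qq update
        apt-get -q install -y git
    elif command -v yum >/dev/null 2>&1; then
        yum install -y git
    else
        echo "no supported package manager found" >&2
        exit 1
    fi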
4 changes: 2 additions & 2 deletions opt/clang-check-all.sh
@@ -3,8 +3,8 @@
# exit immediately on error ( no need to keep checking )
set -e

CLANG_FMT_SRCS=$(find ../src/ \( -name '*.c' -o -name '*.cc'-o -name '*.cpp' -o -name '*.h' -o -name '*.hh' -o -name '*.hpp' \))
CLANG_FMT_TESTS=$(find ../tests/ \( -name '*.c' -o -name '*.cc'-o -name '*.cpp' -o -name '*.h' -o -name '*.hh' -o -name '*.hpp' \))
CLANG_FMT_SRCS=$(find ../src/ \( -name '*.c' -o -name '*.cc' -o -name '*.cpp' -o -name '*.h' -o -name '*.hh' -o -name '*.hpp' \))
CLANG_FMT_TESTS=$(find ../tests/ \( -name '*.c' -o -name '*.cc' -o -name '*.cpp' -o -name '*.h' -o -name '*.hh' -o -name '*.hpp' \))

for filename in $CLANG_FMT_SRCS; do
echo "Checking $filename"
4 changes: 2 additions & 2 deletions opt/clang-format-all.sh
@@ -3,8 +3,8 @@
# exit immediately on error ( no need to keep checking )
set -e

CLANG_FMT_SRCS=$(find ../src/ \( -name '*.c' -o -name '*.cc'-o -name '*.cpp' -o -name '*.h' -o -name '*.hh' -o -name '*.hpp' \))
CLANG_FMT_TESTS=$(find ../tests/ \( -name '*.c' -o -name '*.cc'-o -name '*.cpp' -o -name '*.h' -o -name '*.hh' -o -name '*.hpp' \))
CLANG_FMT_SRCS=$(find ../src/ \( -name '*.c' -o -name '*.cc' -o -name '*.cpp' -o -name '*.h' -o -name '*.hh' -o -name '*.hpp' \))
CLANG_FMT_TESTS=$(find ../tests/ \( -name '*.c' -o -name '*.cc' -o -name '*.cpp' -o -name '*.h' -o -name '*.hh' -o -name '*.hpp' \))

for filename in $CLANG_FMT_SRCS; do
clang-format --verbose -style=file -i $filename
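The only change in clang-check-all.sh and clang-format-all.sh is the space added before the second -o. Without it the shell hands find the single word *.cc-o, the OR operator is lost, and the expression groups as *.c OR (*.cc-o AND *.cpp) OR *.h OR *.hh OR *.hpp; since no filename can match both *.cc-o and *.cpp, .cc and .cpp sources were silently skipped by both scripts. A minimal illustration of the difference (generic find invocation, not taken from the repo):

    # Broken: '*.cc'-o collapses into the single pattern '*.cc-o', so the -o
    # operator vanishes and .cc/.cpp files never match.
    find src/ \( -name '*.c' -o -name '*.cc'-o -name '*.cpp' \)

    # Fixed: each extension is its own -name predicate joined by -o (logical OR).
    find src/ \( -name '*.c' -o -name '*.cc' -o -name '*.cpp' \)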
54 changes: 24 additions & 30 deletions src/backends/libtflite_c/tflite_c.cpp
@@ -53,15 +53,13 @@ static DLDataType getDLDataType(const TfLiteTensor *tensor) {
return dtype;
}


static DLDevice getDLDevice(const TfLiteTensor *tensor, const int64_t &device_id) {
DLDevice device;
device.device_id = device_id;
device.device_type = DLDeviceType::kDLCPU;
return device;
}


size_t dltensorBytes(DLManagedTensor *t) {
int64_t *shape = t->dl_tensor.shape;
size_t len = 1;
@@ -80,15 +78,15 @@ void copyToTfLiteTensor(std::shared_ptr<tflite::Interpreter> interpreter, int tf
size_t nbytes = dltensorBytes(input);
DLDataType dltensor_type = input->dl_tensor.dtype;
const char *type_mismatch_msg = "Input tensor type doesn't match the type expected"
" by the model definition";
" by the model definition";

switch (tensor->type) {
case kTfLiteUInt8:
if (dltensor_type.code != kDLUInt || dltensor_type.bits != 8) {
throw std::logic_error(type_mismatch_msg);
}
memcpy(interpreter->typed_tensor<uint8_t>(tflite_input), input->dl_tensor.data, nbytes);
break;
if (dltensor_type.code != kDLUInt || dltensor_type.bits != 8) {
throw std::logic_error(type_mismatch_msg);
}
memcpy(interpreter->typed_tensor<uint8_t>(tflite_input), input->dl_tensor.data, nbytes);
break;
case kTfLiteInt64:
if (dltensor_type.code != kDLInt || dltensor_type.bits != 64) {
throw std::logic_error(type_mismatch_msg);
@@ -273,51 +271,47 @@ extern "C" void *tfliteLoadModel(const char *graph, size_t graphlen, DLDeviceTyp
return ctx;
}

extern "C" size_t tfliteModelNumInputs(void* ctx, char** error) {
ModelContext *ctx_ = (ModelContext*) ctx;
extern "C" size_t tfliteModelNumInputs(void *ctx, char **error) {
ModelContext *ctx_ = (ModelContext *)ctx;
size_t ret = 0;
try {
auto interpreter = ctx_->interpreter;
ret = interpreter->inputs().size();
}
catch(std::exception ex) {
ret = interpreter->inputs().size();
} catch (std::exception ex) {
_setError(ex.what(), error);
}
return ret;
}

extern "C" const char* tfliteModelInputNameAtIndex(void* modelCtx, size_t index, char** error) {
ModelContext *ctx_ = (ModelContext*) modelCtx;
const char* ret = NULL;
extern "C" const char *tfliteModelInputNameAtIndex(void *modelCtx, size_t index, char **error) {
ModelContext *ctx_ = (ModelContext *)modelCtx;
const char *ret = NULL;
try {
ret = ctx_->interpreter->GetInputName(index);
}
catch(std::exception ex) {
} catch (std::exception ex) {
_setError(ex.what(), error);
}
return ret;
}

extern "C" size_t tfliteModelNumOutputs(void* ctx, char** error) {
ModelContext *ctx_ = (ModelContext*) ctx;
extern "C" size_t tfliteModelNumOutputs(void *ctx, char **error) {
ModelContext *ctx_ = (ModelContext *)ctx;
size_t ret = 0;
try {
auto interpreter = ctx_->interpreter;
ret = interpreter->outputs().size();
}
catch(std::exception ex) {
ret = interpreter->outputs().size();
} catch (std::exception ex) {
_setError(ex.what(), error);
}
return ret;
}

extern "C" const char* tfliteModelOutputNameAtIndex(void* modelCtx, size_t index, char** error) {
ModelContext *ctx_ = (ModelContext*) modelCtx;
const char* ret = NULL;
extern "C" const char *tfliteModelOutputNameAtIndex(void *modelCtx, size_t index, char **error) {
ModelContext *ctx_ = (ModelContext *)modelCtx;
const char *ret = NULL;
try {
ret = ctx_->interpreter->GetOutputName(index);
}
catch(std::exception ex) {
} catch (std::exception ex) {
_setError(ex.what(), error);
}
return ret;
@@ -352,11 +346,11 @@ extern "C" void tfliteRunModel(void *ctx, long n_inputs, DLManagedTensor **input
bool need_reallocation = false;
std::vector<int> dims;
for (size_t i = 0; i < tflite_inputs.size(); i++) {
const TfLiteTensor* tflite_tensor = interpreter->tensor(tflite_inputs[i]);
const TfLiteTensor *tflite_tensor = interpreter->tensor(tflite_inputs[i]);
int64_t ndim = inputs[i]->dl_tensor.ndim;
int64_t *shape = inputs[i]->dl_tensor.shape;
dims.resize(ndim);
for (size_t j=0; j < ndim; j++) {
for (size_t j = 0; j < ndim; j++) {
dims[j] = shape[j];
}
if (!tflite::EqualArrayAndTfLiteIntArray(tflite_tensor->dims, dims.size(), dims.data())) {