From 7b9cd0a720592ab456d209931c6bdaf1f2564e07 Mon Sep 17 00:00:00 2001 From: Dimitar Tomov Date: Thu, 15 Dec 2022 12:22:03 +0200 Subject: [PATCH] Add preloaded ML model for continuous gesture detection Signed-off-by: Dimitar Tomov --- ei-model/model-parameters/anomaly_clusters.h | 65 +++ ei-model/model-parameters/anomaly_types.h | 36 ++ ei-model/model-parameters/dsp_blocks.h | 41 ++ ei-model/model-parameters/model_metadata.h | 207 ++++++++ ei-model/model-parameters/model_variables.h | 59 +++ .../tflite-model/trained_model_compiled.cpp | 465 ++++++++++++++++++ .../tflite-model/trained_model_compiled.h | 75 +++ 7 files changed, 948 insertions(+) create mode 100644 ei-model/model-parameters/anomaly_clusters.h create mode 100644 ei-model/model-parameters/anomaly_types.h create mode 100644 ei-model/model-parameters/dsp_blocks.h create mode 100644 ei-model/model-parameters/model_metadata.h create mode 100644 ei-model/model-parameters/model_variables.h create mode 100644 ei-model/tflite-model/trained_model_compiled.cpp create mode 100644 ei-model/tflite-model/trained_model_compiled.h diff --git a/ei-model/model-parameters/anomaly_clusters.h b/ei-model/model-parameters/anomaly_clusters.h new file mode 100644 index 0000000..2eca7fd --- /dev/null +++ b/ei-model/model-parameters/anomaly_clusters.h @@ -0,0 +1,65 @@ +/* Generated by Edge Impulse + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ + +#ifndef _EI_CLASSIFIER_ANOMALY_CLUSTERS_H_ +#define _EI_CLASSIFIER_ANOMALY_CLUSTERS_H_ + +#include "edge-impulse-sdk/anomaly/anomaly.h" + +// (before - mean) / scale +const float ei_classifier_anom_scale[EI_CLASSIFIER_ANOM_AXIS_SIZE] = { 4.735934431428502, 2.6370697886685024, 1.752289641283062 }; +const float ei_classifier_anom_mean[EI_CLASSIFIER_ANOM_AXIS_SIZE] = { 4.292335472136911, 2.1852703082689375, 1.6949875804138423 }; + +const ei_classifier_anom_cluster_t ei_classifier_anom_clusters[EI_CLASSIFIER_ANOM_CLUSTER_COUNT] = { { { 1.1627247333526611, -0.09127187728881836, 0.039932578802108765 }, 0.45766612185692546 } +, { { 1.0793125629425049, -0.35539326071739197, -0.25196555256843567 }, 0.393853330260634 } +, { { -0.6699717044830322, -0.025066491216421127, 1.8257455825805664 }, 0.4076753010490145 } +, { { -0.905547022819519, -0.8271730542182922, -0.9636279940605164 }, 0.07057358582419748 } +, { { -0.6083683967590332, 0.3378922939300537, -0.2750750780105591 }, 0.34529380833031026 } +, { { 1.8524712324142456, -0.011566506698727608, 0.8461854457855225 }, 0.432528610593013 } +, { { 1.3358670473098755, -0.05726705491542816, -0.43816810846328735 }, 0.5062860278160605 } +, { { -0.7409022450447083, -0.34725621342658997, -0.8029352426528931 }, 0.21884703625369306 } +, { { -0.503063440322876, -0.3759881258010864, 2.3207943439483643 }, 0.7885827511063271 } +, { { -0.6956930160522461, -0.3123343288898468, 1.5905060768127441 }, 0.39691538375970054 } +, { { 0.7493807077407837, 2.8090922832489014, 0.8038313388824463 }, 0.4756844734737089 } +, { { -0.6844053864479065, -0.14970919489860535, -0.8244701623916626 }, 0.26707199648543406 } +, { { 1.5486040115356445, 0.7133833765983582, 0.5105420351028442 }, 0.4487223169579444 } +, { { 1.7191046476364136, 0.2774096429347992, 0.3952651917934418 }, 0.4223370205546866 } +, { { -0.5709540247917175, 0.6095415353775024, 0.03743164613842964 }, 0.5761022260100517 } +, { { -0.7589687705039978, -0.5149730443954468, 1.1454917192459106 }, 0.41873647710696993 } +, { { -0.03281811252236366, 3.856318950653076, -0.06501699984073639 }, 0.4937528681009911 } +, { { 0.9676327705383301, 3.318385362625122, 1.194267749786377 }, 0.3862170996134061 } +, { { -0.31564873456954956, 1.6312458515167236, 0.08470914512872696 }, 0.5464022209211731 } +, { { -0.4383845925331116, 3.3809237480163574, -0.20014718174934387 }, 0.5178456874700895 } +, { { -0.7231196761131287, -0.3596212565898895, -0.5751107335090637 }, 0.2102935035976298 } +, { { -0.5922588109970093, 0.08036672323942184, 1.3690720796585083 }, 0.4210207540244469 } +, { { 1.2505220174789429, 0.2934052348136902, -0.31634294986724854 }, 0.4438873246997636 } +, { { -0.4361991286277771, -0.1800207793712616, 2.741694450378418 }, 0.7265706160475239 } +, { { 0.9043402075767517, -0.10142571479082108, 0.8351730704307556 }, 0.5526395773058033 } +, { { 0.945662260055542, -0.027204414829611778, -0.3570230305194855 }, 0.37494576737715907 } +, { { 1.8591350317001343, 0.09516841173171997, 1.431195616722107 }, 0.5613021315333133 } +, { { -0.14796173572540283, 3.592709541320801, -0.3852364122867584 }, 0.46899416758090073 } +, { { 1.414710521697998, 0.6246669888496399, -0.007774942554533482 }, 0.46243669139995136 } +, { { 1.2696725130081177, -0.39998090267181396, 0.6421909332275391 }, 0.3975097018500365 } +, { { -0.12758338451385498, 1.1125560998916626, 0.7232611775398254 }, 0.672883066503446 } +, { { -0.2525366246700287, 1.0297623872756958, 2.618793249130249 }, 0.8520370623106243 } +}; + +#endif // _EI_CLASSIFIER_ANOMALY_CLUSTERS_H_ diff --git 
a/ei-model/model-parameters/anomaly_types.h b/ei-model/model-parameters/anomaly_types.h new file mode 100644 index 0000000..be123ba --- /dev/null +++ b/ei-model/model-parameters/anomaly_types.h @@ -0,0 +1,36 @@ +/* Generated by Edge Impulse + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _EI_CLASSIFIER_ANOMALY_TYPES_HEADER_H_ +#define _EI_CLASSIFIER_ANOMALY_TYPES_HEADER_H_ + +#define EI_CLASSIFIER_HAS_ANOMALY 1 + +const uint16_t EI_CLASSIFIER_ANOM_AXIS[] { 0, 11, 22 }; +#define EI_CLASSIFIER_ANOM_AXIS_SIZE 3 +#define EI_CLASSIFIER_ANOM_CLUSTER_COUNT 32 + +typedef struct { +float centroid[EI_CLASSIFIER_ANOM_AXIS_SIZE]; +float max_error; +} ei_classifier_anom_cluster_t; + +#endif // _EI_CLASSIFIER_ANOMALY_TYPES_HEADER_H_ diff --git a/ei-model/model-parameters/dsp_blocks.h b/ei-model/model-parameters/dsp_blocks.h new file mode 100644 index 0000000..620b9a8 --- /dev/null +++ b/ei-model/model-parameters/dsp_blocks.h @@ -0,0 +1,41 @@ +/* Generated by Edge Impulse + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _EI_CLASSIFIER_DSP_BLOCKS_H_ +#define _EI_CLASSIFIER_DSP_BLOCKS_H_ + +#include "model-parameters/model_metadata.h" +#include "model-parameters/model_variables.h" +#include "edge-impulse-sdk/classifier/ei_run_dsp.h" +#include "edge-impulse-sdk/classifier/ei_model_types.h" + +const size_t ei_dsp_blocks_size = 1; +ei_model_dsp_t ei_dsp_blocks[ei_dsp_blocks_size] = { + { // DSP block 3 + 33, + &extract_spectral_analysis_features, + (void*)&ei_dsp_config_3, + ei_dsp_config_3_axes, + ei_dsp_config_3_axes_size + } +}; + +#endif // _EI_CLASSIFIER_DSP_BLOCKS_H_ \ No newline at end of file diff --git a/ei-model/model-parameters/model_metadata.h b/ei-model/model-parameters/model_metadata.h new file mode 100644 index 0000000..71d59b9 --- /dev/null +++ b/ei-model/model-parameters/model_metadata.h @@ -0,0 +1,207 @@ +/* Generated by Edge Impulse + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _EI_CLASSIFIER_MODEL_METADATA_H_ +#define _EI_CLASSIFIER_MODEL_METADATA_H_ + +#include <stdint.h> + +#define EI_CLASSIFIER_NONE 255 +#define EI_CLASSIFIER_UTENSOR 1 +#define EI_CLASSIFIER_TFLITE 2 +#define EI_CLASSIFIER_CUBEAI 3 +#define EI_CLASSIFIER_TFLITE_FULL 4 +#define EI_CLASSIFIER_TENSAIFLOW 5 +#define EI_CLASSIFIER_TENSORRT 6 + +#define EI_CLASSIFIER_SENSOR_UNKNOWN -1 +#define EI_CLASSIFIER_SENSOR_MICROPHONE 1 +#define EI_CLASSIFIER_SENSOR_ACCELEROMETER 2 +#define EI_CLASSIFIER_SENSOR_CAMERA 3 +#define EI_CLASSIFIER_SENSOR_9DOF 4 +#define EI_CLASSIFIER_SENSOR_ENVIRONMENTAL 5 +#define EI_CLASSIFIER_SENSOR_FUSION 6 + +// These must match the enum values in TensorFlow Lite's "TfLiteType" +#define EI_CLASSIFIER_DATATYPE_FLOAT32 1 +#define EI_CLASSIFIER_DATATYPE_INT8 9 + +#define EI_CLASSIFIER_PROJECT_ID 110877 +#define EI_CLASSIFIER_PROJECT_OWNER "EdgeImpulse Inc."
+#define EI_CLASSIFIER_PROJECT_NAME "Tutorial: Continuous motion recognition" +#define EI_CLASSIFIER_PROJECT_DEPLOY_VERSION 53 +#define EI_CLASSIFIER_NN_INPUT_FRAME_SIZE 33 +#define EI_CLASSIFIER_RAW_SAMPLE_COUNT 125 +#define EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME 3 +#define EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE (EI_CLASSIFIER_RAW_SAMPLE_COUNT * EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME) +#define EI_CLASSIFIER_INPUT_WIDTH 0 +#define EI_CLASSIFIER_INPUT_HEIGHT 0 +#define EI_CLASSIFIER_INPUT_FRAMES 0 +#define EI_CLASSIFIER_INTERVAL_MS 16 +#define EI_CLASSIFIER_LABEL_COUNT 4 +#define EI_CLASSIFIER_HAS_ANOMALY 1 +#define EI_CLASSIFIER_FREQUENCY 62.5 +#define EI_CLASSIFIER_USE_QUANTIZED_DSP_BLOCK 0 +#define EI_CLASSIFIER_HAS_MODEL_VARIABLES 1 + + +#define EI_CLASSIFIER_OBJECT_DETECTION 0 +#define EI_CLASSIFIER_TFLITE_OUTPUT_DATA_TENSOR 0 + + +#define EI_CLASSIFIER_TFLITE_INPUT_DATATYPE EI_CLASSIFIER_DATATYPE_INT8 +#define EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED 1 +#define EI_CLASSIFIER_TFLITE_INPUT_SCALE 0.11322642862796783 +#define EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT -128 +#define EI_CLASSIFIER_TFLITE_OUTPUT_DATATYPE EI_CLASSIFIER_DATATYPE_INT8 +#define EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED 1 +#define EI_CLASSIFIER_TFLITE_OUTPUT_SCALE 0.00390625 +#define EI_CLASSIFIER_TFLITE_OUTPUT_ZEROPOINT -128 + + + +#define EI_CLASSIFIER_INFERENCING_ENGINE EI_CLASSIFIER_TFLITE + + +#define EI_CLASSIFIER_COMPILED 1 +#define EI_CLASSIFIER_HAS_TFLITE_OPS_RESOLVER 1 + + +#define EI_CLASSIFIER_HAS_FFT_INFO 0 + +#define EI_CLASSIFIER_SENSOR EI_CLASSIFIER_SENSOR_ACCELEROMETER +#define EI_CLASSIFIER_FUSION_AXES_STRING "accX + accY + accZ" + +#ifndef EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW +#define EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW 4 +#endif // EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW +#define EI_CLASSIFIER_SLICE_SIZE (EI_CLASSIFIER_RAW_SAMPLE_COUNT / EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW) + +#if EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE && EI_CLASSIFIER_USE_FULL_TFLITE == 1 +#undef EI_CLASSIFIER_INFERENCING_ENGINE +#undef EI_CLASSIFIER_HAS_TFLITE_OPS_RESOLVER +#define EI_CLASSIFIER_INFERENCING_ENGINE EI_CLASSIFIER_TFLITE_FULL +#define EI_CLASSIFIER_HAS_TFLITE_OPS_RESOLVER 0 +#if EI_CLASSIFIER_COMPILED == 1 +#error "Cannot use full TensorFlow Lite with EON" +#endif +#endif // EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE && EI_CLASSIFIER_USE_FULL_TFLITE == 1 + +typedef struct { + uint16_t implementation_version; + int axes; + float scale_axes; + bool average; + bool minimum; + bool maximum; + bool rms; + bool stdev; + bool skewness; + bool kurtosis; +} ei_dsp_config_flatten_t; + +typedef struct { + uint16_t implementation_version; + int axes; + const char * channels; +} ei_dsp_config_image_t; + +typedef struct { + uint16_t implementation_version; + int axes; + int num_cepstral; + float frame_length; + float frame_stride; + int num_filters; + int fft_length; + int win_size; + int low_frequency; + int high_frequency; + float pre_cof; + int pre_shift; +} ei_dsp_config_mfcc_t; + +typedef struct { + uint16_t implementation_version; + int axes; + float frame_length; + float frame_stride; + int num_filters; + int fft_length; + int low_frequency; + int high_frequency; + int win_size; + int noise_floor_db; +} ei_dsp_config_mfe_t; + +typedef struct { + uint16_t implementation_version; + int axes; + float scale_axes; +} ei_dsp_config_raw_t; + +typedef struct { + uint16_t implementation_version; + int axes; + float scale_axes; + const char * filter_type; + float filter_cutoff; + int filter_order; + const char * 
analysis_type; + int fft_length; + int spectral_peaks_count; + float spectral_peaks_threshold; + const char * spectral_power_edges; + bool do_log; + bool do_fft_overlap; + int wavelet_level; + const char * wavelet; +} ei_dsp_config_spectral_analysis_t; + +typedef struct { + uint16_t implementation_version; + int axes; + float frame_length; + float frame_stride; + int fft_length; + int noise_floor_db; + bool show_axes; +} ei_dsp_config_spectrogram_t; + +typedef struct { + uint16_t implementation_version; + int axes; + float frame_length; + float frame_stride; + int num_filters; + int fft_length; + int low_frequency; + int high_frequency; + float pre_cof; +} ei_dsp_config_audio_syntiant_t; + +typedef struct { + uint16_t implementation_version; + int axes; + bool scaling; +} ei_dsp_config_imu_syntiant_t; + +#endif // _EI_CLASSIFIER_MODEL_METADATA_H_ diff --git a/ei-model/model-parameters/model_variables.h b/ei-model/model-parameters/model_variables.h new file mode 100644 index 0000000..f503baf --- /dev/null +++ b/ei-model/model-parameters/model_variables.h @@ -0,0 +1,59 @@ +/* Generated by Edge Impulse + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _EI_CLASSIFIER_MODEL_VARIABLES_H_ +#define _EI_CLASSIFIER_MODEL_VARIABLES_H_ + +#include +#include "model_metadata.h" +#include "edge-impulse-sdk/classifier/ei_model_types.h" + +const char* ei_classifier_inferencing_categories[] = { "idle", "snake", "updown", "wave" }; + +uint8_t ei_dsp_config_3_axes[] = { 0, 1, 2 }; +const uint32_t ei_dsp_config_3_axes_size = 3; +ei_dsp_config_spectral_analysis_t ei_dsp_config_3 = { + 1, + 3, + 1.0f, + "low", + 3.0f, + 6, + "FFT", + 128, + 3, + 0.1f, + "0.1, 0.5, 1.0, 2.0, 5.0", + true, + false, + 4, + "db4" +}; +const ei_model_performance_calibration_t ei_calibration = { + 1, /* integer version number */ + false, /* Has configured performance calibration */ + (int32_t)(EI_CLASSIFIER_RAW_SAMPLE_COUNT / ((EI_CLASSIFIER_FREQUENCY > 0) ? EI_CLASSIFIER_FREQUENCY : 1)) * 1000, /* Model window */ + 0.8f, /* Default threshold */ + (int32_t)(EI_CLASSIFIER_RAW_SAMPLE_COUNT / ((EI_CLASSIFIER_FREQUENCY > 0) ? 
EI_CLASSIFIER_FREQUENCY : 1)) * 500, /* Half of model window */ + 0 /* Don't use flags */ +}; + +#endif // _EI_CLASSIFIER_MODEL_VARIABLES_H_ diff --git a/ei-model/tflite-model/trained_model_compiled.cpp b/ei-model/tflite-model/trained_model_compiled.cpp new file mode 100644 index 0000000..50d51c4 --- /dev/null +++ b/ei-model/tflite-model/trained_model_compiled.cpp @@ -0,0 +1,465 @@ +/* Generated by Edge Impulse + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +// Generated on: 07.06.2022 07:54:23 + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_mutable_op_resolver.h" +#include "edge-impulse-sdk/porting/ei_classifier_porting.h" + +#if EI_CLASSIFIER_PRINT_STATE +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" { + extern void ei_printf(const char *format, ...); +} +#else +extern void ei_printf(const char *format, ...); +#endif +#endif + +#if defined __GNUC__ +#define ALIGN(X) __attribute__((aligned(X))) +#elif defined _MSC_VER +#define ALIGN(X) __declspec(align(X)) +#elif defined __TASKING__ +#define ALIGN(X) __align(X) +#endif + +using namespace tflite; +using namespace tflite::ops; +using namespace tflite::ops::micro; + +namespace { + +constexpr int kTensorArenaSize = 272; + +#if defined(EI_CLASSIFIER_ALLOCATION_STATIC) +uint8_t tensor_arena[kTensorArenaSize] ALIGN(16); +#elif defined(EI_CLASSIFIER_ALLOCATION_STATIC_HIMAX) +#pragma Bss(".tensor_arena") +uint8_t tensor_arena[kTensorArenaSize] ALIGN(16); +#pragma Bss() +#elif defined(EI_CLASSIFIER_ALLOCATION_STATIC_HIMAX_GNU) +uint8_t tensor_arena[kTensorArenaSize] ALIGN(16) __attribute__((section(".tensor_arena"))); +#else +#define EI_CLASSIFIER_ALLOCATION_HEAP 1 +uint8_t* tensor_arena = NULL; +#endif + +static uint8_t* tensor_boundary; +static uint8_t* current_location; + +template <int SZ, class T> struct TfArray { + int sz; T elem[SZ]; +}; +enum used_operators_e { + OP_FULLY_CONNECTED, OP_SOFTMAX, OP_LAST +}; +struct TensorInfo_t { // subset of TfLiteTensor used for initialization from constant memory + TfLiteAllocationType allocation_type; + TfLiteType type; + void* data; + TfLiteIntArray* dims; + size_t bytes; + TfLiteQuantization quantization; +}; +struct NodeInfo_t { // subset of TfLiteNode used for initialization from constant memory + struct TfLiteIntArray* inputs; + struct TfLiteIntArray* outputs; + void* builtin_data; + used_operators_e used_op_index;
+}; + +TfLiteContext ctx{}; +TfLiteTensor tflTensors[11]; +TfLiteEvalTensor tflEvalTensors[11]; +TfLiteRegistration registrations[OP_LAST]; +TfLiteNode tflNodes[4]; + +const TfArray<2, int> tensor_dimension0 = { 2, { 1,33 } }; +const TfArray<1, float> quant0_scale = { 1, { 0.11322642862796783, } }; +const TfArray<1, int> quant0_zero = { 1, { -128 } }; +const TfLiteAffineQuantization quant0 = { (TfLiteFloatArray*)&quant0_scale, (TfLiteIntArray*)&quant0_zero, 0 }; +const ALIGN(16) int32_t tensor_data1[20] = { -3, -62, 1031, -127, 977, 282, -31, 1546, -41, -9, 311, -75, 817, 25, -82, 1116, -23, -92, 119, -3, }; +const TfArray<1, int> tensor_dimension1 = { 1, { 20 } }; +const TfArray<1, float> quant1_scale = { 1, { 0.0006838862900622189, } }; +const TfArray<1, int> quant1_zero = { 1, { 0 } }; +const TfLiteAffineQuantization quant1 = { (TfLiteFloatArray*)&quant1_scale, (TfLiteIntArray*)&quant1_zero, 0 }; +const ALIGN(16) int32_t tensor_data2[10] = { 321, -5, -29, 344, 24, -32, -75, -25, -30, 35, }; +const TfArray<1, int> tensor_dimension2 = { 1, { 10 } }; +const TfArray<1, float> quant2_scale = { 1, { 0.001575543312355876, } }; +const TfArray<1, int> quant2_zero = { 1, { 0 } }; +const TfLiteAffineQuantization quant2 = { (TfLiteFloatArray*)&quant2_scale, (TfLiteIntArray*)&quant2_zero, 0 }; +const ALIGN(16) int32_t tensor_data3[4] = { 396, -238, -36, 3, }; +const TfArray<1, int> tensor_dimension3 = { 1, { 4 } }; +const TfArray<1, float> quant3_scale = { 1, { 0.0013353734975680709, } }; +const TfArray<1, int> quant3_zero = { 1, { 0 } }; +const TfLiteAffineQuantization quant3 = { (TfLiteFloatArray*)&quant3_scale, (TfLiteIntArray*)&quant3_zero, 0 }; +const ALIGN(16) int8_t tensor_data4[20*33] = { + -13, 4, -3, 31, -4, 34, -41, -40, -33, -2, 69, -14, -13, 60, 57, 50, 60, 6, 105, -16, 26, -70, 78, -32, -6, 9, -31, 40, 0, 19, -5, -40, 19, + 1, 6, -7, -28, 3, 44, 28, 38, -50, 30, 22, -32, -19, -53, -38, -28, -26, -52, 34, 19, -10, -36, -50, -37, 11, -38, 7, 18, -35, -3, 40, 45, -59, + 15, 5, -41, 51, 50, 7, -21, 32, -37, 2, -22, 13, 34, 15, 65, 24, -4, 6, -30, -77, 58, 60, -44, 74, -42, 61, 12, -5, -28, -118, -100, 62, -57, + -64, -31, -54, 9, 3, 7, -48, 1, 2, 38, -8, -58, -8, 14, -9, -38, -21, -20, 45, 26, -62, -43, 42, -19, -21, 34, -6, -25, 36, 0, -12, 26, -6, + -4, 63, -45, 61, 46, 35, -16, -31, -15, -5, -16, 9, 53, -2, -17, 63, 11, 12, -22, -21, -41, 12, -18, 47, -2, -3, 39, 18, 19, -48, -29, -16, -42, + 55, -5, 17, 5, 28, 34, 32, 47, 25, -11, 6, -6, 75, 0, 48, 27, -10, 23, -3, 79, 49, -20, -16, 16, -45, 21, 32, -1, 44, 2, 12, 55, 19, + -46, 47, -12, -23, -25, 11, 13, -16, -31, 25, -50, -46, -58, -44, 44, 9, -16, 23, 10, -24, 5, -12, 46, -45, 10, -3, -31, 5, 10, -20, 20, -20, -39, + -15, 26, -35, -21, -50, 42, 4, -49, 48, -39, 37, -33, -58, -32, -40, -5, 2, -41, 0, -14, -23, -42, -40, -15, -5, -52, 40, 12, 20, 38, -45, 50, -45, + 22, 14, 25, -50, 36, 21, -18, -4, 27, -40, -51, 42, 0, -33, -55, -40, 11, -20, 24, -14, -5, -26, -8, -30, -20, 46, -46, 11, 46, 5, -9, 24, -22, + 9, 37, 18, 42, -50, 28, -5, -17, 32, 1, 15, -11, -24, 2, 28, -41, 18, 63, 126, -18, 20, -62, 27, 44, 73, -6, 1, -13, 82, 91, 8, 26, 13, + -20, -43, -19, -3, 43, 18, 24, -4, 13, -12, 5, 39, -4, 38, 21, -51, -25, 61, 66, -14, -85, -45, 73, -63, 13, 64, -10, 49, 54, 16, 92, 5, -30, + 39, 23, -9, -18, -2, 44, 4, 32, 36, 44, -18, 36, -1, -46, -42, 15, -1, 22, 21, 31, 27, -30, -13, 6, 28, -47, -20, 33, -44, 18, -55, 12, -45, + 9, 13, -3, -27, 46, -13, 33, -28, -22, 32, -24, -16, -48, 60, 50, -24, 27, 12, -22, -19, -90, 15, 61, -15, 
-33, 5, -45, 12, 39, -12, 41, -13, -24, + -3, 24, -25, 37, -1, -23, 29, -16, 8, 28, 41, 38, 38, -32, -2, 11, -43, -39, -11, 21, -72, -25, 18, -20, -57, 52, 24, 2, 37, -43, -62, 46, 6, + -20, -44, 69, -15, 27, 5, 23, 39, 18, 2, -25, 3, -52, -4, 12, -23, -21, -8, 43, 13, 65, 13, -22, 18, 40, 16, -25, 6, 1, 30, 25, 3, 54, + -53, 73, -44, 14, -3, 16, -38, -77, -49, 46, -66, 21, 19, -18, 64, 3, 16, 39, -37, 22, 11, -1, 6, -19, -32, -27, 22, 21, -21, -73, -91, -15, -48, + -45, 46, 30, 6, -6, -4, -9, -30, -9, 17, 0, -49, -34, -53, -38, 22, 34, 22, -51, -10, -29, -54, -28, 39, 31, 19, -16, 2, 50, 46, 1, -13, -9, + -33, -8, -60, -58, 41, -30, 2, 36, -40, -1, -4, 16, 1, -24, -19, -24, -6, -17, -36, 37, -57, -20, 21, -55, 38, -17, -48, 44, 23, 19, 23, 0, 1, + -15, 75, 58, 97, 68, 45, 36, -67, -43, -39, -65, 1, 127, 30, 48, 99, 21, 39, -1, -33, -21, -32, 31, 111, 12, 20, 72, 28, 34, 36, 63, 42, 60, + 21, 54, 73, 31, 50, -2, 20, -15, 85, 80, 28, 67, 38, 8, 51, 43, 23, -55, -37, 13, 52, 77, 35, -7, 24, 36, -1, 51, -53, -74, -68, -45, 54, +}; +const TfArray<2, int> tensor_dimension4 = { 2, { 20,33 } }; +const TfArray<1, float> quant4_scale = { 1, { 0.006039988249540329, } }; +const TfArray<1, int> quant4_zero = { 1, { 0 } }; +const TfLiteAffineQuantization quant4 = { (TfLiteFloatArray*)&quant4_scale, (TfLiteIntArray*)&quant4_zero, 0 }; +const ALIGN(16) int8_t tensor_data5[10*20] = { + -7, -1, 46, 4, 65, -10, -19, 92, -36, -40, -16, -30, 42, 34, -35, 32, -36, -1, 11, 5, + -22, 5, -17, 13, 8, 3, 23, -15, -12, -25, 5, -27, -9, -10, -27, -29, -14, 22, 16, -30, + -30, 25, 6, -19, 20, 1, 12, -16, 26, 21, -24, -16, 7, -32, 10, -12, -9, 30, -33, -13, + -53, -9, 41, -27, 47, 5, 24, 127, 26, -52, 29, 10, 50, -28, -11, 50, 6, 10, -35, -38, + 31, 11, 0, -17, -7, 31, 20, -30, -26, 55, 31, -42, 1, 18, -7, 2, 24, -7, 68, 13, + 22, 25, -23, 6, -23, -16, 4, -11, 29, -39, -38, 28, -28, 13, -24, 23, -27, 1, -38, 18, + 29, -12, 28, 25, 49, -15, -9, -31, 11, 21, -16, 15, -22, 1, -14, 5, 11, -13, 41, -18, + 3, 27, -24, -28, 5, -15, 2, -33, 23, 24, 49, 25, 3, -5, -16, -28, -14, 16, -4, 8, + -18, 18, 26, 18, 53, -10, 18, -76, -10, -28, 21, 20, 31, 31, -12, 9, -28, 32, 27, -3, + -18, 17, 41, 23, -18, 42, 19, -29, 43, -10, -23, 25, -37, -5, -9, 34, 31, 27, 8, 45, +}; +const TfArray<2, int> tensor_dimension5 = { 2, { 10,20 } }; +const TfArray<1, float> quant5_scale = { 1, { 0.012687418609857559, } }; +const TfArray<1, int> quant5_zero = { 1, { 0 } }; +const TfLiteAffineQuantization quant5 = { (TfLiteFloatArray*)&quant5_scale, (TfLiteIntArray*)&quant5_zero, 0 }; +const ALIGN(16) int8_t tensor_data6[4*10] = { + 52, -6, -3, 104, -51, -19, -93, -4, 0, -7, + 20, -6, 41, -25, -3, 13, 40, -34, 33, 30, + -127, -50, -35, -6, 50, 24, 13, 21, 11, -39, + -103, 46, 35, -6, 9, 0, -6, -44, -53, 75, +}; +const TfArray<2, int> tensor_dimension6 = { 2, { 4,10 } }; +const TfArray<1, float> quant6_scale = { 1, { 0.011534213088452816, } }; +const TfArray<1, int> quant6_zero = { 1, { 0 } }; +const TfLiteAffineQuantization quant6 = { (TfLiteFloatArray*)&quant6_scale, (TfLiteIntArray*)&quant6_zero, 0 }; +const TfArray<2, int> tensor_dimension7 = { 2, { 1,20 } }; +const TfArray<1, float> quant7_scale = { 1, { 0.12418155372142792, } }; +const TfArray<1, int> quant7_zero = { 1, { -128 } }; +const TfLiteAffineQuantization quant7 = { (TfLiteFloatArray*)&quant7_scale, (TfLiteIntArray*)&quant7_zero, 0 }; +const TfArray<2, int> tensor_dimension8 = { 2, { 1,10 } }; +const TfArray<1, float> quant8_scale = { 1, { 0.11577499657869339, } }; +const TfArray<1, int> 
quant8_zero = { 1, { -128 } }; +const TfLiteAffineQuantization quant8 = { (TfLiteFloatArray*)&quant8_scale, (TfLiteIntArray*)&quant8_zero, 0 }; +const TfArray<2, int> tensor_dimension9 = { 2, { 1,4 } }; +const TfArray<1, float> quant9_scale = { 1, { 0.18887442350387573, } }; +const TfArray<1, int> quant9_zero = { 1, { 9 } }; +const TfLiteAffineQuantization quant9 = { (TfLiteFloatArray*)&quant9_scale, (TfLiteIntArray*)&quant9_zero, 0 }; +const TfArray<2, int> tensor_dimension10 = { 2, { 1,4 } }; +const TfArray<1, float> quant10_scale = { 1, { 0.00390625, } }; +const TfArray<1, int> quant10_zero = { 1, { -128 } }; +const TfLiteAffineQuantization quant10 = { (TfLiteFloatArray*)&quant10_scale, (TfLiteIntArray*)&quant10_zero, 0 }; +const TfLiteFullyConnectedParams opdata0 = { kTfLiteActRelu, kTfLiteFullyConnectedWeightsFormatDefault, false, false }; +const TfArray<3, int> inputs0 = { 3, { 0,4,1 } }; +const TfArray<1, int> outputs0 = { 1, { 7 } }; +const TfLiteFullyConnectedParams opdata1 = { kTfLiteActRelu, kTfLiteFullyConnectedWeightsFormatDefault, false, false }; +const TfArray<3, int> inputs1 = { 3, { 7,5,2 } }; +const TfArray<1, int> outputs1 = { 1, { 8 } }; +const TfLiteFullyConnectedParams opdata2 = { kTfLiteActNone, kTfLiteFullyConnectedWeightsFormatDefault, false, false }; +const TfArray<3, int> inputs2 = { 3, { 8,6,3 } }; +const TfArray<1, int> outputs2 = { 1, { 9 } }; +const TfLiteSoftmaxParams opdata3 = { 1 }; +const TfArray<1, int> inputs3 = { 1, { 9 } }; +const TfArray<1, int> outputs3 = { 1, { 10 } }; +const TensorInfo_t tensorData[] = { + { kTfLiteArenaRw, kTfLiteInt8, tensor_arena + 0, (TfLiteIntArray*)&tensor_dimension0, 33, {kTfLiteAffineQuantization, const_cast(static_cast(&quant0))}, }, + { kTfLiteMmapRo, kTfLiteInt32, (void*)tensor_data1, (TfLiteIntArray*)&tensor_dimension1, 80, {kTfLiteAffineQuantization, const_cast(static_cast(&quant1))}, }, + { kTfLiteMmapRo, kTfLiteInt32, (void*)tensor_data2, (TfLiteIntArray*)&tensor_dimension2, 40, {kTfLiteAffineQuantization, const_cast(static_cast(&quant2))}, }, + { kTfLiteMmapRo, kTfLiteInt32, (void*)tensor_data3, (TfLiteIntArray*)&tensor_dimension3, 16, {kTfLiteAffineQuantization, const_cast(static_cast(&quant3))}, }, + { kTfLiteMmapRo, kTfLiteInt8, (void*)tensor_data4, (TfLiteIntArray*)&tensor_dimension4, 660, {kTfLiteAffineQuantization, const_cast(static_cast(&quant4))}, }, + { kTfLiteMmapRo, kTfLiteInt8, (void*)tensor_data5, (TfLiteIntArray*)&tensor_dimension5, 200, {kTfLiteAffineQuantization, const_cast(static_cast(&quant5))}, }, + { kTfLiteMmapRo, kTfLiteInt8, (void*)tensor_data6, (TfLiteIntArray*)&tensor_dimension6, 40, {kTfLiteAffineQuantization, const_cast(static_cast(&quant6))}, }, + { kTfLiteArenaRw, kTfLiteInt8, tensor_arena + 48, (TfLiteIntArray*)&tensor_dimension7, 20, {kTfLiteAffineQuantization, const_cast(static_cast(&quant7))}, }, + { kTfLiteArenaRw, kTfLiteInt8, tensor_arena + 0, (TfLiteIntArray*)&tensor_dimension8, 10, {kTfLiteAffineQuantization, const_cast(static_cast(&quant8))}, }, + { kTfLiteArenaRw, kTfLiteInt8, tensor_arena + 16, (TfLiteIntArray*)&tensor_dimension9, 4, {kTfLiteAffineQuantization, const_cast(static_cast(&quant9))}, }, + { kTfLiteArenaRw, kTfLiteInt8, tensor_arena + 0, (TfLiteIntArray*)&tensor_dimension10, 4, {kTfLiteAffineQuantization, const_cast(static_cast(&quant10))}, }, +};const NodeInfo_t nodeData[] = { + { (TfLiteIntArray*)&inputs0, (TfLiteIntArray*)&outputs0, const_cast(static_cast(&opdata0)), OP_FULLY_CONNECTED, }, + { (TfLiteIntArray*)&inputs1, (TfLiteIntArray*)&outputs1, 
const_cast<void*>(static_cast<const void*>(&opdata1)), OP_FULLY_CONNECTED, }, + { (TfLiteIntArray*)&inputs2, (TfLiteIntArray*)&outputs2, const_cast<void*>(static_cast<const void*>(&opdata2)), OP_FULLY_CONNECTED, }, + { (TfLiteIntArray*)&inputs3, (TfLiteIntArray*)&outputs3, const_cast<void*>(static_cast<const void*>(&opdata3)), OP_SOFTMAX, }, +}; +static std::vector<void *> overflow_buffers; +static void * AllocatePersistentBuffer(struct TfLiteContext* ctx, + size_t bytes) { + void *ptr; + if (current_location - bytes < tensor_boundary) { + // OK, this will look super weird, but.... we have CMSIS-NN buffers which + // we cannot calculate beforehand easily. + ptr = ei_calloc(bytes, 1); + if (ptr == NULL) { + printf("ERR: Failed to allocate persistent buffer of size %d\n", (int)bytes); + return NULL; + } + overflow_buffers.push_back(ptr); + return ptr; + } + + current_location -= bytes; + + ptr = current_location; + memset(ptr, 0, bytes); + + return ptr; +} +typedef struct { + size_t bytes; + void *ptr; +} scratch_buffer_t; +static std::vector<scratch_buffer_t> scratch_buffers; + +static TfLiteStatus RequestScratchBufferInArena(struct TfLiteContext* ctx, size_t bytes, + int* buffer_idx) { + scratch_buffer_t b; + b.bytes = bytes; + + b.ptr = AllocatePersistentBuffer(ctx, b.bytes); + if (!b.ptr) { + return kTfLiteError; + } + + scratch_buffers.push_back(b); + + *buffer_idx = scratch_buffers.size() - 1; + + return kTfLiteOk; +} + +static void* GetScratchBuffer(struct TfLiteContext* ctx, int buffer_idx) { + if (buffer_idx > static_cast<int>(scratch_buffers.size()) - 1) { + return NULL; + } + return scratch_buffers[buffer_idx].ptr; +} + +static TfLiteTensor* GetTensor(const struct TfLiteContext* context, + int tensor_idx) { + return &tflTensors[tensor_idx]; +} + +static TfLiteEvalTensor* GetEvalTensor(const struct TfLiteContext* context, + int tensor_idx) { + return &tflEvalTensors[tensor_idx]; +} + +} // namespace + +TfLiteStatus trained_model_init( void*(*alloc_fnc)(size_t,size_t) ) { +#ifdef EI_CLASSIFIER_ALLOCATION_HEAP + tensor_arena = (uint8_t*) alloc_fnc(16, kTensorArenaSize); + if (!tensor_arena) { + printf("ERR: failed to allocate tensor arena\n"); + return kTfLiteError; + } +#else + memset(tensor_arena, 0, kTensorArenaSize); +#endif + tensor_boundary = tensor_arena; + current_location = tensor_arena + kTensorArenaSize; + ctx.AllocatePersistentBuffer = &AllocatePersistentBuffer; + ctx.RequestScratchBufferInArena = &RequestScratchBufferInArena; + ctx.GetScratchBuffer = &GetScratchBuffer; + ctx.GetTensor = &GetTensor; + ctx.GetEvalTensor = &GetEvalTensor; + ctx.tensors = tflTensors; + ctx.tensors_size = 11; + for(size_t i = 0; i < 11; ++i) { + tflTensors[i].type = tensorData[i].type; + tflEvalTensors[i].type = tensorData[i].type; + tflTensors[i].is_variable = 0; + +#if defined(EI_CLASSIFIER_ALLOCATION_HEAP) + tflTensors[i].allocation_type = tensorData[i].allocation_type; +#else + tflTensors[i].allocation_type = (tensor_arena <= tensorData[i].data && tensorData[i].data < tensor_arena + kTensorArenaSize) ?
kTfLiteArenaRw : kTfLiteMmapRo; +#endif + tflTensors[i].bytes = tensorData[i].bytes; + tflTensors[i].dims = tensorData[i].dims; + tflEvalTensors[i].dims = tensorData[i].dims; + +#if defined(EI_CLASSIFIER_ALLOCATION_HEAP) + if(tflTensors[i].allocation_type == kTfLiteArenaRw){ + uint8_t* start = (uint8_t*) ((uintptr_t)tensorData[i].data + (uintptr_t) tensor_arena); + + tflTensors[i].data.data = start; + tflEvalTensors[i].data.data = start; + } + else{ + tflTensors[i].data.data = tensorData[i].data; + tflEvalTensors[i].data.data = tensorData[i].data; + } +#else + tflTensors[i].data.data = tensorData[i].data; + tflEvalTensors[i].data.data = tensorData[i].data; +#endif // EI_CLASSIFIER_ALLOCATION_HEAP + tflTensors[i].quantization = tensorData[i].quantization; + if (tflTensors[i].quantization.type == kTfLiteAffineQuantization) { + TfLiteAffineQuantization const* quant = ((TfLiteAffineQuantization const*)(tensorData[i].quantization.params)); + tflTensors[i].params.scale = quant->scale->data[0]; + tflTensors[i].params.zero_point = quant->zero_point->data[0]; + } + if (tflTensors[i].allocation_type == kTfLiteArenaRw) { + auto data_end_ptr = (uint8_t*)tflTensors[i].data.data + tensorData[i].bytes; + if (data_end_ptr > tensor_boundary) { + tensor_boundary = data_end_ptr; + } + } + } + if (tensor_boundary > current_location /* end of arena size */) { + printf("ERR: tensor arena is too small, does not fit model - even without scratch buffers\n"); + return kTfLiteError; + } + registrations[OP_FULLY_CONNECTED] = Register_FULLY_CONNECTED(); + registrations[OP_SOFTMAX] = Register_SOFTMAX(); + + for(size_t i = 0; i < 4; ++i) { + tflNodes[i].inputs = nodeData[i].inputs; + tflNodes[i].outputs = nodeData[i].outputs; + tflNodes[i].builtin_data = nodeData[i].builtin_data; +tflNodes[i].custom_initial_data = nullptr; + tflNodes[i].custom_initial_data_size = 0; +if (registrations[nodeData[i].used_op_index].init) { + tflNodes[i].user_data = registrations[nodeData[i].used_op_index].init(&ctx, (const char*)tflNodes[i].builtin_data, 0); + } + } + for(size_t i = 0; i < 4; ++i) { + if (registrations[nodeData[i].used_op_index].prepare) { + TfLiteStatus status = registrations[nodeData[i].used_op_index].prepare(&ctx, &tflNodes[i]); + if (status != kTfLiteOk) { + return status; + } + } + } + return kTfLiteOk; +} + +static const int inTensorIndices[] = { + 0, +}; +TfLiteTensor* trained_model_input(int index) { + return &ctx.tensors[inTensorIndices[index]]; +} + +static const int outTensorIndices[] = { + 10, +}; +TfLiteTensor* trained_model_output(int index) { + return &ctx.tensors[outTensorIndices[index]]; +} + +TfLiteStatus trained_model_invoke() { + for(size_t i = 0; i < 4; ++i) { + TfLiteStatus status = registrations[nodeData[i].used_op_index].invoke(&ctx, &tflNodes[i]); + +#if EI_CLASSIFIER_PRINT_STATE + ei_printf("layer %lu\n", i); + ei_printf(" inputs:\n"); + for (size_t ix = 0; ix < tflNodes[i].inputs->size; ix++) { + auto d = tensorData[tflNodes[i].inputs->data[ix]]; + + size_t data_ptr = (size_t)d.data; + + if (d.allocation_type == kTfLiteArenaRw) { + data_ptr = (size_t)tensor_arena + data_ptr; + } + + if (d.type == TfLiteType::kTfLiteInt8) { + int8_t* data = (int8_t*)data_ptr; + ei_printf(" %lu (%zu bytes, ptr=%p, alloc_type=%d, type=%d): ", ix, d.bytes, data, (int)d.allocation_type, (int)d.type); + for (size_t jx = 0; jx < d.bytes; jx++) { + ei_printf("%d ", data[jx]); + } + } + else { + float* data = (float*)data_ptr; + ei_printf(" %lu (%zu bytes, ptr=%p, alloc_type=%d, type=%d): ", ix, d.bytes, data, 
(int)d.allocation_type, (int)d.type); + for (size_t jx = 0; jx < d.bytes / 4; jx++) { + ei_printf("%f ", data[jx]); + } + } + ei_printf("\n"); + } + ei_printf("\n"); + + ei_printf(" outputs:\n"); + for (size_t ix = 0; ix < tflNodes[i].outputs->size; ix++) { + auto d = tensorData[tflNodes[i].outputs->data[ix]]; + + size_t data_ptr = (size_t)d.data; + + if (d.allocation_type == kTfLiteArenaRw) { + data_ptr = (size_t)tensor_arena + data_ptr; + } + + if (d.type == TfLiteType::kTfLiteInt8) { + int8_t* data = (int8_t*)data_ptr; + ei_printf(" %lu (%zu bytes, ptr=%p, alloc_type=%d, type=%d): ", ix, d.bytes, data, (int)d.allocation_type, (int)d.type); + for (size_t jx = 0; jx < d.bytes; jx++) { + ei_printf("%d ", data[jx]); + } + } + else { + float* data = (float*)data_ptr; + ei_printf(" %lu (%zu bytes, ptr=%p, alloc_type=%d, type=%d): ", ix, d.bytes, data, (int)d.allocation_type, (int)d.type); + for (size_t jx = 0; jx < d.bytes / 4; jx++) { + ei_printf("%f ", data[jx]); + } + } + ei_printf("\n"); + } + ei_printf("\n"); +#endif // EI_CLASSIFIER_PRINT_STATE + + if (status != kTfLiteOk) { + return status; + } + } + return kTfLiteOk; +} + +TfLiteStatus trained_model_reset( void (*free_fnc)(void* ptr) ) { +#ifdef EI_CLASSIFIER_ALLOCATION_HEAP + free_fnc(tensor_arena); +#endif + scratch_buffers.clear(); + for (size_t ix = 0; ix < overflow_buffers.size(); ix++) { + free(overflow_buffers[ix]); + } + overflow_buffers.clear(); + return kTfLiteOk; +} diff --git a/ei-model/tflite-model/trained_model_compiled.h b/ei-model/tflite-model/trained_model_compiled.h new file mode 100644 index 0000000..ce93743 --- /dev/null +++ b/ei-model/tflite-model/trained_model_compiled.h @@ -0,0 +1,75 @@ +/* Generated by Edge Impulse + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +// Generated on: 07.06.2022 07:54:23 + +#ifndef trained_model_GEN_H +#define trained_model_GEN_H + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" + +// Sets up the model with init and prepare steps. +TfLiteStatus trained_model_init( void*(*alloc_fnc)(size_t,size_t) ); +// Returns the input tensor with the given index. +TfLiteTensor *trained_model_input(int index); +// Returns the output tensor with the given index. +TfLiteTensor *trained_model_output(int index); +// Runs inference for the model. +TfLiteStatus trained_model_invoke(); +//Frees memory allocated +TfLiteStatus trained_model_reset( void (*free)(void* ptr) ); + + +// Returns the number of input tensors. 
+inline size_t trained_model_inputs() { + return 1; +} +// Returns the number of output tensors. +inline size_t trained_model_outputs() { + return 1; +} + +inline void *trained_model_input_ptr(int index) { + return trained_model_input(index)->data.data; +} +inline size_t trained_model_input_size(int index) { + return trained_model_input(index)->bytes; +} +inline int trained_model_input_dims_len(int index) { + return trained_model_input(index)->dims->data[0]; +} +inline int *trained_model_input_dims(int index) { + return &trained_model_input(index)->dims->data[1]; +} + +inline void *trained_model_output_ptr(int index) { + return trained_model_output(index)->data.data; +} +inline size_t trained_model_output_size(int index) { + return trained_model_output(index)->bytes; +} +inline int trained_model_output_dims_len(int index) { + return trained_model_output(index)->dims->data[0]; +} +inline int *trained_model_output_dims(int index) { + return &trained_model_output(index)->dims->data[1]; +} + +#endif
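
For quick bench testing of this preloaded model, the sketch below shows one way the generated trained_model_* API could be driven directly from host code, using the quantization constants from model_metadata.h. It is only an illustrative harness, not part of the generated files above and not the SDK's normal entry point (run_classifier(), which also runs the spectral-analysis DSP block and the anomaly scoring): the 33 input features are left as zero placeholders, model_alloc() is a made-up stand-in for the SDK allocator, and the include paths assume ei-model/ and the edge-impulse-sdk are on the compiler's include path.

// Hypothetical test harness for the compiled model above -- a sketch, not part of the patch.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#include "model-parameters/model_metadata.h"
#include "tflite-model/trained_model_compiled.h"

// trained_model_init() expects an allocator taking (alignment, size); a plain calloc
// is good enough for a host-side experiment, so the 16-byte alignment hint is ignored.
static void *model_alloc(size_t alignment, size_t size) {
    (void)alignment;
    return calloc(1, size);
}

int main(void) {
    if (trained_model_init(&model_alloc) != kTfLiteOk) {
        printf("ERR: trained_model_init failed\n");
        return 1;
    }

    // One window of 33 spectral-analysis features. In the real pipeline these come
    // from extract_spectral_analysis_features(); zeros are only placeholders.
    float features[EI_CLASSIFIER_NN_INPUT_FRAME_SIZE] = { 0.0f };

    // Quantize the features into the int8 input tensor using the scale/zero-point
    // published in model_metadata.h.
    TfLiteTensor *input = trained_model_input(0);
    for (int i = 0; i < EI_CLASSIFIER_NN_INPUT_FRAME_SIZE; i++) {
        long q = lround(features[i] / EI_CLASSIFIER_TFLITE_INPUT_SCALE)
                 + EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT;
        if (q < -128) q = -128;
        if (q > 127) q = 127;
        input->data.int8[i] = (int8_t)q;
    }

    if (trained_model_invoke() != kTfLiteOk) {
        printf("ERR: trained_model_invoke failed\n");
        return 1;
    }

    // Dequantize the int8 softmax output back to per-label probabilities
    // (label order matches ei_classifier_inferencing_categories in model_variables.h).
    static const char *labels[] = { "idle", "snake", "updown", "wave" };
    TfLiteTensor *output = trained_model_output(0);
    for (int i = 0; i < EI_CLASSIFIER_LABEL_COUNT; i++) {
        float p = (output->data.int8[i] - EI_CLASSIFIER_TFLITE_OUTPUT_ZEROPOINT)
                  * (float)EI_CLASSIFIER_TFLITE_OUTPUT_SCALE;
        printf("%s: %.5f\n", labels[i], p);
    }

    trained_model_reset(&free);
    return 0;
}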