-
Notifications
You must be signed in to change notification settings - Fork 20
/
ei_run_classifier.h
1080 lines (954 loc) · 44.5 KB
/
ei_run_classifier.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
* Copyright (c) 2022 EdgeImpulse Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _EDGE_IMPULSE_RUN_CLASSIFIER_H_
#define _EDGE_IMPULSE_RUN_CLASSIFIER_H_
#include "ei_model_types.h"
#include "model-parameters/model_metadata.h"
#include "ei_run_dsp.h"
#include "ei_classifier_types.h"
#include "ei_signal_with_axes.h"
#include "postprocessing/ei_postprocessing.h"
#include "edge-impulse-sdk/porting/ei_classifier_porting.h"
#include "edge-impulse-sdk/porting/ei_logging.h"
#include <memory>
#if EI_CLASSIFIER_HAS_ANOMALY
#include "inferencing_engines/anomaly.h"
#endif
#if defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1
#include "ei_sampler.h"
#endif
#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) && (EI_CLASSIFIER_COMPILED != 1)
#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_micro.h"
#elif EI_CLASSIFIER_COMPILED == 1
#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_eon.h"
#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_FULL
#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_full.h"
#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_TIDL
#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_tidl.h"
#elif (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSORRT)
#include "edge-impulse-sdk/classifier/inferencing_engines/tensorrt.h"
#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW
#include "edge-impulse-sdk/classifier/inferencing_engines/tensaiflow.h"
#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI
#include "edge-impulse-sdk/classifier/inferencing_engines/drpai.h"
#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_AKIDA
#include "edge-impulse-sdk/classifier/inferencing_engines/akida.h"
#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_ONNX_TIDL
#include "edge-impulse-sdk/classifier/inferencing_engines/onnx_tidl.h"
#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_MEMRYX
#include "edge-impulse-sdk/classifier/inferencing_engines/memryx.h"
#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_ETHOS_LINUX
#include "edge-impulse-sdk/classifier/inferencing_engines/ethos_linux.h"
#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_NONE
// noop
#else
#error "Unknown inferencing engine"
#endif
// This file has an implicit dependency on ei_run_dsp.h, so must come after that include!
#include "model-parameters/model_variables.h"
#ifdef __cplusplus
namespace {
#endif // __cplusplus
/* Function prototypes ----------------------------------------------------- */
extern "C" EI_IMPULSE_ERROR run_inference(ei_impulse_handle_t *handle, ei_feature_t *fmatrix, ei_impulse_result_t *result, bool debug);
extern "C" EI_IMPULSE_ERROR run_classifier_image_quantized(const ei_impulse_t *impulse, signal_t *signal, ei_impulse_result_t *result, bool debug);
static EI_IMPULSE_ERROR can_run_classifier_image_quantized(const ei_impulse_t *impulse, ei_learning_block_t block_ptr);
#if EI_CLASSIFIER_LOAD_IMAGE_SCALING
EI_IMPULSE_ERROR ei_scale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix);
EI_IMPULSE_ERROR ei_unscale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix);
#endif // EI_CLASSIFIER_LOAD_IMAGE_SCALING
/* Private variables ------------------------------------------------------- */
static uint64_t classifier_continuous_features_written = 0;
/* Private functions ------------------------------------------------------- */
/* These functions (up to Public functions section) are not exposed to end-user,
therefore changes are allowed. */
/**
 * @brief Display the results of the inference
 *
 * Prints a human-readable summary of `result` via ei_printf(): timing first,
 * then — depending on which compile-time model flags are set — object
 * detection boxes, a single regression value, per-label classification
 * scores, and anomaly output (grid cells plus mean/max for visual AD, a
 * single score otherwise).
 *
 * @param result The result to print; must point to a populated result struct
 */
__attribute__((unused)) void display_results(ei_impulse_result_t* result)
{
// print the predictions
ei_printf("Predictions (DSP: %d ms., Classification: %d ms., Anomaly: %d ms.): \n",
result->timing.dsp, result->timing.classification, result->timing.anomaly);
#if EI_CLASSIFIER_OBJECT_DETECTION == 1
ei_printf("#Object detection results:\r\n");
// NOTE(review): only the first entry decides whether "No objects found" is
// printed, and bounding_boxes[0] is read unconditionally — assumes the
// array always has at least one (possibly zero-valued) entry; confirm.
bool bb_found = result->bounding_boxes[0].value > 0;
for (size_t ix = 0; ix < result->bounding_boxes_count; ix++) {
auto bb = result->bounding_boxes[ix];
// a zero confidence value marks an empty/suppressed detection slot
if (bb.value == 0) {
continue;
}
ei_printf("  %s (", bb.label);
ei_printf_float(bb.value);
ei_printf(") [ x: %u, y: %u, width: %u, height: %u ]\n", bb.x, bb.y, bb.width, bb.height);
}
if (!bb_found) {
ei_printf("  No objects found\n");
}
#elif (EI_CLASSIFIER_LABEL_COUNT == 1) && (!EI_CLASSIFIER_HAS_ANOMALY)// regression
ei_printf("#Regression results:\r\n");
ei_printf("  %s: ", result->classification[0].label);
ei_printf_float(result->classification[0].value);
ei_printf("\n");
#elif EI_CLASSIFIER_LABEL_COUNT > 1 // if there is only one label, this is an anomaly only
ei_printf("#Classification results:\r\n");
for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) {
ei_printf("  %s: ", result->classification[ix].label);
ei_printf_float(result->classification[ix].value);
ei_printf("\n");
}
#endif
#if EI_CLASSIFIER_HAS_ANOMALY == 3 // visual AD
ei_printf("#Visual anomaly grid results:\r\n");
// print only grid cells with a non-zero anomaly value
for (uint32_t i = 0; i < result->visual_ad_count; i++) {
ei_impulse_result_bounding_box_t bb = result->visual_ad_grid_cells[i];
if (bb.value == 0) {
continue;
}
ei_printf("  %s (", bb.label);
ei_printf_float(bb.value);
ei_printf(") [ x: %u, y: %u, width: %u, height: %u ]\n", bb.x, bb.y, bb.width, bb.height);
}
ei_printf("Visual anomaly values: Mean ");
ei_printf_float(result->visual_ad_result.mean_value);
ei_printf(" Max ");
ei_printf_float(result->visual_ad_result.max_value);
ei_printf("\r\n");
#elif (EI_CLASSIFIER_HAS_ANOMALY > 0) // except for visual AD
ei_printf("Anomaly prediction: ");
ei_printf_float(result->anomaly);
ei_printf("\r\n");
#endif
}
/**
 * @brief Do inferencing over the processed feature matrix
 *
 * Runs every learning block of the impulse, in order, over the already
 * extracted feature matrices. When compile-time image scaling is enabled,
 * the first feature matrix is scaled in place before each block and
 * unscaled again afterwards, so the features are restored between blocks.
 *
 * @param handle Impulse handle (wraps the model/DSP description and state)
 * @param fmatrix Array of processed feature matrices, one entry per block
 * @param result Output classifier results
 * @param[in] debug Debug output enable
 *
 * @return The ei impulse error.
 */
extern "C" EI_IMPULSE_ERROR run_inference(
ei_impulse_handle_t *handle,
ei_feature_t *fmatrix,
ei_impulse_result_t *result,
bool debug = false)
{
auto& impulse = handle->impulse;
for (size_t ix = 0; ix < impulse->learning_blocks_size; ix++) {
ei_learning_block_t block = impulse->learning_blocks[ix];
#if EI_CLASSIFIER_LOAD_IMAGE_SCALING
// we do not plan to have multiple dsp blocks with image
// so just apply scaling to the first one
EI_IMPULSE_ERROR scale_res = ei_scale_fmatrix(&block, fmatrix[0].matrix);
if (scale_res != EI_IMPULSE_OK) {
return scale_res;
}
#endif
// tell downstream consumers whether this block's output is kept
result->copy_output = block.keep_output;
EI_IMPULSE_ERROR res = block.infer_fn(impulse, fmatrix, ix, (uint32_t*)block.input_block_ids, block.input_block_ids_size, result, block.config, debug);
if (res != EI_IMPULSE_OK) {
return res;
}
#if EI_CLASSIFIER_LOAD_IMAGE_SCALING
// undo scaling
scale_res = ei_unscale_fmatrix(&block, fmatrix[0].matrix);
if (scale_res != EI_IMPULSE_OK) {
return scale_res;
}
#endif
}
// cancellation is only observed after all blocks ran, not per block
if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) {
return EI_IMPULSE_CANCELED;
}
return EI_IMPULSE_OK;
}
/**
 * @brief Process a complete impulse
 *
 * Runs every DSP block of the impulse over the raw signal (allocating one
 * feature matrix per block), optionally prints the features, runs inference
 * over the resulting matrices and finally runs post-processing.
 *
 * Fixes over previous revision:
 *  - the raw matrix_ptrs array was leaked on the EI_IMPULSE_OUT_OF_MEMORY path
 *  - a run_inference() error was silently overwritten by the
 *    run_postprocessing() return value
 *
 * @param handle Handle from init_impulse. Must not be NULL.
 * @param signal Sample data
 * @param result Output classifier results
 * @param[in] debug Debug output enable
 *
 * @return The ei impulse error.
 */
extern "C" EI_IMPULSE_ERROR process_impulse(ei_impulse_handle_t *handle,
                                            signal_t *signal,
                                            ei_impulse_result_t *result,
                                            bool debug = false)
{
    if(!handle) {
        return EI_IMPULSE_INFERENCE_ERROR;
    }

#if (EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 && (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_ONNX_TIDL)) || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI
    // Shortcut for quantized image models: quantize in place instead of
    // building a float feature matrix (saves a lot of RAM)
    ei_learning_block_t block = handle->impulse->learning_blocks[0];
    if (can_run_classifier_image_quantized(handle->impulse, block) == EI_IMPULSE_OK) {
        return run_classifier_image_quantized(handle->impulse, signal, result, debug);
    }
#endif

#ifndef EI_DSP_RESULT_OVERRIDE
    // Don't wipe in CI, as we store a pointer
    memset(result, 0, sizeof(ei_impulse_result_t));
#endif

    uint32_t block_num = handle->impulse->dsp_blocks_size + handle->impulse->learning_blocks_size;

    // smart pointer to features array
    std::unique_ptr<ei_feature_t[]> features_ptr(new ei_feature_t[block_num]);
    ei_feature_t* features = features_ptr.get();
    memset(features, 0, sizeof(ei_feature_t) * block_num);

    // have it outside of the loop to avoid going out of scope
    std::unique_ptr<ei::matrix_t> *matrix_ptrs = new std::unique_ptr<ei::matrix_t>[block_num];

    uint64_t dsp_start_us = ei_read_timer_us();

    size_t out_features_index = 0;

    for (size_t ix = 0; ix < handle->impulse->dsp_blocks_size; ix++) {
        ei_model_dsp_t block = handle->impulse->dsp_blocks[ix];
        matrix_ptrs[ix] = std::unique_ptr<ei::matrix_t>(new ei::matrix_t(1, block.n_output_features));
        features[ix].matrix = matrix_ptrs[ix].get();
        features[ix].blockId = block.blockId;

        if (out_features_index + block.n_output_features > handle->impulse->nn_input_frame_size) {
            ei_printf("ERR: Would write outside feature buffer\n");
            delete[] matrix_ptrs;
            return EI_IMPULSE_DSP_ERROR;
        }

#if EIDSP_SIGNAL_C_FN_POINTER
        if (block.axes_size != handle->impulse->raw_samples_per_frame) {
            ei_printf("ERR: EIDSP_SIGNAL_C_FN_POINTER can only be used when all axes are selected for DSP blocks\n");
            delete[] matrix_ptrs;
            return EI_IMPULSE_DSP_ERROR;
        }
        auto internal_signal = signal;
#else
        // wrap the signal so the DSP block only sees its selected axes
        SignalWithAxes swa(signal, block.axes, block.axes_size, handle->impulse);
        auto internal_signal = swa.get_signal();
#endif

        int ret;
        if (block.factory) { // ie, if we're using state
            // Msg user
            static bool has_printed = false;
            if (!has_printed) {
                EI_LOGI("Impulse maintains state. Call run_classifier_init() to reset state (e.g. if data stream is interrupted.)\n");
                has_printed = true;
            }

            // getter has a lazy init, so we can just call it
            auto dsp_handle = handle->state.get_dsp_handle(ix);
            if(dsp_handle) {
                ret = dsp_handle->extract(internal_signal, features[ix].matrix, block.config, handle->impulse->frequency);
#if EI_DSP_ENABLE_RUNTIME_HR == 1
                hr_class* hr = static_cast<hr_class*>(dsp_handle);
                result->heart_rate = hr->get_last_hr();
#endif
            }
            else {
                // BUGFIX: matrix_ptrs was leaked on this path
                delete[] matrix_ptrs;
                return EI_IMPULSE_OUT_OF_MEMORY;
            }
        } else {
            ret = block.extract_fn(internal_signal, features[ix].matrix, block.config, handle->impulse->frequency);
        }

        if (ret != EIDSP_OK) {
            ei_printf("ERR: Failed to run DSP process (%d)\n", ret);
            delete[] matrix_ptrs;
            return EI_IMPULSE_DSP_ERROR;
        }

        if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) {
            delete[] matrix_ptrs;
            return EI_IMPULSE_CANCELED;
        }

        out_features_index += block.n_output_features;
    }

#if EI_CLASSIFIER_SINGLE_FEATURE_INPUT == 0
    // allocate output matrices for learning blocks whose output is kept
    // (consumed as input by downstream blocks)
    for (size_t ix = 0; ix < handle->impulse->learning_blocks_size; ix++) {
        ei_learning_block_t block = handle->impulse->learning_blocks[ix];
        if (block.keep_output) {
            matrix_ptrs[handle->impulse->dsp_blocks_size + ix] = std::unique_ptr<ei::matrix_t>(new ei::matrix_t(1, block.output_features_count));
            features[handle->impulse->dsp_blocks_size + ix].matrix = matrix_ptrs[handle->impulse->dsp_blocks_size + ix].get();
            features[handle->impulse->dsp_blocks_size + ix].blockId = block.blockId;
        }
    }
#endif // EI_CLASSIFIER_SINGLE_FEATURE_INPUT

    result->timing.dsp_us = ei_read_timer_us() - dsp_start_us;
    result->timing.dsp = (int)(result->timing.dsp_us / 1000);

    if (debug) {
        ei_printf("Features (%d ms.): ", result->timing.dsp);
        for (size_t ix = 0; ix < block_num; ix++) {
            if (features[ix].matrix == nullptr) {
                continue;
            }
            for (size_t jx = 0; jx < features[ix].matrix->cols; jx++) {
                ei_printf_float(features[ix].matrix->buffer[jx]);
                ei_printf(" ");
            }
            ei_printf("\n");
        }
    }

    if (debug) {
        ei_printf("Running impulse...\n");
    }

    EI_IMPULSE_ERROR res = run_inference(handle, features, result, debug);
    delete[] matrix_ptrs;

    // BUGFIX: previously the postprocessing result unconditionally replaced
    // res, masking inference failures
    if (res != EI_IMPULSE_OK) {
        return res;
    }

    return run_postprocessing(handle, result, debug);
}
/**
 * @brief Prepare an impulse handle for (re)use by resetting its runtime state.
 *
 * @param handle Handle to initialize; must not be NULL.
 *
 * @return EI_IMPULSE_OK on success, or EI_IMPULSE_OUT_OF_MEMORY when the
 *         handle pointer is NULL.
 */
extern "C" EI_IMPULSE_ERROR init_impulse(ei_impulse_handle_t *handle) {
    if (handle == nullptr) {
        return EI_IMPULSE_OUT_OF_MEMORY;
    }

    // drop any previously accumulated runtime state (e.g. stateful DSP handles)
    handle->state.reset();

    return EI_IMPULSE_OK;
}
/**
 * @brief Process a complete impulse for continuous inference
 *
 * Runs the per-slice variant of each DSP block over the incoming slice,
 * appending the extracted features to a static sliding-window matrix. Once a
 * full window of features has been accumulated, runs normalization,
 * inference and post-processing over the whole window.
 *
 * Fix over previous revision: a run_inference() error is no longer
 * overwritten by the run_postprocessing() return value.
 *
 * NOTE(review): static_features_matrix is sized from the first impulse this
 * function is called with — presumably only one impulse is ever used in
 * continuous mode; confirm before mixing handles.
 *
 * @param handle struct with information about model and DSP
 * @param signal Sample data (one slice)
 * @param result Output classifier results
 * @param[in] debug Debug output enable
 *
 * @return The ei impulse error.
 */
extern "C" EI_IMPULSE_ERROR process_impulse_continuous(ei_impulse_handle_t *handle,
                                                       signal_t *signal,
                                                       ei_impulse_result_t *result,
                                                       bool debug)
{
    auto impulse = handle->impulse;
    static ei::matrix_t static_features_matrix(1, impulse->nn_input_frame_size);
    if (!static_features_matrix.buffer) {
        return EI_IMPULSE_ALLOC_FAILED;
    }

    memset(result, 0, sizeof(ei_impulse_result_t));

    EI_IMPULSE_ERROR ei_impulse_error = EI_IMPULSE_OK;

    uint64_t dsp_start_us = ei_read_timer_us();

    size_t out_features_index = 0;

    for (size_t ix = 0; ix < impulse->dsp_blocks_size; ix++) {
        ei_model_dsp_t block = impulse->dsp_blocks[ix];

        if (out_features_index + block.n_output_features > impulse->nn_input_frame_size) {
            ei_printf("ERR: Would write outside feature buffer\n");
            return EI_IMPULSE_DSP_ERROR;
        }

        // view into the static window at this block's feature offset
        ei::matrix_t fm(1, block.n_output_features,
                        static_features_matrix.buffer + out_features_index);

        int (*extract_fn_slice)(ei::signal_t *signal, ei::matrix_t *output_matrix, void *config, const float frequency, matrix_size_t *out_matrix_size);

        /* Switch to the slice version of the mfcc feature extract function */
        if (block.extract_fn == extract_mfcc_features) {
            extract_fn_slice = &extract_mfcc_per_slice_features;
        }
        else if (block.extract_fn == extract_spectrogram_features) {
            extract_fn_slice = &extract_spectrogram_per_slice_features;
        }
        else if (block.extract_fn == extract_mfe_features) {
            extract_fn_slice = &extract_mfe_per_slice_features;
        }
        else {
            ei_printf("ERR: Unknown extract function, only MFCC, MFE and spectrogram supported\n");
            return EI_IMPULSE_DSP_ERROR;
        }

        matrix_size_t features_written;

#if EIDSP_SIGNAL_C_FN_POINTER
        if (block.axes_size != impulse->raw_samples_per_frame) {
            ei_printf("ERR: EIDSP_SIGNAL_C_FN_POINTER can only be used when all axes are selected for DSP blocks\n");
            return EI_IMPULSE_DSP_ERROR;
        }
        int ret = extract_fn_slice(signal, &fm, block.config, impulse->frequency, &features_written);
#else
        SignalWithAxes swa(signal, block.axes, block.axes_size, impulse);
        int ret = extract_fn_slice(swa.get_signal(), &fm, block.config, impulse->frequency, &features_written);
#endif

        if (ret != EIDSP_OK) {
            ei_printf("ERR: Failed to run DSP process (%d)\n", ret);
            return EI_IMPULSE_DSP_ERROR;
        }

        if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) {
            return EI_IMPULSE_CANCELED;
        }

        classifier_continuous_features_written += (features_written.rows * features_written.cols);

        out_features_index += block.n_output_features;
    }

    result->timing.dsp_us = ei_read_timer_us() - dsp_start_us;
    result->timing.dsp = (int)(result->timing.dsp_us / 1000);

    for (int i = 0; i < impulse->label_count; i++) {
        // set label correctly in the result struct if we have no results (otherwise is nullptr)
        result->classification[i].label = impulse->categories[(uint32_t)i];
    }

    // only run inference once a full window of features has been collected
    if (classifier_continuous_features_written >= impulse->nn_input_frame_size) {
        dsp_start_us = ei_read_timer_us();

        uint32_t block_num = impulse->dsp_blocks_size + impulse->learning_blocks_size;

        // smart pointer to features array
        std::unique_ptr<ei_feature_t[]> features_ptr(new ei_feature_t[block_num]);
        ei_feature_t* features = features_ptr.get();
        memset(features, 0, sizeof(ei_feature_t) * block_num);

        // have it outside of the loop to avoid going out of scope
        std::unique_ptr<ei::matrix_t> *matrix_ptrs = new std::unique_ptr<ei::matrix_t>[block_num];

        out_features_index = 0;
        // iterate over every dsp block and run normalization
        for (size_t ix = 0; ix < impulse->dsp_blocks_size; ix++) {
            ei_model_dsp_t block = impulse->dsp_blocks[ix];
            matrix_ptrs[ix] = std::unique_ptr<ei::matrix_t>(new ei::matrix_t(1, block.n_output_features));
            features[ix].matrix = matrix_ptrs[ix].get();
            features[ix].blockId = block.blockId;

            /* Create a copy of the matrix for normalization */
            for (size_t m_ix = 0; m_ix < block.n_output_features; m_ix++) {
                features[ix].matrix->buffer[m_ix] = static_features_matrix.buffer[out_features_index + m_ix];
            }

            if (block.extract_fn == extract_mfcc_features) {
                calc_cepstral_mean_and_var_normalization_mfcc(features[ix].matrix, block.config);
            }
            else if (block.extract_fn == extract_spectrogram_features) {
                calc_cepstral_mean_and_var_normalization_spectrogram(features[ix].matrix, block.config);
            }
            else if (block.extract_fn == extract_mfe_features) {
                calc_cepstral_mean_and_var_normalization_mfe(features[ix].matrix, block.config);
            }
            out_features_index += block.n_output_features;
        }

        result->timing.dsp_us += ei_read_timer_us() - dsp_start_us;
        result->timing.dsp = (int)(result->timing.dsp_us / 1000);

        if (debug) {
            // only the first block's matrix is printed here (pre-existing behavior)
            ei_printf("Feature Matrix: \n");
            for (size_t ix = 0; ix < features->matrix->cols; ix++) {
                ei_printf_float(features->matrix->buffer[ix]);
                ei_printf(" ");
            }
            ei_printf("\n");
            ei_printf("Running impulse...\n");
        }

        ei_impulse_error = run_inference(handle, features, result, debug);

        delete[] matrix_ptrs;

        // BUGFIX: previously the postprocessing result unconditionally
        // replaced ei_impulse_error, masking inference failures
        if (ei_impulse_error != EI_IMPULSE_OK) {
            return ei_impulse_error;
        }

        ei_impulse_error = run_postprocessing(handle, result, debug);
    }

    return ei_impulse_error;
}
/**
 * Check whether this impulse qualifies for the memory-saving
 * 'run_classifier_image_quantized' shortcut. The checks run in a fixed
 * order so the first failing requirement determines the returned error.
 */
__attribute__((unused)) static EI_IMPULSE_ERROR can_run_classifier_image_quantized(const ei_impulse_t *impulse, ei_learning_block_t block_ptr) {
    // only a subset of engines supports quantized-in-place image inference
    const bool engine_supported =
        (impulse->inferencing_engine == EI_CLASSIFIER_TFLITE) ||
        (impulse->inferencing_engine == EI_CLASSIFIER_TENSAIFLOW) ||
        (impulse->inferencing_engine == EI_CLASSIFIER_DRPAI) ||
        (impulse->inferencing_engine == EI_CLASSIFIER_ONNX_TIDL); // check later
    if (!engine_supported) {
        return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
    }

    // anomaly detection (incl. visual AD) must take the regular path
    if (impulse->has_anomaly) {
        return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES;
    }

    // the learning block must be a plain TFLite graph
    if (block_ptr.infer_fn != run_nn_inference) {
        return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES;
    }

    // ... with a quantized NN input layer (input is always quantized for DRP-AI)
    ei_learning_block_config_tflite_graph_t *cfg = (ei_learning_block_config_tflite_graph_t*)block_ptr.config;
    if (cfg->quantized != 1) {
        return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES;
    }

    // ... and exactly one DSP block, which must operate on images
    if (impulse->dsp_blocks_size != 1 || impulse->dsp_blocks[0].extract_fn != extract_image_features) {
        return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES;
    }

    return EI_IMPULSE_OK;
}
#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 && (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_ONNX_TIDL)
/**
 * Run the classifier on image input using the quantized fast path. Only valid
 * for TFLite-family models (interpreter, EON, tensaiflow, drpai, tidl, memryx):
 * quantizes in place instead of building a float feature matrix, so it uses
 * far less memory. Callers must first verify eligibility via
 * 'can_run_classifier_image_quantized' (must return EI_IMPULSE_OK).
 */
extern "C" EI_IMPULSE_ERROR run_classifier_image_quantized(
    const ei_impulse_t *impulse,
    signal_t *signal,
    ei_impulse_result_t *result,
    bool debug = false)
{
    // start from a clean result struct
    memset(result, 0, sizeof(ei_impulse_result_t));

    // eligibility checks guarantee block 0 is the (only) TFLite learning block
    void *nn_config = impulse->learning_blocks[0].config;
    return run_nn_inference_image_quantized(impulse, signal, result, nn_config, debug);
}
#endif // #if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 && (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_ONNX_TIDL)
#if EI_CLASSIFIER_LOAD_IMAGE_SCALING
static const float torch_mean[] = { 0.485, 0.456, 0.406 };
static const float torch_std[] = { 0.229, 0.224, 0.225 };
// This is ordered BGR
static const float tao_mean[] = { 103.939, 116.779, 123.68 };
/**
 * Apply the learning block's configured image scaling to the feature matrix
 * in place. Features are assumed to arrive normalized to 0..1 in RGB order
 * (interleaved triplets).
 *
 * @param block   Learning block whose image_scaling mode is applied
 * @param fmatrix Feature matrix, modified in place
 * @return EI_IMPULSE_OK on success, EI_IMPULSE_DSP_ERROR on a math failure
 */
EI_IMPULSE_ERROR ei_scale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix) {
    const size_t total = fmatrix->rows * fmatrix->cols;

    switch (block->image_scaling) {
    case EI_CLASSIFIER_IMAGE_SCALING_TORCH: {
        // @todo; could we write some faster vector math here?
        // per-channel torchvision normalization: (x - mean) / std
        for (size_t px = 0; px < total; px += 3) {
            fmatrix->buffer[px + 0] = (fmatrix->buffer[px + 0] - torch_mean[0]) / torch_std[0];
            fmatrix->buffer[px + 1] = (fmatrix->buffer[px + 1] - torch_mean[1]) / torch_std[1];
            fmatrix->buffer[px + 2] = (fmatrix->buffer[px + 2] - torch_mean[2]) / torch_std[2];
        }
        break;
    }
    case EI_CLASSIFIER_IMAGE_SCALING_0_255: {
        // 0..1 -> 0..255
        int scale_res = numpy::scale(fmatrix, 255.0f);
        if (scale_res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
            return EI_IMPULSE_DSP_ERROR;
        }
        break;
    }
    case EI_CLASSIFIER_IMAGE_SCALING_MIN128_127: {
        // 0..1 -> -128..127
        int scale_res = numpy::scale(fmatrix, 255.0f);
        if (scale_res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
            return EI_IMPULSE_DSP_ERROR;
        }
        scale_res = numpy::subtract(fmatrix, 128.0f);
        if (scale_res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
            return EI_IMPULSE_DSP_ERROR;
        }
        break;
    }
    case EI_CLASSIFIER_IMAGE_SCALING_MIN1_1: {
        // 0..1 -> -1..1
        int scale_res = numpy::scale(fmatrix, 2.0f);
        if (scale_res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
            return EI_IMPULSE_DSP_ERROR;
        }
        scale_res = numpy::subtract(fmatrix, 1.0f);
        if (scale_res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
            return EI_IMPULSE_DSP_ERROR;
        }
        break;
    }
    case EI_CLASSIFIER_IMAGE_SCALING_BGR_SUBTRACT_IMAGENET_MEAN: {
        // 0..1 -> 0..255, then RGB -> BGR with ImageNet mean subtracted
        int scale_res = numpy::scale(fmatrix, 255.0f);
        if (scale_res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
            return EI_IMPULSE_DSP_ERROR;
        }
        // Transpose RGB to BGR and subtract mean
        for (size_t px = 0; px < total; px += 3) {
            float r = fmatrix->buffer[px + 0];
            fmatrix->buffer[px + 0] = fmatrix->buffer[px + 2] - tao_mean[0];
            fmatrix->buffer[px + 1] -= tao_mean[1];
            fmatrix->buffer[px + 2] = r - tao_mean[2];
        }
        break;
    }
    default:
        // no scaling configured: leave features untouched
        break;
    }

    return EI_IMPULSE_OK;
}
/**
 * Reverse the image scaling applied by ei_scale_fmatrix(), restoring the
 * feature matrix (in place) to its original 0..1 RGB representation.
 *
 * @param block   Learning block whose image_scaling mode is reversed
 * @param fmatrix Feature matrix, modified in place
 * @return EI_IMPULSE_OK on success, EI_IMPULSE_DSP_ERROR on a math failure
 */
EI_IMPULSE_ERROR ei_unscale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix) {
    const size_t total = fmatrix->rows * fmatrix->cols;

    switch (block->image_scaling) {
    case EI_CLASSIFIER_IMAGE_SCALING_TORCH: {
        // @todo; could we write some faster vector math here?
        // undo per-channel normalization: x * std + mean
        for (size_t px = 0; px < total; px += 3) {
            fmatrix->buffer[px + 0] = (fmatrix->buffer[px + 0] * torch_std[0]) + torch_mean[0];
            fmatrix->buffer[px + 1] = (fmatrix->buffer[px + 1] * torch_std[1]) + torch_mean[1];
            fmatrix->buffer[px + 2] = (fmatrix->buffer[px + 2] * torch_std[2]) + torch_mean[2];
        }
        break;
    }
    case EI_CLASSIFIER_IMAGE_SCALING_MIN128_127: {
        // -128..127 -> 0..1
        int scale_res = numpy::add(fmatrix, 128.0f);
        if (scale_res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
            return EI_IMPULSE_DSP_ERROR;
        }
        scale_res = numpy::scale(fmatrix, 1 / 255.0f);
        if (scale_res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
            return EI_IMPULSE_DSP_ERROR;
        }
        break;
    }
    case EI_CLASSIFIER_IMAGE_SCALING_MIN1_1: {
        // -1..1 -> 0..1
        int scale_res = numpy::add(fmatrix, 1.0f);
        if (scale_res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
            return EI_IMPULSE_DSP_ERROR;
        }
        scale_res = numpy::scale(fmatrix, 1 / 2.0f);
        if (scale_res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
            return EI_IMPULSE_DSP_ERROR;
        }
        break;
    }
    case EI_CLASSIFIER_IMAGE_SCALING_0_255: {
        // 0..255 -> 0..1
        int scale_res = numpy::scale(fmatrix, 1 / 255.0f);
        if (scale_res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
            return EI_IMPULSE_DSP_ERROR;
        }
        break;
    }
    case EI_CLASSIFIER_IMAGE_SCALING_BGR_SUBTRACT_IMAGENET_MEAN: {
        // Transpose BGR to RGB and add mean
        for (size_t px = 0; px < total; px += 3) {
            float b = fmatrix->buffer[px + 0];
            fmatrix->buffer[px + 0] = fmatrix->buffer[px + 2] + tao_mean[2];
            fmatrix->buffer[px + 1] += tao_mean[1];
            fmatrix->buffer[px + 2] = b + tao_mean[0];
        }
        // then 0..255 -> 0..1
        int scale_res = numpy::scale(fmatrix, 1 / 255.0f);
        if (scale_res != EIDSP_OK) {
            ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
            return EI_IMPULSE_DSP_ERROR;
        }
        break;
    }
    default:
        // no scaling was applied: nothing to undo
        break;
    }

    return EI_IMPULSE_OK;
}
#endif
/* Public functions ------------------------------------------------------- */
/* Tread carefully: public functions are not to be changed
to preserve backwards compatibility. Anything in this public section
will be documented by Doxygen. */
/**
* @defgroup ei_functions Functions
*
* Public-facing functions for running inference using the Edge Impulse C++ library.
*
* **Source**: [classifier/ei_run_classifier.h](https://github.com/edgeimpulse/inferencing-sdk-cpp/blob/master/classifier/ei_run_classifier.h)
*
* @addtogroup ei_functions
* @{
*/
/**
 * @brief Initialize static variables for running preprocessing and inference
 * continuously.
 *
 * Initializes and clears any internal static variables needed by `run_classifier_continuous()`.
 * This includes the moving average filter (MAF). This function should be called prior to
 * calling `run_classifier_continuous()`.
 *
 * Operates on the statically generated default impulse (`ei_default_impulse`).
 *
 * **Blocking**: yes
 *
 * **Example**: [nano_ble33_sense_microphone_continuous.ino](https://github.com/edgeimpulse/example-lacuna-ls200/blob/main/nano_ble33_sense_microphone_continous/nano_ble33_sense_microphone_continuous.ino)
 */
extern "C" void run_classifier_init(void)
{
// reset the sliding-window feature counter used by run_classifier_continuous()
classifier_continuous_features_written = 0;
// clear continuous-audio DSP state
ei_dsp_clear_continuous_audio_state();
// reset impulse runtime state, then (re)initialize post-processing
init_impulse(&ei_default_impulse);
init_postprocessing(&ei_default_impulse);
}
/**
 * @brief Initialize static variables for running preprocessing and inference
 * continuously.
 *
 * Initializes and clears any internal static variables needed by `run_classifier_continuous()`.
 * This includes the moving average filter (MAF). This function should be called prior to
 * calling `run_classifier_continuous()`.
 *
 * Same as `run_classifier_init(void)` but for a caller-supplied impulse handle.
 *
 * **Blocking**: yes
 *
 * **Example**: [nano_ble33_sense_microphone_continuous.ino](https://github.com/edgeimpulse/example-lacuna-ls200/blob/main/nano_ble33_sense_microphone_continous/nano_ble33_sense_microphone_continuous.ino)
 *
 * @param[in] handle struct with information about model and DSP
 */
__attribute__((unused)) void run_classifier_init(ei_impulse_handle_t *handle)
{
// reset the sliding-window feature counter used by run_classifier_continuous()
classifier_continuous_features_written = 0;
// clear continuous-audio DSP state
ei_dsp_clear_continuous_audio_state();
// reset impulse runtime state, then (re)initialize post-processing
init_impulse(handle);
init_postprocessing(handle);
}
/**
 * @brief Deletes static variables when running preprocessing and inference continuously.
 *
 * Deletes internal static variables used by `run_classifier_continuous()`, which
 * includes the moving average filter (MAF). This function should be called when you
 * are done running continuous classification.
 *
 * Operates on the statically generated default impulse (`ei_default_impulse`).
 *
 * **Blocking**: yes
 *
 * **Example**: [ei_run_audio_impulse.cpp](https://github.com/edgeimpulse/firmware-nordic-thingy53/blob/main/src/inference/ei_run_audio_impulse.cpp)
 */
extern "C" void run_classifier_deinit(void)
{
// tear down post-processing state allocated by run_classifier_init()
deinit_postprocessing(&ei_default_impulse);
}
/**
 * @brief Deletes static variables for a specific impulse handle.
 *
 * Handle-based counterpart of `run_classifier_deinit(void)`: tears down the
 * post-processing state allocated by `run_classifier_init(handle)`.
 *
 * @param[in] handle struct with information about model and DSP
 */
__attribute__((unused)) void run_classifier_deinit(ei_impulse_handle_t *handle)
{
deinit_postprocessing(handle);
}
/**
* @brief Run preprocessing (DSP) on new slice of raw features. Add output features
* to rolling matrix and run inference on full sample.
*
 * Accepts a new slice of features given by the callback defined in the `signal` parameter.
* It performs preprocessing (DSP) on this new slice of features and appends the output to
* a sliding window of pre-processed features (stored in a static features matrix). The matrix
* stores the new slice and as many old slices as necessary to make up one full sample for
* performing inference.
*
* `run_classifier_init()` must be called before making any calls to
* `run_classifier_continuous().`
*
* For example, if you are doing keyword spotting on 1-second slices of audio and you want to
* perform inference 4 times per second (given by `EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW`), you
* would collect 0.25 seconds of audio and call run_classifier_continuous(). The function would
* compute the Mel-Frequency Cepstral Coefficients (MFCCs) for that 0.25 second slice of audio,
* drop the oldest 0.25 seconds' worth of MFCCs from its internal matrix, and append the newest
* slice of MFCCs. This process allows the library to keep track of the pre-processed features
* (e.g. MFCCs) in the window instead of the entire set of raw features (e.g. raw audio data),
* which can potentially save a lot of space in RAM. After updating the static matrix,
* inference is performed using the whole matrix, which acts as a sliding window of
* pre-processed features.
*
* Additionally, a moving average filter (MAF) can be enabled for `run_classifier_continuous()`,
* which averages (arithmetic mean) the last *n* inference results for each class. *n* is
* `EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW / 2`. In our example above, if we enabled the MAF, the
* values in `result` would contain predictions averaged from the previous 2 inferences.
*
* To learn more about `run_classifier_continuous()`, see
* [this guide](https://docs.edgeimpulse.com/docs/tutorials/advanced-inferencing/continuous-audio-sampling)
* on continuous audio sampling. While the guide is written for audio signals, the concepts of continuous sampling and inference can be extrapolated to any time-series data.
*
* **Blocking**: yes
*
* **Example**: [nano_ble33_sense_microphone_continuous.ino](https://github.com/edgeimpulse/example-lacuna-ls200/blob/main/nano_ble33_sense_microphone_continous/nano_ble33_sense_microphone_continuous.ino)
*
* @param[in] signal Pointer to a signal_t struct that contains the number of elements in the
* slice of raw features (e.g. `EI_CLASSIFIER_SLICE_SIZE`) and a pointer to a callback that reads
* in the slice of raw features.
* @param[out] result Pointer to an `ei_impulse_result_t` struct that contains the various output
* results from inference after run_classifier() returns.
* @param[in] debug Print internal preprocessing and inference debugging information via
* `ei_printf()`.
* @param[in] enable_maf_unused Enable the moving average filter (MAF) for the classifier - deprecated, replaced with Performance Calibration
*
* @return Error code as defined by `EI_IMPULSE_ERROR` enum. Will be `EI_IMPULSE_OK` if inference
* completed successfully.
*/
extern "C" EI_IMPULSE_ERROR run_classifier_continuous(
    signal_t *signal,
    ei_impulse_result_t *result,
    bool debug = false,
    bool enable_maf_unused = true)
{
    // The MAF flag is deprecated (replaced by Performance Calibration) and ignored.
    (void)enable_maf_unused;
    // Run the continuous pipeline against the statically generated default impulse.
    return process_impulse_continuous(&ei_default_impulse, signal, result, debug);
}
/**
* @brief Run preprocessing (DSP) on new slice of raw features. Add output features
* to rolling matrix and run inference on full sample.
*
 * Accepts a new slice of features given by the callback defined in the `signal` parameter.
 * It performs preprocessing (DSP) on this new slice of features and appends the output to
* a sliding window of pre-processed features (stored in a static features matrix). The matrix
* stores the new slice and as many old slices as necessary to make up one full sample for
* performing inference.
*
 * `run_classifier_init()` must be called before making any calls to
 * `run_classifier_continuous()`.
*
* For example, if you are doing keyword spotting on 1-second slices of audio and you want to
* perform inference 4 times per second (given by `EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW`), you
* would collect 0.25 seconds of audio and call run_classifier_continuous(). The function would
* compute the Mel-Frequency Cepstral Coefficients (MFCCs) for that 0.25 second slice of audio,
* drop the oldest 0.25 seconds' worth of MFCCs from its internal matrix, and append the newest
* slice of MFCCs. This process allows the library to keep track of the pre-processed features
* (e.g. MFCCs) in the window instead of the entire set of raw features (e.g. raw audio data),
* which can potentially save a lot of space in RAM. After updating the static matrix,
* inference is performed using the whole matrix, which acts as a sliding window of
* pre-processed features.
*
* Additionally, a moving average filter (MAF) can be enabled for `run_classifier_continuous()`,
* which averages (arithmetic mean) the last *n* inference results for each class. *n* is
* `EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW / 2`. In our example above, if we enabled the MAF, the
* values in `result` would contain predictions averaged from the previous 2 inferences.
*
* To learn more about `run_classifier_continuous()`, see
* [this guide](https://docs.edgeimpulse.com/docs/tutorials/advanced-inferencing/continuous-audio-sampling)
* on continuous audio sampling. While the guide is written for audio signals, the concepts of continuous sampling and inference can be extrapolated to any time-series data.
*
* **Blocking**: yes
*
* **Example**: [nano_ble33_sense_microphone_continuous.ino](https://github.com/edgeimpulse/example-lacuna-ls200/blob/main/nano_ble33_sense_microphone_continous/nano_ble33_sense_microphone_continuous.ino)
*
* @param[in] impulse `ei_impulse_handle_t` struct with information about preprocessing and model.
* @param[in] signal Pointer to a signal_t struct that contains the number of elements in the
* slice of raw features (e.g. `EI_CLASSIFIER_SLICE_SIZE`) and a pointer to a callback that reads
* in the slice of raw features.
 * @param[out] result Pointer to an `ei_impulse_result_t` struct that contains the various output
 * results from inference after `run_classifier_continuous()` returns.
* @param[in] debug Print internal preprocessing and inference debugging information via
* `ei_printf()`.
* @param[in] enable_maf_unused Enable the moving average filter (MAF) for the classifier - deprecated, replaced with Performance Calibration
*
* @return Error code as defined by `EI_IMPULSE_ERROR` enum. Will be `EI_IMPULSE_OK` if inference
* completed successfully.
*/
__attribute__((unused)) EI_IMPULSE_ERROR run_classifier_continuous(
    ei_impulse_handle_t *impulse,
    signal_t *signal,
    ei_impulse_result_t *result,
    bool debug = false,
    bool enable_maf_unused = true)
{
    // Deprecated flag, kept only for backward API compatibility.
    (void)enable_maf_unused;
    // Delegate slice preprocessing + inference to the shared continuous pipeline.
    EI_IMPULSE_ERROR status = process_impulse_continuous(impulse, signal, result, debug);
    return status;
}
/**
* @brief Run the classifier over a raw features array.
*
*
* Overloaded function [run_classifier()](#run_classifier-1) that defaults to the single impulse.
*
* **Blocking**: yes
*
* @param[in] signal Pointer to a `signal_t` struct that contains the total length of the raw
* feature array, which must match EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE, and a pointer to a callback
* that reads in the raw features.
* @param[out] result Pointer to an ei_impulse_result_t struct that will contain the various output
* results from inference after `run_classifier()` returns.
* @param[in] debug Print internal preprocessing and inference debugging information via `ei_printf()`.
*
* @return Error code as defined by `EI_IMPULSE_ERROR` enum. Will be `EI_IMPULSE_OK` if inference
* completed successfully.
*/
extern "C" EI_IMPULSE_ERROR run_classifier(
    signal_t *signal,
    ei_impulse_result_t *result,
    bool debug = false)
{
    // Single-impulse convenience entry point: run the full DSP + inference
    // pipeline against the default impulse.
    auto *handle = &ei_default_impulse;
    return process_impulse(handle, signal, result, debug);
}
/**
* @brief Run the classifier over a raw features array.
*
*
* Accepts a `signal_t` input struct pointing to a callback that reads in pages of raw features.
* `run_classifier()` performs any necessary preprocessing on the raw features (e.g. DSP, cropping
* of images, etc.) before performing inference. Results from inference are stored in an
* `ei_impulse_result_t` struct.
*
* **Blocking**: yes
*
* **Example**: [standalone inferencing main.cpp](https://github.com/edgeimpulse/example-standalone-inferencing/blob/master/source/main.cpp)
*
* @param[in] impulse Pointer to an `ei_impulse_handle_t` struct that contains the model and
* preprocessing information.
* @param[in] signal Pointer to a `signal_t` struct that contains the total length of the raw
* feature array, which must match EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE, and a pointer to a callback
* that reads in the raw features.
* @param[out] result Pointer to an ei_impulse_result_t struct that will contain the various output
* results from inference after `run_classifier()` returns.
* @param[in] debug Print internal preprocessing and inference debugging information via `ei_printf()`.
*
* @return Error code as defined by `EI_IMPULSE_ERROR` enum. Will be `EI_IMPULSE_OK` if inference
* completed successfully.
*/
__attribute__((unused)) EI_IMPULSE_ERROR run_classifier(
    ei_impulse_handle_t *impulse,
    signal_t *signal,
    ei_impulse_result_t *result,
    bool debug = false)
{
    // Thin wrapper: preprocessing and inference both happen inside
    // process_impulse(); just propagate its status code.
    EI_IMPULSE_ERROR status = process_impulse(impulse, signal, result, debug);
    return status;
}
/** @} */ // end of ei_functions Doxygen group
/* Deprecated functions ------------------------------------------------------- */
/* These functions are being deprecated and possibly will be removed or moved in future.
Do not use these - if possible, change your code to reflect the upcoming changes. */
#if EIDSP_SIGNAL_C_FN_POINTER == 0
/**
* @brief Run the impulse, if you provide an instance of sampler it will also persist
* the data for you.
*
* @deprecated This function is deprecated and will be removed in future versions. Use
* `run_classifier()` instead.
*
* @param[in] sampler Instance to an **initialized** sampler
* @param[out] result Object to store the results in
* @param[in] data_fn Callback function to retrieve data from sensors
* @param[in] debug Whether to log debug messages (default false)
*
* @return Error code as defined by `EI_IMPULSE_ERROR` enum. Will be `EI_IMPULSE_OK` if inference
* completed successfully.
*/
__attribute__((unused)) EI_IMPULSE_ERROR run_impulse(
#if (defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1) || defined(__DOXYGEN__)
EdgeSampler *sampler,
#endif
ei_impulse_result_t *result,
#ifdef __MBED__
mbed::Callback<void(float*, size_t)> data_fn,
#else
std::function<void(float*, size_t)> data_fn,
#endif
bool debug = false) {