diff --git a/EdgeImpulse.EI-SDK.pdsc b/EdgeImpulse.EI-SDK.pdsc
index 335fe5d..40fcf47 100644
--- a/EdgeImpulse.EI-SDK.pdsc
+++ b/EdgeImpulse.EI-SDK.pdsc
@@ -5,13 +5,16 @@
EI-SDK
LICENSE-apache-2.0.txt
Edge Impulse SDK
- https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.61.30/
+ https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.61.36/
hello@edgeimpulse.com
https://github.com/edgeimpulse/edge-impulse-sdk-pack.git
-
+
EI-SDK
+
+ EI-SDK
+
EI-SDK
@@ -98,9 +101,6 @@
EI-SDK
-
-
- EI-SDK
@@ -146,7 +146,7 @@
-
+
Edge Impulse SDK
diff --git a/EdgeImpulse.pidx b/EdgeImpulse.pidx
index 3f63921..f2ea8cd 100644
--- a/EdgeImpulse.pidx
+++ b/EdgeImpulse.pidx
@@ -2,8 +2,8 @@
EdgeImpulse
https://raw.githubusercontent.com/edgeimpulse/edge-impulse-sdk-pack/main/
- 2024-12-02 12:34:28
+ 2024-12-08 22:21:40
-
+
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier.h
index 0bc101d..bb34e23 100644
--- a/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier.h
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier.h
@@ -226,7 +226,7 @@ extern "C" EI_IMPULSE_ERROR process_impulse(ei_impulse_handle_t *handle,
ei_impulse_result_t *result,
bool debug = false)
{
- if(!handle) {
+ if ((handle == nullptr) || (handle->impulse == nullptr) || (result == nullptr) || (signal == nullptr)) {
return EI_IMPULSE_INFERENCE_ERROR;
}
@@ -252,19 +252,43 @@ extern "C" EI_IMPULSE_ERROR process_impulse(ei_impulse_handle_t *handle,
// smart pointer to features array
std::unique_ptr features_ptr(new ei_feature_t[block_num]);
ei_feature_t* features = features_ptr.get();
+
+ if (features == nullptr) {
+ ei_printf("ERR: Out of memory, can't allocate features\n");
+ return EI_IMPULSE_ALLOC_FAILED;
+ }
+
memset(features, 0, sizeof(ei_feature_t) * block_num);
// have it outside of the loop to avoid going out of scope
std::unique_ptr[]> matrix_ptrs_ptr(new std::unique_ptr[block_num]);
std::unique_ptr *matrix_ptrs = matrix_ptrs_ptr.get();
+ if (matrix_ptrs == nullptr) {
+ ei_printf("ERR: Out of memory, can't allocate matrix_ptrs\n");
+ return EI_IMPULSE_ALLOC_FAILED;
+ }
+
uint64_t dsp_start_us = ei_read_timer_us();
size_t out_features_index = 0;
for (size_t ix = 0; ix < handle->impulse->dsp_blocks_size; ix++) {
ei_model_dsp_t block = handle->impulse->dsp_blocks[ix];
+
matrix_ptrs[ix] = std::unique_ptr(new ei::matrix_t(1, block.n_output_features));
+ if (matrix_ptrs[ix] == nullptr) {
+ ei_printf("ERR: Out of memory, can't allocate matrix_ptrs[%lu]\n", (unsigned long)ix);
+ return EI_IMPULSE_ALLOC_FAILED;
+ }
+
+ if (matrix_ptrs[ix]->buffer == nullptr) {
+ ei_printf("ERR: Out of memory, can't allocate matrix_ptrs[%lu]\n", (unsigned long)ix);
+ return EI_IMPULSE_ALLOC_FAILED;
+ }
+
features[ix].matrix = matrix_ptrs[ix].get();
features[ix].blockId = block.blockId;
@@ -395,8 +419,12 @@ extern "C" EI_IMPULSE_ERROR init_impulse(ei_impulse_handle_t *handle) {
extern "C" EI_IMPULSE_ERROR process_impulse_continuous(ei_impulse_handle_t *handle,
signal_t *signal,
ei_impulse_result_t *result,
- bool debug)
+ bool debug = false)
{
+ if ((handle == nullptr) || (handle->impulse == nullptr) || (result == nullptr) || (signal == nullptr)) {
+ return EI_IMPULSE_INFERENCE_ERROR;
+ }
+
auto impulse = handle->impulse;
static ei::matrix_t static_features_matrix(1, impulse->nn_input_frame_size);
if (!static_features_matrix.buffer) {
@@ -482,16 +510,36 @@ extern "C" EI_IMPULSE_ERROR process_impulse_continuous(ei_impulse_handle_t *hand
// smart pointer to features array
std::unique_ptr features_ptr(new ei_feature_t[block_num]);
ei_feature_t* features = features_ptr.get();
+ if (features == nullptr) {
+ ei_printf("ERR: Out of memory, can't allocate features\n");
+ return EI_IMPULSE_ALLOC_FAILED;
+ }
memset(features, 0, sizeof(ei_feature_t) * block_num);
// have it outside of the loop to avoid going out of scope
std::unique_ptr *matrix_ptrs = new std::unique_ptr[block_num];
+ if (matrix_ptrs == nullptr) {
+ ei_printf("ERR: Out of memory, can't allocate matrix_ptrs\n");
+ return EI_IMPULSE_ALLOC_FAILED;
+ }
out_features_index = 0;
// iterate over every dsp block and run normalization
for (size_t ix = 0; ix < impulse->dsp_blocks_size; ix++) {
ei_model_dsp_t block = impulse->dsp_blocks[ix];
matrix_ptrs[ix] = std::unique_ptr(new ei::matrix_t(1, block.n_output_features));
+
+ if (matrix_ptrs[ix] == nullptr) {
+ ei_printf("ERR: Out of memory, can't allocate matrix_ptrs[%lu]\n", (unsigned long)ix);
+ delete[] matrix_ptrs;
+ return EI_IMPULSE_ALLOC_FAILED;
+ }
+
+ if (matrix_ptrs[ix]->buffer == nullptr) {
+ ei_printf("ERR: Out of memory, can't allocate matrix_ptrs[%lu]\n", (unsigned long)ix);
+ delete[] matrix_ptrs;
+ return EI_IMPULSE_ALLOC_FAILED;
+ }
+
features[ix].matrix = matrix_ptrs[ix].get();
features[ix].blockId = block.blockId;
diff --git a/edgeimpulse/edge-impulse-sdk/dsp/numpy.hpp b/edgeimpulse/edge-impulse-sdk/dsp/numpy.hpp
index 376cbaa..3efbf85 100644
--- a/edgeimpulse/edge-impulse-sdk/dsp/numpy.hpp
+++ b/edgeimpulse/edge-impulse-sdk/dsp/numpy.hpp
@@ -2511,7 +2511,7 @@ class numpy {
first_time = false; // only warn once
if (res == EIDSP_FFT_SIZE_NOT_SUPPORTED) {
EI_LOGI("HW RFFT failed, FFT size not supported. Must be a power of 2 between %d and %d, (size was %d)",
- ei::fft::MIN_FFT_SIZE, ei::fft::MAX_FFT_SIZE, n_fft);
+ ei::fft::MIN_FFT_SIZE, ei::fft::MAX_FFT_SIZE, (int)n_fft);
}
else {
EI_LOGI("HW RFFT failed, falling back to SW");
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.cpp b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.cpp
index fe628f7..96c59ec 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.cpp
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.cpp
@@ -1786,13 +1786,16 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
if (input->type == kTfLiteInt8) {
data_dims_t input_dims = {
.width = input_width, .height = input_height,
- .channels = input->dims->data[3], 1
+ .channels = input->dims->data[3], .extra = 1
};
data_dims_t output_dims = {
.width = output_width, .height = output_height,
- .channels = output->dims->data[3], 1
+ .channels = output->dims->data[3], .extra = 1
+ };
+ data_dims_t filter_dims = {
+ .width = filter_width, .height = filter_height,
+ .channels = 0, .extra = 0
};
- data_dims_t filter_dims = {.width = filter_width, .height = filter_height, 0, 0};
conv_params_t conv_params = {
.in_offset = 0, .out_offset = 0,
.stride = {params.stride_width, params.stride_height},
@@ -1880,13 +1883,15 @@ inline void EvalQuantizedPerChannel(
data_dims_t input_dims = {
.width = input_width, .height = input_height,
- .channels = input_depth, 1
+ .channels = input_depth, .extra = 1
};
data_dims_t output_dims = {
.width = output_width, .height = output_height,
- .channels = output_depth, 1
+ .channels = output_depth, .extra = 1
+ };
+ data_dims_t filter_dims = { .width = filter_width, .height = filter_height,
+ .channels = 0, .extra = 0
};
- data_dims_t filter_dims = {.width = filter_width, .height = filter_height, 0, 0};
conv_params_t conv_params = {
.in_offset = input_offset, .out_offset = output_offset,
.stride = {stride_width, stride_height},
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.cpp b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.cpp
index 4abf9d1..49e770f 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.cpp
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.cpp
@@ -1724,13 +1724,15 @@ inline void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
data_dims_t input_dims = {
.width = input_width, .height = input_height,
- .channels = input_depth, 1
+ .channels = input_depth, .extra = 1
};
data_dims_t output_dims = {
.width = output_width, .height = output_height,
- .channels = output_depth, 1
+ .channels = output_depth, .extra = 1
+ };
+ data_dims_t filter_dims = { .width = filter_width, .height = filter_height,
+ .channels = 0, .extra = 0
};
- data_dims_t filter_dims = {.width = filter_width, .height = filter_height, 0, 0};
dw_conv_params_t conv_params = {
.in_offset = input_offset, .out_offset = output_offset,
.ch_mult = depth_multiplier,
@@ -1833,13 +1835,15 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
if (input->type == kTfLiteInt8) {
data_dims_t input_dims = {
.width = input_width, .height = input_height,
- .channels = input->dims->data[3], 1
+ .channels = input->dims->data[3], .extra = 1
};
data_dims_t output_dims = {
.width = output_width, .height = output_height,
- .channels = output->dims->data[3], 1
+ .channels = output->dims->data[3], .extra = 1
};
- data_dims_t filter_dims = {.width = filter_width, .height = filter_height, 0, 0};
+ data_dims_t filter_dims = {
+ .width = filter_width, .height = filter_height,
+ .channels = 0, .extra = 0};
dw_conv_params_t conv_params = {
.in_offset = 0, .out_offset = 0,
.ch_mult = params.depth_multiplier,