diff --git a/.github/workflows/pack.yml b/.github/workflows/pack.yml
index 7b302ad..71f65fe 100644
--- a/.github/workflows/pack.yml
+++ b/.github/workflows/pack.yml
@@ -50,4 +50,4 @@ jobs:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./output/*.pack
asset_name: ${{ github.ref }}.pack
- asset_content_type: application/zipgh t
\ No newline at end of file
+ asset_content_type: application/zip
\ No newline at end of file
diff --git a/EdgeImpulse.EI-SDK.pdsc b/EdgeImpulse.EI-SDK.pdsc
index e24b269..f1167d0 100644
--- a/EdgeImpulse.EI-SDK.pdsc
+++ b/EdgeImpulse.EI-SDK.pdsc
@@ -8,7 +8,7 @@
hello@edgeimpulse.com
-
+
EI-SDK
@@ -55,7 +55,7 @@
-
+
Edge Impulse SDK
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_model_types.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_model_types.h
index dd9156c..593ffd9 100644
--- a/edgeimpulse/edge-impulse-sdk/classifier/ei_model_types.h
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_model_types.h
@@ -66,6 +66,7 @@
#define EI_CLASSIFIER_IMAGE_SCALING_0_255 1
#define EI_CLASSIFIER_IMAGE_SCALING_TORCH 2
#define EI_CLASSIFIER_IMAGE_SCALING_MIN1_1 3
+#define EI_CLASSIFIER_IMAGE_SCALING_MIN128_127 4
struct ei_impulse;
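
For reference, the per-pixel math behind each mode, shown as a hypothetical helper (not part of the SDK) for a normalized pixel p in [0, 1]; TORCH is omitted since it standardizes per channel rather than remapping a range:

    static float scale_pixel(float p, int mode) {
        switch (mode) {
            case EI_CLASSIFIER_IMAGE_SCALING_0_255:      return p * 255.0f;          // [0, 255]
            case EI_CLASSIFIER_IMAGE_SCALING_MIN1_1:     return p * 2.0f - 1.0f;     // [-1, 1]
            case EI_CLASSIFIER_IMAGE_SCALING_MIN128_127: return p * 255.0f - 128.0f; // [-128, 127]
            default:                                     return p;                   // NONE: passthrough
        }
    }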
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier.h
index 7c8ad69..48cdbdd 100644
--- a/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier.h
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier.h
@@ -72,6 +72,11 @@ extern "C" EI_IMPULSE_ERROR run_inference(const ei_impulse_t *impulse, ei_featur
extern "C" EI_IMPULSE_ERROR run_classifier_image_quantized(const ei_impulse_t *impulse, signal_t *signal, ei_impulse_result_t *result, bool debug);
static EI_IMPULSE_ERROR can_run_classifier_image_quantized(const ei_impulse_t *impulse, ei_learning_block_t block_ptr);
+#if EI_CLASSIFIER_LOAD_IMAGE_SCALING
+EI_IMPULSE_ERROR ei_scale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix);
+EI_IMPULSE_ERROR ei_unscale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix);
+#endif // EI_CLASSIFIER_LOAD_IMAGE_SCALING
+
/* Private variables ------------------------------------------------------- */
static uint64_t classifier_continuous_features_written = 0;
@@ -82,62 +87,6 @@ static RecognizeEvents *avg_scores = NULL;
/* These functions (up to Public functions section) are not exposed to end-user,
therefore changes are allowed. */
-#if EI_CLASSIFIER_LOAD_IMAGE_SCALING
-static const float torch_mean[] = { 0.485, 0.456, 0.406 };
-static const float torch_std[] = { 0.229, 0.224, 0.225 };
-
-static EI_IMPULSE_ERROR scale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix) {
- if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_TORCH) {
- // @todo; could we write some faster vector math here?
- for (size_t ix = 0; ix < fmatrix->rows * fmatrix->cols; ix += 3) {
- fmatrix->buffer[ix + 0] = (fmatrix->buffer[ix + 0] - torch_mean[0]) / torch_std[0];
- fmatrix->buffer[ix + 1] = (fmatrix->buffer[ix + 1] - torch_mean[1]) / torch_std[1];
- fmatrix->buffer[ix + 2] = (fmatrix->buffer[ix + 2] - torch_mean[2]) / torch_std[2];
- }
- }
- else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_0_255) {
- int scale_res = numpy::scale(fmatrix, 255.0f);
- if (scale_res != EIDSP_OK) {
- ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
- return EI_IMPULSE_DSP_ERROR;
- }
- }
- else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_MIN1_1) {
- int scale_res = numpy::scale(fmatrix, 2.0f);
- if (scale_res != EIDSP_OK) {
- ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
- return EI_IMPULSE_DSP_ERROR;
- }
- scale_res = numpy::subtract(fmatrix, 1.0f);
- if (scale_res != EIDSP_OK) {
- ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
- return EI_IMPULSE_DSP_ERROR;
- }
- }
-
- return EI_IMPULSE_OK;
-}
-
-static EI_IMPULSE_ERROR unscale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix) {
- if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_TORCH) {
- // @todo; could we write some faster vector math here?
- for (size_t ix = 0; ix < fmatrix->rows * fmatrix->cols; ix += 3) {
- fmatrix->buffer[ix + 0] = (fmatrix->buffer[ix + 0] * torch_std[0]) + torch_mean[0];
- fmatrix->buffer[ix + 1] = (fmatrix->buffer[ix + 1] * torch_std[1]) + torch_mean[1];
- fmatrix->buffer[ix + 2] = (fmatrix->buffer[ix + 2] * torch_std[2]) + torch_mean[2];
- }
- }
- else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_0_255) {
- int scale_res = numpy::scale(fmatrix, 1 / 255.0f);
- if (scale_res != EIDSP_OK) {
- ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
- return EI_IMPULSE_DSP_ERROR;
- }
- }
- return EI_IMPULSE_OK;
-}
-#endif
-
/**
* @brief Display the results of the inference
@@ -201,7 +150,7 @@ extern "C" EI_IMPULSE_ERROR run_inference(
#if EI_CLASSIFIER_LOAD_IMAGE_SCALING
// we do not plan to have multiple dsp blocks with image
// so just apply scaling to the first one
- EI_IMPULSE_ERROR scale_res = scale_fmatrix(&block, fmatrix[0].matrix);
+ EI_IMPULSE_ERROR scale_res = ei_scale_fmatrix(&block, fmatrix[0].matrix);
if (scale_res != EI_IMPULSE_OK) {
return scale_res;
}
@@ -216,7 +165,7 @@ extern "C" EI_IMPULSE_ERROR run_inference(
#if EI_CLASSIFIER_LOAD_IMAGE_SCALING
// undo scaling
- scale_res = unscale_fmatrix(&block, fmatrix[0].matrix);
+ scale_res = ei_unscale_fmatrix(&block, fmatrix[0].matrix);
if (scale_res != EI_IMPULSE_OK) {
return scale_res;
}
@@ -595,6 +544,98 @@ extern "C" EI_IMPULSE_ERROR run_classifier_image_quantized(
/* Thread carefully: public functions are not to be changed
to preserve backwards compatibility. */
+#if EI_CLASSIFIER_LOAD_IMAGE_SCALING
+static const float torch_mean[] = { 0.485, 0.456, 0.406 };
+static const float torch_std[] = { 0.229, 0.224, 0.225 };
+
+EI_IMPULSE_ERROR ei_scale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix) {
+ if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_TORCH) {
+ // @todo; could we write some faster vector math here?
+ for (size_t ix = 0; ix < fmatrix->rows * fmatrix->cols; ix += 3) {
+ fmatrix->buffer[ix + 0] = (fmatrix->buffer[ix + 0] - torch_mean[0]) / torch_std[0];
+ fmatrix->buffer[ix + 1] = (fmatrix->buffer[ix + 1] - torch_mean[1]) / torch_std[1];
+ fmatrix->buffer[ix + 2] = (fmatrix->buffer[ix + 2] - torch_mean[2]) / torch_std[2];
+ }
+ }
+ else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_0_255) {
+ int scale_res = numpy::scale(fmatrix, 255.0f);
+ if (scale_res != EIDSP_OK) {
+ ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
+ return EI_IMPULSE_DSP_ERROR;
+ }
+ }
+ else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_MIN128_127) {
+ int scale_res = numpy::scale(fmatrix, 255.0f);
+ if (scale_res != EIDSP_OK) {
+ ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
+ return EI_IMPULSE_DSP_ERROR;
+ }
+ scale_res = numpy::subtract(fmatrix, 128.0f);
+ if (scale_res != EIDSP_OK) {
+ ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
+ return EI_IMPULSE_DSP_ERROR;
+ }
+ }
+ else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_MIN1_1) {
+ int scale_res = numpy::scale(fmatrix, 2.0f);
+ if (scale_res != EIDSP_OK) {
+ ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
+ return EI_IMPULSE_DSP_ERROR;
+ }
+ scale_res = numpy::subtract(fmatrix, 1.0f);
+ if (scale_res != EIDSP_OK) {
+ ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
+ return EI_IMPULSE_DSP_ERROR;
+ }
+ }
+
+ return EI_IMPULSE_OK;
+}
+
+EI_IMPULSE_ERROR ei_unscale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix) {
+ if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_TORCH) {
+ // @todo; could we write some faster vector math here?
+ for (size_t ix = 0; ix < fmatrix->rows * fmatrix->cols; ix += 3) {
+ fmatrix->buffer[ix + 0] = (fmatrix->buffer[ix + 0] * torch_std[0]) + torch_mean[0];
+ fmatrix->buffer[ix + 1] = (fmatrix->buffer[ix + 1] * torch_std[1]) + torch_mean[1];
+ fmatrix->buffer[ix + 2] = (fmatrix->buffer[ix + 2] * torch_std[2]) + torch_mean[2];
+ }
+ }
+ else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_MIN128_127) {
+ int scale_res = numpy::add(fmatrix, 128.0f);
+ if (scale_res != EIDSP_OK) {
+ ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
+ return EI_IMPULSE_DSP_ERROR;
+ }
+ scale_res = numpy::scale(fmatrix, 1 / 255.0f);
+ if (scale_res != EIDSP_OK) {
+ ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
+ return EI_IMPULSE_DSP_ERROR;
+ }
+ }
+ else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_MIN1_1) {
+ int scale_res = numpy::add(fmatrix, 1.0f);
+ if (scale_res != EIDSP_OK) {
+ ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
+ return EI_IMPULSE_DSP_ERROR;
+ }
+ scale_res = numpy::scale(fmatrix, 1 / 2.0f);
+ if (scale_res != EIDSP_OK) {
+ ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
+ return EI_IMPULSE_DSP_ERROR;
+ }
+ }
+ else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_0_255) {
+ int scale_res = numpy::scale(fmatrix, 1 / 255.0f);
+ if (scale_res != EIDSP_OK) {
+ ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
+ return EI_IMPULSE_DSP_ERROR;
+ }
+ }
+ return EI_IMPULSE_OK;
+}
+#endif
+
/**
* @brief Init static vars
*/
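
With the declarations added above, the scale/unscale pair is now callable from other translation units. A minimal round-trip sketch for the new mode (the block initialization and buffer contents are illustrative, not taken from the SDK):

    ei_learning_block_t block = { 0 };  // only image_scaling matters here
    block.image_scaling = EI_CLASSIFIER_IMAGE_SCALING_MIN128_127;

    ei::matrix_t pixels(1, 9);          // three RGB pixels, normalized to [0, 1]
    for (size_t ix = 0; ix < 9; ix++) {
        pixels.buffer[ix] = 0.5f;
    }

    EI_IMPULSE_ERROR err = ei_scale_fmatrix(&block, &pixels);  // 0.5 * 255 - 128 = -0.5
    if (err == EI_IMPULSE_OK) {
        err = ei_unscale_fmatrix(&block, &pixels);             // (-0.5 + 128) / 255 = 0.5
    }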
diff --git a/edgeimpulse/edge-impulse-sdk/porting/ei_logging.h b/edgeimpulse/edge-impulse-sdk/porting/ei_logging.h
index d15832e..37926c0 100644
--- a/edgeimpulse/edge-impulse-sdk/porting/ei_logging.h
+++ b/edgeimpulse/edge-impulse-sdk/porting/ei_logging.h
@@ -46,7 +46,7 @@
extern "C"
#endif // defined(__cplusplus) && EI_C_LINKAGE == 1
-const char *debug_msgs[] =
+__attribute__((unused)) static const char *debug_msgs[] =
{
"NONE", // this one will never show
"ERR",
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.cpp b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.cpp
index 2d4b858..e293283 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.cpp
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.cpp
@@ -207,7 +207,10 @@ TfLiteStatus InitializeTfLiteTensorFromFlatbuffer(
result->is_variable = flatbuffer_tensor.is_variable();
result->data.data = GetFlatbufferTensorBuffer(flatbuffer_tensor, buffers);
-
+ // this is useful for debugging
+#if defined(EI_LOG_LEVEL) && EI_LOG_LEVEL >= 4
+ result->name = flatbuffer_tensor.name()->c_str();
+#endif
// TODO(petewarden): Some of these paths aren't getting enough testing
// coverage, so we should figure out some tests that exercise them.
if (result->data.data == nullptr) {
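
The name is only captured at verbose log levels, so release builds pay nothing for it. A sketch of reading it back, assuming the fork's TfLiteTensor exposes a name member under the same guard (it is not part of the upstream TFLM struct):

    #if defined(EI_LOG_LEVEL) && EI_LOG_LEVEL >= 4
    TfLiteTensor *t = interpreter.tensor(0);      // first tensor of subgraph 0
    if (t != nullptr && t->name != nullptr) {
        MicroPrintf("tensor 0 is '%s'", t->name);
    }
    #endif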
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h
index 082b898..0e096c7 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h
@@ -80,6 +80,12 @@ class MicroGraph {
// to be the subgraph of that operator.
int GetCurrentSubgraphIndex() { return current_subgraph_index_; }
+ // Set the current subgraph index.
+ void SetCurrentSubgraphIndex(int subgraph_idx)
+ {
+ current_subgraph_index_ = subgraph_idx;
+ }
+
// Gets the list of allocations for each subgraph. This is the source of truth
// for all per-subgraph allocation data.
SubgraphAllocations* GetAllocations() { return subgraph_allocations_; }
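
The setter mirrors the existing getter, so a caller that invokes another subgraph (for example, a control-flow op) can redirect the graph and restore it afterwards. A usage sketch under that assumption:

    int prev = graph.GetCurrentSubgraphIndex();
    graph.SetCurrentSubgraphIndex(1);     // operate on subgraph 1
    // ... per-subgraph work ...
    graph.SetCurrentSubgraphIndex(prev);  // always restore the saved index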
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.cpp b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.cpp
index 8877a8d..3c734fb 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.cpp
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.cpp
@@ -320,16 +320,15 @@ TfLiteTensor* MicroInterpreter::output(size_t index) {
return output_tensors_[index];
}
-TfLiteTensor* MicroInterpreter::tensor(size_t index) {
- const size_t length = tensors_size();
+TfLiteTensor* MicroInterpreter::tensor(size_t index, size_t subgraph_idx) {
+ const size_t length = tensors_size(subgraph_idx);
if (index >= length) {
MicroPrintf("Tensor index %d out of range (length is %d)", index, length);
return nullptr;
}
- return allocator_.AllocatePersistentTfLiteTensor(model_, graph_.GetAllocations(), index, 0);
+ return allocator_.AllocatePersistentTfLiteTensor(model_, graph_.GetAllocations(), index, subgraph_idx);
}
-
// Repurposing free subgraphs to reset state for some ops for now, until a
// reset API is made. See b/220940833#comment25 for more context.
TfLiteStatus MicroInterpreter::Reset() {
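
Because the subgraph index is now threaded through to the allocator, tensors of non-primary subgraphs become reachable, while the default argument keeps old call sites compiling. Note each lookup goes through AllocatePersistentTfLiteTensor, so it consumes persistent arena space; avoid calling it in tight loops on small arenas. A sketch:

    TfLiteTensor *t = interpreter.tensor(3, /*subgraph_idx=*/1);  // tensor 3 of subgraph 1
    if (t == nullptr) {
        MicroPrintf("tensor lookup failed");
    }
    TfLiteTensor *t0 = interpreter.tensor(3);  // unchanged legacy call, same as tensor(3, 0)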
diff --git a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.h b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.h
index 051490b..5901372 100644
--- a/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.h
+++ b/edgeimpulse/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.h
@@ -80,9 +80,10 @@ class MicroInterpreter {
// one external context.
TfLiteStatus SetMicroExternalContext(void* external_context_payload);
- size_t tensors_size() const { return model_->subgraphs()->Get(0)->tensors()->size(); }
+ size_t tensors_size(size_t subgraph_idx = 0) const { return model_->subgraphs()->Get(subgraph_idx)->tensors()->size(); }
+
+ TfLiteTensor* tensor(size_t tensor_index, size_t subgraph_idx = 0);
- TfLiteTensor* tensor(size_t tensor_index);
template <class T>
T* typed_tensor(int tensor_index) {
if (TfLiteTensor* tensor_ptr = tensor(tensor_index)) {
@@ -135,13 +136,17 @@ class MicroInterpreter {
TfLiteStatus initialization_status() const { return initialization_status_; }
- size_t operators_size() const { return model_->subgraphs()->Get(0)->operators()->size(); }
-
#ifdef EON_COMPILER_RUN
NodeAndRegistration* node_and_registrations_ = nullptr;
- const NodeAndRegistration node_and_registration(int node_index) const {
- return node_and_registrations_[node_index];
+ size_t operators_size(uint32_t subgraph_idx = 0) const
+ {
+ return model_->subgraphs()->Get(subgraph_idx)->operators()->size();
+ }
+
+ const NodeAndRegistration node_and_registration(int node_index, int sg)
+ {
+ return graph_.GetAllocations()[sg].node_and_registrations[node_index];
}
#endif
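
With node_and_registration now reading from the per-subgraph allocations, an EON-compiler build can walk the operators of any subgraph. A sketch, assuming an interpreter compiled with EON_COMPILER_RUN defined:

    #ifdef EON_COMPILER_RUN
    for (size_t i = 0; i < interpreter.operators_size(0); i++) {
        const NodeAndRegistration nr = interpreter.node_and_registration((int)i, 0);
        (void)nr.registration;  // inspect the op's node/registration here
    }
    #endif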