diff --git a/EdgeImpulse.EI-SDK.pdsc b/EdgeImpulse.EI-SDK.pdsc
index 51d9ae9..7da594b 100644
--- a/EdgeImpulse.EI-SDK.pdsc
+++ b/EdgeImpulse.EI-SDK.pdsc
@@ -5,13 +5,16 @@
  <name>EI-SDK</name>
  <license>LICENSE-apache-2.0.txt</license>
  <description>Edge Impulse SDK</description>
- <url>https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.49.23/</url>
+ <url>https://github.com/edgeimpulse/edge-impulse-sdk-pack/releases/download/v1.49.27/</url>
  <supportContact>hello@edgeimpulse.com</supportContact>
  <repository type="git">https://github.com/edgeimpulse/edge-impulse-sdk-pack.git</repository>
  <releases>
-   <release version="1.49.23" date="2024-05-09">
+   <release version="1.49.27" date="2024-05-13">
      EI-SDK
    </release>
+   <release version="1.49.23" date="2024-05-09">
+     EI-SDK
+   </release>
    <release version="1.49.22">
      EI-SDK
    </release>
@@ -131,7 +134,7 @@
-     <component Cclass="EdgeImpulse" Cgroup="SDK" Cversion="1.49.23">
+     <component Cclass="EdgeImpulse" Cgroup="SDK" Cversion="1.49.27">
        <description>Edge Impulse SDK</description>
diff --git a/EdgeImpulse.pidx b/EdgeImpulse.pidx
index 97f498e..24e7c2e 100644
--- a/EdgeImpulse.pidx
+++ b/EdgeImpulse.pidx
@@ -2,8 +2,8 @@
<index schemaVersion="1.0.0" xs:noNamespaceSchemaLocation="PackIndex.xsd" xmlns:xs="http://www.w3.org/2001/XMLSchema-instance">
  <vendor>EdgeImpulse</vendor>
  <url>https://raw.githubusercontent.com/edgeimpulse/edge-impulse-sdk-pack/main/</url>
- <timestamp>2024-05-09 08:51:12</timestamp>
+ <timestamp>2024-05-13 11:16:32</timestamp>
  <pindex>
-   <pdsc vendor="EdgeImpulse" name="EI-SDK" version="1.49.23"/>
+   <pdsc vendor="EdgeImpulse" name="EI-SDK" version="1.49.27"/>
  </pindex>
</index>
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/drpai.h b/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/drpai.h
index 01d4053..6ecea7d 100644
--- a/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/drpai.h
+++ b/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/drpai.h
@@ -710,10 +710,11 @@ EI_IMPULSE_ERROR run_nn_inference_image_quantized(
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
}
case EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI: {
- #if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1
+ if (block_config->quantized == 1) {
ei_printf("ERR: YOLOv5 does not support quantized inference\n");
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
- #else
+ }
+ else {
if (debug) {
ei_printf("DEBUG: raw drpai output");
ei_printf("\n[");
@@ -725,14 +726,12 @@ EI_IMPULSE_ERROR run_nn_inference_image_quantized(
}
ei_printf("]\n");
}
+ }
#if ((EI_CLASSIFIER_OBJECT_DETECTION == 1) && (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI))
// do post processing
fill_res = drpai_run_yolov5_postprocessing(impulse, block_config, signal, result, debug);
#endif
-
- #endif
-
break;
}
default: {
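
The change above is the core pattern of this patch: the YOLOv5 decode path in drpai.h stops keying off the build-wide `EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED` macro and instead tests the learn block's own `quantized` flag at runtime, so one binary can serve blocks of either kind. A minimal sketch of the before/after, with a stand-in config struct (the real block config carries many more fields):

```cpp
#include <cstdio>

// Stand-in for the SDK's learn block config; only the field used here.
struct block_config_t {
    int quantized; // 1 = int8 graph, 0 = float32 graph, known per block at runtime
};

enum ei_status { EI_OK, EI_UNSUPPORTED };

// Before: one path existed per binary, chosen by the preprocessor:
//   #if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1
//       ... error out ...
//   #else
//       ... float32 decode ...
//   #endif
// After: both paths compile, and each block picks one at runtime.
ei_status decode_yolov5(const block_config_t *block_config) {
    if (block_config->quantized == 1) {
        printf("ERR: YOLOv5 does not support quantized inference\n");
        return EI_UNSUPPORTED;
    }
    else {
        // float32 decode path runs here
        return EI_OK;
    }
}

int main() {
    block_config_t cfg{0};
    return decode_yolov5(&cfg) == EI_OK ? 0 : 1;
}
```
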
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/onnx_tidl.h b/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/onnx_tidl.h
index 0c957b1..acc3e12 100644
--- a/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/onnx_tidl.h
+++ b/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/onnx_tidl.h
@@ -387,11 +387,11 @@ static EI_IMPULSE_ERROR inference_onnx_run(const ei_impulse_t *impulse,
if (block_config->object_detection) {
switch (block_config->object_detection_last_layer) {
case EI_CLASSIFIER_LAST_LAYER_YOLOX: {
- #if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1
+ if (block_config->quantized == 1) {
ei_printf("ERR: YOLOX does not support quantized inference\n");
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
- #else
-
+ }
+ else {
if (debug) {
ei_printf("YOLOX OUTPUT (%d ms.): ", result->timing.classification);
for (size_t ix = 0; ix < output_tensor_features_count; ix++) {
@@ -406,7 +406,7 @@ static EI_IMPULSE_ERROR inference_onnx_run(const ei_impulse_t *impulse,
result,
(float*)out_data,
output_tensor_features_count);
- #endif
+ }
break;
}
default: {
@@ -417,7 +417,7 @@ static EI_IMPULSE_ERROR inference_onnx_run(const ei_impulse_t *impulse,
}
}
else {
-#if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1
+#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1
switch (output_tensor_type) {
case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: {
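
Note the asymmetry in this file: the object-detection branch switches to the runtime `block_config->quantized` check, while the classification path keeps a compile-time guard, renamed from `EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED` to `EI_CLASSIFIER_QUANTIZATION_ENABLED`. The renamed macro answers "is quantized support compiled into this build?", whereas the flag answers "is this particular block quantized?". A hedged sketch of how the two levels combine (everything except the macro name is illustrative):

```cpp
#include <cstdio>

// In the SDK this comes from the exported model; defined here for the sketch.
#define EI_CLASSIFIER_QUANTIZATION_ENABLED 1

struct block_config_t { int quantized; }; // illustrative stand-in

void fill_result(const block_config_t *cfg) {
#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1
    // Build has both kernels: choose per block at runtime.
    if (cfg->quantized == 1) {
        printf("dequantize int8 output tensor\n");
    } else {
        printf("read float32 output tensor\n");
    }
#else
    // Quantized kernels were never compiled; only the float path exists.
    printf("read float32 output tensor\n");
#endif
}

int main() {
    block_config_t cfg{1};
    fill_result(&cfg);
}
```
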
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/tensaiflow.h b/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/tensaiflow.h
index 7ce1591..dd6caf6 100644
--- a/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/tensaiflow.h
+++ b/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/tensaiflow.h
@@ -181,7 +181,7 @@ EI_IMPULSE_ERROR run_nn_inference_image_quantized(
if (block_config->object_detection) {
switch (block_config->object_detection_last_layer) {
case EI_CLASSIFIER_LAST_LAYER_FOMO: {
- #if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1
+ if (block_config->quantized == 1) {
fill_res = fill_result_struct_i8_fomo(
impulse,
block_config,
@@ -191,10 +191,11 @@ EI_IMPULSE_ERROR run_nn_inference_image_quantized(
graph_config->output_scale,
impulse->fomo_output_size,
impulse->fomo_output_size);
- #else
+ }
+ else {
ei_printf("ERR: TensaiFlow does not support float32 inference\n");
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
- #endif
+ }
break;
}
default: {
@@ -205,7 +206,7 @@ EI_IMPULSE_ERROR run_nn_inference_image_quantized(
}
}
else {
- #if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1
+ if (block_config->quantized == 1) {
fill_res = fill_result_struct_i8(
impulse,
result,
@@ -213,10 +214,11 @@ EI_IMPULSE_ERROR run_nn_inference_image_quantized(
graph_config->output_zeropoint,
graph_config->output_scale,
debug);
- #else
+ }
+ else {
ei_printf("ERR: TensaiFlow does not support float32 inference\n");
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
- #endif
+ }
}
if (fill_res != EI_IMPULSE_OK) {
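
tensaiflow.h is the mirror image of the YOLO cases: the engine is int8-only, so the runtime check now rejects float32 instead. The quantized branch forwards `graph_config->output_zeropoint` and `output_scale` to `fill_result_struct_i8`, which applies the standard affine dequantization `real = scale * (q - zero_point)`. An illustrative standalone version of that step (helper names are not from the SDK):

```cpp
#include <cstdint>
#include <cstddef>
#include <cstdio>

// Standard affine dequantization for int8 tensors:
//   real_value = scale * (quantized_value - zero_point)
static float dequantize_i8(int8_t q, int32_t zero_point, float scale) {
    return scale * static_cast<float>(static_cast<int32_t>(q) - zero_point);
}

// Illustrative analogue of fill_result_struct_i8: int8 scores -> float confidences.
static void fill_scores(const int8_t *out_data, size_t label_count,
                        int32_t zero_point, float scale, float *scores) {
    for (size_t ix = 0; ix < label_count; ix++) {
        scores[ix] = dequantize_i8(out_data[ix], zero_point, scale);
    }
}

int main() {
    const int8_t raw[3] = { -128, 0, 127 };
    float scores[3];
    // e.g. zero_point = -128, scale = 1/255 maps the int8 range onto ~[0, 1]
    fill_scores(raw, 3, -128, 1.0f / 255.0f, scores);
    for (float s : scores) printf("%f\n", s);
}
```
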
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/tflite_helper.h b/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/tflite_helper.h
index 85cee68..c567805 100644
--- a/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/tflite_helper.h
+++ b/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/tflite_helper.h
@@ -388,10 +388,11 @@ EI_IMPULSE_ERROR fill_result_struct_from_output_tensor_tflite(
break;
}
case EI_CLASSIFIER_LAST_LAYER_YOLOX: {
- #if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1
+ if (block_config->quantized == 1) {
ei_printf("ERR: YOLOX does not support quantized inference\n");
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
- #else
+ }
+ else {
fill_res = fill_result_struct_f32_yolox(
impulse,
block_config,
@@ -399,14 +400,15 @@ EI_IMPULSE_ERROR fill_result_struct_from_output_tensor_tflite(
output->data.f,
impulse->tflite_output_features_count,
debug);
- #endif
+ }
break;
}
case EI_CLASSIFIER_LAST_LAYER_YOLOV7: {
- #if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1
+ if (block_config->quantized == 1) {
ei_printf("ERR: YOLOV7 does not support quantized inference\n");
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
- #else
+ }
+ else {
size_t output_feature_count = 1;
for (int ix = 0; ix < output->dims->size; ix++) {
output_feature_count *= output->dims->data[ix];
@@ -417,7 +419,7 @@ EI_IMPULSE_ERROR fill_result_struct_from_output_tensor_tflite(
result,
output->data.f,
output_feature_count);
- #endif
+ }
break;
}
case EI_CLASSIFIER_LAST_LAYER_TAO_SSD:
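
The YOLOV7 branch above flattens the output tensor by multiplying all of its dimensions, since the decoder consumes a flat float array. A standalone sketch of that computation; the structs are stand-ins for `TfLiteIntArray`/`TfLiteTensor`, and the example shape is just a typical single-class YOLO export:

```cpp
#include <cstddef>
#include <cstdio>

// Stand-ins for TfLiteIntArray / TfLiteTensor, just enough for the sketch.
struct int_array_t { int size; int data[4]; };
struct tensor_t { int_array_t *dims; };

// Flat element count = product of all dimensions, exactly as the patch
// computes output_feature_count before calling the YOLOv7 decoder.
size_t feature_count(const tensor_t *output) {
    size_t count = 1;
    for (int ix = 0; ix < output->dims->size; ix++) {
        count *= output->dims->data[ix];
    }
    return count;
}

int main() {
    int_array_t dims{3, {1, 25200, 6, 0}}; // e.g. 1 x 25200 anchors x (5 + 1 class)
    tensor_t t{&dims};
    printf("%zu\n", feature_count(&t)); // 1 * 25200 * 6 = 151200
}
```
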
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/tflite_tidl.h b/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/tflite_tidl.h
index c9642f1..5d78201 100644
--- a/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/tflite_tidl.h
+++ b/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/tflite_tidl.h
@@ -200,7 +200,7 @@ EI_IMPULSE_ERROR run_nn_inference(
result->timing.classification_us = ctx_end_us - ctx_start_us;
result->timing.classification = (int)(result->timing.classification_us / 1000);
-#if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1
+#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1
int8_t* out_data = interpreter->typed_output_tensor(block_config->output_data_tensor);
#else
float* out_data = interpreter->typed_output_tensor(block_config->output_data_tensor);
@@ -232,7 +232,6 @@ EI_IMPULSE_ERROR run_nn_inference(
}
ei_printf(")\n");
}
-
}
}
@@ -249,7 +248,8 @@ EI_IMPULSE_ERROR run_nn_inference(
if (block_config->object_detection) {
switch (block_config->object_detection_last_layer) {
case EI_CLASSIFIER_LAST_LAYER_FOMO: {
- #if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1
+ if (block_config->quantized == 1) {
+#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1
fill_res = fill_result_struct_i8_fomo(
impulse,
block_config,
@@ -259,7 +259,9 @@ EI_IMPULSE_ERROR run_nn_inference(
out_data->tflite_output_scale,
impulse->fomo_output_size,
impulse->fomo_output_size);
- #else
+#endif
+ }
+ else {
fill_res = fill_result_struct_f32_fomo(
impulse,
block_config,
@@ -267,7 +269,7 @@ EI_IMPULSE_ERROR run_nn_inference(
out_data,
impulse->fomo_output_size,
impulse->fomo_output_size);
- #endif
+ }
break;
}
case EI_CLASSIFIER_LAST_LAYER_SSD: {
@@ -279,10 +281,11 @@ EI_IMPULSE_ERROR run_nn_inference(
if (!label_tensor) {
return EI_IMPULSE_LABEL_TENSOR_WAS_NULL;
}
- #if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1
+ if (block_config->quantized == 1) {
ei_printf("ERR: MobileNet SSD does not support quantized inference\n");
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
- #else
+ }
+ else {
fill_res = fill_result_struct_f32_object_detection(
impulse,
block_config,
@@ -291,15 +294,16 @@ EI_IMPULSE_ERROR run_nn_inference(
scores_tensor,
label_tensor,
debug);
- #endif
+ }
break;
}
case EI_CLASSIFIER_LAST_LAYER_YOLOV5:
case EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI: {
- #if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1
+ if (block_config->quantized == 1) {
ei_printf("ERR: YOLOv5 does not support quantized inference\n");
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
- #else
+ }
+ else {
int version = block_config->object_detection_last_layer == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI ?
5 : 6;
fill_res = fill_result_struct_f32_yolov5(
@@ -310,14 +314,15 @@ EI_IMPULSE_ERROR run_nn_inference(
out_data,
impulse->tflite_output_features_count,
debug);
- #endif
+ }
break;
}
case EI_CLASSIFIER_LAST_LAYER_YOLOX: {
- #if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1
+ if (block_config->quantized == 1) {
ei_printf("ERR: YOLOX does not support quantized inference\n");
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
- #else
+ }
+ else {
fill_res = fill_result_struct_f32_yolox(
impulse,
block_config,
@@ -325,14 +330,15 @@ EI_IMPULSE_ERROR run_nn_inference(
out_data,
impulse->tflite_output_features_count,
debug);
- #endif
+ }
break;
}
case EI_CLASSIFIER_LAST_LAYER_YOLOV7: {
- #if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1
+ if (block_config->quantized == 1) {
ei_printf("ERR: YOLOV7 does not support quantized inference\n");
return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
- #else
+ }
+ else {
TfLiteTensor *output = interpreter->output_tensor(0);
size_t output_feature_count = 1;
for (int ix = 0; ix < output->dims->size; ix++) {
@@ -344,7 +350,7 @@ EI_IMPULSE_ERROR run_nn_inference(
result,
output->data.f,
output_feature_count);
- #endif
+ }
break;
}
default: {
@@ -355,7 +361,7 @@ EI_IMPULSE_ERROR run_nn_inference(
}
}
else {
-#if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1
+#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1
fill_res = fill_result_struct_i8(impulse, result, out_data, out_data->tflite_output_zeropoint, out_data->tflite_output_scale, debug);
#else
fill_res = fill_result_struct_f32(impulse, result, out_data, debug);
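
The FOMO branch in tflite_tidl.h is the one place where both guard styles remain, nested: the runtime `if` selects the block's path, but the `fill_result_struct_i8_fomo` call stays wrapped in `#if EI_CLASSIFIER_QUANTIZATION_ENABLED` so that float-only builds, where the int8 helper is never compiled, still build. A self-contained sketch of that shape (stubs stand in for the real fill helpers):

```cpp
#include <cstdio>

#ifndef EI_CLASSIFIER_QUANTIZATION_ENABLED
#define EI_CLASSIFIER_QUANTIZATION_ENABLED 1 // illustrative; set by the exported model
#endif

struct block_config_t { int quantized; };

static int fill_result_struct_f32_fomo() { return 0; } // stub
#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1
static int fill_result_struct_i8_fomo() { return 0; }  // stub; exists only in quantized builds
#endif

static int decode_fomo(const block_config_t *cfg) {
    if (cfg->quantized == 1) {
#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1
        return fill_result_struct_i8_fomo();
#endif
        // Unreachable in practice: a quantized block implies quantized support
        // was compiled in, but the guard keeps float-only builds compiling.
    }
    else {
        return fill_result_struct_f32_fomo();
    }
    return -1;
}

int main() {
    block_config_t cfg{1};
    return decode_fomo(&cfg);
}
```
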