diff --git a/EdgeImpulse.EI-SDK.pdsc b/EdgeImpulse.EI-SDK.pdsc
new file mode 100644
index 0000000..e24b269
--- /dev/null
+++ b/EdgeImpulse.EI-SDK.pdsc
@@ -0,0 +1,563 @@
+<!-- NOTE: the XML markup of this PDSC file was lost in extraction; only text
+     content survived. Recoverable fields: vendor "EdgeImpulse", pack name
+     "EI-SDK", license file "LICENSE-apache-2.0.txt", description "Edge Impulse SDK",
+     contact hello@edgeimpulse.com, and three build conditions:
+     "True if using one of the Cortex-M core",
+     "True if Cortex-M core == TRUE and either GCC or ARMCC and device running in little-endian byte ordering",
+     "True if CMSIS Core and CMSIS DSP and CMSIS NN are in use",
+     plus a component described as "Edge Impulse SDK". -->
diff --git a/LICENSE-apache-2.0.txt b/LICENSE-apache-2.0.txt
new file mode 100644
index 0000000..0cdd12c
--- /dev/null
+++ b/LICENSE-apache-2.0.txt
@@ -0,0 +1,165 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
\ No newline at end of file
diff --git a/README.md b/README.md
index e827415..d635729 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,9 @@
-# edge-impulse-sdk-package
-Edge Impulse SDK Open CMSIS package
+# Edge Impulse DSP and Inferencing SDK
+
+Portable library for digital signal processing and machine learning inferencing. This repository contains the device implementation in C++ for both processing and learning blocks in [Edge Impulse](https://www.edgeimpulse.com).
+
+[Documentation](https://docs.edgeimpulse.com/reference#inferencing-sdk)
+
+## Develop locally
+
+To develop locally, the easiest route is to grab the [example-standalone-inferencing](https://github.com/edgeimpulse/example-standalone-inferencing) (Desktop) or [example-standalone-inferencing-mbed](https://github.com/edgeimpulse/example-standalone-inferencing-mbed) (ST IoT Discovery Kit, e.g. to test the CMSIS-DSP / CMSIS-NN integration) example application, add your Edge Impulse project (use the C++ Library export option), and then symlink this repository in.
diff --git a/edgeimpulse/edge-impulse-sdk/LICENSE b/edgeimpulse/edge-impulse-sdk/LICENSE
new file mode 100644
index 0000000..0cdd12c
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/LICENSE
@@ -0,0 +1,165 @@
+[Apache License, Version 2.0: verbatim copy of the LICENSE-apache-2.0.txt text above]
diff --git a/edgeimpulse/edge-impulse-sdk/LICENSE-apache-2.0.txt b/edgeimpulse/edge-impulse-sdk/LICENSE-apache-2.0.txt
new file mode 100644
index 0000000..0cdd12c
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/LICENSE-apache-2.0.txt
@@ -0,0 +1,165 @@
+[Apache License, Version 2.0: verbatim copy of the LICENSE-apache-2.0.txt text above]
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_aligned_malloc.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_aligned_malloc.h
new file mode 100644
index 0000000..7ef1a26
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_aligned_malloc.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef _EDGE_IMPULSE_ALIGNED_MALLOC_H_
+#define _EDGE_IMPULSE_ALIGNED_MALLOC_H_
+
+#include <assert.h>
+#include <stdint.h>
+#include "../porting/ei_classifier_porting.h"
+
+#ifdef __cplusplus
+namespace {
+#endif // __cplusplus
+
+/**
+* Based on https://github.com/embeddedartistry/embedded-resources/blob/master/examples/c/malloc_aligned.c
+*/
+
+/**
+* Simple macro that rounds num up to the next multiple of align,
+* where align must be a power of two (e.g. align_up(13, 8) == 16)
+*/
+#ifndef align_up
+#define align_up(num, align) \
+ (((num) + ((align) - 1)) & ~((align) - 1))
+#endif
+
+//Number of bytes we're using for storing the aligned pointer offset
+typedef uint16_t offset_t;
+#define PTR_OFFSET_SZ sizeof(offset_t)
+
+/**
+* ei_aligned_calloc takes in the requested alignment and size.
+* We call ei_calloc with enough extra bytes to store the offset header
+* and to guarantee the desired alignment.
+*/
+__attribute__((unused)) void * ei_aligned_calloc(size_t align, size_t size)
+{
+ void * ptr = NULL;
+
+ //We want it to be a power of two since align_up operates on powers of two
+ assert((align & (align - 1)) == 0);
+
+ if(align && size)
+ {
+ /*
+ * We know we have to fit an offset value
+ * We also allocate extra bytes to ensure we can meet the alignment
+ */
+ uint32_t hdr_size = PTR_OFFSET_SZ + (align - 1);
+ void * p = ei_calloc(size + hdr_size, 1);
+
+ if(p)
+ {
+ /*
+ * Add the offset size to malloc's pointer (we will always store that)
+ * Then align the resulting value to the target alignment
+ */
+ ptr = (void *) align_up(((uintptr_t)p + PTR_OFFSET_SZ), align);
+
+ //Calculate the offset and store it behind our aligned pointer
+ *((offset_t *)ptr - 1) = (offset_t)((uintptr_t)ptr - (uintptr_t)p);
+
+ } // else NULL, could not malloc
+ } //else NULL, invalid arguments
+
+ return ptr;
+}
+
+/**
+* ei_aligned_free works like free(), but walks backwards from the passed-in
+* pointer to find the stored offset and the original pointer to hand to ei_free().
+* Note that it is VERY BAD to call plain free() on an ei_aligned_calloc() pointer.
+*/
+__attribute__((unused)) void ei_aligned_free(void * ptr)
+{
+ assert(ptr);
+
+ /*
+ * Walk backwards from the passed-in pointer to get the pointer offset
+ * We convert to an offset_t pointer and rely on pointer math to get the data
+ */
+ offset_t offset = *((offset_t *)ptr - 1);
+
+ /*
+ * Once we have the offset, we can get our original pointer and call free
+ */
+ void * p = (void *)((uint8_t *)ptr - offset);
+ ei_free(p);
+}
+
+#ifdef __cplusplus
+}
+#endif // __cplusplus
+
+#endif // _EDGE_IMPULSE_ALIGNED_MALLOC_H_
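A minimal usage sketch of the two helpers above, assuming the SDK include root is on the include path and the porting layer provides ei_calloc/ei_free. A pointer from ei_aligned_calloc must be released with ei_aligned_free, never plain free(), because the real allocation starts a few bytes earlier:

```cpp
#include <cassert>
#include <cstdint>
#include "edge-impulse-sdk/classifier/ei_aligned_malloc.h" // assumed include path

int main() {
    // Request 64 bytes aligned to a 32-byte boundary (alignment must be a power of two).
    void *buf = ei_aligned_calloc(32, 64);
    assert(buf != nullptr);
    assert(((uintptr_t)buf % 32) == 0); // returned pointer honors the alignment

    ei_aligned_free(buf); // recovers the raw ei_calloc pointer via the stored offset
    return 0;
}
```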
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_classifier_config.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_classifier_config.h
new file mode 100644
index 0000000..8865a85
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_classifier_config.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef _EI_CLASSIFIER_CONFIG_H_
+#define _EI_CLASSIFIER_CONFIG_H_
+
+// clang-format off
+
+// This is a file that's only used in benchmarking to override HW optimized kernels
+#ifdef __has_include
+ #if __has_include("source/benchmark.h")
+ #include "source/benchmark.h"
+ #endif
+#endif
+
+#if EI_CLASSIFIER_TFLITE_ENABLE_SILABS_MVP == 1
+ #define EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN 0
+ #define EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES 1
+#endif
+
+#ifndef EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN
+#if defined(__MBED__)
+ #include "mbed_version.h"
+ #if (MBED_VERSION < MBED_ENCODE_VERSION((5), (7), (0)))
+ #define EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN 0
+ #else
+ #define EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN 1
+ #endif // Mbed OS 5.7 version check
+
+// __ARM_ARCH_PROFILE is a predefine of arm-gcc. __TARGET_* is armcc
+#elif __ARM_ARCH_PROFILE == 'M' || defined(__TARGET_CPU_CORTEX_M0) || defined(__TARGET_CPU_CORTEX_M0PLUS) || defined(__TARGET_CPU_CORTEX_M3) || defined(__TARGET_CPU_CORTEX_M4) || defined(__TARGET_CPU_CORTEX_M7) || defined(ARDUINO_NRF52_ADAFRUIT)
+ #define EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN 1
+#else
+ #define EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN 0
+#endif
+#endif // EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN
+
+// CMSIS-NN falls back to reference kernels when neither __ARM_FEATURE_DSP nor __ARM_FEATURE_MVE is defined.
+// We never want those, so disable CMSIS-NN in that case and emit a warning.
+#if EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1
+ #if !defined(__ARM_FEATURE_DSP) && !defined(__ARM_FEATURE_MVE)
+ #pragma message( \
+ "CMSIS-NN enabled, but neither __ARM_FEATURE_DSP nor __ARM_FEATURE_MVE defined. Falling back.")
+ #undef EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN
+ #define EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN 0
+ #endif
+#endif // EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1
+
+#if EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1
+#define CMSIS_NN 1
+#define EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES 1
+#endif
+
+#ifndef EI_CLASSIFIER_TFLITE_ENABLE_ARC
+#ifdef CPU_ARC
+#define EI_CLASSIFIER_TFLITE_ENABLE_ARC 1
+#else
+#define EI_CLASSIFIER_TFLITE_ENABLE_ARC 0
+#endif // CPU_ARC
+#endif // EI_CLASSIFIER_TFLITE_ENABLE_ARC
+
+#ifndef EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN
+ #if defined(ESP32)
+ #include "sdkconfig.h"
+ #define EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN 1
+ #define ESP_NN 1
+ #endif // ESP32 check
+ #if defined(CONFIG_IDF_TARGET_ESP32S3)
+ #define EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 1
+ #endif // ESP32S3 check
+#else
+ #define ESP_NN 1
+#endif
+
+// Compiler without __has_include? Then include the metadata unconditionally, followed by the ops define (only needed for EON-compiled models)
+#ifndef __has_include
+ #include "model-parameters/model_metadata.h"
+ #if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) && (EI_CLASSIFIER_COMPILED == 1)
+ #include "tflite-model/trained_model_ops_define.h"
+ #endif
+#else
+ #if __has_include("tflite-model/trained_model_ops_define.h")
+ #include "tflite-model/trained_model_ops_define.h"
+ #endif
+#endif // __has_include
+
+// clang-format on
+#endif // _EI_CLASSIFIER_CONFIG_H_
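A quick compile-time probe of which kernel path the gating above selects for a given target; a sketch assuming the SDK root is on the include path. Because the selection is wrapped in #ifndef, it can also be overridden from the build line (e.g. -DEI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN=0), subject to the DSP/MVE fallback check above:

```cpp
#include <cstdio>
#include "edge-impulse-sdk/classifier/ei_classifier_config.h" // assumed include path

int main() {
#if EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1
    std::printf("CMSIS-NN kernels selected\n");
#else
    std::printf("reference (or other vendor) kernels selected\n");
#endif
    return 0;
}
```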
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_classifier_smooth.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_classifier_smooth.h
new file mode 100644
index 0000000..31be582
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_classifier_smooth.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef _EI_CLASSIFIER_SMOOTH_H_
+#define _EI_CLASSIFIER_SMOOTH_H_
+
+#if EI_CLASSIFIER_OBJECT_DETECTION != 1
+
+#include <stdint.h>
+#include <string.h>
+
+typedef struct ei_classifier_smooth {
+ int *last_readings;
+ size_t last_readings_size;
+ uint8_t min_readings_same;
+ float classifier_confidence;
+ float anomaly_confidence;
+ uint8_t count[EI_CLASSIFIER_LABEL_COUNT + 2] = { 0 };
+ size_t count_size = EI_CLASSIFIER_LABEL_COUNT + 2;
+} ei_classifier_smooth_t;
+
+/**
+ * Initialize a smooth structure. This is useful if you don't want to trust
+ * single readings, but rather want consensus
+ * (e.g. 7 / 10 readings should be the same before I draw any ML conclusions).
+ * This allocates memory on the heap!
+ * @param smooth Pointer to an uninitialized ei_classifier_smooth_t struct
+ * @param n_readings Number of readings you want to store
+ * @param min_readings_same Minimum readings that need to be the same before concluding (needs to be lower than n_readings)
+ * @param classifier_confidence Minimum confidence in a class (default 0.8)
+ * @param anomaly_confidence Maximum error for anomalies (default 0.3)
+ */
+void ei_classifier_smooth_init(ei_classifier_smooth_t *smooth, size_t n_readings,
+ uint8_t min_readings_same, float classifier_confidence = 0.8,
+ float anomaly_confidence = 0.3) {
+ smooth->last_readings = (int*)ei_malloc(n_readings * sizeof(int));
+ for (size_t ix = 0; ix < n_readings; ix++) {
+ smooth->last_readings[ix] = -1; // -1 == uncertain
+ }
+ smooth->last_readings_size = n_readings;
+ smooth->min_readings_same = min_readings_same;
+ smooth->classifier_confidence = classifier_confidence;
+ smooth->anomaly_confidence = anomaly_confidence;
+ smooth->count_size = EI_CLASSIFIER_LABEL_COUNT + 2;
+}
+
+/**
+ * Call when a new reading comes in.
+ * @param smooth Pointer to an initialized ei_classifier_smooth_t struct
+ * @param result Pointer to a result structure (after calling ei_run_classifier)
+ * @returns Label, either 'uncertain', 'anomaly', or a label from the result struct
+ */
+const char* ei_classifier_smooth_update(ei_classifier_smooth_t *smooth, ei_impulse_result_t *result) {
+ // clear out the count array
+ memset(smooth->count, 0, EI_CLASSIFIER_LABEL_COUNT + 2);
+
+ // roll through the last_readings buffer
+ numpy::roll(smooth->last_readings, smooth->last_readings_size, -1);
+
+ int reading = -1; // uncertain
+
+ // print the predictions
+ // printf("[");
+ for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) {
+ if (result->classification[ix].value >= smooth->classifier_confidence) {
+ reading = (int)ix;
+ }
+ }
+#if EI_CLASSIFIER_HAS_ANOMALY == 1
+ if (result->anomaly >= smooth->anomaly_confidence) {
+ reading = -2; // anomaly
+ }
+#endif
+
+ smooth->last_readings[smooth->last_readings_size - 1] = reading;
+
+ // now count the last n readings and see what we actually saw
+ for (size_t ix = 0; ix < smooth->last_readings_size; ix++) {
+ if (smooth->last_readings[ix] >= 0) {
+ smooth->count[smooth->last_readings[ix]]++;
+ }
+ else if (smooth->last_readings[ix] == -1) { // uncertain
+ smooth->count[EI_CLASSIFIER_LABEL_COUNT]++;
+ }
+ else if (smooth->last_readings[ix] == -2) { // anomaly
+ smooth->count[EI_CLASSIFIER_LABEL_COUNT + 1]++;
+ }
+ }
+
+ // then loop over the count and see which is highest
+ uint8_t top_result = 0;
+ uint8_t top_count = 0;
+ bool met_confidence_threshold = false;
+ uint8_t confidence_threshold = smooth->min_readings_same; // at least this many windows must agree
+ for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT + 2; ix++) {
+ if (smooth->count[ix] > top_count) {
+ top_result = ix;
+ top_count = smooth->count[ix];
+ }
+ if (smooth->count[ix] >= confidence_threshold) {
+ met_confidence_threshold = true;
+ }
+ }
+
+ if (met_confidence_threshold) {
+ if (top_result == EI_CLASSIFIER_LABEL_COUNT) {
+ return "uncertain";
+ }
+ else if (top_result == EI_CLASSIFIER_LABEL_COUNT + 1) {
+ return "anomaly";
+ }
+ else {
+ return result->classification[top_result].label;
+ }
+ }
+ return "uncertain";
+}
+
+/**
+ * Free the memory allocated by ei_classifier_smooth_init
+ */
+void ei_classifier_smooth_free(ei_classifier_smooth_t *smooth) {
+ ei_free(smooth->last_readings);
+}
+
+#endif // #if EI_CLASSIFIER_OBJECT_DETECTION != 1
+
+#endif // _EI_CLASSIFIER_SMOOTH_H_
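A usage sketch of the smoother, following the doc comments above: require 7 of the last 10 windows to agree before trusting a label. This assumes the usual Edge Impulse inferencing headers are already included, so that signal_t, ei_impulse_result_t, run_classifier() and ei_printf() exist:

```cpp
// Sketch only: the 100-iteration loop stands in for the application's real loop.
void classify_smoothed(signal_t *signal) {
    ei_classifier_smooth_t smooth;
    ei_classifier_smooth_init(&smooth, 10 /* readings kept */, 7 /* must agree */,
                              0.8f /* min class confidence */, 0.3f /* max anomaly score */);

    for (int i = 0; i < 100; i++) {
        ei_impulse_result_t result;
        run_classifier(signal, &result, false);  // fills `result` for the current window
        const char *label = ei_classifier_smooth_update(&smooth, &result);
        ei_printf("smoothed prediction: %s\n", label);
    }

    ei_classifier_smooth_free(&smooth);          // releases the heap buffer from init
}
```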
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_classifier_types.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_classifier_types.h
new file mode 100644
index 0000000..45fc645
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_classifier_types.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef _EDGE_IMPULSE_RUN_CLASSIFIER_TYPES_H_
+#define _EDGE_IMPULSE_RUN_CLASSIFIER_TYPES_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+// needed for standalone C example
+#include "model-parameters/model_metadata.h"
+
+#ifndef EI_CLASSIFIER_MAX_OBJECT_DETECTION_COUNT
+#define EI_CLASSIFIER_MAX_OBJECT_DETECTION_COUNT 10
+#endif
+
+#ifndef EI_CLASSIFIER_MAX_LABELS_COUNT
+#define EI_CLASSIFIER_MAX_LABELS_COUNT 25
+#endif
+
+typedef struct {
+ const char *label;
+ float value;
+} ei_impulse_result_classification_t;
+
+typedef struct {
+ float mean_value;
+ float max_value;
+} ei_impulse_visual_ad_result_t;
+
+typedef struct {
+ const char *label;
+ uint32_t x;
+ uint32_t y;
+ uint32_t width;
+ uint32_t height;
+ float value;
+} ei_impulse_result_bounding_box_t;
+
+typedef struct {
+ int sampling;
+ int dsp;
+ int classification;
+ int anomaly;
+ int64_t dsp_us;
+ int64_t classification_us;
+ int64_t anomaly_us;
+} ei_impulse_result_timing_t;
+
+typedef struct {
+ ei_impulse_result_bounding_box_t *bounding_boxes;
+ uint32_t bounding_boxes_count;
+ ei_impulse_result_classification_t classification[EI_CLASSIFIER_MAX_LABELS_COUNT];
+ float anomaly;
+ ei_impulse_result_timing_t timing;
+ bool copy_output;
+#ifdef EI_CLASSIFIER_HAS_VISUAL_ANOMALY
+ ei_impulse_result_bounding_box_t *visual_ad_grid_cells;
+ uint32_t visual_ad_count;
+ ei_impulse_visual_ad_result_t visual_ad_result;
+#endif
+} ei_impulse_result_t;
+
+#endif // _EDGE_IMPULSE_RUN_CLASSIFIER_TYPES_H_
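For orientation, a short sketch of how a caller typically walks this struct; it assumes ei_printf/ei_printf_float from the porting layer, a result already filled in by the classifier, and a hypothetical label_count parameter (bounded by EI_CLASSIFIER_MAX_LABELS_COUNT):

```cpp
// Sketch only, not part of the SDK.
void print_result(const ei_impulse_result_t *result, size_t label_count) {
    // Classification scores, one entry per label.
    for (size_t ix = 0; ix < label_count; ix++) {
        ei_printf("%s: ", result->classification[ix].label);
        ei_printf_float(result->classification[ix].value);
        ei_printf("\n");
    }
    // Object detection output is a pointer + count pair.
    for (uint32_t ix = 0; ix < result->bounding_boxes_count; ix++) {
        const ei_impulse_result_bounding_box_t *bb = &result->bounding_boxes[ix];
        ei_printf("%s at (%lu, %lu), %lux%lu\n", bb->label,
                  (unsigned long)bb->x, (unsigned long)bb->y,
                  (unsigned long)bb->width, (unsigned long)bb->height);
    }
    // Per-stage timing in ms (the *_us fields hold microseconds).
    ei_printf("dsp: %d ms, classification: %d ms, anomaly: %d ms\n",
              result->timing.dsp, result->timing.classification, result->timing.anomaly);
}
```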
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_fill_result_struct.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_fill_result_struct.h
new file mode 100644
index 0000000..12429ce
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_fill_result_struct.h
@@ -0,0 +1,968 @@
+/*
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef _EI_CLASSIFIER_FILL_RESULT_STRUCT_H_
+#define _EI_CLASSIFIER_FILL_RESULT_STRUCT_H_
+
+using namespace ei;
+
+#include "model-parameters/model_metadata.h"
+#if EI_CLASSIFIER_HAS_MODEL_VARIABLES == 1
+#include "model-parameters/model_variables.h"
+#endif
+#include "edge-impulse-sdk/classifier/ei_model_types.h"
+#include "edge-impulse-sdk/classifier/ei_classifier_types.h"
+#include "edge-impulse-sdk/classifier/ei_nms.h"
+#include "edge-impulse-sdk/dsp/ei_vector.h"
+
+#ifndef EI_HAS_OBJECT_DETECTION
+ #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_SSD)
+ #define EI_HAS_SSD 1
+ #endif
+ #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_FOMO)
+ #define EI_HAS_FOMO 1
+ #endif
+ #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI)
+ #define EI_HAS_YOLOV5 1
+ #endif
+ #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOX)
+ #define EI_HAS_YOLOX 1
+ #endif
+ #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV7)
+ #define EI_HAS_YOLOV7 1
+ #endif
+#endif
+
+#ifdef EI_HAS_FOMO
+typedef struct cube {
+ size_t x;
+ size_t y;
+ size_t width;
+ size_t height;
+ float confidence;
+ const char *label;
+} ei_classifier_cube_t;
+
+/**
+ * Checks whether a new section overlaps with a cube,
+ * and if so, will **update the cube**
+ */
+__attribute__((unused)) static bool ei_cube_check_overlap(ei_classifier_cube_t *c, int x, int y, int width, int height, float confidence) {
+ bool is_overlapping = !(c->x + c->width < x || c->y + c->height < y || c->x > x + width || c->y > y + height);
+ if (!is_overlapping) return false;
+
+ // if we overlap, but the x of the new box is lower than the x of the current box
+ if (x < c->x) {
+ // update x to match new box and make width larger (by the diff between the boxes)
+ c->x = x;
+ c->width += c->x - x;
+ }
+ // if we overlap, but the y of the new box is lower than the y of the current box
+ if (y < c->y) {
+ // update y to match new box and make height larger (by the diff between the boxes)
+ c->y = y;
+ c->height += c->y - y;
+ }
+ // if we overlap, and x+width of the new box is higher than the x+width of the current box
+ if (x + width > c->x + c->width) {
+ // just make the box wider
+ c->width += (x + width) - (c->x + c->width);
+ }
+ // if we overlap, and y+height of the new box is higher than the y+height of the current box
+ if (y + height > c->y + c->height) {
+ // just make the box higher
+ c->height += (y + height) - (c->y + c->height);
+ }
+ // if the new box has higher confidence, then override confidence of the whole box
+ if (confidence > c->confidence) {
+ c->confidence = confidence;
+ }
+ return true;
+}
+
+__attribute__((unused)) static void ei_handle_cube(std::vector<ei_classifier_cube_t*> *cubes, int x, int y, float vf, const char *label, float detection_threshold) {
+ if (vf < detection_threshold) return;
+
+ bool has_overlapping = false;
+ int width = 1;
+ int height = 1;
+
+ for (auto c : *cubes) {
+ // not cube for same class? continue
+ if (strcmp(c->label, label) != 0) continue;
+
+ if (ei_cube_check_overlap(c, x, y, width, height, vf)) {
+ has_overlapping = true;
+ break;
+ }
+ }
+
+ if (!has_overlapping) {
+ ei_classifier_cube_t *cube = new ei_classifier_cube_t();
+ cube->x = x;
+ cube->y = y;
+ cube->width = 1;
+ cube->height = 1;
+ cube->confidence = vf;
+ cube->label = label;
+ cubes->push_back(cube);
+ }
+}
+
+__attribute__((unused)) static void fill_result_struct_from_cubes(ei_impulse_result_t *result, std::vector<ei_classifier_cube_t*> *cubes, int out_width_factor, uint32_t object_detection_count) {
+ std::vector<ei_classifier_cube_t*> bbs;
+ static std::vector<ei_impulse_result_bounding_box_t> results;
+ int added_boxes_count = 0;
+ results.clear();
+ for (auto sc : *cubes) {
+ bool has_overlapping = false;
+
+ int x = sc->x;
+ int y = sc->y;
+ int width = sc->width;
+ int height = sc->height;
+ const char *label = sc->label;
+ float vf = sc->confidence;
+
+ for (auto c : bbs) {
+ // not cube for same class? continue
+ if (strcmp(c->label, label) != 0) continue;
+
+ if (ei_cube_check_overlap(c, x, y, width, height, vf)) {
+ has_overlapping = true;
+ break;
+ }
+ }
+
+ if (has_overlapping) {
+ continue;
+ }
+
+ bbs.push_back(sc);
+
+ ei_impulse_result_bounding_box_t tmp = {
+ .label = sc->label,
+ .x = (uint32_t)(sc->x * out_width_factor),
+ .y = (uint32_t)(sc->y * out_width_factor),
+ .width = (uint32_t)(sc->width * out_width_factor),
+ .height = (uint32_t)(sc->height * out_width_factor),
+ .value = sc->confidence
+ };
+
+ results.push_back(tmp);
+ added_boxes_count++;
+ }
+
+ // if we didn't detect min required objects, fill the rest with fixed value
+ if (added_boxes_count < object_detection_count) {
+ results.resize(object_detection_count);
+ for (size_t ix = added_boxes_count; ix < object_detection_count; ix++) {
+ results[ix].value = 0.0f;
+ }
+ }
+
+ for (auto c : *cubes) {
+ delete c;
+ }
+
+ result->bounding_boxes = results.data();
+ result->bounding_boxes_count = results.size();
+}
+#endif
+
+__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32_fomo(const ei_impulse_t *impulse,
+ ei_impulse_result_t *result,
+ float *data,
+ int out_width,
+ int out_height) {
+#ifdef EI_HAS_FOMO
+ std::vector<ei_classifier_cube_t*> cubes;
+
+ int out_width_factor = impulse->input_width / out_width;
+
+ for (size_t y = 0; y < out_width; y++) {
+ // ei_printf(" [ ");
+ for (size_t x = 0; x < out_height; x++) {
+ size_t loc = ((y * out_height) + x) * (impulse->label_count + 1);
+
+ for (size_t ix = 1; ix < impulse->label_count + 1; ix++) {
+ float vf = data[loc+ix];
+
+ ei_handle_cube(&cubes, x, y, vf, impulse->categories[ix - 1], impulse->object_detection_threshold);
+ }
+ }
+ }
+
+ fill_result_struct_from_cubes(result, &cubes, out_width_factor, impulse->object_detection_count);
+
+ return EI_IMPULSE_OK;
+#else
+ return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE;
+#endif
+}
+
+__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_i8_fomo(const ei_impulse_t *impulse,
+ ei_impulse_result_t *result,
+ int8_t *data,
+ float zero_point,
+ float scale,
+ int out_width,
+ int out_height) {
+#ifdef EI_HAS_FOMO
+ std::vector<ei_classifier_cube_t*> cubes;
+
+ int out_width_factor = impulse->input_width / out_width;
+
+ for (size_t y = 0; y < out_width; y++) {
+ // ei_printf(" [ ");
+ for (size_t x = 0; x < out_height; x++) {
+ size_t loc = ((y * out_height) + x) * (impulse->label_count + 1);
+
+ for (size_t ix = 1; ix < impulse->label_count + 1; ix++) {
+ int8_t v = data[loc+ix];
+ float vf = static_cast<float>(v - zero_point) * scale;
+
+ ei_handle_cube(&cubes, x, y, vf, impulse->categories[ix - 1], impulse->object_detection_threshold);
+ }
+ }
+ }
+
+ fill_result_struct_from_cubes(result, &cubes, out_width_factor, impulse->object_detection_count);
+
+ return EI_IMPULSE_OK;
+#else
+ return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE;
+#endif
+}
+
+/**
+ * Fill the result structure from an unquantized output tensor
+ * (we don't support quantized here a.t.m.)
+ */
+__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32_object_detection(const ei_impulse_t *impulse,
+ ei_impulse_result_t *result,
+ float *data,
+ float *scores,
+ float *labels,
+ bool debug) {
+#ifdef EI_HAS_SSD
+ static std::vector<ei_impulse_result_bounding_box_t> results;
+ results.clear();
+ results.resize(impulse->object_detection_count);
+ for (size_t ix = 0; ix < impulse->object_detection_count; ix++) {
+
+ float score = scores[ix];
+ float label = labels[ix];
+
+ if (score >= impulse->object_detection_threshold) {
+ float ystart = data[(ix * 4) + 0];
+ float xstart = data[(ix * 4) + 1];
+ float yend = data[(ix * 4) + 2];
+ float xend = data[(ix * 4) + 3];
+
+ if (xstart < 0) xstart = 0;
+ if (xstart > 1) xstart = 1;
+ if (ystart < 0) ystart = 0;
+ if (ystart > 1) ystart = 1;
+ if (yend < 0) yend = 0;
+ if (yend > 1) yend = 1;
+ if (xend < 0) xend = 0;
+ if (xend > 1) xend = 1;
+ if (xend < xstart) xend = xstart;
+ if (yend < ystart) yend = ystart;
+
+ if (debug) {
+ ei_printf("%s (", impulse->categories[(uint32_t)label]);
+ ei_printf_float(label);
+ ei_printf("): ");
+ ei_printf_float(score);
+ ei_printf(" [ ");
+ ei_printf_float(xstart);
+ ei_printf(", ");
+ ei_printf_float(ystart);
+ ei_printf(", ");
+ ei_printf_float(xend);
+ ei_printf(", ");
+ ei_printf_float(yend);
+ ei_printf(" ]\n");
+ }
+
+ results[ix].label = impulse->categories[(uint32_t)label];
+ results[ix].x = static_cast<uint32_t>(xstart * static_cast<float>(impulse->input_width));
+ results[ix].y = static_cast<uint32_t>(ystart * static_cast<float>(impulse->input_height));
+ results[ix].width = static_cast<uint32_t>((xend - xstart) * static_cast<float>(impulse->input_width));
+ results[ix].height = static_cast<uint32_t>((yend - ystart) * static_cast<float>(impulse->input_height));
+ results[ix].value = score;
+ }
+ else {
+ results[ix].value = 0.0f;
+ }
+ }
+ result->bounding_boxes = results.data();
+ result->bounding_boxes_count = results.size();
+
+ return EI_IMPULSE_OK;
+#else
+ return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE;
+#endif
+}
+
+/**
+ * Fill the result structure from a quantized output tensor
+ */
+__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_i8(const ei_impulse_t *impulse,
+ ei_impulse_result_t *result,
+ int8_t *data,
+ float zero_point,
+ float scale,
+ bool debug) {
+ for (uint32_t ix = 0; ix < impulse->label_count; ix++) {
+ float value = static_cast<float>(data[ix] - zero_point) * scale;
+
+ if (debug) {
+ ei_printf("%s:\t", impulse->categories[ix]);
+ ei_printf_float(value);
+ ei_printf("\n");
+ }
+ result->classification[ix].label = impulse->categories[ix];
+ result->classification[ix].value = value;
+ }
+
+ return EI_IMPULSE_OK;
+}
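The int8 path above applies the standard affine dequantization, value = scale * (q - zero_point). A small worked example with assumed quantization parameters (scale 1/256, zero point -128, typical for an int8 softmax output):

```cpp
// Assumed example values, for illustration only.
int8_t q = 77;                  // raw quantized score from the output tensor
float zero_point = -128.0f;     // hypothetical zero point
float scale = 1.0f / 256.0f;    // hypothetical scale
float value = static_cast<float>(q - zero_point) * scale;
// (77 - (-128)) / 256 = 205 / 256 = 0.80078125 -> ~80% confidence
```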
+
+/**
+ * Fill the result structure from an unquantized output tensor
+ */
+__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32(const ei_impulse_t *impulse,
+ ei_impulse_result_t *result,
+ float *data,
+ bool debug) {
+ for (uint32_t ix = 0; ix < impulse->label_count; ix++) {
+ float value = data[ix];
+
+ if (debug) {
+ ei_printf("%s:\t", impulse->categories[ix]);
+ ei_printf_float(value);
+ ei_printf("\n");
+ }
+ result->classification[ix].label = impulse->categories[ix];
+ result->classification[ix].value = value;
+ }
+
+ return EI_IMPULSE_OK;
+}
+
+/**
+ * Fill the visual anomaly result structures from an unquantized output tensor
+ */
+__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_visual_ad_struct_f32(const ei_impulse_t *impulse,
+ ei_impulse_result_t *result,
+ float *data,
+ bool debug) {
+#ifdef EI_CLASSIFIER_HAS_VISUAL_ANOMALY
+ float max_val = 0;
+ float sum_val = 0;
+ // the feature extractor output will be 1/8 of input
+ // due to the cut-off layer chosen in MobileNetV2
+ uint32_t grid_size_x = (impulse->input_width / 8) / 2 - 1;
+ uint32_t grid_size_y = (impulse->input_height / 8) / 2 - 1;
+
+ for (uint32_t ix = 0; ix < grid_size_x * grid_size_y; ix++) {
+ float value = data[ix];
+ sum_val += value;
+ if (value > max_val) {
+ max_val = value;
+ }
+ }
+
+ result->visual_ad_result.mean_value = sum_val / (grid_size_x * grid_size_y);
+ result->visual_ad_result.max_value = max_val;
+
+ static ei_vector<ei_impulse_result_bounding_box_t> results;
+
+ int added_boxes_count = 0;
+ results.clear();
+
+ for (uint32_t x = 0; x <= grid_size_x - 1; x++) {
+ for (uint32_t y = 0; y <= grid_size_y - 1; y++) {
+ if (data[x * grid_size_x + y] >= impulse->object_detection_threshold) {
+ ei_impulse_result_bounding_box_t tmp = {
+ .label = "anomaly",
+ .x = static_cast<uint32_t>(y * (static_cast<float>(impulse->input_height) / grid_size_y)),
+ .y = static_cast<uint32_t>(x * (static_cast<float>(impulse->input_width) / grid_size_x)),
+ .width = (impulse->input_width / grid_size_x),
+ .height = (impulse->input_height / grid_size_y),
+ .value = data[x * grid_size_x + y]
+ };
+
+ results.push_back(tmp);
+ added_boxes_count++;
+ }
+ }
+ }
+
+ // if we didn't detect min required objects, fill the rest with fixed value
+ if (added_boxes_count < impulse->object_detection_count) {
+ results.resize(impulse->object_detection_count);
+ for (size_t ix = added_boxes_count; ix < impulse->object_detection_count; ix++) {
+ results[ix].value = 0.0f;
+ }
+ }
+
+ result->visual_ad_grid_cells = results.data();
+ result->visual_ad_count = results.size();
+#endif
+ return EI_IMPULSE_OK;
+}
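+
+// Sizing sketch for the anomaly grid above (assumed 160x160 input, purely
+// illustrative): the feature extractor output is 1/8 of the input, so
+// grid_size_x = (160 / 8) / 2 - 1 = 9, i.e. a 9x9 grid of anomaly scores in
+// which each cell covers input_width / grid_size_x pixels of the image.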
+
+/**
+ * Fill the result structure from an unquantized output tensor
+ */
+__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32_yolov5(const ei_impulse_t *impulse,
+ ei_impulse_result_t *result,
+ int version,
+ float *data,
+ size_t output_features_count) {
+#ifdef EI_HAS_YOLOV5
+ static std::vector<ei_impulse_result_bounding_box_t> results;
+ results.clear();
+
+ size_t col_size = 5 + impulse->label_count;
+ size_t row_count = output_features_count / col_size;
+
+ for (size_t ix = 0; ix < row_count; ix++) {
+ size_t base_ix = ix * col_size;
+ float xc = data[base_ix + 0];
+ float yc = data[base_ix + 1];
+ float w = data[base_ix + 2];
+ float h = data[base_ix + 3];
+ float x = xc - (w / 2.0f);
+ float y = yc - (h / 2.0f);
+ if (x < 0) {
+ x = 0;
+ }
+ if (y < 0) {
+ y = 0;
+ }
+ if (x + w > impulse->input_width) {
+ w = impulse->input_width - x;
+ }
+ if (y + h > impulse->input_height) {
+ h = impulse->input_height - y;
+ }
+
+ if (w < 0 || h < 0) {
+ continue;
+ }
+
+ float score = data[base_ix + 4];
+
+ uint32_t label = 0;
+ for (size_t lx = 0; lx < impulse->label_count; lx++) {
+ float l = data[base_ix + 5 + lx];
+ if (l > 0.5f) {
+ label = lx;
+ break;
+ }
+ }
+
+ if (score >= impulse->object_detection_threshold && score <= 1.0f) {
+ ei_impulse_result_bounding_box_t r;
+ r.label = impulse->categories[label];
+
+ if (version != 5) {
+ x *= static_cast<float>(impulse->input_width);
+ y *= static_cast<float>(impulse->input_height);
+ w *= static_cast<float>(impulse->input_width);
+ h *= static_cast<float>(impulse->input_height);
+ }
+
+ r.x = static_cast<uint32_t>(x);
+ r.y = static_cast<uint32_t>(y);
+ r.width = static_cast<uint32_t>(w);
+ r.height = static_cast<uint32_t>(h);
+ r.value = score;
+ results.push_back(r);
+ }
+ }
+
+ EI_IMPULSE_ERROR nms_res = ei_run_nms(&results);
+ if (nms_res != EI_IMPULSE_OK) {
+ return nms_res;
+ }
+
+ // if we didn't detect min required objects, fill the rest with fixed value
+ size_t added_boxes_count = results.size();
+ size_t min_object_detection_count = impulse->object_detection_count;
+ if (added_boxes_count < min_object_detection_count) {
+ results.resize(min_object_detection_count);
+ for (size_t ix = added_boxes_count; ix < min_object_detection_count; ix++) {
+ results[ix].value = 0.0f;
+ }
+ }
+
+ result->bounding_boxes = results.data();
+ result->bounding_boxes_count = results.size();
+
+ return EI_IMPULSE_OK;
+#else
+ return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE;
+#endif
+}
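+
+// Row layout sketch for the YOLOv5 decode above (hypothetical 2-label model):
+// col_size = 5 + 2 = 7 floats per candidate, [xc, yc, w, h, score, l0, l1].
+// A normalized row with xc = 0.5, yc = 0.5, w = 0.2, h = 0.4 becomes the
+// corner box x = 0.5 - 0.2/2 = 0.4, y = 0.5 - 0.4/2 = 0.3, which is then
+// scaled by input_width/input_height whenever version != 5.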
+
+/**
+ * Fill the result structure from a quantized output tensor
+*/
+template <typename T>
+__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_quantized_yolov5(const ei_impulse_t *impulse,
+ ei_impulse_result_t *result,
+ int version,
+ T *data,
+ float zero_point,
+ float scale,
+ size_t output_features_count) {
+#ifdef EI_HAS_YOLOV5
+ static std::vector<ei_impulse_result_bounding_box_t> results;
+ results.clear();
+
+ size_t col_size = 5 + impulse->label_count;
+ size_t row_count = output_features_count / col_size;
+
+ for (size_t ix = 0; ix < row_count; ix++) {
+ size_t base_ix = ix * col_size;
+ float xc = (data[base_ix + 0] - zero_point) * scale;
+ float yc = (data[base_ix + 1] - zero_point) * scale;
+ float w = (data[base_ix + 2] - zero_point) * scale;
+ float h = (data[base_ix + 3] - zero_point) * scale;
+ float x = xc - (w / 2.0f);
+ float y = yc - (h / 2.0f);
+ if (x < 0) {
+ x = 0;
+ }
+ if (y < 0) {
+ y = 0;
+ }
+ if (x + w > impulse->input_width) {
+ w = impulse->input_width - x;
+ }
+ if (y + h > impulse->input_height) {
+ h = impulse->input_height - y;
+ }
+
+ if (w < 0 || h < 0) {
+ continue;
+ }
+
+ float score = (data[base_ix + 4] - zero_point) * scale;
+
+ uint32_t label = 0;
+ for (size_t lx = 0; lx < impulse->label_count; lx++) {
+ float l = (data[base_ix + 5 + lx] - zero_point) * scale;
+ if (l > 0.5f) {
+ label = lx;
+ break;
+ }
+ }
+
+ if (score >= impulse->object_detection_threshold && score <= 1.0f) {
+ ei_impulse_result_bounding_box_t r;
+ r.label = ei_classifier_inferencing_categories[label];
+
+ if (version != 5) {
+ x *= static_cast<float>(impulse->input_width);
+ y *= static_cast<float>(impulse->input_height);
+ w *= static_cast<float>(impulse->input_width);
+ h *= static_cast<float>(impulse->input_height);
+ }
+
+ r.x = static_cast<uint32_t>(x);
+ r.y = static_cast<uint32_t>(y);
+ r.width = static_cast<uint32_t>(w);
+ r.height = static_cast<uint32_t>(h);
+ r.value = score;
+ results.push_back(r);
+ }
+ }
+
+ EI_IMPULSE_ERROR nms_res = ei_run_nms(&results);
+ if (nms_res != EI_IMPULSE_OK) {
+ return nms_res;
+ }
+
+ // if we didn't detect min required objects, fill the rest with fixed value
+ size_t added_boxes_count = results.size();
+ size_t min_object_detection_count = impulse->object_detection_count;
+ if (added_boxes_count < min_object_detection_count) {
+ results.resize(min_object_detection_count);
+ for (size_t ix = added_boxes_count; ix < min_object_detection_count; ix++) {
+ results[ix].value = 0.0f;
+ }
+ }
+
+ result->bounding_boxes = results.data();
+ result->bounding_boxes_count = results.size();
+
+ return EI_IMPULSE_OK;
+#else
+ return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE;
+#endif
+}
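+
+// Usage sketch for the template above (assumed int8 output tensor; names and
+// values are illustrative, not a fixed API): T is instantiated with the raw
+// tensor element type, and every value is dequantized as
+// (data[i] - zero_point) * scale before the float YOLOv5 decode is applied.
+// fill_result_struct_quantized_yolov5<int8_t>(impulse, result, 6 /* version */,
+//     tensor_data, zero_point, scale, output_features_count);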
+
+/**
+ * Fill the result structure from an unquantized output tensor
+ * (we don't support quantized here a.t.m.)
+ */
+__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32_yolox(const ei_impulse_t *impulse, ei_impulse_result_t *result,
+ float *data,
+ size_t output_features_count) {
+#ifdef EI_HAS_YOLOX
+ static std::vector<ei_impulse_result_bounding_box_t> results;
+ results.clear();
+
+ // START: def yolox_postprocess()
+
+ // if not p6:
+ // strides = [8, 16, 32]
+ // else:
+ // strides = [8, 16, 32, 64]
+ const std::vector<int> strides { 8, 16, 32 };
+
+ // hsizes = [img_size[0] // stride for stride in strides]
+ // wsizes = [img_size[1] // stride for stride in strides]
+ std::vector<int> hsizes(strides.size());
+ std::vector<int> wsizes(strides.size());
+ for (int ix = 0; ix < (int)strides.size(); ix++) {
+ hsizes[ix] = (int)floor((float)impulse->input_width / (float)strides[ix]);
+ wsizes[ix] = (int)floor((float)impulse->input_height / (float)strides[ix]);
+ }
+
+ // for hsize, wsize, stride in zip(hsizes, wsizes, strides):
+ // grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
+ // grids.append(grid)
+ // shape = grid.shape[:2]
+ // expanded_strides.append(np.full((*shape, 1), stride))
+ std::vector<matrix_i32_t *> grids;
+ std::vector<matrix_i32_t *> expanded_strides;
+
+ for (int ix = 0; ix < (int)strides.size(); ix++) {
+ int hsize = hsizes.at(ix);
+ int wsize = wsizes.at(ix);
+ int stride = strides.at(ix);
+
+ // xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
+ // grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
+ matrix_i32_t *grid = new matrix_i32_t(hsize * wsize, 2);
+ int grid_ix = 0;
+ for (int h = 0; h < hsize; h++) {
+ for (int w = 0; w < wsize; w++) {
+ grid->buffer[grid_ix + 0] = w;
+ grid->buffer[grid_ix + 1] = h;
+ grid_ix += 2;
+ }
+ }
+ grids.push_back(grid);
+
+ // shape = grid.shape[:2]
+ // expanded_strides.append(np.full((*shape, 1), stride))
+ matrix_i32_t *expanded_stride = new matrix_i32_t(hsize * wsize, 1);
+ for (int ix = 0; ix < hsize * wsize; ix++) {
+ expanded_stride->buffer[ix] = stride;
+ }
+ expanded_strides.push_back(expanded_stride);
+ }
+
+ // grids = np.concatenate(grids, 1)
+ int total_grid_rows = 0;
+ for (auto g : grids) {
+ total_grid_rows += g->rows;
+ }
+ matrix_i32_t c_grid(total_grid_rows, 2);
+ int c_grid_ix = 0;
+ for (auto g : grids) {
+ for (int row = 0; row < (int)g->rows; row++) {
+ c_grid.buffer[c_grid_ix + 0] = g->buffer[(row * 2) + 0];
+ c_grid.buffer[c_grid_ix + 1] = g->buffer[(row * 2) + 1];
+ c_grid_ix += 2;
+ }
+ delete g;
+ }
+
+ // expanded_strides = np.concatenate(expanded_strides, 1)
+ int total_stride_rows = 0;
+ for (auto g : expanded_strides) {
+ total_stride_rows += g->rows;
+ }
+ matrix_i32_t c_expanded_strides(total_stride_rows, 1);
+ int c_expanded_strides_ix = 0;
+ for (auto g : expanded_strides) {
+ for (int row = 0; row < (int)g->rows; row++) {
+ c_expanded_strides.buffer[c_expanded_strides_ix + 0] = g->buffer[(row * 1) + 0];
+ c_expanded_strides_ix += 1;
+ }
+ delete g;
+ }
+
+ const int output_rows = output_features_count / (5 + impulse->label_count);
+ matrix_t outputs(output_rows, 5 + impulse->label_count, data);
+ for (int row = 0; row < (int)outputs.rows; row++) {
+ float v0 = outputs.buffer[(row * outputs.cols) + 0];
+ float v1 = outputs.buffer[(row * outputs.cols) + 1];
+ float v2 = outputs.buffer[(row * outputs.cols) + 2];
+ float v3 = outputs.buffer[(row * outputs.cols) + 3];
+
+ float cgrid0 = (float)c_grid.buffer[(row * c_grid.cols) + 0];
+ float cgrid1 = (float)c_grid.buffer[(row * c_grid.cols) + 1];
+
+ float stride = (float)c_expanded_strides.buffer[row];
+
+ // outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides
+ outputs.buffer[(row * outputs.cols) + 0] = (v0 + cgrid0) * stride;
+ outputs.buffer[(row * outputs.cols) + 1] = (v1 + cgrid1) * stride;
+
+ // outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides
+ outputs.buffer[(row * outputs.cols) + 2] = exp(v2) * stride;
+ outputs.buffer[(row * outputs.cols) + 3] = exp(v3) * stride;
+ }
+
+ // END: def yolox_postprocess()
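+ // Worked example of the grid decode above (illustrative numbers): a cell
+ // at grid position (3, 2) with stride 8 and raw outputs (0.5, 0.25, v2, v3)
+ // decodes to a center of ((0.5 + 3) * 8, (0.25 + 2) * 8) = (28, 18) pixels
+ // and a box size of (exp(v2) * 8, exp(v3) * 8).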
+
+ // boxes = predictions[:, :4]
+ matrix_t boxes(outputs.rows, 4);
+ for (int row = 0; row < (int)outputs.rows; row++) {
+ boxes.buffer[(row * boxes.cols) + 0] = outputs.buffer[(row * outputs.cols) + 0];
+ boxes.buffer[(row * boxes.cols) + 1] = outputs.buffer[(row * outputs.cols) + 1];
+ boxes.buffer[(row * boxes.cols) + 2] = outputs.buffer[(row * outputs.cols) + 2];
+ boxes.buffer[(row * boxes.cols) + 3] = outputs.buffer[(row * outputs.cols) + 3];
+ }
+
+ // scores = predictions[:, 4:5] * predictions[:, 5:]
+ matrix_t scores(outputs.rows, impulse->label_count);
+ for (int row = 0; row < (int)outputs.rows; row++) {
+ float confidence = outputs.buffer[(row * outputs.cols) + 4];
+ for (int cc = 0; cc < impulse->label_count; cc++) {
+ scores.buffer[(row * scores.cols) + cc] = confidence * outputs.buffer[(row * outputs.cols) + (5 + cc)];
+ }
+ }
+
+ // iterate through scores to see if we have anything with confidence
+ for (int row = 0; row < (int)scores.rows; row++) {
+ for (int col = 0; col < (int)scores.cols; col++) {
+ float confidence = scores.buffer[(row * scores.cols) + col];
+
+ if (confidence >= impulse->object_detection_threshold && confidence <= 1.0f) {
+ ei_impulse_result_bounding_box_t r;
+ r.label = impulse->categories[col];
+ r.value = confidence;
+
+ // now find the box...
+ float xcenter = boxes.buffer[(row * boxes.cols) + 0];
+ float ycenter = boxes.buffer[(row * boxes.cols) + 1];
+ float width = boxes.buffer[(row * boxes.cols) + 2];
+ float height = boxes.buffer[(row * boxes.cols) + 3];
+
+ int x = (int)(xcenter - (width / 2.0f));
+ int y = (int)(ycenter - (height / 2.0f));
+
+ if (x < 0) {
+ x = 0;
+ }
+ if (x > (int)impulse->input_width) {
+ x = impulse->input_width;
+ }
+ if (y < 0) {
+ y = 0;
+ }
+ if (y > (int)impulse->input_height) {
+ y = impulse->input_height;
+ }
+
+ r.x = x;
+ r.y = y;
+ r.width = (int)round(width);
+ r.height = (int)round(height);
+
+ results.push_back(r);
+ }
+ }
+ }
+
+ EI_IMPULSE_ERROR nms_res = ei_run_nms(&results);
+ if (nms_res != EI_IMPULSE_OK) {
+ return nms_res;
+ }
+
+ // if we didn't detect min required objects, fill the rest with fixed value
+ size_t added_boxes_count = results.size();
+ size_t min_object_detection_count = impulse->object_detection_count;
+ if (added_boxes_count < min_object_detection_count) {
+ results.resize(min_object_detection_count);
+ for (size_t ix = added_boxes_count; ix < min_object_detection_count; ix++) {
+ results[ix].value = 0.0f;
+ }
+ }
+
+ result->bounding_boxes = results.data();
+ result->bounding_boxes_count = results.size();
+
+ return EI_IMPULSE_OK;
+#else
+ return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE;
+#endif // EI_HAS_YOLOX
+}
+
+/**
+ * Fill the result structure from an unquantized output tensor
+ * (we don't support quantized here a.t.m.)
+ */
+__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32_yolox_detect(const ei_impulse_t *impulse, ei_impulse_result_t *result,
+ float *data,
+ size_t output_features_count) {
+#ifdef EI_HAS_YOLOX
+ static std::vector<ei_impulse_result_bounding_box_t> results;
+ results.clear();
+
+ // expected format [xmin ymin xmax ymax score label]
+ const int output_rows = output_features_count / 6;
+ matrix_t outputs(output_rows, 6, data);
+
+ // iterate through scores to see if we have anything with confidence
+ for (int row = 0; row < (int)outputs.rows; row++) {
+ float confidence = outputs.buffer[(row * outputs.cols) + 4];
+ int class_idx = (int)outputs.buffer[(row * outputs.cols) + 5];
+
+ if (confidence >= impulse->object_detection_threshold && confidence <= 1.0f) {
+ ei_impulse_result_bounding_box_t r;
+ r.label = ei_classifier_inferencing_categories[class_idx];
+ r.value = confidence;
+
+ // now find the box...
+ float xmin = outputs.buffer[(row * outputs.cols) + 0];
+ float ymin = outputs.buffer[(row * outputs.cols) + 1];
+ float xmax = outputs.buffer[(row * outputs.cols) + 2];
+ float ymax = outputs.buffer[(row * outputs.cols) + 3];
+
+ float width = xmax - xmin;
+ float height = ymax - ymin;
+
+ int x = (int)xmin;
+ int y = (int)ymin;
+
+ if (x < 0) {
+ x = 0;
+ }
+ if (x > (int)impulse->input_width) {
+ x = impulse->input_width;
+ }
+ if (y < 0) {
+ y = 0;
+ }
+ if (y > (int)impulse->input_height) {
+ y = impulse->input_height;
+ }
+
+ r.x = x;
+ r.y = y;
+ r.width = (int)round(width);
+ r.height = (int)round(height);
+
+ results.push_back(r);
+ }
+ }
+
+ result->bounding_boxes = results.data();
+ result->bounding_boxes_count = results.size();
+
+ return EI_IMPULSE_OK;
+#else
+ return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE;
+#endif // EI_HAS_YOLOX
+}
+
+/**
+ * Fill the result structure from an unquantized output tensor
+ * (we don't support quantized here a.t.m.)
+ */
+__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32_yolov7(const ei_impulse_t *impulse, ei_impulse_result_t *result,
+ float *data,
+ size_t output_features_count) {
+#ifdef EI_HAS_YOLOV7
+ static std::vector<ei_impulse_result_bounding_box_t> results;
+ results.clear();
+
+ size_t col_size = 7;
+ size_t row_count = output_features_count / col_size;
+
+ // output is:
+ // batch_id, xmin, ymin, xmax, ymax, cls_id, score
+ for (size_t ix = 0; ix < row_count; ix++) {
+ size_t base_ix = ix * col_size;
+ float xmin = data[base_ix + 1];
+ float ymin = data[base_ix + 2];
+ float xmax = data[base_ix + 3];
+ float ymax = data[base_ix + 4];
+ uint32_t label = (uint32_t)data[base_ix + 5];
+ float score = data[base_ix + 6];
+
+ if (score >= impulse->object_detection_threshold && score <= 1.0f) {
+ ei_impulse_result_bounding_box_t r;
+ r.label = ei_classifier_inferencing_categories[label];
+
+ r.x = static_cast<uint32_t>(xmin);
+ r.y = static_cast<uint32_t>(ymin);
+ r.width = static_cast<uint32_t>(xmax - xmin);
+ r.height = static_cast<uint32_t>(ymax - ymin);
+ r.value = score;
+ results.push_back(r);
+ }
+ }
+
+ // if we didn't detect min required objects, fill the rest with fixed value
+ size_t added_boxes_count = results.size();
+ size_t min_object_detection_count = impulse->object_detection_count;
+ if (added_boxes_count < min_object_detection_count) {
+ results.resize(min_object_detection_count);
+ for (size_t ix = added_boxes_count; ix < min_object_detection_count; ix++) {
+ results[ix].value = 0.0f;
+ }
+ }
+
+ result->bounding_boxes = results.data();
+ result->bounding_boxes_count = results.size();
+
+ return EI_IMPULSE_OK;
+#else
+ return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE;
+#endif // #ifdef EI_HAS_YOLOV7
+}
+
+#if EI_CLASSIFIER_SINGLE_FEATURE_INPUT == 0
+bool find_mtx_by_idx(ei_feature_t* mtx, ei::matrix_t** matrix, uint32_t mtx_id, size_t mtx_size) {
+ for (size_t i = 0; i < mtx_size; i++) {
+ // guard against unpopulated feature slots; check the matrix pointer,
+ // as `&mtx[i]` can never be NULL for a valid array
+ if (mtx[i].matrix == NULL) {
+ continue;
+ }
+ if (mtx[i].blockId == mtx_id || mtx[i].blockId == 0) {
+ *matrix = mtx[i].matrix;
+ return true;
+ }
+ }
+ return false;
+}
+#endif
+
+#endif // _EI_CLASSIFIER_FILL_RESULT_STRUCT_H_
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_model_types.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_model_types.h
new file mode 100644
index 0000000..dd9156c
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_model_types.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef _EDGE_IMPULSE_MODEL_TYPES_H_
+#define _EDGE_IMPULSE_MODEL_TYPES_H_
+
+#include <stdint.h>
+
+#include "edge-impulse-sdk/classifier/ei_classifier_types.h"
+#include "edge-impulse-sdk/dsp/numpy.hpp"
+#if EI_CLASSIFIER_USE_FULL_TFLITE || (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_AKIDA) || (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_MEMRYX)
+#include "tensorflow-lite/tensorflow/lite/c/common.h"
+#else
+#include "edge-impulse-sdk/tensorflow/lite/c/common.h"
+#endif // EI_CLASSIFIER_USE_FULL_TFLITE
+
+#define EI_CLASSIFIER_NONE 255
+#define EI_CLASSIFIER_UTENSOR 1
+#define EI_CLASSIFIER_TFLITE 2
+#define EI_CLASSIFIER_CUBEAI 3
+#define EI_CLASSIFIER_TFLITE_FULL 4
+#define EI_CLASSIFIER_TENSAIFLOW 5
+#define EI_CLASSIFIER_TENSORRT 6
+#define EI_CLASSIFIER_DRPAI 7
+#define EI_CLASSIFIER_TFLITE_TIDL 8
+#define EI_CLASSIFIER_AKIDA 9
+#define EI_CLASSIFIER_SYNTIANT 10
+#define EI_CLASSIFIER_ONNX_TIDL 11
+#define EI_CLASSIFIER_MEMRYX 12
+
+#define EI_CLASSIFIER_SENSOR_UNKNOWN -1
+#define EI_CLASSIFIER_SENSOR_MICROPHONE 1
+#define EI_CLASSIFIER_SENSOR_ACCELEROMETER 2
+#define EI_CLASSIFIER_SENSOR_CAMERA 3
+#define EI_CLASSIFIER_SENSOR_9DOF 4
+#define EI_CLASSIFIER_SENSOR_ENVIRONMENTAL 5
+#define EI_CLASSIFIER_SENSOR_FUSION 6
+
+// These must match the enum values in TensorFlow Lite's "TfLiteType"
+#define EI_CLASSIFIER_DATATYPE_FLOAT32 1
+#define EI_CLASSIFIER_DATATYPE_INT8 9
+
+#define EI_CLASSIFIER_LAST_LAYER_UNKNOWN -1
+#define EI_CLASSIFIER_LAST_LAYER_SSD 1
+#define EI_CLASSIFIER_LAST_LAYER_FOMO 2
+#define EI_CLASSIFIER_LAST_LAYER_YOLOV5 3
+#define EI_CLASSIFIER_LAST_LAYER_YOLOX 4
+#define EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI 5
+#define EI_CLASSIFIER_LAST_LAYER_YOLOV7 6
+
+#define EI_CLASSIFIER_IMAGE_SCALING_NONE 0
+#define EI_CLASSIFIER_IMAGE_SCALING_0_255 1
+#define EI_CLASSIFIER_IMAGE_SCALING_TORCH 2
+#define EI_CLASSIFIER_IMAGE_SCALING_MIN1_1 3
+
+struct ei_impulse;
+
+typedef struct {
+ ei::matrix_t* matrix;
+ uint32_t blockId;
+} ei_feature_t;
+
+typedef struct {
+ uint16_t implementation_version;
+ bool is_configured;
+ uint32_t average_window_duration_ms;
+ float detection_threshold;
+ uint32_t suppression_ms;
+ uint32_t suppression_flags;
+} ei_model_performance_calibration_t;
+
+typedef struct {
+ uint32_t blockId;
+ size_t n_output_features;
+ int (*extract_fn)(ei::signal_t *signal, ei::matrix_t *output_matrix, void *config, const float frequency);
+ void *config;
+ uint8_t *axes;
+ size_t axes_size;
+} ei_model_dsp_t;
+
+typedef struct {
+ float *centroid;
+ float max_error;
+} ei_classifier_anom_cluster_t;
+
+typedef struct {
+ uint32_t blockId;
+ bool keep_output;
+ EI_IMPULSE_ERROR (*infer_fn)(const ei_impulse *impulse, ei_feature_t *fmatrix, uint32_t* input_block_ids, uint32_t input_block_ids_size, ei_impulse_result_t *result, void *config, bool debug);
+ void *config;
+ int image_scaling;
+ const uint32_t* input_block_ids;
+ const uint32_t input_block_ids_size;
+ uint32_t output_features_count;
+} ei_learning_block_t;
+
+typedef struct {
+ uint16_t implementation_version;
+ uint8_t input_datatype;
+ bool input_quantized;
+ float input_scale;
+ float input_zeropoint;
+ uint8_t output_datatype;
+ bool output_quantized;
+ float output_scale;
+ float output_zeropoint;
+} ei_config_tensaiflow_graph_t;
+
+typedef struct {
+ uint16_t implementation_version;
+ const unsigned char *model;
+ size_t model_size;
+ size_t arena_size;
+} ei_config_tflite_graph_t;
+
+typedef struct {
+ uint16_t implementation_version;
+ TfLiteStatus (*model_init)(void*(*alloc_fnc)(size_t, size_t));
+ TfLiteStatus (*model_invoke)();
+ TfLiteStatus (*model_reset)(void (*free)(void* ptr));
+ TfLiteStatus (*model_input)(int, TfLiteTensor*);
+ TfLiteStatus (*model_output)(int, TfLiteTensor*);
+} ei_config_tflite_eon_graph_t;
+
+typedef struct {
+ uint16_t implementation_version;
+ uint32_t block_id;
+ /* object detection */
+ bool object_detection;
+ int8_t object_detection_last_layer;
+ uint8_t output_data_tensor;
+ uint8_t output_labels_tensor;
+ uint8_t output_score_tensor;
+ /* tflite graph params */
+ bool quantized;
+ bool compiled;
+ /* tflite graph config pointer */
+ void *graph_config;
+} ei_learning_block_config_tflite_graph_t;
+
+typedef struct {
+ uint16_t implementation_version;
+ const uint16_t *anom_axis;
+ uint16_t anom_axes_size;
+ const ei_classifier_anom_cluster_t *anom_clusters;
+ uint16_t anom_cluster_count;
+ const float *anom_scale;
+ const float *anom_mean;
+} ei_learning_block_config_anomaly_kmeans_t;
+
+typedef struct {
+ uint16_t implementation_version;
+ const uint16_t *anom_axis;
+ uint16_t anom_axes_size;
+ float anomaly_threshold;
+ bool visual;
+ void* graph_config;
+} ei_learning_block_config_anomaly_gmm_t;
+
+typedef struct ei_impulse {
+ /* project details */
+ uint32_t project_id;
+ const char *project_owner;
+ const char *project_name;
+ uint32_t deploy_version;
+
+ /* DSP details */
+ uint32_t nn_input_frame_size;
+ uint32_t raw_sample_count;
+ uint32_t raw_samples_per_frame;
+ uint32_t dsp_input_frame_size;
+ uint32_t input_width;
+ uint32_t input_height;
+ uint32_t input_frames;
+ float interval_ms;
+ float frequency;
+ size_t dsp_blocks_size;
+ ei_model_dsp_t *dsp_blocks;
+
+ /* object detection */
+ bool object_detection;
+ uint16_t object_detection_count;
+ float object_detection_threshold;
+ int8_t object_detection_last_layer;
+ uint32_t fomo_output_size;
+ uint32_t tflite_output_features_count;
+
+ /* learning blocks */
+ const size_t learning_blocks_size;
+ const ei_learning_block_t *learning_blocks;
+
+ /* inference parameters */
+ uint32_t inferencing_engine;
+
+ /* sensors and on-device inference */
+ uint32_t sensor;
+ const char *fusion_string;
+ uint32_t slice_size;
+ uint32_t slices_per_model_window;
+
+ /* output details */
+ uint16_t has_anomaly;
+ uint16_t label_count;
+ const ei_model_performance_calibration_t calibration;
+ const char **categories;
+} ei_impulse_t;
+
+typedef struct {
+ uint32_t block_id;
+ uint16_t implementation_version;
+ int axes;
+ const unsigned char *model;
+ size_t model_size;
+ size_t arena_size;
+} ei_dsp_config_tflite_t;
+
+typedef struct {
+ uint32_t block_id;
+ uint16_t implementation_version;
+ int axes;
+ TfLiteStatus (*init_fn)(void*(*alloc_fnc)(size_t, size_t));
+ TfLiteStatus (*invoke_fn)();
+ TfLiteStatus (*reset_fn)(void (*free)(void* ptr));
+ TfLiteStatus (*input_fn)(int, TfLiteTensor*);
+ TfLiteStatus (*output_fn)(int, TfLiteTensor*);
+} ei_dsp_config_tflite_eon_t;
+
+#endif // _EDGE_IMPULSE_MODEL_TYPES_H_
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_nms.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_nms.h
new file mode 100644
index 0000000..5f6a4aa
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_nms.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef _EDGE_IMPULSE_NMS_H_
+#define _EDGE_IMPULSE_NMS_H_
+
+#include "model-parameters/model_metadata.h"
+#if EI_CLASSIFIER_HAS_MODEL_VARIABLES == 1
+#include "model-parameters/model_variables.h"
+#endif
+#include "edge-impulse-sdk/classifier/ei_model_types.h"
+#include "edge-impulse-sdk/classifier/ei_classifier_types.h"
+#include "edge-impulse-sdk/porting/ei_classifier_porting.h"
+
+#if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOX)
+
+// The code below comes from tensorflow/lite/kernels/internal/reference/non_max_suppression.h
+// Copyright 2019 The TensorFlow Authors. All rights reserved.
+// Licensed under the Apache License, Version 2.0
+#include <algorithm>
+#include <cmath>
+#include <deque>
+#include <queue>
+#include <vector>
+
+// A pair of diagonal corners of the box.
+struct BoxCornerEncoding {
+ float y1;
+ float x1;
+ float y2;
+ float x2;
+};
+
+static inline float ComputeIntersectionOverUnion(const float* boxes, const int i,
+ const int j) {
+ auto& box_i = reinterpret_cast<const BoxCornerEncoding *>(boxes)[i];
+ auto& box_j = reinterpret_cast<const BoxCornerEncoding *>(boxes)[j];
+ const float box_i_y_min = std::min(box_i.y1, box_i.y2);
+ const float box_i_y_max = std::max(box_i.y1, box_i.y2);
+ const float box_i_x_min = std::min(box_i.x1, box_i.x2);
+ const float box_i_x_max = std::max(box_i.x1, box_i.x2);
+ const float box_j_y_min = std::min(box_j.y1, box_j.y2);
+ const float box_j_y_max = std::max(box_j.y1, box_j.y2);
+ const float box_j_x_min = std::min(box_j.x1, box_j.x2);
+ const float box_j_x_max = std::max(box_j.x1, box_j.x2);
+
+ const float area_i =
+ (box_i_y_max - box_i_y_min) * (box_i_x_max - box_i_x_min);
+ const float area_j =
+ (box_j_y_max - box_j_y_min) * (box_j_x_max - box_j_x_min);
+ if (area_i <= 0 || area_j <= 0) return 0.0;
+ const float intersection_ymax = std::min(box_i_y_max, box_j_y_max);
+ const float intersection_xmax = std::min(box_i_x_max, box_j_x_max);
+ const float intersection_ymin = std::max(box_i_y_min, box_j_y_min);
+ const float intersection_xmin = std::max(box_i_x_min, box_j_x_min);
+ const float intersection_area =
+ std::max(intersection_ymax - intersection_ymin, 0.0f) *
+ std::max(intersection_xmax - intersection_xmin, 0.0f);
+ return intersection_area / (area_i + area_j - intersection_area);
+}
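+
+// Worked IoU example for the function above (illustrative boxes): for
+// box_i = [0, 0, 2, 2] and box_j = [1, 1, 3, 3] both areas are 4 and the
+// intersection is 1x1 = 1, so IoU = 1 / (4 + 4 - 1) = 1/7 ~= 0.143, below
+// the 0.2 threshold that ei_run_nms below passes in, so neither box would
+// suppress the other.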
+
+// Implements (Single-Class) Soft NMS (with Gaussian weighting).
+// Supports functionality of TensorFlow ops NonMaxSuppressionV4 & V5.
+// Reference: "Soft-NMS - Improving Object Detection With One Line of Code"
+// [Bodla et al, https://arxiv.org/abs/1704.04503]
+// Implementation adapted from the TensorFlow NMS code at
+// tensorflow/core/kernels/non_max_suppression_op.cc.
+//
+// Arguments:
+// boxes: box encodings in format [y1, x1, y2, x2], shape: [num_boxes, 4]
+// num_boxes: number of candidates
+// scores: scores for candidate boxes, in the same order. shape: [num_boxes]
+// max_output_size: the maximum number of selections.
+// iou_threshold: Intersection-over-Union (IoU) threshold for NMS
+// score_threshold: All candidate scores below this value are rejected
+// soft_nms_sigma: Soft NMS parameter, used for decaying scores
+//
+// Outputs:
+// selected_indices: all the selected indices. Underlying array must have
+// length >= max_output_size. Cannot be null.
+// selected_scores: scores of selected indices. Defer from original value for
+// Soft NMS. If not null, array must have length >= max_output_size.
+// num_selected_indices: Number of selections. Only these many elements are
+// set in selected_indices, selected_scores. Cannot be null.
+//
+// Assumes inputs are valid (for eg, iou_threshold must be >= 0).
+static inline void NonMaxSuppression(const float* boxes, const int num_boxes,
+ const float* scores, const int max_output_size,
+ const float iou_threshold,
+ const float score_threshold,
+ const float soft_nms_sigma, int* selected_indices,
+ float* selected_scores,
+ int* num_selected_indices) {
+ struct Candidate {
+ int index;
+ float score;
+ int suppress_begin_index;
+ };
+
+ // Priority queue to hold candidates.
+ auto cmp = [](const Candidate bs_i, const Candidate bs_j) {
+ return bs_i.score < bs_j.score;
+ };
+ std::priority_queue<Candidate, std::deque<Candidate>, decltype(cmp)>
+ candidate_priority_queue(cmp);
+ // Populate queue with candidates above the score threshold.
+ for (int i = 0; i < num_boxes; ++i) {
+ if (scores[i] > score_threshold) {
+ candidate_priority_queue.emplace(Candidate({i, scores[i], 0}));
+ }
+ }
+
+ *num_selected_indices = 0;
+ int num_outputs = std::min(static_cast<int>(candidate_priority_queue.size()),
+ max_output_size);
+ if (num_outputs == 0) return;
+
+ // NMS loop.
+ float scale = 0;
+ if (soft_nms_sigma > 0.0) {
+ scale = -0.5 / soft_nms_sigma;
+ }
+ while (*num_selected_indices < num_outputs &&
+ !candidate_priority_queue.empty()) {
+ Candidate next_candidate = candidate_priority_queue.top();
+ const float original_score = next_candidate.score;
+ candidate_priority_queue.pop();
+
+ // Overlapping boxes are likely to have similar scores, therefore we
+ // iterate through the previously selected boxes backwards in order to
+ // see if `next_candidate` should be suppressed. We also enforce a property
+ // that a candidate can be suppressed by another candidate no more than
+ // once via `suppress_begin_index` which tracks which previously selected
+ // boxes have already been compared against next_candidate prior to a given
+ // iteration. These previous selected boxes are then skipped over in the
+ // following loop.
+ bool should_hard_suppress = false;
+ for (int j = *num_selected_indices - 1;
+ j >= next_candidate.suppress_begin_index; --j) {
+ const float iou = ComputeIntersectionOverUnion(
+ boxes, next_candidate.index, selected_indices[j]);
+
+ // First decide whether to perform hard suppression.
+ if (iou >= iou_threshold) {
+ should_hard_suppress = true;
+ break;
+ }
+
+ // Suppress score if NMS sigma > 0.
+ if (soft_nms_sigma > 0.0) {
+ next_candidate.score =
+ next_candidate.score * std::exp(scale * iou * iou);
+ }
+
+ // If score has fallen below score_threshold, it won't be pushed back into
+ // the queue.
+ if (next_candidate.score <= score_threshold) break;
+ }
+ // If `next_candidate.score` has not dropped below `score_threshold`
+ // by this point, then we know that we went through all of the previous
+ // selections and can safely update `suppress_begin_index` to
+ // `selected.size()`. If on the other hand `next_candidate.score`
+ // *has* dropped below the score threshold, then since `suppress_weight`
+ // always returns values in [0, 1], further suppression by items that were
+ // not covered in the above for loop would not have caused the algorithm
+ // to select this item. We thus do the same update to
+ // `suppress_begin_index`, but really, this element will not be added back
+ // into the priority queue.
+ next_candidate.suppress_begin_index = *num_selected_indices;
+
+ if (!should_hard_suppress) {
+ if (next_candidate.score == original_score) {
+ // Suppression has not occurred, so select next_candidate.
+ selected_indices[*num_selected_indices] = next_candidate.index;
+ if (selected_scores) {
+ selected_scores[*num_selected_indices] = next_candidate.score;
+ }
+ ++*num_selected_indices;
+ }
+ if (next_candidate.score > score_threshold) {
+ // Soft suppression might have occurred and current score is still
+ // greater than score_threshold; add next_candidate back onto priority
+ // queue.
+ candidate_priority_queue.push(next_candidate);
+ }
+ }
+ }
+}
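+
+// Soft-NMS decay sketch (illustrative numbers): with soft_nms_sigma = 0.5 the
+// loop above sets scale = -0.5 / 0.5 = -1, so a candidate overlapping a
+// selected box with IoU = 0.3 keeps exp(-1 * 0.3 * 0.3) = exp(-0.09) ~= 0.914
+// of its score. ei_run_nms below passes soft_nms_sigma = 0, i.e. plain hard
+// suppression.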
+
+/**
+ * Run non-max suppression over the results array (for bounding boxes)
+ */
+EI_IMPULSE_ERROR ei_run_nms(std::vector<ei_impulse_result_bounding_box_t> *results) {
+
+ size_t bb_count = 0;
+ for (size_t ix = 0; ix < results->size(); ix++) {
+ auto bb = results->at(ix);
+ if (bb.value == 0) {
+ continue;
+ }
+ bb_count++;
+ }
+
+ float *boxes = (float*)malloc(4 * bb_count * sizeof(float));
+ float *scores = (float*)malloc(1 * bb_count * sizeof(float));
+ int *selected_indices = (int*)malloc(1 * bb_count * sizeof(int));
+ float *selected_scores = (float*)malloc(1 * bb_count * sizeof(float));
+
+ if (!scores || !boxes || !selected_indices || !selected_scores) {
+ free(boxes);
+ free(scores);
+ free(selected_indices);
+ free(selected_scores);
+ return EI_IMPULSE_OUT_OF_MEMORY;
+ }
+
+ size_t box_ix = 0;
+ for (size_t ix = 0; ix < results->size(); ix++) {
+ auto bb = results->at(ix);
+ if (bb.value == 0) {
+ continue;
+ }
+ boxes[(box_ix * 4) + 0] = bb.y;
+ boxes[(box_ix * 4) + 1] = bb.x;
+ boxes[(box_ix * 4) + 2] = bb.y + bb.height;
+ boxes[(box_ix * 4) + 3] = bb.x + bb.width;
+ scores[box_ix] = bb.value;
+
+ box_ix++;
+ }
+
+ // boxes: box encodings in format [y1, x1, y2, x2], shape: [num_boxes, 4]
+ // num_boxes: number of candidates
+ // scores: scores for candidate boxes, in the same order. shape: [num_boxes]
+ // max_output_size: the maximum number of selections.
+ // iou_threshold: Intersection-over-Union (IoU) threshold for NMS
+ // score_threshold: All candidate scores below this value are rejected
+ // soft_nms_sigma: Soft NMS parameter, used for decaying scores
+
+ int num_selected_indices;
+
+ NonMaxSuppression(
+ (const float*)boxes, // boxes
+ bb_count, // num_boxes
+ (const float*)scores, // scores
+ bb_count, // max_output_size
+ 0.2f, // iou_threshold
+ 0.0f, // score_threshold
+ 0.0f, // soft_nms_sigma
+ selected_indices,
+ selected_scores,
+ &num_selected_indices);
+
+ std::vector<ei_impulse_result_bounding_box_t> new_results;
+
+ for (size_t ix = 0; ix < (size_t)num_selected_indices; ix++) {
+ auto bb = results->at(selected_indices[ix]);
+
+ ei_printf("Found bb with label %s\n", bb.label);
+
+ ei_impulse_result_bounding_box_t r;
+ r.label = bb.label;
+ r.x = bb.x;
+ r.y = bb.y;
+ r.width = bb.width;
+ r.height = bb.height;
+ r.value = selected_scores[ix];
+ new_results.push_back(r);
+ }
+
+ results->clear();
+
+ for (size_t ix = 0; ix < new_results.size(); ix++) {
+ results->push_back(new_results[ix]);
+ }
+
+ free(boxes);
+ free(scores);
+ free(selected_indices);
+ free(selected_scores);
+
+ return EI_IMPULSE_OK;
+}
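+
+// Minimal usage sketch (hypothetical candidate list; nothing beyond
+// ei_run_nms itself is part of the SDK API surface):
+// std::vector<ei_impulse_result_bounding_box_t> boxes = { /* raw detections */ };
+// if (ei_run_nms(&boxes) == EI_IMPULSE_OK) {
+//     // boxes now holds only the detections that survived suppression
+// }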
+
+#endif // #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOX)
+
+#endif // _EDGE_IMPULSE_NMS_H_
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_performance_calibration.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_performance_calibration.h
new file mode 100644
index 0000000..a14c1e5
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_performance_calibration.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef EI_PERFORMANCE_CALIBRATION_H
+#define EI_PERFORMANCE_CALIBRATION_H
+
+/* Includes ---------------------------------------------------------------- */
+#include "edge-impulse-sdk/dsp/numpy_types.h"
+#include "edge-impulse-sdk/dsp/returntypes.hpp"
+#include "ei_model_types.h"
+
+/* Private const types ----------------------------------------------------- */
+#define MEM_ERROR "ERR: Failed to allocate memory for performance calibration\r\n"
+
+#define EI_PC_RET_NO_EVENT_DETECTED -1
+#define EI_PC_RET_MEMORY_ERROR -2
+
+class RecognizeEvents {
+
+public:
+ RecognizeEvents(
+ const ei_model_performance_calibration_t *config,
+ uint32_t n_labels,
+ uint32_t sample_length,
+ float sample_interval_ms)
+ {
+ this->_score_array = nullptr;
+ this->_running_sum = nullptr;
+ this->_detection_threshold = config->detection_threshold;
+ this->_suppression_flags = config->suppression_flags;
+ this->_should_boost = config->is_configured;
+ this->_n_labels = n_labels;
+
+ /* Determine sample length in ms */
+ float sample_length_ms = (static_cast<float>(sample_length) * sample_interval_ms);
+
+ /* Calculate number of inference runs needed for the duration window */
+ this->_average_window_duration_samples =
+ (config->average_window_duration_ms < static_cast<uint32_t>(sample_length_ms))
+ ? 1
+ : static_cast<uint32_t>(static_cast<float>(config->average_window_duration_ms) / sample_length_ms);
+
+ /* Calculate number of inference runs for suppression */
+ this->_suppression_samples = (config->suppression_ms < static_cast<uint32_t>(sample_length_ms))
+ ? 0
+ : static_cast<uint32_t>(static_cast<float>(config->suppression_ms) / sample_length_ms);
+
+ /* Detection threshold should be high enough to only classify 1 possible output */
+ if (this->_detection_threshold <= (1.f / this->_n_labels)) {
+ ei_printf("ERR: Classifier detection threshold too low\r\n");
+ return;
+ }
+
+ /* Array to store scores for all labels */
+ this->_score_array = (float *)ei_malloc(
+ this->_average_window_duration_samples * this->_n_labels * sizeof(float));
+
+ if (this->_score_array == NULL) {
+ ei_printf(MEM_ERROR);
+ return;
+ }
+
+ for (uint32_t i = 0; i < this->_average_window_duration_samples * this->_n_labels; i++) {
+ this->_score_array[i] = 0.f;
+ }
+ this->_score_idx = 0;
+
+ /* Running sum for all labels */
+ this->_running_sum = (float *)ei_malloc(this->_n_labels * sizeof(float));
+
+ if (this->_running_sum != NULL) {
+ for (uint32_t i = 0; i < this->_n_labels; i++) {
+ this->_running_sum[i] = 0.f;
+ }
+ }
+ else {
+ ei_printf(MEM_ERROR);
+ return;
+ }
+
+ this->_suppression_count = this->_suppression_samples;
+ this->_n_scores_in_array = 0;
+ }
+
+ ~RecognizeEvents()
+ {
+ if (this->_score_array) {
+ ei_free((void *)this->_score_array);
+ }
+ if (this->_running_sum) {
+ ei_free((void *)this->_running_sum);
+ }
+ }
+
+ bool should_boost()
+ {
+ return this->_should_boost;
+ }
+
+ int32_t trigger(ei_impulse_result_classification_t *scores)
+ {
+ int32_t recognized_event = EI_PC_RET_NO_EVENT_DETECTED;
+ float current_top_score = 0.f;
+ uint32_t current_top_index = 0;
+
+ /* Check pointers */
+ if (this->_score_array == NULL || this->_running_sum == NULL) {
+ return EI_PC_RET_MEMORY_ERROR;
+ }
+
+ /* Update the score array and running sum */
+ for (uint32_t i = 0; i < this->_n_labels; i++) {
+ this->_running_sum[i] -= this->_score_array[(this->_score_idx * this->_n_labels) + i];
+ this->_running_sum[i] += scores[i].value;
+ this->_score_array[(this->_score_idx * this->_n_labels) + i] = scores[i].value;
+ }
+
+ if (++this->_score_idx >= this->_average_window_duration_samples) {
+ this->_score_idx = 0;
+ }
+
+ /* Number of samples to average, increases until the buffer is full */
+ if (this->_n_scores_in_array < this->_average_window_duration_samples) {
+ this->_n_scores_in_array++;
+ }
+
+ /* Average data and place in scores & determine top score */
+ for (uint32_t i = 0; i < this->_n_labels; i++) {
+ scores[i].value = this->_running_sum[i] / this->_n_scores_in_array;
+
+ if (scores[i].value > current_top_score) {
+ if(this->_suppression_flags == 0) {
+ current_top_score = scores[i].value;
+ current_top_index = i;
+ }
+ else if(this->_suppression_flags & (1 << i)) {
+ current_top_score = scores[i].value;
+ current_top_index = i;
+ }
+ }
+ }
+
+ /* Check threshold, suppression */
+ if (this->_suppression_samples && this->_suppression_count < this->_suppression_samples) {
+ this->_suppression_count++;
+ }
+ else {
+ if (current_top_score >= this->_detection_threshold) {
+ recognized_event = current_top_index;
+
+ if (this->_suppression_flags & (1 << current_top_index)) {
+ this->_suppression_count = 0;
+ }
+ }
+ }
+
+ return recognized_event;
+ };
+
+ void *operator new(size_t size)
+ {
+ void *p = ei_malloc(size);
+ return p;
+ }
+
+ void operator delete(void *p)
+ {
+ ei_free(p);
+ }
+
+private:
+ uint32_t _average_window_duration_samples;
+ float _detection_threshold;
+ bool _should_boost;
+ uint32_t _suppression_samples;
+ uint32_t _suppression_count;
+ uint32_t _suppression_flags;
+ uint32_t _n_labels;
+ float *_score_array;
+ uint32_t _score_idx;
+ float *_running_sum;
+ uint32_t _n_scores_in_array;
+};
+
+#endif //EI_PERFORMANCE_CALIBRATION_H
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_quantize.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_quantize.h
new file mode 100644
index 0000000..727d920
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_quantize.h
@@ -0,0 +1,37 @@
+/* Edge Impulse inferencing library
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __EI_QUANTIZE__H__
+#define __EI_QUANTIZE__H__
+
+#include <cmath>
+#include <algorithm>
+
+static int32_t pre_cast_quantize(float value, float scale, int32_t zero_point, bool is_signed) {
+
+ int32_t max_value = is_signed ? 127 : 255;
+ int32_t min_value = is_signed ? -128 : 0;
+ // Saturate/clip any overflows post scaling
+ return std::min( std::max( static_cast<int32_t>(round(value / scale)) + zero_point, min_value), max_value);
+}
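+
+// Worked example (illustrative parameters): quantizing value = 0.75 with
+// scale = 1.0f / 256.0f, zero_point = -128 and is_signed = true gives
+// round(0.75 * 256) + (-128) = 192 - 128 = 64, inside [-128, 127] so no
+// clipping; an input of 2.0 would compute 512 - 128 = 384 and saturate to 127.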
+
+#endif //!__EI_QUANTIZE__H__
\ No newline at end of file
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier.h
new file mode 100644
index 0000000..7c8ad69
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier.h
@@ -0,0 +1,816 @@
+/*
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef _EDGE_IMPULSE_RUN_CLASSIFIER_H_
+#define _EDGE_IMPULSE_RUN_CLASSIFIER_H_
+
+#include "model-parameters/model_metadata.h"
+
+#include "ei_run_dsp.h"
+#include "ei_classifier_types.h"
+#include "ei_signal_with_axes.h"
+#include "ei_performance_calibration.h"
+
+#include "edge-impulse-sdk/porting/ei_classifier_porting.h"
+
+#if EI_CLASSIFIER_HAS_ANOMALY
+#include "inferencing_engines/anomaly.h"
+#endif
+
+#if defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1
+#include "ei_sampler.h"
+#endif
+
+#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) && (EI_CLASSIFIER_COMPILED != 1)
+#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_micro.h"
+#elif EI_CLASSIFIER_COMPILED == 1
+#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_eon.h"
+#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_FULL
+#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_full.h"
+#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_TIDL
+#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_tidl.h"
+#elif (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSORRT)
+#include "edge-impulse-sdk/classifier/inferencing_engines/tensorrt.h"
+#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW
+#include "edge-impulse-sdk/classifier/inferencing_engines/tensaiflow.h"
+#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI
+#include "edge-impulse-sdk/classifier/inferencing_engines/drpai.h"
+#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_AKIDA
+#include "edge-impulse-sdk/classifier/inferencing_engines/akida.h"
+#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_ONNX_TIDL
+#include "edge-impulse-sdk/classifier/inferencing_engines/onnx_tidl.h"
+#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_MEMRYX
+#include "edge-impulse-sdk/classifier/inferencing_engines/memryx.h"
+#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_NONE
+// noop
+#else
+#error "Unknown inferencing engine"
+#endif
+
+#include "model-parameters/model_variables.h"
+
+#ifdef __cplusplus
+namespace {
+#endif // __cplusplus
+
+/* Function prototypes ----------------------------------------------------- */
+extern "C" EI_IMPULSE_ERROR run_inference(const ei_impulse_t *impulse, ei_feature_t *fmatrix, ei_impulse_result_t *result, bool debug);
+extern "C" EI_IMPULSE_ERROR run_classifier_image_quantized(const ei_impulse_t *impulse, signal_t *signal, ei_impulse_result_t *result, bool debug);
+static EI_IMPULSE_ERROR can_run_classifier_image_quantized(const ei_impulse_t *impulse, ei_learning_block_t block_ptr);
+
+/* Private variables ------------------------------------------------------- */
+
+static uint64_t classifier_continuous_features_written = 0;
+static RecognizeEvents *avg_scores = NULL;
+
+/* Private functions ------------------------------------------------------- */
+
+/* These functions (up to Public functions section) are not exposed to end-user,
+therefore changes are allowed. */
+
+#if EI_CLASSIFIER_LOAD_IMAGE_SCALING
+static const float torch_mean[] = { 0.485, 0.456, 0.406 };
+static const float torch_std[] = { 0.229, 0.224, 0.225 };
+
+static EI_IMPULSE_ERROR scale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix) {
+ if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_TORCH) {
+ // @todo; could we write some faster vector math here?
+ for (size_t ix = 0; ix < fmatrix->rows * fmatrix->cols; ix += 3) {
+ fmatrix->buffer[ix + 0] = (fmatrix->buffer[ix + 0] - torch_mean[0]) / torch_std[0];
+ fmatrix->buffer[ix + 1] = (fmatrix->buffer[ix + 1] - torch_mean[1]) / torch_std[1];
+ fmatrix->buffer[ix + 2] = (fmatrix->buffer[ix + 2] - torch_mean[2]) / torch_std[2];
+ }
+ }
+ else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_0_255) {
+ int scale_res = numpy::scale(fmatrix, 255.0f);
+ if (scale_res != EIDSP_OK) {
+ ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
+ return EI_IMPULSE_DSP_ERROR;
+ }
+ }
+ else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_MIN1_1) {
+ int scale_res = numpy::scale(fmatrix, 2.0f);
+ if (scale_res != EIDSP_OK) {
+ ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
+ return EI_IMPULSE_DSP_ERROR;
+ }
+ scale_res = numpy::subtract(fmatrix, 1.0f);
+ if (scale_res != EIDSP_OK) {
+ ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
+ return EI_IMPULSE_DSP_ERROR;
+ }
+ }
+
+ return EI_IMPULSE_OK;
+}
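+
+// Scaling sketch for the modes above: EI_CLASSIFIER_IMAGE_SCALING_MIN1_1 maps
+// a pixel from [0, 1] to [-1, 1] via x * 2 - 1 (so 0.25 becomes -0.5), while
+// EI_CLASSIFIER_IMAGE_SCALING_TORCH standardizes each RGB channel as
+// (x - mean) / std using the ImageNet statistics in torch_mean/torch_std.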
+
+static EI_IMPULSE_ERROR unscale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix) {
+ if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_TORCH) {
+ // @todo; could we write some faster vector math here?
+ for (size_t ix = 0; ix < fmatrix->rows * fmatrix->cols; ix += 3) {
+ fmatrix->buffer[ix + 0] = (fmatrix->buffer[ix + 0] * torch_std[0]) + torch_mean[0];
+ fmatrix->buffer[ix + 1] = (fmatrix->buffer[ix + 1] * torch_std[1]) + torch_mean[1];
+ fmatrix->buffer[ix + 2] = (fmatrix->buffer[ix + 2] * torch_std[2]) + torch_mean[2];
+ }
+ }
+ else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_0_255) {
+ int scale_res = numpy::scale(fmatrix, 1 / 255.0f);
+ if (scale_res != EIDSP_OK) {
+ ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res);
+ return EI_IMPULSE_DSP_ERROR;
+ }
+ }
+ return EI_IMPULSE_OK;
+}
+#endif
+
+
+/**
+ * @brief Display the results of the inference
+ *
+ * @param result The result
+ */
+__attribute__((unused)) void display_results(ei_impulse_result_t* result)
+{
+ // print the predictions
+ ei_printf("Predictions (DSP: %d ms., Classification: %d ms., Anomaly: %d ms.): \n",
+ result->timing.dsp, result->timing.classification, result->timing.anomaly);
+#if EI_CLASSIFIER_OBJECT_DETECTION == 1
+ bool bb_found = result->bounding_boxes[0].value > 0;
+ for (size_t ix = 0; ix < result->bounding_boxes_count; ix++) {
+ auto bb = result->bounding_boxes[ix];
+ if (bb.value == 0) {
+ continue;
+ }
+ ei_printf(" %s (", bb.label);
+ ei_printf_float(bb.value);
+ ei_printf(") [ x: %u, y: %u, width: %u, height: %u ]\n", bb.x, bb.y, bb.width, bb.height);
+ }
+
+ if (!bb_found) {
+ ei_printf(" No objects found\n");
+ }
+#else
+ for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) {
+ ei_printf(" %s: ", result->classification[ix].label);
+ ei_printf_float(result->classification[ix].value);
+ ei_printf("\n");
+ }
+#if EI_CLASSIFIER_HAS_ANOMALY == 1
+ ei_printf(" anomaly score: ");
+ ei_printf_float(result->anomaly);
+ ei_printf("\n");
+#endif
+#endif
+}
+
+/**
+ * @brief Do inferencing over the processed feature matrix
+ *
+ * @param impulse struct with information about model and DSP
+ * @param fmatrix Processed matrix
+ * @param result Output classifier results
+ * @param[in] debug Debug output enable
+ *
+ * @return The ei impulse error.
+ */
+extern "C" EI_IMPULSE_ERROR run_inference(
+ const ei_impulse_t *impulse,
+ ei_feature_t *fmatrix,
+ ei_impulse_result_t *result,
+ bool debug = false)
+{
+ for (size_t ix = 0; ix < impulse->learning_blocks_size; ix++) {
+
+ ei_learning_block_t block = impulse->learning_blocks[ix];
+
+#if EI_CLASSIFIER_LOAD_IMAGE_SCALING
+ // we do not plan to support multiple DSP blocks with image input,
+ // so only apply scaling to the first feature matrix
+ EI_IMPULSE_ERROR scale_res = scale_fmatrix(&block, fmatrix[0].matrix);
+ if (scale_res != EI_IMPULSE_OK) {
+ return scale_res;
+ }
+#endif
+
+ result->copy_output = block.keep_output;
+
+ EI_IMPULSE_ERROR res = block.infer_fn(impulse, fmatrix, (uint32_t*)block.input_block_ids, block.input_block_ids_size, result, block.config, debug);
+ if (res != EI_IMPULSE_OK) {
+ return res;
+ }
+
+#if EI_CLASSIFIER_LOAD_IMAGE_SCALING
+ // undo scaling
+ scale_res = unscale_fmatrix(&block, fmatrix[0].matrix);
+ if (scale_res != EI_IMPULSE_OK) {
+ return scale_res;
+ }
+#endif
+ }
+
+ if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) {
+ return EI_IMPULSE_CANCELED;
+ }
+
+ return EI_IMPULSE_OK;
+}
+
+/**
+ * @brief Process a complete impulse
+ *
+ * @param impulse struct with information about model and DSP
+ * @param signal Sample data
+ * @param result Output classifier results
+ * @param[in] debug Debug output enable
+ *
+ * @return The ei impulse error.
+ */
+extern "C" EI_IMPULSE_ERROR process_impulse(const ei_impulse_t *impulse,
+ signal_t *signal,
+ ei_impulse_result_t *result,
+ bool debug = false)
+{
+
+#if (EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 && (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_ONNX_TIDL)) || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI
+ // Shortcut for quantized image models
+ ei_learning_block_t block = impulse->learning_blocks[0];
+ if (can_run_classifier_image_quantized(impulse, block) == EI_IMPULSE_OK) {
+ return run_classifier_image_quantized(impulse, signal, result, debug);
+ }
+#endif
+
+ memset(result, 0, sizeof(ei_impulse_result_t));
+ uint32_t block_num = impulse->dsp_blocks_size + impulse->learning_blocks_size;
+
+ // smart pointer to features array
+ std::unique_ptr<ei_feature_t[]> features_ptr(new ei_feature_t[block_num]);
+ ei_feature_t* features = features_ptr.get();
+
+ // have it outside of the loop to avoid going out of scope
+ std::unique_ptr<ei::matrix_t> *matrix_ptrs = new std::unique_ptr<ei::matrix_t>[block_num];
+
+ uint64_t dsp_start_us = ei_read_timer_us();
+
+ size_t out_features_index = 0;
+
+ for (size_t ix = 0; ix < impulse->dsp_blocks_size; ix++) {
+ ei_model_dsp_t block = impulse->dsp_blocks[ix];
+ matrix_ptrs[ix] = std::unique_ptr<ei::matrix_t>(new ei::matrix_t(1, block.n_output_features));
+ features[ix].matrix = matrix_ptrs[ix].get();
+ features[ix].blockId = block.blockId;
+
+ if (out_features_index + block.n_output_features > impulse->nn_input_frame_size) {
+ ei_printf("ERR: Would write outside feature buffer\n");
+ delete[] matrix_ptrs;
+ return EI_IMPULSE_DSP_ERROR;
+ }
+
+#if EIDSP_SIGNAL_C_FN_POINTER
+ if (block.axes_size != impulse->raw_samples_per_frame) {
+ ei_printf("ERR: EIDSP_SIGNAL_C_FN_POINTER can only be used when all axes are selected for DSP blocks\n");
+ delete[] matrix_ptrs;
+ return EI_IMPULSE_DSP_ERROR;
+ }
+ int ret = block.extract_fn(signal, features[ix].matrix, block.config, impulse->frequency);
+#else
+ SignalWithAxes swa(signal, block.axes, block.axes_size, impulse);
+ int ret = block.extract_fn(swa.get_signal(), features[ix].matrix, block.config, impulse->frequency);
+#endif
+
+ if (ret != EIDSP_OK) {
+ ei_printf("ERR: Failed to run DSP process (%d)\n", ret);
+ delete[] matrix_ptrs;
+ return EI_IMPULSE_DSP_ERROR;
+ }
+
+ if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) {
+ delete[] matrix_ptrs;
+ return EI_IMPULSE_CANCELED;
+ }
+
+ out_features_index += block.n_output_features;
+ }
+
+#if EI_CLASSIFIER_SINGLE_FEATURE_INPUT == 0
+ for (size_t ix = 0; ix < impulse->learning_blocks_size; ix++) {
+ ei_learning_block_t block = impulse->learning_blocks[ix];
+
+ if (block.keep_output) {
+ matrix_ptrs[impulse->dsp_blocks_size + ix] = std::unique_ptr<ei::matrix_t>(new ei::matrix_t(1, block.output_features_count));
+ features[impulse->dsp_blocks_size + ix].matrix = matrix_ptrs[impulse->dsp_blocks_size + ix].get();
+ features[impulse->dsp_blocks_size + ix].blockId = block.blockId;
+ }
+ }
+#endif // EI_CLASSIFIER_SINGLE_FEATURE_INPUT
+
+ result->timing.dsp_us = ei_read_timer_us() - dsp_start_us;
+ result->timing.dsp = (int)(result->timing.dsp_us / 1000);
+
+ if (debug) {
+ ei_printf("Features (%d ms.): ", result->timing.dsp);
+ for (size_t ix = 0; ix < block_num; ix++) {
+ if (features[ix].matrix == nullptr) {
+ continue;
+ }
+ for (size_t jx = 0; jx < features[ix].matrix->cols; jx++) {
+ ei_printf_float(features[ix].matrix->buffer[jx]);
+ ei_printf(" ");
+ }
+ ei_printf("\n");
+ }
+ }
+
+ if (debug) {
+ ei_printf("Running impulse...\n");
+ }
+
+ EI_IMPULSE_ERROR res = run_inference(impulse, features, result, debug);
+
+ delete[] matrix_ptrs;
+
+ return res;
+}
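+
+/*
+ * Note on the buffer layout above (derived from process_impulse, not an
+ * upstream comment): slots features[0 .. dsp_blocks_size) always hold one
+ * matrix per DSP block, while slots for learning blocks are only populated
+ * when block.keep_output is set, so downstream consumers must tolerate
+ * nullptr matrices (as the debug printer above already does).
+ */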
+
+/**
+ * @brief Process a complete impulse for continuous inference
+ *
+ * @param impulse struct with information about model and DSP
+ * @param signal Sample data
+ * @param result Output classifier results
+ * @param[in] debug Debug output enable
+ *
+ * @return The ei impulse error.
+ */
+extern "C" EI_IMPULSE_ERROR process_impulse_continuous(const ei_impulse_t *impulse,
+ signal_t *signal,
+ ei_impulse_result_t *result,
+ bool debug,
+ bool enable_maf)
+{
+
+ static ei::matrix_t static_features_matrix(1, impulse->nn_input_frame_size);
+ if (!static_features_matrix.buffer) {
+ return EI_IMPULSE_ALLOC_FAILED;
+ }
+
+ memset(result, 0, sizeof(ei_impulse_result_t));
+
+ EI_IMPULSE_ERROR ei_impulse_error = EI_IMPULSE_OK;
+
+ uint64_t dsp_start_us = ei_read_timer_us();
+
+ size_t out_features_index = 0;
+ bool is_mfcc = false;
+ bool is_mfe = false;
+ bool is_spectrogram = false;
+
+ for (size_t ix = 0; ix < impulse->dsp_blocks_size; ix++) {
+ ei_model_dsp_t block = impulse->dsp_blocks[ix];
+
+ if (out_features_index + block.n_output_features > impulse->nn_input_frame_size) {
+ ei_printf("ERR: Would write outside feature buffer\n");
+ return EI_IMPULSE_DSP_ERROR;
+ }
+
+ ei::matrix_t fm(1, block.n_output_features,
+ static_features_matrix.buffer + out_features_index);
+
+ int (*extract_fn_slice)(ei::signal_t *signal, ei::matrix_t *output_matrix, void *config, const float frequency, matrix_size_t *out_matrix_size);
+
+ /* Switch to the slice version of the mfcc feature extract function */
+ if (block.extract_fn == extract_mfcc_features) {
+ extract_fn_slice = &extract_mfcc_per_slice_features;
+ is_mfcc = true;
+ }
+ else if (block.extract_fn == extract_spectrogram_features) {
+ extract_fn_slice = &extract_spectrogram_per_slice_features;
+ is_spectrogram = true;
+ }
+ else if (block.extract_fn == extract_mfe_features) {
+ extract_fn_slice = &extract_mfe_per_slice_features;
+ is_mfe = true;
+ }
+ else {
+ ei_printf("ERR: Unknown extract function, only MFCC, MFE and spectrogram supported\n");
+ return EI_IMPULSE_DSP_ERROR;
+ }
+
+ matrix_size_t features_written;
+
+#if EIDSP_SIGNAL_C_FN_POINTER
+ if (block.axes_size != impulse->raw_samples_per_frame) {
+ ei_printf("ERR: EIDSP_SIGNAL_C_FN_POINTER can only be used when all axes are selected for DSP blocks\n");
+ return EI_IMPULSE_DSP_ERROR;
+ }
+ int ret = extract_fn_slice(signal, &fm, block.config, impulse->frequency, &features_written);
+#else
+ SignalWithAxes swa(signal, block.axes, block.axes_size, impulse);
+ int ret = extract_fn_slice(swa.get_signal(), &fm, block.config, impulse->frequency, &features_written);
+#endif
+
+ if (ret != EIDSP_OK) {
+ ei_printf("ERR: Failed to run DSP process (%d)\n", ret);
+ return EI_IMPULSE_DSP_ERROR;
+ }
+
+ if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) {
+ return EI_IMPULSE_CANCELED;
+ }
+
+ classifier_continuous_features_written += (features_written.rows * features_written.cols);
+
+ out_features_index += block.n_output_features;
+ }
+
+ result->timing.dsp_us = ei_read_timer_us() - dsp_start_us;
+ result->timing.dsp = (int)(result->timing.dsp_us / 1000);
+
+ if (debug) {
+ ei_printf("\r\nFeatures (%d ms.): ", result->timing.dsp);
+ for (size_t ix = 0; ix < static_features_matrix.cols; ix++) {
+ ei_printf_float(static_features_matrix.buffer[ix]);
+ ei_printf(" ");
+ }
+ ei_printf("\n");
+ }
+
+ if (classifier_continuous_features_written >= impulse->nn_input_frame_size) {
+ dsp_start_us = ei_read_timer_us();
+
+ ei_feature_t feature;
+ std::unique_ptr<ei::matrix_t> matrix_ptr(new ei::matrix_t(1, impulse->nn_input_frame_size));
+ feature.matrix = matrix_ptr.get();
+ feature.blockId = 0;
+
+ /* Create a copy of the matrix for normalization */
+ for (size_t m_ix = 0; m_ix < impulse->nn_input_frame_size; m_ix++) {
+ feature.matrix->buffer[m_ix] = static_features_matrix.buffer[m_ix];
+ }
+
+ if (is_mfcc) {
+ calc_cepstral_mean_and_var_normalization_mfcc(feature.matrix, impulse->dsp_blocks[0].config);
+ }
+ else if (is_spectrogram) {
+ calc_cepstral_mean_and_var_normalization_spectrogram(feature.matrix, impulse->dsp_blocks[0].config);
+ }
+ else if (is_mfe) {
+ calc_cepstral_mean_and_var_normalization_mfe(feature.matrix, impulse->dsp_blocks[0].config);
+ }
+ result->timing.dsp_us += ei_read_timer_us() - dsp_start_us;
+ result->timing.dsp = (int)(result->timing.dsp_us / 1000);
+
+ if (debug) {
+ ei_printf("Running impulse...\n");
+ }
+
+ ei_impulse_error = run_inference(impulse, &feature, result, debug);
+
+#if EI_CLASSIFIER_CALIBRATION_ENABLED
+ if (impulse->sensor == EI_CLASSIFIER_SENSOR_MICROPHONE) {
+ if((void *)avg_scores != NULL && enable_maf == true) {
+ if (enable_maf && !impulse->calibration.is_configured) {
+ // perfcal is not configured, print msg first time
+ static bool has_printed_msg = false;
+
+ if (!has_printed_msg) {
+ ei_printf("WARN: run_classifier_continuous, enable_maf is true, but performance calibration is not configured.\n");
+ ei_printf(" Previously we'd run a moving-average filter over your outputs in this case, but this is now disabled.\n");
+ ei_printf(" Go to 'Performance calibration' in your Edge Impulse project to configure post-processing parameters.\n");
+ ei_printf(" (You can enable this from 'Dashboard' if it's not visible in your project)\n");
+ ei_printf("\n");
+
+ has_printed_msg = true;
+ }
+ }
+ else {
+ // perfcal is configured
+ static bool has_printed_msg = false;
+
+ if (!has_printed_msg) {
+ ei_printf("\nPerformance calibration is configured for your project. If no event is detected, all values are 0.\r\n\n");
+ has_printed_msg = true;
+ }
+
+ int label_detected = avg_scores->trigger(result->classification);
+
+ if (avg_scores->should_boost()) {
+ for (int i = 0; i < impulse->label_count; i++) {
+ if (i == label_detected) {
+ result->classification[i].value = 1.0f;
+ }
+ else {
+ result->classification[i].value = 0.0f;
+ }
+ }
+ }
+ }
+ }
+ }
+#endif
+ }
+ else {
+ for (int i = 0; i < impulse->label_count; i++) {
+ // set the label correctly in the result struct when we have no results (otherwise it is nullptr)
+ result->classification[i].label = impulse->categories[(uint32_t)i];
+ }
+ }
+
+ return ei_impulse_error;
+}
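+
+/*
+ * Worked example for the continuous path above (illustrative numbers only):
+ * with nn_input_frame_size == 16000 and each slice producing 4000 features,
+ * the first three calls only roll new features into static_features_matrix
+ * (classifier_continuous_features_written grows 4000 -> 8000 -> 12000) and
+ * return zeroed classifications with the labels filled in; the fourth call
+ * reaches the full window, copies it, applies the matching cepstral
+ * normalization and runs inference.
+ */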
+
+/**
+ * Check if the current impulse could be used by 'run_classifier_image_quantized'
+ */
+__attribute__((unused)) static EI_IMPULSE_ERROR can_run_classifier_image_quantized(const ei_impulse_t *impulse, ei_learning_block_t block_ptr) {
+
+ if (impulse->inferencing_engine != EI_CLASSIFIER_TFLITE
+ && impulse->inferencing_engine != EI_CLASSIFIER_TENSAIFLOW
+ && impulse->inferencing_engine != EI_CLASSIFIER_DRPAI
+ && impulse->inferencing_engine != EI_CLASSIFIER_ONNX_TIDL) // check later
+ {
+ return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
+ }
+
+ // visual anomaly also needs to go through the normal path
+ if (impulse->has_anomaly){
+ return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES;
+ }
+
+ // Check if we have tflite graph
+ if (block_ptr.infer_fn != run_nn_inference) {
+ return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES;
+ }
+
+ // Check if we have a quantized NN Input layer (input is always quantized for DRP-AI)
+ ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)block_ptr.config;
+ if (block_config->quantized != 1) {
+ return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES;
+ }
+
+ // And if we have one DSP block which operates on images...
+ if (impulse->dsp_blocks_size != 1 || impulse->dsp_blocks[0].extract_fn != extract_image_features) {
+ return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES;
+ }
+
+ return EI_IMPULSE_OK;
+}
+
+#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 && (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_ONNX_TIDL)
+
+/**
+ * Special function to run the classifier on images. It only works on TFLite models (interpreter, EON, TensaiFlow, DRP-AI, TIDL or Memryx)
+ * and allocates a lot less memory by quantizing in place. It can only be used if 'can_run_classifier_image_quantized'
+ * returns EI_IMPULSE_OK.
+ */
+extern "C" EI_IMPULSE_ERROR run_classifier_image_quantized(
+ const ei_impulse_t *impulse,
+ signal_t *signal,
+ ei_impulse_result_t *result,
+ bool debug = false)
+{
+ memset(result, 0, sizeof(ei_impulse_result_t));
+
+ return run_nn_inference_image_quantized(impulse, signal, result, impulse->learning_blocks[0].config, debug);
+}
+
+#endif // #if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 && (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_ONNX_TIDL)
+
+/* Public functions ------------------------------------------------------- */
+
+/* Tread carefully: public functions are not to be changed,
+to preserve backwards compatibility. */
+
+/**
+ * @brief Init static vars
+ */
+extern "C" void run_classifier_init()
+{
+
+ classifier_continuous_features_written = 0;
+ ei_dsp_clear_continuous_audio_state();
+
+#if EI_CLASSIFIER_CALIBRATION_ENABLED
+
+ const ei_impulse_t impulse = ei_default_impulse;
+ const ei_model_performance_calibration_t *calibration = &impulse.calibration;
+
+ if(calibration != NULL) {
+ avg_scores = new RecognizeEvents(calibration,
+ impulse.label_count, impulse.slice_size, impulse.interval_ms);
+ }
+#endif
+}
+
+/**
+ * @brief Init static vars, for multi-model support
+ */
+__attribute__((unused)) void run_classifier_init(const ei_impulse_t *impulse)
+{
+ classifier_continuous_features_written = 0;
+ ei_dsp_clear_continuous_audio_state();
+
+#if EI_CLASSIFIER_CALIBRATION_ENABLED
+ const ei_model_performance_calibration_t *calibration = &impulse->calibration;
+
+ if(calibration != NULL) {
+ avg_scores = new RecognizeEvents(calibration,
+ impulse->label_count, impulse->slice_size, impulse->interval_ms);
+ }
+#endif
+}
+
+extern "C" void run_classifier_deinit(void)
+{
+ if((void *)avg_scores != NULL) {
+ delete avg_scores;
+ // null the pointer so a repeated deinit cannot double-delete
+ avg_scores = NULL;
+ }
+}
+
+/**
+ * @brief Fill the complete matrix with sample slices. From there, run inference
+ * on the matrix.
+ *
+ * @param signal Sample data
+ * @param result Classification output
+ * @param[in] debug Debug output enable
+ *
+ * @return The ei impulse error.
+ */
+extern "C" EI_IMPULSE_ERROR run_classifier_continuous(
+ signal_t *signal,
+ ei_impulse_result_t *result,
+ bool debug = false,
+ bool enable_maf = true)
+{
+ const ei_impulse_t impulse = ei_default_impulse;
+ return process_impulse_continuous(&impulse, signal, result, debug, enable_maf);
+}
+
+/**
+ * @brief Fill the complete matrix with sample slices. From there, run the impulse
+ * on the matrix.
+ *
+ * @param impulse struct with information about model and DSP
+ * @param signal Sample data
+ * @param result Classification output
+ * @param[in] debug Debug output enable
+ *
+ * @return The ei impulse error.
+ */
+__attribute__((unused)) EI_IMPULSE_ERROR run_classifier_continuous(
+ const ei_impulse_t *impulse,
+ signal_t *signal,
+ ei_impulse_result_t *result,
+ bool debug = false,
+ bool enable_maf = true)
+{
+ return process_impulse_continuous(impulse, signal, result, debug, enable_maf);
+}
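+
+/*
+ * Usage sketch for the continuous API (assumptions: EI_CLASSIFIER_SLICE_SIZE
+ * comes from the generated model metadata and my_slice_get_data is a
+ * user-supplied signal_t callback; neither is defined in this header):
+ *
+ *   run_classifier_init();
+ *   signal_t slice;
+ *   slice.total_length = EI_CLASSIFIER_SLICE_SIZE;
+ *   slice.get_data = &my_slice_get_data;
+ *   ei_impulse_result_t result;
+ *   while (keep_sampling) {
+ *       if (run_classifier_continuous(&slice, &result, false, true) != EI_IMPULSE_OK) {
+ *           break;
+ *       }
+ *   }
+ *   run_classifier_deinit();
+ */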
+
+/**
+ * Run the classifier over a signal of raw features
+ * @param signal Signal to run the classifier over
+ * @param result Object to store the results in
+ * @param debug Whether to show debug messages (default: false)
+ */
+extern "C" EI_IMPULSE_ERROR run_classifier(
+ signal_t *signal,
+ ei_impulse_result_t *result,
+ bool debug = false)
+{
+ const ei_impulse_t impulse = ei_default_impulse;
+ return process_impulse(&impulse, signal, result, debug);
+}
+
+/**
+ * Run the impulse over a signal of raw features
+ * @param impulse struct with information about model and DSP
+ * @param signal Signal to run the impulse over
+ * @param result Object to store the results in
+ * @param debug Whether to show debug messages (default: false)
+ */
+__attribute__((unused)) EI_IMPULSE_ERROR run_classifier(
+ const ei_impulse_t *impulse,
+ signal_t *signal,
+ ei_impulse_result_t *result,
+ bool debug = false)
+{
+ return process_impulse(impulse, signal, result, debug);
+}
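+
+/*
+ * Usage sketch for the one-shot API (assumption: raw_buf holds one full
+ * window of EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE raw values from the generated
+ * model metadata; it is not defined in this header):
+ *
+ *   signal_t signal;
+ *   int err = numpy::signal_from_buffer(raw_buf, EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE, &signal);
+ *   if (err == 0) {
+ *       ei_impulse_result_t result;
+ *       EI_IMPULSE_ERROR r = run_classifier(&signal, &result, false);
+ *   }
+ */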
+
+/* Deprecated functions ------------------------------------------------------- */
+
+/* These functions are deprecated and may be removed or moved in the future.
+Do not use these - if possible, change your code to reflect the upcoming changes. */
+
+#if EIDSP_SIGNAL_C_FN_POINTER == 0
+
+/**
+ * Run the impulse, if you provide an instance of sampler it will also persist the data for you
+ * @param sampler Instance to an **initialized** sampler
+ * @param result Object to store the results in
+ * @param data_fn Function to retrieve data from sensors
+ * @param debug Whether to log debug messages (default false)
+ */
+__attribute__((unused)) EI_IMPULSE_ERROR run_impulse(
+#if defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1
+ EdgeSampler *sampler,
+#endif
+ ei_impulse_result_t *result,
+#ifdef __MBED__
+ mbed::Callback<void(float*, size_t)> data_fn,
+#else
+ std::function<void(float*, size_t)> data_fn,
+#endif
+ bool debug = false) {
+
+ const ei_impulse_t impulse = ei_default_impulse;
+
+ float *x = (float*)calloc(impulse.dsp_input_frame_size, sizeof(float));
+ if (!x) {
+ return EI_IMPULSE_OUT_OF_MEMORY;
+ }
+
+ uint64_t next_tick = 0;
+
+ uint64_t sampling_us_start = ei_read_timer_us();
+
+ // grab some data
+ for (int i = 0; i < (int)impulse.dsp_input_frame_size; i += impulse.raw_samples_per_frame) {
+ uint64_t curr_us = ei_read_timer_us() - sampling_us_start;
+
+ next_tick = curr_us + (impulse.interval_ms * 1000);
+
+ data_fn(x + i, impulse.raw_samples_per_frame);
+#if defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1
+ if (sampler != NULL) {
+ sampler->write_sensor_data(x + i, impulse.raw_samples_per_frame);
+ }
+#endif
+
+ if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) {
+ free(x);
+ return EI_IMPULSE_CANCELED;
+ }
+
+ while (next_tick > ei_read_timer_us() - sampling_us_start);
+ }
+
+ result->timing.sampling = (ei_read_timer_us() - sampling_us_start) / 1000;
+
+ signal_t signal;
+ int err = numpy::signal_from_buffer(x, impulse.dsp_input_frame_size, &signal);
+ if (err != 0) {
+ free(x);
+ ei_printf("ERR: signal_from_buffer failed (%d)\n", err);
+ return EI_IMPULSE_DSP_ERROR;
+ }
+
+ EI_IMPULSE_ERROR r = run_classifier(&signal, result, debug);
+ free(x);
+ return r;
+}
+
+#if defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1
+/**
+ * Run the impulse, does not persist data
+ * @param result Object to store the results in
+ * @param data_fn Function to retrieve data from sensors
+ * @param debug Whether to log debug messages (default false)
+ */
+__attribute__((unused)) EI_IMPULSE_ERROR run_impulse(
+ ei_impulse_result_t *result,
+#ifdef __MBED__
+ mbed::Callback<void(float*, size_t)> data_fn,
+#else
+ std::function<void(float*, size_t)> data_fn,
+#endif
+ bool debug = false) {
+ return run_impulse(NULL, result, data_fn, debug);
+}
+#endif
+
+#endif // #if EIDSP_SIGNAL_C_FN_POINTER == 0
+
+#ifdef __cplusplus
+}
+#endif // __cplusplus
+
+#endif // _EDGE_IMPULSE_RUN_CLASSIFIER_H_
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier_c.cpp b/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier_c.cpp
new file mode 100644
index 0000000..4419384
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier_c.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#if defined(__cplusplus) && EI_C_LINKAGE == 1
+
+#include "ei_run_classifier_c.h"
+
+/**
+ * This function definition is just there to make sure
+ * that the symbol is not removed from the library.
+ */
+EI_IMPULSE_ERROR ei_run_classifier(
+ signal_t *signal,
+ ei_impulse_result_t *result,
+ bool debug) {
+
+ return run_classifier(signal, result, debug);
+}
+
+#endif // #if defined(__cplusplus) && EI_C_LINKAGE == 1
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier_c.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier_c.h
new file mode 100644
index 0000000..426958b
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier_c.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef _EDGE_IMPULSE_RUN_CLASSIFIER_C_H_
+#define _EDGE_IMPULSE_RUN_CLASSIFIER_C_H_
+
+#if defined(__cplusplus) && EI_C_LINKAGE == 1
+
+#include "ei_run_classifier.h"
+
+/**
+ * Run the classifier over a signal of raw features
+ * @param signal Signal to run the classifier over
+ * @param result Object to store the results in
+ * @param debug Whether to show debug messages (default: false)
+ */
+extern "C" EI_IMPULSE_ERROR ei_run_classifier(
+ signal_t *signal,
+ ei_impulse_result_t *result,
+ bool debug = false);
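+
+/*
+ * Sketch of a call site (assumption: the surrounding application builds this
+ * library as C++ with EI_C_LINKAGE=1 so a plain C translation unit can link
+ * against this un-mangled symbol; signal/result setup is omitted):
+ *
+ *   EI_IMPULSE_ERROR err = ei_run_classifier(&signal, &result, false);
+ */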
+
+#endif // #if defined(__cplusplus) && EI_C_LINKAGE == 1
+
+#endif // _EDGE_IMPULSE_RUN_CLASSIFIER_C_H_
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier_image.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier_image.h
new file mode 100644
index 0000000..37ff775
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_run_classifier_image.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2022 Edge Impulse Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef _EDGE_IMPULSE_RUN_CLASSIFIER_IMAGE_H_
+#define _EDGE_IMPULSE_RUN_CLASSIFIER_IMAGE_H_
+
+#include "ei_run_classifier.h"
+
+
+
+#endif // _EDGE_IMPULSE_RUN_CLASSIFIER_IMAGE_H_
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_run_dsp.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_run_dsp.h
new file mode 100644
index 0000000..d04c144
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_run_dsp.h
@@ -0,0 +1,1523 @@
+/*
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef _EDGE_IMPULSE_RUN_DSP_H_
+#define _EDGE_IMPULSE_RUN_DSP_H_
+
+#include "edge-impulse-sdk/classifier/ei_model_types.h"
+#include "edge-impulse-sdk/dsp/spectral/spectral.hpp"
+#include "edge-impulse-sdk/dsp/speechpy/speechpy.hpp"
+#include "edge-impulse-sdk/classifier/ei_signal_with_range.h"
+#include "model-parameters/model_metadata.h"
+
+#if defined(__cplusplus) && EI_C_LINKAGE == 1
+extern "C" {
+ extern void ei_printf(const char *format, ...);
+}
+#else
+extern void ei_printf(const char *format, ...);
+#endif
+
+#ifdef __cplusplus
+namespace {
+#endif // __cplusplus
+
+using namespace ei;
+
+#if defined(EI_DSP_IMAGE_BUFFER_STATIC_SIZE)
+float ei_dsp_image_buffer[EI_DSP_IMAGE_BUFFER_STATIC_SIZE];
+#endif
+
+// this is the frame we work on... allocate it statically so we share between invocations
+static float *ei_dsp_cont_current_frame = nullptr;
+static size_t ei_dsp_cont_current_frame_size = 0;
+static int ei_dsp_cont_current_frame_ix = 0;
+
+__attribute__((unused)) int extract_spectral_analysis_features(
+ signal_t *signal,
+ matrix_t *output_matrix,
+ void *config_ptr,
+ const float frequency)
+{
+ ei_dsp_config_spectral_analysis_t *config = (ei_dsp_config_spectral_analysis_t *)config_ptr;
+
+ // input matrix from the raw signal
+ matrix_t input_matrix(signal->total_length / config->axes, config->axes);
+ if (!input_matrix.buffer) {
+ EIDSP_ERR(EIDSP_OUT_OF_MEM);
+ }
+
+ signal->get_data(0, signal->total_length, input_matrix.buffer);
+
+#if EI_DSP_PARAMS_SPECTRAL_ANALYSIS_ANALYSIS_TYPE_WAVELET || EI_DSP_PARAMS_ALL
+ if (strcmp(config->analysis_type, "Wavelet") == 0) {
+ return spectral::wavelet::extract_wavelet_features(&input_matrix, output_matrix, config, frequency);
+ }
+#endif
+
+#if EI_DSP_PARAMS_SPECTRAL_ANALYSIS_ANALYSIS_TYPE_FFT || EI_DSP_PARAMS_ALL
+ if (strcmp(config->analysis_type, "FFT") == 0) {
+ if (config->implementation_version == 1) {
+ return spectral::feature::extract_spectral_analysis_features_v1(
+ &input_matrix,
+ output_matrix,
+ config,
+ frequency);
+ } else if (config->implementation_version == 4) {
+ return spectral::feature::extract_spectral_analysis_features_v4(
+ &input_matrix,
+ output_matrix,
+ config,
+ frequency);
+ } else {
+ return spectral::feature::extract_spectral_analysis_features_v2(
+ &input_matrix,
+ output_matrix,
+ config,
+ frequency);
+ }
+ }
+#endif
+
+#if !EI_DSP_PARAMS_GENERATED || EI_DSP_PARAMS_ALL || !(EI_DSP_PARAMS_SPECTRAL_ANALYSIS_ANALYSIS_TYPE_FFT || EI_DSP_PARAMS_SPECTRAL_ANALYSIS_ANALYSIS_TYPE_WAVELET)
+ if (config->implementation_version == 1) {
+ return spectral::feature::extract_spectral_analysis_features_v1(
+ &input_matrix,
+ output_matrix,
+ config,
+ frequency);
+ }
+ if (config->implementation_version == 2) {
+ return spectral::feature::extract_spectral_analysis_features_v2(
+ &input_matrix,
+ output_matrix,
+ config,
+ frequency);
+ }
+#endif
+ return EIDSP_NOT_SUPPORTED;
+}
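+
+/*
+ * Shape note for the function above (illustrative): for a signal with
+ * total_length == 300 and config->axes == 3, input_matrix is 100x3, one
+ * column per axis and one row per interleaved sample frame, before the
+ * wavelet or FFT feature extractor runs over it.
+ */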
+
+__attribute__((unused)) int extract_raw_features(signal_t *signal, matrix_t *output_matrix, void *config_ptr, const float frequency) {
+ ei_dsp_config_raw_t config = *((ei_dsp_config_raw_t*)config_ptr);
+
+ // Because of rounding errors during re-sampling the output size of the block might be
+ // smaller than the input of the block. Make sure we don't write outside of the bounds
+ // of the array:
+ // https://forum.edgeimpulse.com/t/using-custom-sensors-on-raspberry-pi-4/3506/7
+ size_t els_to_copy = signal->total_length;
+ if (els_to_copy > output_matrix->rows * output_matrix->cols) {
+ els_to_copy = output_matrix->rows * output_matrix->cols;
+ }
+
+ signal->get_data(0, els_to_copy, output_matrix->buffer);
+
+ // scale the signal
+ int ret = numpy::scale(output_matrix, config.scale_axes);
+ if (ret != EIDSP_OK) {
+ EIDSP_ERR(ret);
+ }
+
+ return EIDSP_OK;
+}
+
+__attribute__((unused)) int extract_flatten_features(signal_t *signal, matrix_t *output_matrix, void *config_ptr, const float frequency) {
+ ei_dsp_config_flatten_t config = *((ei_dsp_config_flatten_t*)config_ptr);
+
+ uint32_t expected_matrix_size = 0;
+ if (config.average) expected_matrix_size += config.axes;
+ if (config.minimum) expected_matrix_size += config.axes;
+ if (config.maximum) expected_matrix_size += config.axes;
+ if (config.rms) expected_matrix_size += config.axes;
+ if (config.stdev) expected_matrix_size += config.axes;
+ if (config.skewness) expected_matrix_size += config.axes;
+ if (config.kurtosis) expected_matrix_size += config.axes;
+
+ if (output_matrix->rows * output_matrix->cols != expected_matrix_size) {
+ EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH);
+ }
+
+ int ret;
+
+ // input matrix from the raw signal
+ matrix_t input_matrix(signal->total_length / config.axes, config.axes);
+ if (!input_matrix.buffer) {
+ EIDSP_ERR(EIDSP_OUT_OF_MEM);
+ }
+ signal->get_data(0, signal->total_length, input_matrix.buffer);
+
+ // scale the signal
+ ret = numpy::scale(&input_matrix, config.scale_axes);
+ if (ret != EIDSP_OK) {
+ ei_printf("ERR: Failed to scale signal (%d)\n", ret);
+ EIDSP_ERR(ret);
+ }
+
+ // transpose the matrix so we have one row per axis (nifty!)
+ ret = numpy::transpose(&input_matrix);
+ if (ret != EIDSP_OK) {
+ ei_printf("ERR: Failed to transpose matrix (%d)\n", ret);
+ EIDSP_ERR(ret);
+ }
+
+ size_t out_matrix_ix = 0;
+
+ for (size_t row = 0; row < input_matrix.rows; row++) {
+ matrix_t row_matrix(1, input_matrix.cols, input_matrix.buffer + (row * input_matrix.cols));
+
+ if (config.average) {
+ float fbuffer;
+ matrix_t out_matrix(1, 1, &fbuffer);
+ numpy::mean(&row_matrix, &out_matrix);
+ output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0];
+ }
+
+ if (config.minimum) {
+ float fbuffer;
+ matrix_t out_matrix(1, 1, &fbuffer);
+ numpy::min(&row_matrix, &out_matrix);
+ output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0];
+ }
+
+ if (config.maximum) {
+ float fbuffer;
+ matrix_t out_matrix(1, 1, &fbuffer);
+ numpy::max(&row_matrix, &out_matrix);
+ output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0];
+ }
+
+ if (config.rms) {
+ float fbuffer;
+ matrix_t out_matrix(1, 1, &fbuffer);
+ numpy::rms(&row_matrix, &out_matrix);
+ output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0];
+ }
+
+ if (config.stdev) {
+ float fbuffer;
+ matrix_t out_matrix(1, 1, &fbuffer);
+ numpy::stdev(&row_matrix, &out_matrix);
+ output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0];
+ }
+
+ if (config.skewness) {
+ float fbuffer;
+ matrix_t out_matrix(1, 1, &fbuffer);
+ numpy::skew(&row_matrix, &out_matrix);
+ output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0];
+ }
+
+ if (config.kurtosis) {
+ float fbuffer;
+ matrix_t out_matrix(1, 1, &fbuffer);
+ numpy::kurtosis(&row_matrix, &out_matrix);
+ output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0];
+ }
+ }
+
+ // flatten again
+ output_matrix->cols = output_matrix->rows * output_matrix->cols;
+ output_matrix->rows = 1;
+
+ return EIDSP_OK;
+}
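+
+/*
+ * Layout example for the function above (illustrative): with config.axes == 3
+ * and only `average` and `rms` enabled, expected_matrix_size == 6 and, after
+ * the transpose, the output buffer is ordered per axis:
+ *
+ *   [ avg(ax0), rms(ax0), avg(ax1), rms(ax1), avg(ax2), rms(ax2) ]
+ */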
+
+static class speechpy::processing::preemphasis *preemphasis;
+static int preemphasized_audio_signal_get_data(size_t offset, size_t length, float *out_ptr) {
+ return preemphasis->get_data(offset, length, out_ptr);
+}
+
+__attribute__((unused)) int extract_mfcc_features(signal_t *signal, matrix_t *output_matrix, void *config_ptr, const float sampling_frequency) {
+ ei_dsp_config_mfcc_t config = *((ei_dsp_config_mfcc_t*)config_ptr);
+
+ if (config.axes != 1) {
+ EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH);
+ }
+
+ if((config.implementation_version == 0) || (config.implementation_version > 4)) {
+ EIDSP_ERR(EIDSP_BLOCK_VERSION_INCORRECT);
+ }
+
+ if (signal->total_length == 0) {
+ EIDSP_ERR(EIDSP_PARAMETER_INVALID);
+ }
+
+ const uint32_t frequency = static_cast<uint32_t>(sampling_frequency);
+
+ // preemphasis class to preprocess the audio...
+ class speechpy::processing::preemphasis pre(signal, config.pre_shift, config.pre_cof, false);
+ preemphasis = &pre;
+
+ signal_t preemphasized_audio_signal;
+ preemphasized_audio_signal.total_length = signal->total_length;
+ preemphasized_audio_signal.get_data = &preemphasized_audio_signal_get_data;
+
+ // calculate the size of the MFCC matrix
+ matrix_size_t out_matrix_size =
+ speechpy::feature::calculate_mfcc_buffer_size(
+ signal->total_length, frequency, config.frame_length, config.frame_stride, config.num_cepstral, config.implementation_version);
+ /* Only throw a size mismatch error when the calculated buffer doesn't fit, for continuous inferencing */
+ if (out_matrix_size.rows * out_matrix_size.cols > output_matrix->rows * output_matrix->cols) {
+ ei_printf("out_matrix = %dx%d\n", (int)output_matrix->rows, (int)output_matrix->cols);
+ ei_printf("calculated size = %dx%d\n", (int)out_matrix_size.rows, (int)out_matrix_size.cols);
+ EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH);
+ }
+
+ output_matrix->rows = out_matrix_size.rows;
+ output_matrix->cols = out_matrix_size.cols;
+
+ // and run the MFCC extraction
+ int ret = speechpy::feature::mfcc(output_matrix, &preemphasized_audio_signal,
+ frequency, config.frame_length, config.frame_stride, config.num_cepstral, config.num_filters, config.fft_length,
+ config.low_frequency, config.high_frequency, true, config.implementation_version);
+ if (ret != EIDSP_OK) {
+ ei_printf("ERR: MFCC failed (%d)\n", ret);
+ EIDSP_ERR(ret);
+ }
+
+ // cepstral mean and variance normalization
+ ret = speechpy::processing::cmvnw(output_matrix, config.win_size, true, false);
+ if (ret != EIDSP_OK) {
+ ei_printf("ERR: cmvnw failed (%d)\n", ret);
+ EIDSP_ERR(ret);
+ }
+
+ output_matrix->cols = out_matrix_size.rows * out_matrix_size.cols;
+ output_matrix->rows = 1;
+
+ return EIDSP_OK;
+}
+
+
+static int extract_mfcc_run_slice(signal_t *signal, matrix_t *output_matrix, ei_dsp_config_mfcc_t *config, const float sampling_frequency, matrix_size_t *matrix_size_out, int implementation_version) {
+ uint32_t frequency = (uint32_t)sampling_frequency;
+
+ int x;
+
+ // calculate the size of the MFCC matrix for this slice
+ matrix_size_t out_matrix_size =
+ speechpy::feature::calculate_mfcc_buffer_size(
+ signal->total_length, frequency, config->frame_length, config->frame_stride, config->num_cepstral,
+ implementation_version);
+
+ // we roll the output matrix back so we have room at the end...
+ x = numpy::roll(output_matrix->buffer, output_matrix->rows * output_matrix->cols,
+ -(out_matrix_size.rows * out_matrix_size.cols));
+ if (x != EIDSP_OK) {
+ EIDSP_ERR(x);
+ }
+
+ // slice in the output matrix to write to
+ // the offset in the classification matrix here is always at the end
+ size_t output_matrix_offset = (output_matrix->rows * output_matrix->cols) -
+ (out_matrix_size.rows * out_matrix_size.cols);
+
+ matrix_t output_matrix_slice(out_matrix_size.rows, out_matrix_size.cols, output_matrix->buffer + output_matrix_offset);
+
+ // and run the MFCC extraction
+ x = speechpy::feature::mfcc(&output_matrix_slice, signal,
+ frequency, config->frame_length, config->frame_stride, config->num_cepstral, config->num_filters, config->fft_length,
+ config->low_frequency, config->high_frequency, true, implementation_version);
+ if (x != EIDSP_OK) {
+ ei_printf("ERR: MFCC failed (%d)\n", x);
+ EIDSP_ERR(x);
+ }
+
+ matrix_size_out->rows += out_matrix_size.rows;
+ if (out_matrix_size.cols > 0) {
+ matrix_size_out->cols = out_matrix_size.cols;
+ }
+
+ return EIDSP_OK;
+}
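+
+/*
+ * FIFO note for the slice helper above (derived from the roll logic): if a
+ * slice yields an R x C feature block, numpy::roll() first shifts the whole
+ * output buffer left by R*C values and the new block is then written into the
+ * freed space at the end, so output_matrix always holds the most recent
+ * window of features.
+ */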
+
+__attribute__((unused)) int extract_mfcc_per_slice_features(signal_t *signal, matrix_t *output_matrix, void *config_ptr, const float sampling_frequency, matrix_size_t *matrix_size_out) {
+#if defined(__cplusplus) && EI_C_LINKAGE == 1
+ ei_printf("ERR: Continuous audio is not supported when EI_C_LINKAGE is defined\n");
+ EIDSP_ERR(EIDSP_NOT_SUPPORTED);
+#else
+
+ ei_dsp_config_mfcc_t config = *((ei_dsp_config_mfcc_t*)config_ptr);
+
+ if (config.axes != 1) {
+ EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH);
+ }
+
+ if((config.implementation_version == 0) || (config.implementation_version > 4)) {
+ EIDSP_ERR(EIDSP_BLOCK_VERSION_INCORRECT);
+ }
+
+ if (signal->total_length == 0) {
+ EIDSP_ERR(EIDSP_PARAMETER_INVALID);
+ }
+
+ const uint32_t frequency = static_cast<uint32_t>(sampling_frequency);
+
+ // preemphasis class to preprocess the audio...
+ class speechpy::processing::preemphasis pre(signal, config.pre_shift, config.pre_cof, false);
+ preemphasis = &pre;
+
+ signal_t preemphasized_audio_signal;
+ preemphasized_audio_signal.total_length = signal->total_length;
+ preemphasized_audio_signal.get_data = &preemphasized_audio_signal_get_data;
+
+ // Go from the time (e.g. 0.25 seconds) to the number of frames, based on the sampling frequency
+ const size_t frame_length_values = frequency * config.frame_length;
+ const size_t frame_stride_values = frequency * config.frame_stride;
+ const int frame_overlap_values = static_cast<int>(frame_length_values) - static_cast<int>(frame_stride_values);
+
+ if (frame_overlap_values < 0) {
+ ei_printf("ERR: frame_length (");
+ ei_printf_float(config.frame_length);
+ ei_printf(") cannot be lower than frame_stride (");
+ ei_printf_float(config.frame_stride);
+ ei_printf(") for continuous classification\n");
+ EIDSP_ERR(EIDSP_PARAMETER_INVALID);
+ }
+
+ int x;
+
+ // have current frame, but wrong size? then free
+ if (ei_dsp_cont_current_frame && ei_dsp_cont_current_frame_size != frame_length_values) {
+ ei_free(ei_dsp_cont_current_frame);
+ ei_dsp_cont_current_frame = nullptr;
+ }
+
+ int implementation_version = config.implementation_version;
+
+ // this is the offset in the signal from which we'll work
+ size_t offset_in_signal = 0;
+
+ if (!ei_dsp_cont_current_frame) {
+ ei_dsp_cont_current_frame = (float*)ei_calloc(frame_length_values * sizeof(float), 1);
+ if (!ei_dsp_cont_current_frame) {
+ EIDSP_ERR(EIDSP_OUT_OF_MEM);
+ }
+ ei_dsp_cont_current_frame_size = frame_length_values;
+ ei_dsp_cont_current_frame_ix = 0;
+ }
+
+
+ if ((frame_length_values) > preemphasized_audio_signal.total_length + ei_dsp_cont_current_frame_ix) {
+ ei_printf("ERR: frame_length (%d) cannot be larger than signal's total length (%d) for continuous classification\n",
+ (int)frame_length_values, (int)preemphasized_audio_signal.total_length + ei_dsp_cont_current_frame_ix);
+ EIDSP_ERR(EIDSP_PARAMETER_INVALID);
+ }
+
+ matrix_size_out->rows = 0;
+ matrix_size_out->cols = 0;
+
+ // for continuous use v2 stack frame calculations
+ if (implementation_version == 1) {
+ implementation_version = 2;
+ }
+
+ if (ei_dsp_cont_current_frame_ix > (int)ei_dsp_cont_current_frame_size) {
+ ei_printf("ERR: ei_dsp_cont_current_frame_ix is larger than frame size (ix=%d size=%d)\n",
+ ei_dsp_cont_current_frame_ix, (int)ei_dsp_cont_current_frame_size);
+ EIDSP_ERR(EIDSP_PARAMETER_INVALID);
+ }
+
+ // if we still have some data left over from the previous run
+ while (ei_dsp_cont_current_frame_ix > 0) {
+ // then from the current frame we need to read `frame_length_values - ei_dsp_cont_current_frame_ix`
+ // starting at offset 0
+ x = preemphasized_audio_signal.get_data(0, frame_length_values - ei_dsp_cont_current_frame_ix, ei_dsp_cont_current_frame + ei_dsp_cont_current_frame_ix);
+ if (x != EIDSP_OK) {
+ EIDSP_ERR(x);
+ }
+
+ // now ei_dsp_cont_current_frame is complete
+ signal_t frame_signal;
+ x = numpy::signal_from_buffer(ei_dsp_cont_current_frame, frame_length_values, &frame_signal);
+ if (x != EIDSP_OK) {
+ EIDSP_ERR(x);
+ }
+
+ x = extract_mfcc_run_slice(&frame_signal, output_matrix, &config, sampling_frequency, matrix_size_out, implementation_version);
+ if (x != EIDSP_OK) {
+ EIDSP_ERR(x);
+ }
+
+ // if there's overlap between frames we roll through
+ if (frame_stride_values > 0) {
+ numpy::roll(ei_dsp_cont_current_frame, frame_length_values, -frame_stride_values);
+ }
+
+ ei_dsp_cont_current_frame_ix -= frame_stride_values;
+ }
+
+ if (ei_dsp_cont_current_frame_ix < 0) {
+ offset_in_signal = -ei_dsp_cont_current_frame_ix;
+ ei_dsp_cont_current_frame_ix = 0;
+ }
+
+ if (offset_in_signal >= signal->total_length) {
+ offset_in_signal -= signal->total_length;
+ return EIDSP_OK;
+ }
+
+ // now... we need to discard part of the signal...
+ SignalWithRange signal_with_range(&preemphasized_audio_signal, offset_in_signal, signal->total_length);
+
+ signal_t *range_signal = signal_with_range.get_signal();
+ size_t range_signal_orig_length = range_signal->total_length;
+
+ // then we'll just go through normal processing of the signal:
+ x = extract_mfcc_run_slice(range_signal, output_matrix, &config, sampling_frequency, matrix_size_out, implementation_version);
+ if (x != EIDSP_OK) {
+ EIDSP_ERR(x);
+ }
+
+ // Make sure v1 models are reset to the original length
+ range_signal->total_length = range_signal_orig_length;
+
+ // update offset
+ int length_of_signal_used = speechpy::processing::calculate_signal_used(range_signal->total_length, sampling_frequency,
+ config.frame_length, config.frame_stride, false, implementation_version);
+ offset_in_signal += length_of_signal_used;
+
+ // see what's left?
+ int bytes_left_end_of_frame = signal->total_length - offset_in_signal;
+ bytes_left_end_of_frame += frame_overlap_values;
+
+ if (bytes_left_end_of_frame > 0) {
+ // then read that into the ei_dsp_cont_current_frame buffer
+ x = preemphasized_audio_signal.get_data(
+ (preemphasized_audio_signal.total_length - bytes_left_end_of_frame),
+ bytes_left_end_of_frame,
+ ei_dsp_cont_current_frame);
+ if (x != EIDSP_OK) {
+ EIDSP_ERR(x);
+ }
+ }
+
+ ei_dsp_cont_current_frame_ix = bytes_left_end_of_frame;
+
+ preemphasis = nullptr;
+
+ return EIDSP_OK;
+#endif
+}
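+
+/*
+ * Worked example for the frame stitching above (illustrative numbers): at
+ * 16 kHz with frame_length == 0.025 and frame_stride == 0.010,
+ * frame_length_values == 400, frame_stride_values == 160 and
+ * frame_overlap_values == 240; after a slice is processed, the trailing
+ * 240-plus-remainder values are copied into ei_dsp_cont_current_frame and
+ * consumed at the start of the next call, so no audio between slices is lost.
+ */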
+
+__attribute__((unused)) int extract_spectrogram_features(signal_t *signal, matrix_t *output_matrix, void *config_ptr, const float sampling_frequency) {
+ ei_dsp_config_spectrogram_t config = *((ei_dsp_config_spectrogram_t*)config_ptr);
+
+ if (config.axes != 1) {
+ EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH);
+ }
+
+ if (signal->total_length == 0) {
+ EIDSP_ERR(EIDSP_PARAMETER_INVALID);
+ }
+
+ const uint32_t frequency = static_cast<uint32_t>(sampling_frequency);
+
+ // calculate the size of the MFE matrix
+ matrix_size_t out_matrix_size =
+ speechpy::feature::calculate_mfe_buffer_size(
+ signal->total_length, frequency, config.frame_length, config.frame_stride, config.fft_length / 2 + 1,
+ config.implementation_version);
+ /* Only throw a size mismatch error when the calculated buffer doesn't fit, for continuous inferencing */
+ if (out_matrix_size.rows * out_matrix_size.cols > output_matrix->rows * output_matrix->cols) {
+ ei_printf("out_matrix = %dx%d\n", (int)output_matrix->rows, (int)output_matrix->cols);
+ ei_printf("calculated size = %dx%d\n", (int)out_matrix_size.rows, (int)out_matrix_size.cols);
+ EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH);
+ }
+
+ output_matrix->rows = out_matrix_size.rows;
+ output_matrix->cols = out_matrix_size.cols;
+
+ int ret = speechpy::feature::spectrogram(output_matrix, signal,
+ sampling_frequency, config.frame_length, config.frame_stride, config.fft_length, config.implementation_version);
+ if (ret != EIDSP_OK) {
+ ei_printf("ERR: Spectrogram failed (%d)\n", ret);
+ EIDSP_ERR(ret);
+ }
+
+ if (config.implementation_version < 3) {
+ ret = numpy::normalize(output_matrix);
+ if (ret != EIDSP_OK) {
+ EIDSP_ERR(ret);
+ }
+ }
+ else {
+ // normalization
+ ret = speechpy::processing::spectrogram_normalization(output_matrix, config.noise_floor_db, config.implementation_version == 3);
+ if (ret != EIDSP_OK) {
+ ei_printf("ERR: normalization failed (%d)\n", ret);
+ EIDSP_ERR(ret);
+ }
+ }
+
+ output_matrix->cols = out_matrix_size.rows * out_matrix_size.cols;
+ output_matrix->rows = 1;
+
+ return EIDSP_OK;
+}
+
+
+static int extract_spectrogram_run_slice(signal_t *signal, matrix_t *output_matrix, ei_dsp_config_spectrogram_t *config, const float sampling_frequency, matrix_size_t *matrix_size_out) {
+ uint32_t frequency = (uint32_t)sampling_frequency;
+
+ int x;
+
+ // calculate the size of the spectrogram matrix
+ matrix_size_t out_matrix_size =
+ speechpy::feature::calculate_mfe_buffer_size(
+ signal->total_length, frequency, config->frame_length, config->frame_stride, config->fft_length / 2 + 1,
+ config->implementation_version);
+
+ // we roll the output matrix back so we have room at the end...
+ x = numpy::roll(output_matrix->buffer, output_matrix->rows * output_matrix->cols,
+ -(out_matrix_size.rows * out_matrix_size.cols));
+ if (x != EIDSP_OK) {
+ if (preemphasis) {
+ delete preemphasis;
+ }
+ EIDSP_ERR(x);
+ }
+
+ // slice in the output matrix to write to
+ // the offset in the classification matrix here is always at the end
+ size_t output_matrix_offset = (output_matrix->rows * output_matrix->cols) -
+ (out_matrix_size.rows * out_matrix_size.cols);
+
+ matrix_t output_matrix_slice(out_matrix_size.rows, out_matrix_size.cols, output_matrix->buffer + output_matrix_offset);
+
+ // and run the spectrogram extraction
+ int ret = speechpy::feature::spectrogram(&output_matrix_slice, signal,
+ frequency, config->frame_length, config->frame_stride, config->fft_length, config->implementation_version);
+
+ if (ret != EIDSP_OK) {
+ ei_printf("ERR: Spectrogram failed (%d)\n", ret);
+ EIDSP_ERR(ret);
+ }
+
+ matrix_size_out->rows += out_matrix_size.rows;
+ if (out_matrix_size.cols > 0) {
+ matrix_size_out->cols = out_matrix_size.cols;
+ }
+
+ return EIDSP_OK;
+}
+
+__attribute__((unused)) int extract_spectrogram_per_slice_features(signal_t *signal, matrix_t *output_matrix, void *config_ptr, const float sampling_frequency, matrix_size_t *matrix_size_out) {
+#if defined(__cplusplus) && EI_C_LINKAGE == 1
+ ei_printf("ERR: Continuous audio is not supported when EI_C_LINKAGE is defined\n");
+ EIDSP_ERR(EIDSP_NOT_SUPPORTED);
+#else
+
+ ei_dsp_config_spectrogram_t config = *((ei_dsp_config_spectrogram_t*)config_ptr);
+
+ static bool first_run = false;
+
+ if (config.axes != 1) {
+ EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH);
+ }
+
+ if (signal->total_length == 0) {
+ EIDSP_ERR(EIDSP_PARAMETER_INVALID);
+ }
+
+ const uint32_t frequency = static_cast<uint32_t>(sampling_frequency);
+
+ /* Fake an extra frame_length for the stack frame calculations. There, 1 frame_length is always
+ subtracted and therefore never used. But skip the first slice to fit the feature_matrix
+ buffer */
+ if(config.implementation_version < 2) {
+
+ if (first_run == true) {
+ signal->total_length += (size_t)(config.frame_length * (float)frequency);
+ }
+
+ first_run = true;
+ }
+
+ // Go from the time (e.g. 0.25 seconds to number of frames based on freq)
+ const size_t frame_length_values = frequency * config.frame_length;
+ const size_t frame_stride_values = frequency * config.frame_stride;
+ const int frame_overlap_values = static_cast<int>(frame_length_values) - static_cast<int>(frame_stride_values);
+
+ if (frame_overlap_values < 0) {
+ ei_printf("ERR: frame_length (");
+ ei_printf_float(config.frame_length);
+ ei_printf(") cannot be lower than frame_stride (");
+ ei_printf_float(config.frame_stride);
+ ei_printf(") for continuous classification\n");
+ EIDSP_ERR(EIDSP_PARAMETER_INVALID);
+ }
+
+ if (frame_length_values > signal->total_length) {
+ ei_printf("ERR: frame_length (%d) cannot be larger than signal's total length (%d) for continuous classification\n",
+ (int)frame_length_values, (int)signal->total_length);
+ EIDSP_ERR(EIDSP_PARAMETER_INVALID);
+ }
+
+ int x;
+
+ // have current frame, but wrong size? then free
+ if (ei_dsp_cont_current_frame && ei_dsp_cont_current_frame_size != frame_length_values) {
+ ei_free(ei_dsp_cont_current_frame);
+ ei_dsp_cont_current_frame = nullptr;
+ }
+
+ if (!ei_dsp_cont_current_frame) {
+ ei_dsp_cont_current_frame = (float*)ei_calloc(frame_length_values * sizeof(float), 1);
+ if (!ei_dsp_cont_current_frame) {
+ EIDSP_ERR(EIDSP_OUT_OF_MEM);
+ }
+ ei_dsp_cont_current_frame_size = frame_length_values;
+ ei_dsp_cont_current_frame_ix = 0;
+ }
+
+ matrix_size_out->rows = 0;
+ matrix_size_out->cols = 0;
+
+ // this is the offset in the signal from which we'll work
+ size_t offset_in_signal = 0;
+
+ if (ei_dsp_cont_current_frame_ix > (int)ei_dsp_cont_current_frame_size) {
+ ei_printf("ERR: ei_dsp_cont_current_frame_ix is larger than frame size\n");
+ EIDSP_ERR(EIDSP_PARAMETER_INVALID);
+ }
+
+ // if we still have some data left over from the previous run
+ while (ei_dsp_cont_current_frame_ix > 0) {
+ // then from the current frame we need to read `frame_length_values - ei_dsp_cont_current_frame_ix`
+ // starting at offset 0
+ x = signal->get_data(0, frame_length_values - ei_dsp_cont_current_frame_ix, ei_dsp_cont_current_frame + ei_dsp_cont_current_frame_ix);
+ if (x != EIDSP_OK) {
+ EIDSP_ERR(x);
+ }
+
+ // now ei_dsp_cont_current_frame is complete
+ signal_t frame_signal;
+ x = numpy::signal_from_buffer(ei_dsp_cont_current_frame, frame_length_values, &frame_signal);
+ if (x != EIDSP_OK) {
+ EIDSP_ERR(x);
+ }
+
+ x = extract_spectrogram_run_slice(&frame_signal, output_matrix, &config, sampling_frequency, matrix_size_out);
+ if (x != EIDSP_OK) {
+ EIDSP_ERR(x);
+ }
+
+ // if there's overlap between frames we roll through
+ if (frame_stride_values > 0) {
+ numpy::roll(ei_dsp_cont_current_frame, frame_length_values, -frame_stride_values);
+ }
+
+ ei_dsp_cont_current_frame_ix -= frame_stride_values;
+ }
+
+ if (ei_dsp_cont_current_frame_ix < 0) {
+ offset_in_signal = -ei_dsp_cont_current_frame_ix;
+ ei_dsp_cont_current_frame_ix = 0;
+ }
+
+ if (offset_in_signal >= signal->total_length) {
+ offset_in_signal -= signal->total_length;
+ return EIDSP_OK;
+ }
+
+ // now... we need to discard part of the signal...
+ SignalWithRange signal_with_range(signal, offset_in_signal, signal->total_length);
+
+ signal_t *range_signal = signal_with_range.get_signal();
+ size_t range_signal_orig_length = range_signal->total_length;
+
+ // then we'll just go through normal processing of the signal:
+ x = extract_spectrogram_run_slice(range_signal, output_matrix, &config, sampling_frequency, matrix_size_out);
+ if (x != EIDSP_OK) {
+ EIDSP_ERR(x);
+ }
+
+ // update offset
+ int length_of_signal_used = speechpy::processing::calculate_signal_used(range_signal->total_length, sampling_frequency,
+ config.frame_length, config.frame_stride, false, config.implementation_version);
+ offset_in_signal += length_of_signal_used;
+
+ // not sure why this is being manipulated...
+ range_signal->total_length = range_signal_orig_length;
+
+ // see what's left?
+ int bytes_left_end_of_frame = signal->total_length - offset_in_signal;
+ bytes_left_end_of_frame += frame_overlap_values;
+
+ if (bytes_left_end_of_frame > 0) {
+ // then read that into the ei_dsp_cont_current_frame buffer
+ x = signal->get_data(
+ (signal->total_length - bytes_left_end_of_frame),
+ bytes_left_end_of_frame,
+ ei_dsp_cont_current_frame);
+ if (x != EIDSP_OK) {
+ EIDSP_ERR(x);
+ }
+ }
+
+ ei_dsp_cont_current_frame_ix = bytes_left_end_of_frame;
+
+ if (config.implementation_version < 2) {
+ if (first_run == true) {
+ signal->total_length -= (size_t)(config.frame_length * (float)frequency);
+ }
+ }
+
+ return EIDSP_OK;
+#endif
+}
+
+
+__attribute__((unused)) int extract_mfe_features(signal_t *signal, matrix_t *output_matrix, void *config_ptr, const float sampling_frequency) {
+ ei_dsp_config_mfe_t config = *((ei_dsp_config_mfe_t*)config_ptr);
+
+ if (config.axes != 1) {
+ EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH);
+ }
+
+ if (signal->total_length == 0) {
+ EIDSP_ERR(EIDSP_PARAMETER_INVALID);
+ }
+
+ if ((config.implementation_version == 0) || (config.implementation_version > 4)) {
+ EIDSP_ERR(EIDSP_BLOCK_VERSION_INCORRECT);
+ }
+
+ const uint32_t frequency = static_cast<uint32_t>(sampling_frequency);
+
+ signal_t preemphasized_audio_signal;
+
+ // before version 3 we did not have preemphasis
+ if (config.implementation_version < 3) {
+ preemphasis = nullptr;
+
+ preemphasized_audio_signal.total_length = signal->total_length;
+ preemphasized_audio_signal.get_data = signal->get_data;
+ }
+ else {
+ // preemphasis class to preprocess the audio...
+ class speechpy::processing::preemphasis *pre = new class speechpy::processing::preemphasis(signal, 1, 0.98f, true);
+ preemphasis = pre;
+
+ preemphasized_audio_signal.total_length = signal->total_length;
+ preemphasized_audio_signal.get_data = &preemphasized_audio_signal_get_data;
+ }
+
+ // calculate the size of the MFE matrix
+ matrix_size_t out_matrix_size =
+ speechpy::feature::calculate_mfe_buffer_size(
+ preemphasized_audio_signal.total_length, frequency, config.frame_length, config.frame_stride, config.num_filters,
+ config.implementation_version);
+ /* Only throw a size mismatch error when the calculated buffer doesn't fit, for continuous inferencing */
+ if (out_matrix_size.rows * out_matrix_size.cols > output_matrix->rows * output_matrix->cols) {
+ ei_printf("out_matrix = %dx%d\n", (int)output_matrix->rows, (int)output_matrix->cols);
+ ei_printf("calculated size = %dx%d\n", (int)out_matrix_size.rows, (int)out_matrix_size.cols);
+ if (preemphasis) {
+ delete preemphasis;
+ }
+ EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH);
+ }
+
+ output_matrix->rows = out_matrix_size.rows;
+ output_matrix->cols = out_matrix_size.cols;
+
+ int ret;
+ // This probably seems incorrect, but the mfe func can actually handle all versions
+ // There's a subtle issue with cmvn and v2, not worth tracking down
+ // So for v2 and v1, we'll just use the old code
+ // (the new mfe does away with the intermediate filterbank matrix)
+ if (config.implementation_version > 2) {
+ ret = speechpy::feature::mfe(output_matrix, nullptr, &preemphasized_audio_signal,
+ frequency, config.frame_length, config.frame_stride, config.num_filters, config.fft_length,
+ config.low_frequency, config.high_frequency, config.implementation_version);
+ } else {
+ ret = speechpy::feature::mfe_v3(output_matrix, nullptr, &preemphasized_audio_signal,
+ frequency, config.frame_length, config.frame_stride, config.num_filters, config.fft_length,
+ config.low_frequency, config.high_frequency, config.implementation_version);
+ }
+
+ if (preemphasis) {
+ delete preemphasis;
+ }
+ if (ret != EIDSP_OK) {
+ ei_printf("ERR: MFE failed (%d)\n", ret);
+ EIDSP_ERR(ret);
+ }
+
+ if (config.implementation_version < 3) {
+ // cepstral mean and variance normalization
+ ret = speechpy::processing::cmvnw(output_matrix, config.win_size, false, true);
+ if (ret != EIDSP_OK) {
+ ei_printf("ERR: cmvnw failed (%d)\n", ret);
+ EIDSP_ERR(ret);
+ }
+ }
+ else {
+ // normalization
+ ret = speechpy::processing::mfe_normalization(output_matrix, config.noise_floor_db);
+ if (ret != EIDSP_OK) {
+ ei_printf("ERR: normalization failed (%d)\n", ret);
+ EIDSP_ERR(ret);
+ }
+ }
+
+ output_matrix->cols = out_matrix_size.rows * out_matrix_size.cols;
+ output_matrix->rows = 1;
+
+ return EIDSP_OK;
+}
+
+static int extract_mfe_run_slice(signal_t *signal, matrix_t *output_matrix, ei_dsp_config_mfe_t *config, const float sampling_frequency, matrix_size_t *matrix_size_out) {
+ uint32_t frequency = (uint32_t)sampling_frequency;
+
+ int x;
+
+ // calculate the size of the MFE matrix for this slice
+ matrix_size_t out_matrix_size =
+ speechpy::feature::calculate_mfe_buffer_size(
+ signal->total_length, frequency, config->frame_length, config->frame_stride, config->num_filters,
+ config->implementation_version);
+
+ // we roll the output matrix back so we have room at the end...
+ x = numpy::roll(output_matrix->buffer, output_matrix->rows * output_matrix->cols,
+ -(out_matrix_size.rows * out_matrix_size.cols));
+ if (x != EIDSP_OK) {
+ EIDSP_ERR(x);
+ }
+
+ // slice in the output matrix to write to
+ // the offset in the classification matrix here is always at the end
+ size_t output_matrix_offset = (output_matrix->rows * output_matrix->cols) -
+ (out_matrix_size.rows * out_matrix_size.cols);
+
+ matrix_t output_matrix_slice(out_matrix_size.rows, out_matrix_size.cols, output_matrix->buffer + output_matrix_offset);
+
+ // and run the MFE extraction
+ // This probably seems incorrect, but the mfe func can actually handle all versions
+ // There's a subtle issue with cmvn and v2, not worth tracking down
+ // So for v2 and v1, we'll just use the old code
+ // (the new mfe does away with the intermediate filterbank matrix)
+ if (config->implementation_version > 2) {
+ x = speechpy::feature::mfe(&output_matrix_slice, nullptr, signal,
+ frequency, config->frame_length, config->frame_stride, config->num_filters, config->fft_length,
+ config->low_frequency, config->high_frequency, config->implementation_version);
+ } else {
+ x = speechpy::feature::mfe_v3(&output_matrix_slice, nullptr, signal,
+ frequency, config->frame_length, config->frame_stride, config->num_filters, config->fft_length,
+ config->low_frequency, config->high_frequency, config->implementation_version);
+ }
+ if (x != EIDSP_OK) {
+ ei_printf("ERR: MFE failed (%d)\n", x);
+ EIDSP_ERR(x);
+ }
+
+ matrix_size_out->rows += out_matrix_size.rows;
+ if (out_matrix_size.cols > 0) {
+ matrix_size_out->cols = out_matrix_size.cols;
+ }
+
+ return EIDSP_OK;
+}
+
+__attribute__((unused)) int extract_mfe_per_slice_features(signal_t *signal, matrix_t *output_matrix, void *config_ptr, const float sampling_frequency, matrix_size_t *matrix_size_out) {
+#if defined(__cplusplus) && EI_C_LINKAGE == 1
+ ei_printf("ERR: Continuous audio is not supported when EI_C_LINKAGE is defined\n");
+ EIDSP_ERR(EIDSP_NOT_SUPPORTED);
+#else
+
+ ei_dsp_config_mfe_t config = *((ei_dsp_config_mfe_t*)config_ptr);
+
+ // signal is already the right size,
+ // output matrix is not the right size, but we can start writing at offset 0 and then it's OK too
+
+ static bool first_run = false;
+
+ if (config.axes != 1) {
+ EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH);
+ }
+
+ if ((config.implementation_version == 0) || (config.implementation_version > 4)) {
+ EIDSP_ERR(EIDSP_BLOCK_VERSION_INCORRECT);
+ }
+
+ if (signal->total_length == 0) {
+ EIDSP_ERR(EIDSP_PARAMETER_INVALID);
+ }
+
+ const uint32_t frequency = static_cast<uint32_t>(sampling_frequency);
+
+ // Fake an extra frame_length for the stack frame calculations. There, 1 frame_length is always
+ // subtracted and therefore never used. But skip the first slice to fit the feature_matrix
+ // buffer
+ if (config.implementation_version == 1) {
+ if (first_run == true) {
+ signal->total_length += (size_t)(config.frame_length * (float)frequency);
+ }
+
+ first_run = true;
+ }
+
+ // ok all setup, let's construct the signal (with preemphasis for impl version >= 3)
+ signal_t preemphasized_audio_signal;
+
+ // before version 3 we did not have preemphasis
+ if (config.implementation_version < 3) {
+ preemphasis = nullptr;
+ preemphasized_audio_signal.total_length = signal->total_length;
+ preemphasized_audio_signal.get_data = signal->get_data;
+ }
+ else {
+ // preemphasis class to preprocess the audio...
+ class speechpy::processing::preemphasis *pre = new class speechpy::processing::preemphasis(signal, 1, 0.98f, true);
+ preemphasis = pre;
+ preemphasized_audio_signal.total_length = signal->total_length;
+ preemphasized_audio_signal.get_data = &preemphasized_audio_signal_get_data;
+ }
+
+ // Go from the time (e.g. 0.25 seconds) to the number of frames, based on the sampling frequency
+ const size_t frame_length_values = frequency * config.frame_length;
+ const size_t frame_stride_values = frequency * config.frame_stride;
+ const int frame_overlap_values = static_cast<int>(frame_length_values) - static_cast<int>(frame_stride_values);
+
+ if (frame_overlap_values < 0) {
+ ei_printf("ERR: frame_length (");
+ ei_printf_float(config.frame_length);
+ ei_printf(") cannot be lower than frame_stride (");
+ ei_printf_float(config.frame_stride);
+ ei_printf(") for continuous classification\n");
+
+ if (preemphasis) {
+ delete preemphasis;
+ }
+ EIDSP_ERR(EIDSP_PARAMETER_INVALID);
+ }
+
+ if (frame_length_values > preemphasized_audio_signal.total_length) {
+ ei_printf("ERR: frame_length (%d) cannot be larger than signal's total length (%d) for continuous classification\n",
+ (int)frame_length_values, (int)preemphasized_audio_signal.total_length);
+ if (preemphasis) {
+ delete preemphasis;
+ }
+ EIDSP_ERR(EIDSP_PARAMETER_INVALID);
+ }
+
+ int x;
+
+ // have current frame, but wrong size? then free
+ if (ei_dsp_cont_current_frame && ei_dsp_cont_current_frame_size != frame_length_values) {
+ ei_free(ei_dsp_cont_current_frame);
+ ei_dsp_cont_current_frame = nullptr;
+ }
+
+ if (!ei_dsp_cont_current_frame) {
+ ei_dsp_cont_current_frame = (float*)ei_calloc(frame_length_values * sizeof(float), 1);
+ if (!ei_dsp_cont_current_frame) {
+ if (preemphasis) {
+ delete preemphasis;
+ }
+ EIDSP_ERR(EIDSP_OUT_OF_MEM);
+ }
+ ei_dsp_cont_current_frame_size = frame_length_values;
+ ei_dsp_cont_current_frame_ix = 0;
+ }
+
+ matrix_size_out->rows = 0;
+ matrix_size_out->cols = 0;
+
+ // this is the offset in the signal from which we'll work
+ size_t offset_in_signal = 0;
+
+ if (ei_dsp_cont_current_frame_ix > (int)ei_dsp_cont_current_frame_size) {
+ ei_printf("ERR: ei_dsp_cont_current_frame_ix is larger than frame size\n");
+ if (preemphasis) {
+ delete preemphasis;
+ }
+ EIDSP_ERR(EIDSP_PARAMETER_INVALID);
+ }
+
+ // if we still have some data left over from the previous run
+ while (ei_dsp_cont_current_frame_ix > 0) {
+ // then from the current frame we need to read `frame_length_values - ei_dsp_cont_current_frame_ix`
+ // starting at offset 0
+ x = preemphasized_audio_signal.get_data(0, frame_length_values - ei_dsp_cont_current_frame_ix, ei_dsp_cont_current_frame + ei_dsp_cont_current_frame_ix);
+ if (x != EIDSP_OK) {
+ if (preemphasis) {
+ delete preemphasis;
+ }
+ EIDSP_ERR(x);
+ }
+
+ // now ei_dsp_cont_current_frame is complete
+ signal_t frame_signal;
+ x = numpy::signal_from_buffer(ei_dsp_cont_current_frame, frame_length_values, &frame_signal);
+ if (x != EIDSP_OK) {
+ if (preemphasis) {
+ delete preemphasis;
+ }
+ EIDSP_ERR(x);
+ }
+
+ x = extract_mfe_run_slice(&frame_signal, output_matrix, &config, sampling_frequency, matrix_size_out);
+ if (x != EIDSP_OK) {
+ if (preemphasis) {
+ delete preemphasis;
+ }
+ EIDSP_ERR(x);
+ }
+
+ // if there's overlap between frames we roll through
+ if (frame_stride_values > 0) {
+ numpy::roll(ei_dsp_cont_current_frame, frame_length_values, -(int)frame_stride_values);
+ }
+
+ ei_dsp_cont_current_frame_ix -= frame_stride_values;
+ }
+
+ if (ei_dsp_cont_current_frame_ix < 0) {
+ offset_in_signal = -ei_dsp_cont_current_frame_ix;
+ ei_dsp_cont_current_frame_ix = 0;
+ }
+
+ if (offset_in_signal >= signal->total_length) {
+ if (preemphasis) {
+ delete preemphasis;
+ }
+ offset_in_signal -= signal->total_length;
+ return EIDSP_OK;
+ }
+
+ // now... we need to discard part of the signal...
+ SignalWithRange signal_with_range(&preemphasized_audio_signal, offset_in_signal, signal->total_length);
+
+ signal_t *range_signal = signal_with_range.get_signal();
+ size_t range_signal_orig_length = range_signal->total_length;
+
+ // then we'll just go through normal processing of the signal:
+ x = extract_mfe_run_slice(range_signal, output_matrix, &config, sampling_frequency, matrix_size_out);
+ if (x != EIDSP_OK) {
+ if (preemphasis) {
+ delete preemphasis;
+ }
+ EIDSP_ERR(x);
+ }
+
+ // update offset
+ int length_of_signal_used = speechpy::processing::calculate_signal_used(range_signal->total_length, sampling_frequency,
+ config.frame_length, config.frame_stride, false, config.implementation_version);
+ offset_in_signal += length_of_signal_used;
+
+ // not sure why this is being manipulated...
+ range_signal->total_length = range_signal_orig_length;
+
+ // see what's left?
+ int bytes_left_end_of_frame = signal->total_length - offset_in_signal;
+ bytes_left_end_of_frame += frame_overlap_values;
+
+ if (bytes_left_end_of_frame > 0) {
+ // then read that into the ei_dsp_cont_current_frame buffer
+ x = preemphasized_audio_signal.get_data(
+ (preemphasized_audio_signal.total_length - bytes_left_end_of_frame),
+ bytes_left_end_of_frame,
+ ei_dsp_cont_current_frame);
+ if (x != EIDSP_OK) {
+ if (preemphasis) {
+ delete preemphasis;
+ }
+ EIDSP_ERR(x);
+ }
+ }
+
+ ei_dsp_cont_current_frame_ix = bytes_left_end_of_frame;
+
+
+ if (config.implementation_version == 1) {
+ if (first_run == true) {
+ signal->total_length -= (size_t)(config.frame_length * (float)frequency);
+ }
+ }
+
+ if (preemphasis) {
+ delete preemphasis;
+ }
+
+ return EIDSP_OK;
+#endif
+}
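+
+/*
+ * Illustrative usage sketch (added; not part of the original SDK). Shows how a
+ * continuous audio loop is expected to drive extract_mfe_per_slice_features();
+ * `slice_signal`, `mfe_config` and `SLICE_COUNT` are hypothetical placeholders.
+ *
+ * matrix_size_t out_size;
+ * for (int i = 0; i < SLICE_COUNT; i++) {
+ * // each call consumes one slice; leftover samples are carried over
+ * // in ei_dsp_cont_current_frame between invocations
+ * int r = extract_mfe_per_slice_features(&slice_signal, &output_matrix,
+ * &mfe_config, 16000.0f, &out_size);
+ * if (r != EIDSP_OK) { break; }
+ * }
+ * // reset the carried-over state once the loop ends
+ * ei_dsp_clear_continuous_audio_state();
+ */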
+
+__attribute__((unused)) int extract_image_features(signal_t *signal, matrix_t *output_matrix, void *config_ptr, const float frequency) {
+ ei_dsp_config_image_t config = *((ei_dsp_config_image_t*)config_ptr);
+
+ int16_t channel_count = strcmp(config.channels, "Grayscale") == 0 ? 1 : 3;
+
+ size_t output_ix = 0;
+
+#if defined(EI_DSP_IMAGE_BUFFER_STATIC_SIZE)
+ const size_t page_size = EI_DSP_IMAGE_BUFFER_STATIC_SIZE;
+#else
+ const size_t page_size = 1024;
+#endif
+
+ // buffered read from the signal
+ size_t bytes_left = signal->total_length;
+ for (size_t ix = 0; ix < signal->total_length; ix += page_size) {
+ size_t elements_to_read = bytes_left > page_size ? page_size : bytes_left;
+
+#if defined(EI_DSP_IMAGE_BUFFER_STATIC_SIZE)
+ matrix_t input_matrix(elements_to_read, config.axes, ei_dsp_image_buffer);
+#else
+ matrix_t input_matrix(elements_to_read, config.axes);
+#endif
+ if (!input_matrix.buffer) {
+ EIDSP_ERR(EIDSP_OUT_OF_MEM);
+ }
+ signal->get_data(ix, elements_to_read, input_matrix.buffer);
+
+ for (size_t jx = 0; jx < elements_to_read; jx++) {
+ uint32_t pixel = static_cast<uint32_t>(input_matrix.buffer[jx]);
+
+ // rgb to 0..1
+ float r = static_cast<float>(pixel >> 16 & 0xff) / 255.0f;
+ float g = static_cast<float>(pixel >> 8 & 0xff) / 255.0f;
+ float b = static_cast<float>(pixel & 0xff) / 255.0f;
+
+ if (channel_count == 3) {
+ output_matrix->buffer[output_ix++] = r;
+ output_matrix->buffer[output_ix++] = g;
+ output_matrix->buffer[output_ix++] = b;
+ }
+ else {
+ // ITU-R 601-2 luma transform
+ // see: https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.convert
+ float v = (0.299f * r) + (0.587f * g) + (0.114f * b);
+ output_matrix->buffer[output_ix++] = v;
+ }
+ }
+
+ bytes_left -= elements_to_read;
+ }
+
+ return EIDSP_OK;
+}
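+
+/*
+ * Added note (assuming the packed-pixel convention used above): each float in
+ * the signal encodes one pixel as 0xRRGGBB, so pure red (0xFF0000) unpacks to
+ * r = 1.0, g = 0.0, b = 0.0, and its grayscale value is
+ * v = 0.299*1.0 + 0.587*0.0 + 0.114*0.0 = 0.299.
+ */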
+
+#if (EI_CLASSIFIER_QUANTIZATION_ENABLED == 1) && (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI)
+
+__attribute__((unused)) int extract_drpai_features_quantized(signal_t *signal, matrix_u8_t *output_matrix, void *config_ptr, const float frequency) {
+ ei_dsp_config_image_t config = *((ei_dsp_config_image_t*)config_ptr);
+
+ int16_t channel_count = strcmp(config.channels, "Grayscale") == 0 ? 1 : 3;
+
+ size_t output_ix = 0;
+
+#if defined(EI_DSP_IMAGE_BUFFER_STATIC_SIZE)
+ const size_t page_size = EI_DSP_IMAGE_BUFFER_STATIC_SIZE;
+#else
+ const size_t page_size = 1024;
+#endif
+
+ // buffered read from the signal
+ size_t bytes_left = signal->total_length;
+ for (size_t ix = 0; ix < signal->total_length; ix += page_size) {
+ size_t elements_to_read = bytes_left > page_size ? page_size : bytes_left;
+
+#if defined(EI_DSP_IMAGE_BUFFER_STATIC_SIZE)
+ matrix_t input_matrix(elements_to_read, config.axes, ei_dsp_image_buffer);
+#else
+ matrix_t input_matrix(elements_to_read, config.axes);
+#endif
+ if (!input_matrix.buffer) {
+ EIDSP_ERR(EIDSP_OUT_OF_MEM);
+ }
+ signal->get_data(ix, elements_to_read, input_matrix.buffer);
+
+ for (size_t jx = 0; jx < elements_to_read; jx++) {
+ uint32_t pixel = static_cast<uint32_t>(input_matrix.buffer[jx]);
+
+ if (channel_count == 3) {
+ uint8_t r = static_cast<uint8_t>(pixel >> 16 & 0xff);
+ uint8_t g = static_cast<uint8_t>(pixel >> 8 & 0xff);
+ uint8_t b = static_cast<uint8_t>(pixel & 0xff);
+
+ output_matrix->buffer[output_ix++] = r;
+ output_matrix->buffer[output_ix++] = g;
+ output_matrix->buffer[output_ix++] = b;
+ }
+ else {
+ //NOTE: not implementing greyscale yet
+ }
+ }
+ bytes_left -= elements_to_read;
+ }
+
+ return EIDSP_OK;
+}
+
+#endif //(EI_CLASSIFIER_QUANTIZATION_ENABLED == 1) && (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI)
+
+#if (EI_CLASSIFIER_QUANTIZATION_ENABLED == 1) && (EI_CLASSIFIER_INFERENCING_ENGINE != EI_CLASSIFIER_DRPAI)
+
+__attribute__((unused)) int extract_image_features_quantized(signal_t *signal, matrix_i8_t *output_matrix, void *config_ptr, float scale, float zero_point, const float frequency,
+ int image_scaling) {
+ ei_dsp_config_image_t config = *((ei_dsp_config_image_t*)config_ptr);
+
+ int16_t channel_count = strcmp(config.channels, "Grayscale") == 0 ? 1 : 3;
+
+ size_t output_ix = 0;
+
+ const int32_t iRedToGray = (int32_t)(0.299f * 65536.0f);
+ const int32_t iGreenToGray = (int32_t)(0.587f * 65536.0f);
+ const int32_t iBlueToGray = (int32_t)(0.114f * 65536.0f);
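+ // Note (added): these are Q16.16 fixed-point copies of the ITU-R 601-2 luma
+ // weights, e.g. iRedToGray = (int32_t)(0.299f * 65536.0f) == 19595. The
+ // grayscale path below computes gray = (iR*r + iG*g + iB*b) >> 16 in integer
+ // math; for a pure red pixel r = 255: (19595 * 255) >> 16 == 76 ~ 0.299 * 255.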
+
+ static const float torch_mean[] = { 0.485, 0.456, 0.406 };
+ static const float torch_std[] = { 0.229, 0.224, 0.225 };
+
+#if defined(EI_DSP_IMAGE_BUFFER_STATIC_SIZE)
+ const size_t page_size = EI_DSP_IMAGE_BUFFER_STATIC_SIZE;
+#else
+ const size_t page_size = 1024;
+#endif
+
+ // buffered read from the signal
+ size_t bytes_left = signal->total_length;
+ for (size_t ix = 0; ix < signal->total_length; ix += page_size) {
+ size_t elements_to_read = bytes_left > page_size ? page_size : bytes_left;
+
+#if defined(EI_DSP_IMAGE_BUFFER_STATIC_SIZE)
+ matrix_t input_matrix(elements_to_read, config.axes, ei_dsp_image_buffer);
+#else
+ matrix_t input_matrix(elements_to_read, config.axes);
+#endif
+ if (!input_matrix.buffer) {
+ EIDSP_ERR(EIDSP_OUT_OF_MEM);
+ }
+ signal->get_data(ix, elements_to_read, input_matrix.buffer);
+
+ for (size_t jx = 0; jx < elements_to_read; jx++) {
+ uint32_t pixel = static_cast<uint32_t>(input_matrix.buffer[jx]);
+
+ if (channel_count == 3) {
+ // fast code path
+ if (scale == 0.003921568859368563f && zero_point == -128 && image_scaling == EI_CLASSIFIER_IMAGE_SCALING_NONE) {
+ int32_t r = static_cast<int32_t>(pixel >> 16 & 0xff);
+ int32_t g = static_cast<int32_t>(pixel >> 8 & 0xff);
+ int32_t b = static_cast<int32_t>(pixel & 0xff);
+
+ output_matrix->buffer[output_ix++] = static_cast<int8_t>(r + zero_point);
+ output_matrix->buffer[output_ix++] = static_cast<int8_t>(g + zero_point);
+ output_matrix->buffer[output_ix++] = static_cast<int8_t>(b + zero_point);
+ }
+ // slow code path
+ else {
+ float r = static_cast<float>(pixel >> 16 & 0xff);
+ float g = static_cast<float>(pixel >> 8 & 0xff);
+ float b = static_cast<float>(pixel & 0xff);
+
+ if (image_scaling == EI_CLASSIFIER_IMAGE_SCALING_NONE) {
+ r /= 255.0f;
+ g /= 255.0f;
+ b /= 255.0f;
+ }
+ else if (image_scaling == EI_CLASSIFIER_IMAGE_SCALING_TORCH) {
+ r /= 255.0f;
+ g /= 255.0f;
+ b /= 255.0f;
+
+ r = (r - torch_mean[0]) / torch_std[0];
+ g = (g - torch_mean[1]) / torch_std[1];
+ b = (b - torch_mean[2]) / torch_std[2];
+ }
+
+ output_matrix->buffer[output_ix++] = static_cast<int8_t>(round(r / scale) + zero_point);
+ output_matrix->buffer[output_ix++] = static_cast<int8_t>(round(g / scale) + zero_point);
+ output_matrix->buffer[output_ix++] = static_cast<int8_t>(round(b / scale) + zero_point);
+ }
+ }
+ else {
+ // fast code path
+ if (scale == 0.003921568859368563f && zero_point == -128 && image_scaling == EI_CLASSIFIER_IMAGE_SCALING_NONE) {
+ int32_t r = static_cast<int32_t>(pixel >> 16 & 0xff);
+ int32_t g = static_cast<int32_t>(pixel >> 8 & 0xff);
+ int32_t b = static_cast<int32_t>(pixel & 0xff);
+
+ // ITU-R 601-2 luma transform
+ // see: https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.convert
+ int32_t gray = (iRedToGray * r) + (iGreenToGray * g) + (iBlueToGray * b);
+ gray >>= 16; // scale down to int8_t
+ gray += zero_point;
+ if (gray < -128) gray = -128;
+ else if (gray > 127) gray = 127;
+ output_matrix->buffer[output_ix++] = static_cast<int8_t>(gray);
+ }
+ // slow code path
+ else {
+ float r = static_cast<float>(pixel >> 16 & 0xff);
+ float g = static_cast<float>(pixel >> 8 & 0xff);
+ float b = static_cast<float>(pixel & 0xff);
+
+ if (image_scaling == EI_CLASSIFIER_IMAGE_SCALING_NONE) {
+ r /= 255.0f;
+ g /= 255.0f;
+ b /= 255.0f;
+ }
+ else if (image_scaling == EI_CLASSIFIER_IMAGE_SCALING_TORCH) {
+ r /= 255.0f;
+ g /= 255.0f;
+ b /= 255.0f;
+
+ r = (r - torch_mean[0]) / torch_std[0];
+ g = (g - torch_mean[1]) / torch_std[1];
+ b = (b - torch_mean[2]) / torch_std[2];
+ }
+
+ // ITU-R 601-2 luma transform
+ // see: https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.convert
+ float v = (0.299f * r) + (0.587f * g) + (0.114f * b);
+ output_matrix->buffer[output_ix++] = static_cast<int8_t>(round(v / scale) + zero_point);
+ }
+ }
+ }
+
+ bytes_left -= elements_to_read;
+ }
+
+ return EIDSP_OK;
+}
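+
+/*
+ * Added worked example: the slow path above quantizes with
+ * q = round(v / scale) + zero_point. With the common parameters
+ * scale = 1/255 and zero_point = -128, a normalized channel v = 1.0 maps to
+ * round(255) - 128 = 127 and v = 0.0 maps to -128, which is what the
+ * "fast code path" computes directly as (channel + zero_point).
+ */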
+#endif // (EI_CLASSIFIER_QUANTIZATION_ENABLED == 1) && (EI_CLASSIFIER_INFERENCING_ENGINE != EI_CLASSIFIER_DRPAI)
+
+/**
+ * Clears all state for continuous audio. Invoke this function after the continuous audio loop ends.
+ */
+__attribute__((unused)) int ei_dsp_clear_continuous_audio_state() {
+ if (ei_dsp_cont_current_frame) {
+ ei_free(ei_dsp_cont_current_frame);
+ }
+
+ ei_dsp_cont_current_frame = nullptr;
+ ei_dsp_cont_current_frame_size = 0;
+ ei_dsp_cont_current_frame_ix = 0;
+
+ return EIDSP_OK;
+}
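+
+/*
+ * Added usage note: call the function above once per continuous classification
+ * session, after the last slice has been processed, so that stale samples from
+ * the previous session are not prepended to the next one.
+ */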
+
+/**
+ * @brief Calculates the cepstral mean and variance normalization.
+ *
+ * @param matrix Source and destination matrix
+ * @param config_ptr ei_dsp_config_mfcc_t struct pointer
+ */
+__attribute__((unused)) void calc_cepstral_mean_and_var_normalization_mfcc(ei_matrix *matrix, void *config_ptr)
+{
+ ei_dsp_config_mfcc_t *config = (ei_dsp_config_mfcc_t *)config_ptr;
+
+ uint32_t original_matrix_size = matrix->rows * matrix->cols;
+
+ /* Modify rows and columns ratio for matrix normalization */
+ matrix->rows = original_matrix_size / config->num_cepstral;
+ matrix->cols = config->num_cepstral;
+
+ // cepstral mean and variance normalization
+ int ret = speechpy::processing::cmvnw(matrix, config->win_size, true, false);
+ if (ret != EIDSP_OK) {
+ ei_printf("ERR: cmvnw failed (%d)\n", ret);
+ return;
+ }
+
+ /* Reset rows and columns ratio */
+ matrix->rows = 1;
+ matrix->cols = original_matrix_size;
+}
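+
+/*
+ * Added worked example (hypothetical numbers): with num_cepstral = 13 and a
+ * flat 1x637 feature matrix, the reshape above produces a 49x13 matrix (one
+ * row per frame) so cmvnw() can normalize each cepstral coefficient across
+ * frames, after which the matrix is flattened back to 1x637.
+ */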
+
+/**
+ * @brief Calculates the cepstral mean and variance normalization.
+ *
+ * @param matrix Source and destination matrix
+ * @param config_ptr ei_dsp_config_mfe_t struct pointer
+ */
+__attribute__((unused)) void calc_cepstral_mean_and_var_normalization_mfe(ei_matrix *matrix, void *config_ptr)
+{
+ ei_dsp_config_mfe_t *config = (ei_dsp_config_mfe_t *)config_ptr;
+
+ uint32_t original_matrix_size = matrix->rows * matrix->cols;
+
+ /* Modify rows and columns ratio for matrix normalization */
+ matrix->rows = (original_matrix_size) / config->num_filters;
+ matrix->cols = config->num_filters;
+
+ if (config->implementation_version < 3) {
+ // cepstral mean and variance normalization
+ int ret = speechpy::processing::cmvnw(matrix, config->win_size, false, true);
+ if (ret != EIDSP_OK) {
+ ei_printf("ERR: cmvnw failed (%d)\n", ret);
+ return;
+ }
+ }
+ else {
+ // normalization
+ int ret = speechpy::processing::mfe_normalization(matrix, config->noise_floor_db);
+ if (ret != EIDSP_OK) {
+ ei_printf("ERR: normalization failed (%d)\n", ret);
+ return;
+ }
+ }
+
+ /* Reset rows and columns ratio */
+ matrix->rows = 1;
+ matrix->cols = (original_matrix_size);
+}
+
+/**
+ * @brief Calculates the cepstral mean and variance normalization.
+ *
+ * @param matrix Source and destination matrix
+ * @param config_ptr ei_dsp_config_spectrogram_t struct pointer
+ */
+__attribute__((unused)) void calc_cepstral_mean_and_var_normalization_spectrogram(ei_matrix *matrix, void *config_ptr)
+{
+ ei_dsp_config_spectrogram_t *config = (ei_dsp_config_spectrogram_t *)config_ptr;
+
+ uint32_t original_matrix_size = matrix->rows * matrix->cols;
+
+ /* Modify rows and columns ratio for matrix normalization */
+ matrix->cols = config->fft_length / 2 + 1;
+ matrix->rows = (original_matrix_size) / matrix->cols;
+
+ if (config->implementation_version < 3) {
+ int ret = numpy::normalize(matrix);
+ if (ret != EIDSP_OK) {
+ ei_printf("ERR: normalization failed (%d)\n", ret);
+ return;
+ }
+ }
+ else {
+ // normalization
+ int ret = speechpy::processing::spectrogram_normalization(matrix, config->noise_floor_db, config->implementation_version == 3);
+ if (ret != EIDSP_OK) {
+ ei_printf("ERR: normalization failed (%d)\n", ret);
+ return;
+ }
+ }
+
+ /* Reset rows and columns ratio */
+ matrix->rows = 1;
+ matrix->cols = (original_matrix_size);
+}
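+
+/*
+ * Added note: cols = fft_length / 2 + 1 is the number of non-redundant bins of
+ * a real-input FFT, e.g. a 256-point FFT keeps 129 spectral bins per frame.
+ */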
+
+#ifdef __cplusplus
+}
+#endif // __cplusplus
+
+#endif // _EDGE_IMPULSE_RUN_DSP_H_
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_signal_with_axes.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_signal_with_axes.h
new file mode 100644
index 0000000..ccf4291
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_signal_with_axes.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef _EI_CLASSIFIER_SIGNAL_WITH_AXES_H_
+#define _EI_CLASSIFIER_SIGNAL_WITH_AXES_H_
+
+#include "edge-impulse-sdk/dsp/numpy_types.h"
+#include "edge-impulse-sdk/dsp/returntypes.hpp"
+#include "edge-impulse-sdk/classifier/ei_model_types.h"
+
+#if !EIDSP_SIGNAL_C_FN_POINTER
+
+using namespace ei;
+
+class SignalWithAxes {
+public:
+ SignalWithAxes(signal_t *original_signal, uint8_t *axes, size_t axes_count, const ei_impulse_t *impulse):
+ _original_signal(original_signal), _axes(axes), _axes_count(axes_count), _impulse(impulse)
+ {
+
+ }
+
+ signal_t * get_signal() {
+ if (this->_axes_count == _impulse->raw_samples_per_frame) {
+ return this->_original_signal;
+ }
+
+ wrapped_signal.total_length = _original_signal->total_length / _impulse->raw_samples_per_frame * _axes_count;
+#ifdef __MBED__
+ wrapped_signal.get_data = mbed::callback(this, &SignalWithAxes::get_data);
+#else
+ wrapped_signal.get_data = [this](size_t offset, size_t length, float *out_ptr) {
+ return this->get_data(offset, length, out_ptr);
+ };
+#endif
+ return &wrapped_signal;
+ }
+
+ int get_data(size_t offset, size_t length, float *out_ptr) {
+ size_t offset_on_original_signal = offset / _axes_count * _impulse->raw_samples_per_frame;
+ size_t length_on_original_signal = length / _axes_count * _impulse->raw_samples_per_frame;
+
+ size_t out_ptr_ix = 0;
+
+ for (size_t ix = offset_on_original_signal; ix < offset_on_original_signal + length_on_original_signal; ix += _impulse->raw_samples_per_frame) {
+ for (size_t axis_ix = 0; axis_ix < this->_axes_count; axis_ix++) {
+ int r = _original_signal->get_data(ix + _axes[axis_ix], 1, &out_ptr[out_ptr_ix++]);
+ if (r != 0) {
+ return r;
+ }
+ }
+ }
+
+ return 0;
+ }
+
+private:
+ signal_t *_original_signal;
+ uint8_t *_axes;
+ size_t _axes_count;
+ const ei_impulse_t *_impulse;
+ signal_t wrapped_signal;
+};
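+
+/*
+ * Usage sketch (added; values are hypothetical): given a signal that
+ * interleaves raw_samples_per_frame = 3 axes (x, y, z), expose only x and z:
+ *
+ * uint8_t axes[] = { 0, 2 };
+ * SignalWithAxes wrapper(&raw_signal, axes, 2, &impulse);
+ * signal_t *signal = wrapper.get_signal(); // 2/3 of the original length
+ */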
+
+#endif // #if !EIDSP_SIGNAL_C_FN_POINTER
+
+#endif // _EI_CLASSIFIER_SIGNAL_WITH_AXES_H_
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/ei_signal_with_range.h b/edgeimpulse/edge-impulse-sdk/classifier/ei_signal_with_range.h
new file mode 100644
index 0000000..7571c7e
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/ei_signal_with_range.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef _EI_CLASSIFIER_SIGNAL_WITH_RANGE_H_
+#define _EI_CLASSIFIER_SIGNAL_WITH_RANGE_H_
+
+#include "edge-impulse-sdk/dsp/numpy_types.h"
+#include "edge-impulse-sdk/dsp/returntypes.hpp"
+
+#if !EIDSP_SIGNAL_C_FN_POINTER
+
+using namespace ei;
+
+class SignalWithRange {
+public:
+ SignalWithRange(signal_t *original_signal, uint32_t range_start, uint32_t range_end):
+ _original_signal(original_signal), _range_start(range_start), _range_end(range_end)
+ {
+
+ }
+
+ signal_t * get_signal() {
+ if (this->_range_start == 0 && this->_range_end == this->_original_signal->total_length) {
+ return this->_original_signal;
+ }
+
+ wrapped_signal.total_length = _range_end - _range_start;
+#ifdef __MBED__
+ wrapped_signal.get_data = mbed::callback(this, &SignalWithRange::get_data);
+#else
+ wrapped_signal.get_data = [this](size_t offset, size_t length, float *out_ptr) {
+ return this->get_data(offset, length, out_ptr);
+ };
+#endif
+ return &wrapped_signal;
+ }
+
+ int get_data(size_t offset, size_t length, float *out_ptr) {
+ return _original_signal->get_data(offset + _range_start, length, out_ptr);
+ }
+
+private:
+ signal_t *_original_signal;
+ uint32_t _range_start;
+ uint32_t _range_end;
+ signal_t wrapped_signal;
+};
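+
+/*
+ * Usage sketch (added; values are hypothetical): expose samples
+ * [100, total_length) of an existing signal without copying it:
+ *
+ * SignalWithRange wrapper(&raw_signal, 100, raw_signal.total_length);
+ * signal_t *signal = wrapper.get_signal();
+ * // signal->get_data(0, n, buf) now reads raw samples 100..100+n
+ */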
+
+#endif // #if !EIDSP_SIGNAL_C_FN_POINTER
+
+#endif // _EI_CLASSIFIER_SIGNAL_WITH_RANGE_H_
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/akida.h b/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/akida.h
new file mode 100644
index 0000000..205b542
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/akida.h
@@ -0,0 +1,561 @@
+/* Edge Impulse inferencing library
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef EI_CLASSIFIER_INFERENCING_ENGINE_AKIDA_H
+#define EI_CLASSIFIER_INFERENCING_ENGINE_AKIDA_H
+
+#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_AKIDA)
+
+/**
+ * @brief If we are not forcing SOFTWARE inference (simulation),
+ * make sure we try to use the hardware.
+ */
+#ifndef EI_CLASSIFIER_USE_AKIDA_SOFTWARE
+#define EI_CLASSIFIER_USE_AKIDA_HARDWARE 1
+#endif
+
+/**
+ * @brief If more than one device is present in the system,
+ * set this to the device index to select the desired device,
+ * e.g. set to 1 to select /dev/akida1.
+ */
+#ifndef EI_CLASSIFIER_USE_AKIDA_HARDWARE_NO
+#define EI_CLASSIFIER_USE_AKIDA_HARDWARE_NO 0
+#endif
+
+#include "model-parameters/model_metadata.h"
+#include <thread>
+#include "tensorflow-lite/tensorflow/lite/c/common.h"
+#include "tensorflow-lite/tensorflow/lite/interpreter.h"
+#include "tensorflow-lite/tensorflow/lite/kernels/register.h"
+#include "tensorflow-lite/tensorflow/lite/model.h"
+#include "tensorflow-lite/tensorflow/lite/optional_debug_tools.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/tree_ensemble_classifier.h"
+#include "edge-impulse-sdk/classifier/ei_model_types.h"
+#include "edge-impulse-sdk/porting/ei_classifier_porting.h"
+#include "edge-impulse-sdk/classifier/ei_fill_result_struct.h"
+#include "tensorflow-lite/tensorflow/lite/kernels/internal/reference/softmax.h"
+#undef EI_CLASSIFIER_INFERENCING_ENGINE
+#define EI_CLASSIFIER_INFERENCING_ENGINE EI_CLASSIFIER_TFLITE_FULL
+#include "tflite_helper.h"
+#undef EI_CLASSIFIER_INFERENCING_ENGINE
+#define EI_CLASSIFIER_INFERENCING_ENGINE EI_CLASSIFIER_AKIDA
+#include <string>
+#include <fstream>
+#include <vector>
+#include <map>
+#include <sstream>
+#include <iomanip>
+#include <memory>
+#include "pybind11/embed.h"
+#include "pybind11/numpy.h"
+#include "pybind11/stl.h"
+
+namespace py = pybind11;
+
+std::stringstream engine_info;
+
+static py::module_ akida;
+static py::object model;
+static py::object model_predict;
+static py::object model_forward;
+static py::object device;
+static bool akida_initialized = false;
+static std::vector<size_t> input_shape;
+static tflite::RuntimeShape softmax_shape;
+static tflite::SoftmaxParams dummy_params;
+static int model_input_bits = 0;
+static float scale;
+static int down_scale;
+typedef struct {
+ std::unique_ptr<tflite::FlatBufferModel> model;
+ std::unique_ptr<tflite::Interpreter> interpreter;
+} ei_tflite_state_t;
+
+std::map<uint32_t, ei_tflite_state_t*> ei_tflite_instances;
+
+bool init_akida(const uint8_t *model_arr, size_t model_arr_size, bool debug)
+{
+ py::module_ sys;
+ py::list path;
+ constexpr char model_file_path[] = "/tmp/akida_model.fbz";
+
+ if(debug) {
+ try {
+ sys = py::module_::import("sys");
+ path = sys.attr("path");
+ ei_printf("DEBUG: sys.path:");
+ for (py::handle p: path) {
+ ei_printf("\t%s\n", p.cast().c_str());
+ }
+ }
+ catch (py::error_already_set &e) {
+ ei_printf("ERR: Importing 'sys' library failed:\n%s\n", e.what());
+ // as it is only for debug purposes, continue
+ }
+ }
+
+ try {
+ // import Python's akida module
+ akida = py::module_::import("akida");
+ }
+ catch (py::error_already_set &e) {
+ ei_printf("ERR: Importing 'akida' library failed:\n%s\n", e.what());
+ return false;
+ }
+
+ if(debug) {
+ std::string ver = akida.attr("__version__").cast<std::string>();
+ ei_printf("DEBUG: Akida version: %s\n", ver.c_str());
+ }
+
+ py::object Model = akida.attr("Model");
+
+ // deploy akida model file into temporary file
+ std::ofstream model_file(model_file_path, std::ios::out | std::ios::binary);
+ model_file.write(reinterpret_cast<const char*>(model_arr), model_arr_size);
+ if(model_file.bad()) {
+ ei_printf("ERR: failed to unpack model file into %s\n", model_file_path);
+ model_file.close();
+ return false;
+ }
+ model_file.close();
+
+ // load model
+ try {
+ model = Model(model_file_path);
+ }
+ catch (py::error_already_set &e) {
+ ei_printf("ERR: Can't load model file from %s\n", model_file_path);
+ ei_printf("ERR: %s\n", e.what());
+ return false;
+ }
+
+ // get input shape from model
+ input_shape = model.attr("input_shape").cast<std::vector<size_t>>();
+ //TODO: temporarily only 3D input data is supported (see note in run_nn_inference)
+ if(input_shape.size() != 3) {
+ ei_printf("ERR: Unsupported input data shape. Expected 3 dimensions, got %d\n", (int)input_shape.size());
+ return false;
+ }
+ // extend input by (N, ...) - hardcoded to (1, ...)
+ input_shape.insert(input_shape.begin(), (size_t)1);
+
+ // get model input_bits
+ std::vector<py::object> layers = model.attr("layers").cast<std::vector<py::object>>();
+ auto input_layer = layers[0];
+ model_input_bits = input_layer.attr("input_bits").cast<int>();
+ if((model_input_bits != 8) && (model_input_bits != 4)) {
+ ei_printf("ERR: Unsupported input_bits. Expected 4 or 8, got %d\n", model_input_bits);
+ return false;
+ }
+
+ // initialize scale coefficients
+ if(model_input_bits == 8) {
+ scale = 255;
+ down_scale = 1;
+ }
+ else if(model_input_bits == 4) {
+ // these values are recommended by BrainChip
+ scale = 15;
+ down_scale = 16;
+ }
+
+ if(debug) {
+ ei_printf("INFO: Model input_bits: %d\n", model_input_bits);
+ ei_printf("INFO: Scale: %f\n", scale);
+ ei_printf("INFO: Down scale: %d\n", down_scale);
+ }
+
+#if (defined(EI_CLASSIFIER_USE_AKIDA_HARDWARE) && (EI_CLASSIFIER_USE_AKIDA_HARDWARE == 1))
+ // get list of available devices
+ py::list devices = akida.attr("devices")();
+ if(devices.empty() == true) {
+ ei_printf("ERR: AKD1000 device not found!\n");
+ return false;
+ }
+
+ if(devices.size() > 1) {
+ ei_printf("More than one device found! Using /dev/akida%d\n", EI_CLASSIFIER_USE_AKIDA_HARDWARE_NO);
+ device = devices[EI_CLASSIFIER_USE_AKIDA_HARDWARE_NO];
+ }
+ else {
+ device = devices[0];
+ }
+ //TODO: check if selected device is correct (compare versions)
+ // enable power measurement
+ device.attr("soc").attr("power_measurement_enabled") = true;
+
+ // map model to the device
+ try {
+ model.attr("map")(device);
+ }
+ catch (py::error_already_set &e) {
+ ei_printf("ERR: Can't load the ML model onto the AKD1000 SoC\n");
+ ei_printf("ERR: %s\n", e.what());
+ return false;
+ }
+#elif (defined(EI_CLASSIFIER_USE_AKIDA_SOFTWARE) && (EI_CLASSIFIER_USE_AKIDA_SOFTWARE == 1))
+#warning "Akida model will be run in SIMULATION mode (not on real hardware)!"
+#else
+#error "Neither EI_CLASSIFIER_USE_AKIDA_HARDWARE or EI_CLASSIFIER_USE_AKIDA_SOFTWARE are defined or set to 1"
+#endif
+
+ // init softmax shape
+ std::vector<int> tmp = model.attr("output_shape").cast<std::vector<int>>();
+ softmax_shape.BuildFrom(tmp);
+ // dummy beta parameter for softmax purposes
+ dummy_params.beta = 1;
+
+ // get reference to predict function
+ model_predict = model.attr("predict");
+ model_forward = model.attr("forward");
+
+ // clear info stream
+ engine_info.str("");
+
+ return true;
+}
+
+template <typename T>
+void debug_print(const std::vector<T> vec, const int val_per_row = 3)
+{
+ int n = 0;
+ for(auto it = vec.begin(); it != vec.end(); it++) {
+ ei_printf("%f ", *it);
+ if(++n > val_per_row - 1) {
+ ei_printf("\n");
+ n = 0;
+ }
+ }
+}
+
+/**
+ * @brief Do neural network inferencing over the processed feature matrix
+ *
+ * @param impulse Struct describing impulse architecture
+ * @param fmatrix Processed matrix
+ * @param result Output classifier results
+ * @param[in] debug Debug output enable
+ *
+ * @return The ei impulse error.
+ */
+EI_IMPULSE_ERROR run_nn_inference(
+ const ei_impulse_t *impulse,
+ ei_feature_t *fmatrix,
+ uint32_t* input_block_ids,
+ uint32_t input_block_ids_size,
+ ei_impulse_result_t *result,
+ void *config_ptr,
+ bool debug)
+{
+ ei_learning_block_config_tflite_graph_t *block_config = ((ei_learning_block_config_tflite_graph_t*)config_ptr);
+ ei_config_tflite_graph_t *graph_config = ((ei_config_tflite_graph_t*)block_config->graph_config);
+
+ EI_IMPULSE_ERROR fill_res = EI_IMPULSE_OK;
+
+ // init Python embedded interpreter (should be called once!)
+ static py::scoped_interpreter guard{};
+
+ // check if we've initialized the interpreter and device?
+ if (akida_initialized == false) {
+ if(init_akida(graph_config->model, graph_config->model_size, debug) == false) {
+ return EI_IMPULSE_AKIDA_ERROR;
+ }
+ akida_initialized = true;
+ }
+
+ // according to:
+ // https://doc.brainchipinc.com/api_reference/akida_apis.html#akida.Model.predict
+ // input type is always uint8
+ py::array_t<uint8_t> input_data(input_shape);
+
+ /*
+ * convert data to uint8 and copy features into input tensor
+ * For images RGB shape is (width, height, colors)
+ * For images BW shape is (width, height, 1)
+ * For Audio shape is (width, height, 1) - spectrogram
+ * TODO: test with other ML models/data types
+ * For details see:
+ * https://pybind11.readthedocs.io/en/stable/advanced/pycpp/numpy.html#direct-access
+ */
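+ // Added worked example (8-bit input, scale = 255, down_scale = 1): a
+ // normalized feature of 0.5 becomes (uint8_t)((0.5f * 255) / 1) == 127,
+ // after clamping to the 0..255 range expected by akida.Model.predict().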
+ auto r = input_data.mutable_unchecked<4>();
+ float temp;
+
+ size_t mtx_size = impulse->dsp_blocks_size + impulse->learning_blocks_size;
+ for (size_t i = 0; i < input_block_ids_size; i++) {
+ uint16_t cur_mtx = input_block_ids[i];
+#if EI_CLASSIFIER_SINGLE_FEATURE_INPUT == 0
+ ei::matrix_t* matrix = NULL;
+
+ if (!find_mtx_by_idx(fmatrix, &matrix, cur_mtx, mtx_size)) {
+ ei_printf("ERR: Cannot find matrix with id %zu\n", cur_mtx);
+ return EI_IMPULSE_INVALID_SIZE;
+ }
+#else
+ ei::matrix_t* matrix = fmatrix[0].matrix;
+#endif
+ for (py::ssize_t x = 0; x < r.shape(1); x++) {
+ for (py::ssize_t y = 0; y < r.shape(2); y++) {
+ for(py::ssize_t z = 0; z < r.shape(3); z++) {
+ temp = (matrix->buffer[x * r.shape(2) * r.shape(3) + y * r.shape(3) + z] * scale);
+ temp = std::max(0.0f, std::min(temp, 255.0f));
+ r(0, x, y, z) = (uint8_t)(temp / down_scale);
+ }
+ }
+ }
+ }
+
+ // Run inference on AKD1000
+ uint64_t ctx_start_us = ei_read_timer_us();
+ py::array_t<float> potentials;
+ try {
+ potentials = model_predict(input_data);
+ }
+ catch (py::error_already_set &e) {
+ ei_printf("ERR: Inference error:\n%s\n", e.what());
+ return EI_IMPULSE_AKIDA_ERROR;
+ }
+ // TODO: 'forward' is returning int8 or int32, but EI SDK supports int8 or float32 only
+ // py::array_t potentials = model_forward(input_data);
+ uint64_t ctx_end_us = ei_read_timer_us();
+
+ potentials = potentials.squeeze();
+
+ if(debug) {
+ std::string ret_str = py::str(potentials).cast<std::string>();
+ ei_printf("AKD1000 raw output:\n%s\n", ret_str.c_str());
+ }
+
+ // convert to vector of floats to make further processing much easier
+ std::vector<float> potentials_v; // = potentials.cast<std::vector<float>>();
+
+ // TODO: output conversion depending on output shape?
+ if (impulse->object_detection == false) {
+ potentials_v = potentials.squeeze().cast<std::vector<float>>();
+ }
+ else {
+ // TODO: output from AkidaNet/MobileNet is always N x M x P (3 dimensions)?
+ auto q = potentials.unchecked<>();
+ for (py::ssize_t x = 0; x < q.shape(0); x++) {
+ for (py::ssize_t y = 0; y < q.shape(1); y++) {
+ for(py::ssize_t z = 0; z < q.shape(2); z++) {
+ potentials_v.push_back(q(x, y, z));
+ }
+ }
+ }
+ }
+
+ // apply softmax, because Akida does not support this operation
+ tflite::reference_ops::Softmax(dummy_params, softmax_shape, potentials_v.data(), softmax_shape, potentials_v.data());
+
+ if(debug == true) {
+ ei_printf("After softmax:\n");
+ debug_print(potentials_v);
+ }
+
+ float active_power = 0;
+#if (defined(EI_CLASSIFIER_USE_AKIDA_HARDWARE))
+ // power measurement post-processing
+ float floor_power = device.attr("soc").attr("power_meter").attr("floor").cast<float>();
+ py::array pwr_events = device.attr("soc").attr("power_meter").attr("events")();
+ auto events = pwr_events.mutable_unchecked<py::object>();
+ for (py::ssize_t i = 0; i < events.shape(0); i++) {
+ active_power += events(i).attr("power").cast<float>();
+ }
+ active_power = (active_power/pwr_events.size()) - floor_power;
+#endif
+
+ result->timing.classification_us = ctx_end_us - ctx_start_us;
+ result->timing.classification = (int)(result->timing.classification_us / 1000);
+
+ // clear info
+ engine_info.str("");
+ engine_info << "Power consumption: " << std::fixed << std::setprecision(2) << active_power << " mW\n";
+ engine_info << "Inferences per second: " << (1000000 / result->timing.classification_us);
+
+ if (impulse->object_detection) {
+ switch (impulse->object_detection_last_layer) {
+ case EI_CLASSIFIER_LAST_LAYER_FOMO: {
+ fill_res = fill_result_struct_f32_fomo(
+ impulse,
+ result,
+ potentials_v.data(),
+ impulse->fomo_output_size,
+ impulse->fomo_output_size);
+ break;
+ }
+ case EI_CLASSIFIER_LAST_LAYER_SSD: {
+ ei_printf("ERR: MobileNet SSD models are not implemented for Akida (%d)\n",
+ impulse->object_detection_last_layer);
+ return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
+ }
+ case EI_CLASSIFIER_LAST_LAYER_YOLOV5: {
+ ei_printf("ERR: YOLO v5 models are not implemented for Akida (%d)\n",
+ impulse->object_detection_last_layer);
+ return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
+ }
+ default: {
+ ei_printf("ERR: Unsupported object detection last layer (%d)\n",
+ impulse->object_detection_last_layer);
+ return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE;
+ }
+ }
+ }
+ else {
+ fill_res = fill_result_struct_f32(impulse, result, potentials_v.data(), debug);
+ }
+
+ return fill_res;
+}
+
+/**
+ * Construct a tflite interpreter (creates it if needed)
+ */
+static EI_IMPULSE_ERROR get_interpreter(ei_learning_block_config_tflite_graph_t *block_config, tflite::Interpreter **interpreter) {
+ // not in the map yet...
+ if (!ei_tflite_instances.count(block_config->block_id)) {
+ ei_config_tflite_graph_t *graph_config = (ei_config_tflite_graph_t*)block_config->graph_config;
+ ei_tflite_state_t *new_state = new ei_tflite_state_t();
+
+ auto new_model = tflite::FlatBufferModel::BuildFromBuffer((const char*)graph_config->model, graph_config->model_size);
+ new_state->model = std::move(new_model);
+ if (!new_state->model) {
+ ei_printf("Failed to build TFLite model from buffer\n");
+ return EI_IMPULSE_TFLITE_ERROR;
+ }
+
+ tflite::ops::builtin::BuiltinOpResolver resolver;
+#if EI_CLASSIFIER_HAS_TREE_ENSEMBLE_CLASSIFIER
+ resolver.AddCustom("TreeEnsembleClassifier",
+ tflite::ops::custom::Register_TREE_ENSEMBLE_CLASSIFIER());
+#endif
+ tflite::InterpreterBuilder builder(*new_state->model, resolver);
+ builder(&new_state->interpreter);
+
+ if (!new_state->interpreter) {
+ ei_printf("Failed to construct interpreter\n");
+ return EI_IMPULSE_TFLITE_ERROR;
+ }
+
+ if (new_state->interpreter->AllocateTensors() != kTfLiteOk) {
+ ei_printf("AllocateTensors failed\n");
+ return EI_IMPULSE_TFLITE_ERROR;
+ }
+
+ int hw_thread_count = (int)std::thread::hardware_concurrency();
+ hw_thread_count -= 1; // leave one thread free for the other application
+ if (hw_thread_count < 1) {
+ hw_thread_count = 1;
+ }
+
+ if (new_state->interpreter->SetNumThreads(hw_thread_count) != kTfLiteOk) {
+ ei_printf("SetNumThreads failed\n");
+ return EI_IMPULSE_TFLITE_ERROR;
+ }
+
+ ei_tflite_instances.insert(std::make_pair(block_config->block_id, new_state));
+ }
+
+ auto tflite_state = ei_tflite_instances[block_config->block_id];
+ *interpreter = tflite_state->interpreter.get();
+ return EI_IMPULSE_OK;
+}
+
+
+extern "C" EI_IMPULSE_ERROR run_nn_inference_from_dsp(
+ ei_learning_block_config_tflite_graph_t *block_config,
+ signal_t *signal,
+ matrix_t *output_matrix)
+{
+ tflite::Interpreter *interpreter;
+ auto interpreter_ret = get_interpreter(block_config, &interpreter);
+ if (interpreter_ret != EI_IMPULSE_OK) {
+ return interpreter_ret;
+ }
+
+ TfLiteTensor *input = interpreter->input_tensor(0);
+ TfLiteTensor *output = interpreter->output_tensor(0);
+
+ if (!input) {
+ return EI_IMPULSE_INPUT_TENSOR_WAS_NULL;
+ }
+ if (!output) {
+ return EI_IMPULSE_OUTPUT_TENSOR_WAS_NULL;
+ }
+
+ auto input_res = fill_input_tensor_from_signal(signal, input);
+ if (input_res != EI_IMPULSE_OK) {
+ return input_res;
+ }
+
+ TfLiteStatus status = interpreter->Invoke();
+ if (status != kTfLiteOk) {
+ ei_printf("ERR: interpreter->Invoke() failed with %d\n", status);
+ return EI_IMPULSE_TFLITE_ERROR;
+ }
+
+ auto output_res = fill_output_matrix_from_tensor(output, output_matrix);
+ if (output_res != EI_IMPULSE_OK) {
+ return output_res;
+ }
+
+ // on Linux we're not worried about free'ing (for now)
+
+ return EI_IMPULSE_OK;
+}
+
+__attribute__((unused)) int extract_tflite_features(signal_t *signal, matrix_t *output_matrix, void *config_ptr, const float frequency) {
+
+ ei_dsp_config_tflite_t *dsp_config = (ei_dsp_config_tflite_t*)config_ptr;
+
+ ei_config_tflite_graph_t ei_config_tflite_graph_0 = {
+ .implementation_version = 1,
+ .model = dsp_config->model,
+ .model_size = dsp_config->model_size,
+ .arena_size = dsp_config->arena_size
+ };
+
+ ei_learning_block_config_tflite_graph_t ei_learning_block_config = {
+ .implementation_version = 1,
+ .block_id = dsp_config->block_id,
+ .object_detection = false,
+ .object_detection_last_layer = EI_CLASSIFIER_LAST_LAYER_UNKNOWN,
+ .output_data_tensor = 0,
+ .output_labels_tensor = 255,
+ .output_score_tensor = 255,
+ .graph_config = &ei_config_tflite_graph_0
+ };
+
+ auto x = run_nn_inference_from_dsp(&ei_learning_block_config, signal, output_matrix);
+ if (x != 0) {
+ return x;
+ }
+
+ return EIDSP_OK;
+}
+
+#endif // EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_AKIDA
+
+#endif /* EI_CLASSIFIER_INFERENCING_ENGINE_AKIDA_H */
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/anomaly.h b/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/anomaly.h
new file mode 100644
index 0000000..ea7d729
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/anomaly.h
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef _EDGE_IMPULSE_INFERENCING_ANOMALY_H_
+#define _EDGE_IMPULSE_INFERENCING_ANOMALY_H_
+
+#if (EI_CLASSIFIER_HAS_ANOMALY)
+
+#include <cstdlib>
+#include <cstdio>
+#include <cstring>
+#include <cmath>
+#include <memory>
+
+#include "edge-impulse-sdk/classifier/ei_classifier_types.h"
+#include "edge-impulse-sdk/classifier/ei_aligned_malloc.h"
+#include "edge-impulse-sdk/porting/ei_classifier_porting.h"
+#include "edge-impulse-sdk/classifier/inferencing_engines/engines.h"
+#include "edge-impulse-sdk/classifier/ei_fill_result_struct.h"
+
+#ifdef __cplusplus
+namespace {
+#endif // __cplusplus
+
+/**
+ * Standard scaler, scales all values in the input vector
+ * Note that this *modifies* the array in place!
+ * @param input Array of input values
+ * @param scale Array of scale values (obtain from StandardScaler in Python)
+ * @param mean Array of mean values (obtain from StandardScaler in Python)
+ * @param input_size Size of input, scale and mean arrays
+ */
+void standard_scaler(float *input, const float *scale, const float *mean, size_t input_size) {
+ for (size_t ix = 0; ix < input_size; ix++) {
+ input[ix] = (input[ix] - mean[ix]) / scale[ix];
+ }
+}
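+
+/*
+ * Added note: this mirrors the transform of sklearn's StandardScaler,
+ * x' = (x - mean) / scale, where `scale` is the per-feature standard
+ * deviation exported from training.
+ */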
+
+/**
+ * Calculate the distance between input vector and the cluster
+ * @param input Array of input values (already scaled by standard_scaler)
+ * @param input_size Size of the input array
+ * @param cluster A cluster (number of centroids should match input_size)
+ */
+float calculate_cluster_distance(float *input, size_t input_size, const ei_classifier_anom_cluster_t *cluster) {
+ // todo: check input_size and centroid size?
+
+ float dist = 0.0f;
+ for (size_t ix = 0; ix < input_size; ix++) {
+ dist += pow(input[ix] - cluster->centroid[ix], 2);
+ }
+ return sqrt(dist) - cluster->max_error;
+}
+
+/**
+ * Get minimum distance to a cluster
+ * @param input Array of input values (already scaled by standard_scaler)
+ * @param input_size Size of the input array
+ * @param clusters Array of clusters
+ * @param cluster_size Size of cluster array
+ */
+float get_min_distance_to_cluster(float *input, size_t input_size, const ei_classifier_anom_cluster_t *clusters, size_t cluster_size) {
+ float min = 1000.0f;
+ for (size_t ix = 0; ix < cluster_size; ix++) {
+ float dist = calculate_cluster_distance(input, input_size, &clusters[ix]);
+ if (dist < min) {
+ min = dist;
+ }
+ }
+ return min;
+}
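+
+/*
+ * Added note: the resulting anomaly score is min over all clusters of
+ * (||x - centroid||_2 - max_error), so a sample inside a cluster's learned
+ * radius scores negative and an outlier scores increasingly positive. E.g.
+ * with centroid (0, 0), max_error = 1.0 and input (3, 4) the score is
+ * sqrt(9 + 16) - 1.0 = 4.0.
+ */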
+
+#ifdef __cplusplus
+}
+#endif // __cplusplus
+
+
+/**
+ * Extracts the input values from the feature matrix based on the anomaly axes.
+ * @param fmatrix Feature matrix
+ * @param input_block_ids Array of block IDs to extract from the feature matrix
+ * @param input_block_ids_size Size of input_block_ids array
+ * @param block_config Anomaly block configuration
+ * @param input Array to store the extracted input values
+ * @return EI_IMPULSE_OK if successful, otherwise an error code
+ */
+EI_IMPULSE_ERROR extract_anomaly_input_values(
+ ei_feature_t *fmatrix,
+ uint32_t* input_block_ids,
+ uint32_t input_block_ids_size,
+ uint32_t anom_axes_size,
+ const uint16_t *anom_axis,
+ float *input)
+{
+ if (input_block_ids_size == 1) {
+ for (size_t ix = 0; ix < anom_axes_size; ix++) {
+ input[ix] = fmatrix[0].matrix->buffer[anom_axis[ix]];
+ }
+ }
+ else {
+#if EI_CLASSIFIER_SINGLE_FEATURE_INPUT == 0
+ ei::matrix_t* matrix = NULL;
+#endif
+ // tracks where we are now in the combined feature matrix
+ uint32_t global_buf_pos = 0;
+ // we add the size of passed matrix to it
+ uint32_t buf_offset = 0;
+ // current index of input feature
+ uint32_t input_pos = 0;
+
+ for (size_t i = 0; i < input_block_ids_size; i++) {
+#if EI_CLASSIFIER_SINGLE_FEATURE_INPUT == 0
+ size_t cur_mtx = input_block_ids[i];
+ if (!find_mtx_by_idx(fmatrix, &matrix, cur_mtx, anom_axes_size)) {
+ ei_printf("ERR: Cannot find matrix with id %zu\n", cur_mtx);
+ return EI_IMPULSE_INVALID_SIZE;
+ }
+#else
+ ei::matrix_t* matrix = fmatrix[0].matrix;
+#endif
+ for (size_t ix = 0; ix < anom_axes_size; ix++) {
+ global_buf_pos = anom_axis[input_pos];
+ if (global_buf_pos <= buf_offset + (matrix->rows * matrix->cols)) {
+ input[input_pos] = matrix->buffer[anom_axis[input_pos] - buf_offset];
+ input_pos++;
+ if (input_pos >= anom_axes_size) { goto end; }
+ }
+ else {
+ break;
+ }
+ }
+ buf_offset += matrix->rows * matrix->cols;
+ }
+ end:;
+ }
+ return EI_IMPULSE_OK;
+}
+
+
+EI_IMPULSE_ERROR run_kmeans_anomaly(
+ const ei_impulse_t *impulse,
+ ei_feature_t *fmatrix,
+ uint32_t* input_block_ids,
+ uint32_t input_block_ids_size,
+ ei_impulse_result_t *result,
+ void *config_ptr,
+ bool debug = false)
+{
+ ei_learning_block_config_anomaly_kmeans_t *block_config = (ei_learning_block_config_anomaly_kmeans_t*)config_ptr;
+
+ uint64_t anomaly_start_ms = ei_read_timer_ms();
+
+ float *input = (float*)ei_malloc(block_config->anom_axes_size * sizeof(float));
+ if (!input) {
+ ei_printf("Failed to allocate memory for anomaly input buffer");
+ return EI_IMPULSE_OUT_OF_MEMORY;
+ }
+
+ extract_anomaly_input_values(fmatrix, input_block_ids, input_block_ids_size, block_config->anom_axes_size, block_config->anom_axis, input);
+
+ standard_scaler(input, block_config->anom_scale, block_config->anom_mean, block_config->anom_axes_size);
+ float anomaly = get_min_distance_to_cluster(
+ input, block_config->anom_axes_size, block_config->anom_clusters, block_config->anom_cluster_count);
+
+ uint64_t anomaly_end_ms = ei_read_timer_ms();
+
+ if (debug) {
+ ei_printf("Anomaly score (time: %d ms.): ", static_cast(anomaly_end_ms - anomaly_start_ms));
+ ei_printf_float(anomaly);
+ ei_printf("\n");
+ }
+
+ result->timing.anomaly = anomaly_end_ms - anomaly_start_ms;
+ result->anomaly = anomaly;
+ ei_free(input);
+
+ return EI_IMPULSE_OK;
+}
+
+#if (EI_CLASSIFIER_INFERENCING_ENGINE != EI_CLASSIFIER_NONE)
+EI_IMPULSE_ERROR run_gmm_anomaly(
+ const ei_impulse_t *impulse,
+ ei_feature_t *fmatrix,
+ uint32_t* input_block_ids,
+ uint32_t input_block_ids_size,
+ ei_impulse_result_t *result,
+ void *config_ptr,
+ bool debug = false)
+{
+ ei_learning_block_config_anomaly_gmm_t *block_config = (ei_learning_block_config_anomaly_gmm_t*)config_ptr;
+
+ ei_learning_block_config_tflite_graph_t ei_learning_block_config_gmm = {
+ .implementation_version = 1,
+ .block_id = 0,
+ .object_detection = 0,
+ .object_detection_last_layer = EI_CLASSIFIER_LAST_LAYER_UNKNOWN,
+ .output_data_tensor = 0,
+ .output_labels_tensor = 0,
+ .output_score_tensor = 0,
+ .quantized = 0,
+ .compiled = 0,
+ .graph_config = block_config->graph_config
+ };
+
+ ei_impulse_result_t anomaly_result = { 0 };
+
+ std::unique_ptr<ei_feature_t[]> input_ptr(new ei_feature_t[1]);
+ ei_feature_t* input = input_ptr.get();
+
+ memset(&anomaly_result, 0, sizeof(ei_impulse_result_t));
+
+#if EI_CLASSIFIER_HAS_VISUAL_ANOMALY
+ input = fmatrix;
+#else
+ std::unique_ptr<ei::matrix_t> matrix_ptr(new ei::matrix_t(1, block_config->anom_axes_size));
+ input[0].matrix = matrix_ptr.get();
+ input[0].blockId = 0;
+
+ extract_anomaly_input_values(fmatrix, input_block_ids, input_block_ids_size, block_config->anom_axes_size, block_config->anom_axis, input[0].matrix->buffer);
+ input_block_ids_size = 1;
+#endif
+
+ EI_IMPULSE_ERROR res = run_nn_inference(impulse, input, input_block_ids, input_block_ids_size, &anomaly_result, (void*)&ei_learning_block_config_gmm, debug);
+ if (res != EI_IMPULSE_OK) {
+ return res;
+ }
+
+ if (debug) {
+ ei_printf("Anomaly score (time: %d ms.): ", anomaly_result.timing.classification);
+ ei_printf_float(anomaly_result.classification[0].value);
+ ei_printf("\n");
+ }
+
+ result->timing.anomaly = anomaly_result.timing.classification;
+
+#if EI_CLASSIFIER_HAS_VISUAL_ANOMALY
+ result->visual_ad_grid_cells = anomaly_result.visual_ad_grid_cells;
+ result->visual_ad_count = anomaly_result.visual_ad_count;
+ result->visual_ad_result.mean_value = anomaly_result.visual_ad_result.mean_value;
+ result->visual_ad_result.max_value = anomaly_result.visual_ad_result.max_value;
+#else
+ result->anomaly = anomaly_result.classification[0].value;
+#endif
+
+ return EI_IMPULSE_OK;
+}
+#endif // (EI_CLASSIFIER_INFERENCING_ENGINE != EI_CLASSIFIER_NONE)
+
+#endif //#if (EI_CLASSIFIER_HAS_ANOMALY == 1)
+#endif // _EDGE_IMPULSE_INFERENCING_ANOMALY_H_
diff --git a/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/drpai.h b/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/drpai.h
new file mode 100644
index 0000000..ec1cc65
--- /dev/null
+++ b/edgeimpulse/edge-impulse-sdk/classifier/inferencing_engines/drpai.h
@@ -0,0 +1,753 @@
+/*
+ * Copyright (c) 2022 EdgeImpulse Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef _EI_CLASSIFIER_INFERENCING_ENGINE_DRPAI_H_
+#define _EI_CLASSIFIER_INFERENCING_ENGINE_DRPAI_H_
+
+#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI)
+
+/*****************************************
+ * includes
+ ******************************************/
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>