/**
* @file RTAudioClassification.lf
* @author Vincenzo Barbuto
 * @brief Example of how to use the `AudioClassifier` reactor.
*/
target Python
import Microphone from "lib/Input.lf"
import AudioClassifier from "lib/Audio.lf"
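
// `os.path.join` is used in the `model` parameter of the main reactor below.
// Declaring the import in a file-level preamble makes `os` explicitly available
// in the generated Python code (a small defensive sketch; some generated setups
// may already provide it).
preamble {=
  import os
=}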
/**
 * @brief Tests the audio classification functionality by connecting a `Microphone`, an
 * `AudioClassifier`, and an `Actuator` reactor.
 *
 * The `Actuator` reactor receives the classification results and the inference time from the
 * `AudioClassifier` and prints them to the console.
 *
 * The `main` reactor creates instances of the `Microphone`, `AudioClassifier`, and `Actuator`
 * reactors and connects them to form the test pipeline.
 *
 * @note Remember to set the `model` parameter to the absolute path of the audio classification
 * model you want to use.
 */
reactor Actuator {
  input results
  input inference_time

  reaction(results, inference_time) {=
    res = results.value
    print("-" * 70)
    for i, result in enumerate(res):
      print(
        f"{i}) Head: {result['head']}, Index: {result['index']}, "
        f"Label: {result['label']}, Confidence: {result['score']*100:.2f}%"
      )
    print(f"Time per inference: {inference_time.value} ms")
  =}
}
main reactor {
  mic = new Microphone(dtype="float32")
  cls = new AudioClassifier(
    model = {= os.path.join(os.getcwd(), "models/audio/yamnet/yamnet.tflite") =}
  )
  actuator = new Actuator()

  mic.audio_data -> cls.input_data
  cls.results, cls.inference_time -> actuator.results, actuator.inference_time
}
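
// A minimal sketch of how one might compile and run this program, assuming the
// Lingua Franca compiler `lfc` is on the PATH and this file lives under `src/`.
// The exact paths are illustrative, not confirmed by this repository:
//
//   lfc src/RTAudioClassification.lf
//   python3 src-gen/RTAudioClassification/RTAudioClassification.py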