Commit 8823714

Fix rebase changes, update documentation
vilukissa68 committed Dec 10, 2024
1 parent 9160475 commit 8823714
Showing 5 changed files with 102 additions and 85 deletions.
20 changes: 10 additions & 10 deletions examples/hpc-c/tvm-hpc/README.md
@@ -2,7 +2,7 @@
This build has only been tested on Python 3.11.

# Installing dependencies
## Installing TVM
## Installing TVM with Headsail backend
Get sources
``` sh
git clone --recursive --depth=1 https://github.com/soc-hub-fi/headsail-tvm tvm
@@ -19,7 +19,6 @@ cp <config.cmake-in-tvm-hpc-directory> <path-to-tvm-repository>/build/config.cma
For example
``` sh
cp headsail-vp/examples/hpc-c/tvm-hpc/config.cmake tvm/build/config.cmake

```

To enable codegen, modify the config.cmake file in the build directory by setting the value on line 162 to point at llvm-config.
@@ -31,7 +30,7 @@ set(USE_HEADSAIL ON)
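For reference, a minimal sketch of what the edited `build/config.cmake` might contain is shown below; `USE_LLVM` is the stock TVM option name, and the llvm-config path is only an illustrative placeholder for wherever llvm-config is installed on your system.
``` cmake
# Illustrative excerpt of build/config.cmake (path is a placeholder)
set(USE_LLVM /usr/bin/llvm-config)  # around line 162: point codegen at llvm-config
set(USE_HEADSAIL ON)                # enable the Headsail backend
```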

### Building TVM

Build in the previously greated build directory
Build in the previously created build directory
``` sh
cd build
cmake ..
@@ -49,7 +48,7 @@ More information in https://tvm.apache.org/docs/install/from_source.html


## Python dependencies
Python dependencies are needed for building TVM models from onnx graphs and must be available during tvm-hpc compilation.
Python dependencies are needed for building TVM models from TFLite graphs and must be available during tvm-hpc compilation.

Install python dependencies for TVM
``` sh
@@ -67,7 +66,7 @@ pip install -r requirements.txt
# Building project

## Fetching the datasets
To run Tinyperf benchmark we need to obtain the needed datasets. Easiest way to do this is by runnning the `get_testing_data` script.
To run Tinyperf benchmark we need to obtain the needed datasets. Easiest way to do this is by running the `get_testing_data` script.
```sh
./get_testing_data.sh
```
@@ -77,13 +76,14 @@ In project folder (tvm-hpc)
```sh
mkdir build
cd build
cmake ..
cmake .. -DUSE_PERF_KEYWORD_SPOTTING=ON -DUSE_ACCELERATOR=ON
make
```
This creates a binary called headsail-tvm
This creates a binary called headsail-tvm with the model for the MLPerf Tiny Keyword Spotting task embedded and convolutions assigned to the DLA.
Other options for models are: `[-DUSE_PERF_IMAGE_CLASSIFICATION=ON, -DUSE_PERF_VISUAL_WAKE_WORDS=ON]`. The use of the accelerator is controlled with the `-DUSE_ACCELERATOR=[ON/OFF]` flag.
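As an illustration, a hypothetical configuration for the Image Classification model with the accelerator disabled would use the same flags with different values (assuming one model flag is selected per build):
```sh
# Illustrative alternative configuration: Image Classification model, DLA off
cmake .. -DUSE_PERF_IMAGE_CLASSIFICATION=ON -DUSE_ACCELERATOR=OFF
make
```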

# Running in renode
After succesful build, the resulting binary can be run with Headsail's virtual prototype in Renode
# Running in Renode
After successful build, the resulting binary can be run with Headsail's virtual prototype in Renode
```sh
cd /headsail-vp/scripts
./run_on_hpc.sh ../examples/hpc-c/tvm-hpc/build/headsail-tvm
@@ -92,5 +92,5 @@ cd /headsail-vp/scripts
## Running the benchmark
To run the TinyPerf benchmark, run the tiny_perf_benchmark.py script with the `-b` option set to the desired benchmark `[ic, kws, vww]`.
```sh
python tiny_perf_benchmark.py -b ic
python tiny_perf_benchmark.py -b kws
```
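The other benchmarks listed above are selected the same way, for example (assuming the corresponding dev data has been fetched):
```sh
python tiny_perf_benchmark.py -b vww
```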
102 changes: 70 additions & 32 deletions examples/hpc-c/tvm-hpc/tiny_perf_benchmark.py
@@ -15,36 +15,40 @@
import time
import numpy as np

UART = '/tmp/uart0'
UART = "/tmp/uart0"
ROOT_PATH = Path(__file__).parents[0]
DATA_DIR = ROOT_PATH / "dev_data"
KWS_DATA_DIR = DATA_DIR / "kws01"
VWW_DATA_DIR = DATA_DIR / "vw_coco2014_96"
VWW_NON_PERSON_DATA_DIR = VWW_DATA_DIR / "non_person"
VWW_NON_PERSON_DATA_DIR = VWW_DATA_DIR / "non_person"
VWW_PERSON_DATA_DIR = VWW_DATA_DIR / "person"
IC_DATA_DIR = DATA_DIR / "cifar-10-batches-py"
AD_DATA_DIR = DATA_DIR / "ToyCar" / "test"


# UTILS
def print_matrix(arr, format_type='signed'):
def print_matrix(arr, format_type="signed"):
for row in arr:
for elem in row:
if format_type == 'signed':
if format_type == "signed":
print(f"{int(elem):d}", end="\t") # Signed decimal
elif format_type == 'unsigned':
elif format_type == "unsigned":
print(f"{int(elem) & 0xFFFFFFFF:d}", end="\t") # Unsigned decimal
elif format_type == 'hex':
elif format_type == "hex":
print(f"{int(elem) & 0xFFFFFFFF:08x}", end="\t") # Hexadecimal
else:
raise ValueError("Invalid format_type. Use 'signed', 'unsigned', or 'hex'.")
raise ValueError(
"Invalid format_type. Use 'signed', 'unsigned', or 'hex'."
)
print()


def accuracy_report(gt, prediction):
print("Accuracy: {:.3f}".format(accuracy_score(gt, prediction)))
print("Confusion matrix:\n{}".format(confusion_matrix(gt, prediction)))
print(classification_report(gt, prediction))


def send_stimulus(data, label=None):
print("Writing {} bytes as stimulus...".format(len(data)))
if label is not None:
@@ -54,19 +58,20 @@ def send_stimulus(data, label=None):
ser.write(bytes(data))
ser.close()


def wait_for_result():
print("Waiting for results...")
ser = serial.Serial(UART, 9600)
ser = serial.Serial(UART, 9600)
output = ser.readline()
while output != b'Prediction:\n':
while output != b"Prediction:\n":
output = ser.readline()
output = ser.readline()
ser.close()
output = bytearray(output)
results = []
for x in output:
results.append(((x & 0xff) ^ 0x80) - 0x80) # Append signed
results = results[:-1] # Remove line break
results.append(((x & 0xFF) ^ 0x80) - 0x80) # Append signed
results = results[:-1] # Remove line break
print(results)
print("Predicted class: {}".format(np.argmax(results)))
print("\n")
@@ -79,14 +84,20 @@ def read_kws_file(path):
content = file.read()
return content


def get_kws_stimulus():
df = pd.read_csv(KWS_DATA_DIR / "y_labels.csv", names=["filename", "no_classes", "class"])
df = pd.read_csv(
KWS_DATA_DIR / "y_labels.csv", names=["filename", "no_classes", "class"]
)
data = read_kws_file(KWS_DATA_DIR / df["filename"][0])
print("Expected label:", df["class"][0])
return data


def run_kws(total_samples=200):
df = pd.read_csv(KWS_DATA_DIR / "y_labels.csv", names=["filename", "no_classes", "class"])
df = pd.read_csv(
KWS_DATA_DIR / "y_labels.csv", names=["filename", "no_classes", "class"]
)

class_counts = df["class"].value_counts()

@@ -116,45 +127,67 @@ def run_kws(total_samples=200):
balanced_df = balanced_df.sample(frac=1, random_state=42)

predictions = []
for (i, filename) in enumerate(balanced_df["filename"]):
for i, filename in enumerate(balanced_df["filename"]):
data = read_kws_file(KWS_DATA_DIR / filename)
send_stimulus(data, df["class"][i])
predictions.append(np.argmax(wait_for_result()))

# Mid run report
accuracy_report(balanced_df["class"][:len(predictions)], predictions)
accuracy_report(balanced_df["class"][: len(predictions)], predictions)

print("Final accuracy report for Keyword Spotting:")
accuracy_report(balanced_df["class"], predictions)


# VWW
def read_vww_file(path):
#Image loading and preprocessing
# Image loading and preprocessing
image = tf.io.read_file(str(path))
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [96,96])
image = tf.image.resize(image, [96, 96])
image = np.array(image, dtype=np.int8)
image = image - 128
return image.astype(np.int8)


def get_vww_stimulus():
items = os.listdir(VWW_NON_PERSON_DATA_DIR)
non_persons = [item for item in items if os.path.isfile(os.path.join(VWW_NON_PERSON_DATA_DIR, item)) and item.startswith("COCO_val")]
non_persons = [
item
for item in items
if os.path.isfile(os.path.join(VWW_NON_PERSON_DATA_DIR, item))
and item.startswith("COCO_val")
]
data = read_vww_file(VWW_NON_PERSON_DATA_DIR / non_persons[0])
print("Expected label: 1")
return data.tobytes()


def run_vww(total_samples=100):
items = os.listdir(VWW_NON_PERSON_DATA_DIR)
non_persons = [item for item in items if os.path.isfile(os.path.join(VWW_NON_PERSON_DATA_DIR, item)) and item.startswith("COCO_val")]
non_persons = [
item
for item in items
if os.path.isfile(os.path.join(VWW_NON_PERSON_DATA_DIR, item))
and item.startswith("COCO_val")
]
non_persons.sort()

items = os.listdir(VWW_PERSON_DATA_DIR)
persons = [item for item in items if os.path.isfile(os.path.join(VWW_PERSON_DATA_DIR, item)) and item.startswith("COCO_val")]
persons = [
item
for item in items
if os.path.isfile(os.path.join(VWW_PERSON_DATA_DIR, item))
and item.startswith("COCO_val")
]
persons.sort()

print("Number of non_persons", len(non_persons))
print("Number of persons", len(persons))
print("Input shape: ", np.shape(read_vww_file(VWW_NON_PERSON_DATA_DIR / non_persons[0])))
print(
"Input shape: ",
np.shape(read_vww_file(VWW_NON_PERSON_DATA_DIR / non_persons[0])),
)

# Calculate balanced number of samples for each category
samples_per_class = min(len(non_persons), len(persons), total_samples // 2)
@@ -186,22 +219,26 @@ def run_vww(total_samples=100):
print("Final accuracy report for Visual Wakeup Word:")
accuracy_report(gt, predictions)


# IC
def get_ic_stimulus():
import pickle

with open(IC_DATA_DIR / "test_batch", "rb") as file:
data = pickle.load(file, encoding='bytes')
print("Expected label:", data[b'labels'][0])
return data[b'data'][0].tobytes()
data = pickle.load(file, encoding="bytes")
print("Expected label:", data[b"labels"][0])
return data[b"data"][0].tobytes()


def run_ic(total_samples=200):
import pickle

with open(IC_DATA_DIR / "test_batch", "rb") as file:
data = pickle.load(file, encoding='bytes')
print("Input shape: {}".format(np.shape(data[b'data'][0])))
data = pickle.load(file, encoding="bytes")
print("Input shape: {}".format(np.shape(data[b"data"][0])))

images = data[b'data']
labels = data[b'labels']
images = data[b"data"]
labels = data[b"labels"]
print(labels)

class_samples = {i: [] for i in range(10)}
@@ -234,8 +271,8 @@ def run_ic(total_samples=200):
print(selected_labels)

# Run inference on samples
for (i, image) in enumerate(selected_images):
#FROM CHW to HWC
for i, image in enumerate(selected_images):
# FROM CHW to HWC
image = np.reshape(image, (3, 32, 32))
image = np.rollaxis(image, 0, 3)
image = image - 128
@@ -244,11 +281,11 @@
send_stimulus(image.tobytes(), label)

# Wait for inference result
prediction = (np.argmax(wait_for_result()))
prediction = np.argmax(wait_for_result())
predictions.append(prediction)

# Mid-run report
accuracy_report(selected_labels[:len(predictions)], predictions)
accuracy_report(selected_labels[: len(predictions)], predictions)

print("Final accuracy report for Image Classification:")
accuracy_report(selected_labels, predictions)
@@ -277,5 +314,6 @@ def main():
else:
print("Bad benchmark! Available benchmarks are: kws, vww, ic")


if __name__ == "__main__":
main()