From 3aadfc21f7b19cca2818a8a0924ce124049f1e1f Mon Sep 17 00:00:00 2001
From: Matevz Morato
Date: Thu, 9 Feb 2023 04:16:31 +0100
Subject: [PATCH] Add benchmark scripts

---
 .../tmp_s3/benchmarks/Camera/bench_all.py     |  83 ++++++++
 .../tmp_s3/benchmarks/Camera/bench_color.py   |  31 ++++
 .../tmp_s3/benchmarks/Camera/bench_mono.py    |  27 +++
 .../tmp_s3/benchmarks/ImageManip/manip.py     |  50 +++++
 .../NeuralNetwork/benchmark_nn_and_parser.py  | 175 ++++++++++++++++++
 .../VideoEncoder/bench_encoder_live_camera.py |  34 ++++
 .../bench_encoder_resend_frame.py             |  38 ++++
 .../VideoEncoder/bench_encoders_all.py        |  65 +++++++
 8 files changed, 503 insertions(+)
 create mode 100644 examples/tmp_s3/benchmarks/Camera/bench_all.py
 create mode 100644 examples/tmp_s3/benchmarks/Camera/bench_color.py
 create mode 100644 examples/tmp_s3/benchmarks/Camera/bench_mono.py
 create mode 100644 examples/tmp_s3/benchmarks/ImageManip/manip.py
 create mode 100644 examples/tmp_s3/benchmarks/NeuralNetwork/benchmark_nn_and_parser.py
 create mode 100644 examples/tmp_s3/benchmarks/VideoEncoder/bench_encoder_live_camera.py
 create mode 100644 examples/tmp_s3/benchmarks/VideoEncoder/bench_encoder_resend_frame.py
 create mode 100644 examples/tmp_s3/benchmarks/VideoEncoder/bench_encoders_all.py

diff --git a/examples/tmp_s3/benchmarks/Camera/bench_all.py b/examples/tmp_s3/benchmarks/Camera/bench_all.py
new file mode 100644
index 000000000..f7f3824e2
--- /dev/null
+++ b/examples/tmp_s3/benchmarks/Camera/bench_all.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python3
+import depthai as dai
+import time
+
+color_attributes = ["preview", "video", "isp"]
+video_size = (4000, 3000)
+preview_size = (500, 500)
+
+cams_to_test = [
+    {
+        "name": "left",
+        "mono": True,
+        "socket": dai.CameraBoardSocket.LEFT,
+        "resolution": dai.MonoCameraProperties.SensorResolution.THE_800_P,
+        "fps": 30,
+    },
+    {
+        "name": "right",
+        "mono": True,
+        "socket": dai.CameraBoardSocket.RIGHT,
+        "resolution": dai.MonoCameraProperties.SensorResolution.THE_800_P,
+        "fps": 30,
+    },
+    {
+        "name": "rgb",
+        "mono": False,
+        "socket": dai.CameraBoardSocket.RGB,
+        "resolution": dai.ColorCameraProperties.SensorResolution.THE_4000X3000,
+        "fps": 30,
+    },
+    {
+        "name": "night",
+        "mono": False,
+        "socket": dai.CameraBoardSocket.CAM_D,
+        "resolution": dai.ColorCameraProperties.SensorResolution.THE_4000X3000,
+        "fps": 30,
+    },
+]
+
+def add_cam(pipeline, props, colorOut):
+    xoutReport = pipeline.create(dai.node.XLinkOut)
+    benchmarkIn = pipeline.create(dai.node.BenchmarkIn)
+    benchmarkIn.setNumMessagesToGet(10)
+    streamName = props["name"] + " - " + colorOut
+    xoutReport.setStreamName(streamName)
+    if props["mono"]:
+        cameraNode = pipeline.create(dai.node.MonoCamera)
+        cameraNode.out.link(benchmarkIn.input)
+    else:
+        cameraNode = pipeline.create(dai.node.ColorCamera)
+        # Link whichever color output is being measured (isp/video/preview)
+        if colorOut == "isp":
+            cameraNode.isp.link(benchmarkIn.input)
+        elif colorOut == "video":
+            cameraNode.setVideoSize(video_size)
+            cameraNode.video.link(benchmarkIn.input)
+        elif colorOut == "preview":
+            cameraNode.setPreviewSize(preview_size)
+            cameraNode.preview.link(benchmarkIn.input)
+    cameraNode.setResolution(props["resolution"])
+    cameraNode.setFps(props["fps"])
+    cameraNode.setBoardSocket(props["socket"])
+    benchmarkIn.report.link(xoutReport.input)
+    return streamName
+
+for color_out in color_attributes:
+    # Create pipeline
+    pipeline = dai.Pipeline()
+    streamNames = []
+    for props in cams_to_test:
+        streamName = add_cam(pipeline, props, color_out)
+        streamNames.append(streamName)
+
+    # Connect to device and start pipeline
+    with dai.Device(pipeline) as device:
+        # Output queues to receive the reports back from the device
+        qOuts = [device.getOutputQueue(name=streamName) for streamName in streamNames]
+        reports = [qOut.get() for qOut in qOuts]
+        for i, report in enumerate(reports):
+            print(f"Stream: {streamNames[i]}")
+            print(f"Got {report.fps} FPS, in {report.timeTotal} seconds for {report.numMessagesReceived} messages.")
+    time.sleep(7)
+    print("----------------------------------------------\n\n\n")
diff --git a/examples/tmp_s3/benchmarks/Camera/bench_color.py b/examples/tmp_s3/benchmarks/Camera/bench_color.py
new file mode 100644
index 000000000..4faaf55a2
--- /dev/null
+++ b/examples/tmp_s3/benchmarks/Camera/bench_color.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python3
+import depthai as dai
+
+USE_VIDEO = False
+VIDEO_SIZE = (1920, 1080)
+
+# Create pipeline
+pipeline = dai.Pipeline()
+
+cameraNode = pipeline.create(dai.node.ColorCamera)
+xoutReport = pipeline.create(dai.node.XLinkOut)
+benchmarkIn = pipeline.create(dai.node.BenchmarkIn)
+
+cameraNode.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4000X3000)
+cameraNode.setVideoSize(VIDEO_SIZE)
+benchmarkIn.setNumMessagesToGet(30)
+xoutReport.setStreamName("outReport")
+
+# Linking
+if USE_VIDEO:
+    cameraNode.video.link(benchmarkIn.input)
+else:
+    cameraNode.isp.link(benchmarkIn.input)
+benchmarkIn.report.link(xoutReport.input)
+
+# Connect to device and start pipeline
+with dai.Device(pipeline) as device:
+    # Output queue to receive the report back from the device
+    qOut = device.getOutputQueue(name="outReport")
+    report = qOut.get()
+    print(f"Got {report.fps} FPS, in {report.timeTotal} s for {report.numMessagesReceived} messages.")
diff --git a/examples/tmp_s3/benchmarks/Camera/bench_mono.py b/examples/tmp_s3/benchmarks/Camera/bench_mono.py
new file mode 100644
index 000000000..3c163ac88
--- /dev/null
+++ b/examples/tmp_s3/benchmarks/Camera/bench_mono.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+import depthai as dai
+
+
+# Create pipeline
+pipeline = dai.Pipeline()
+cameraNode = pipeline.create(dai.node.MonoCamera)
+cameraNode.setResolution(dai.MonoCameraProperties.SensorResolution.THE_800_P)
+cameraNode.setFps(120)
+
+xoutReport = pipeline.create(dai.node.XLinkOut)
+benchmarkIn = pipeline.create(dai.node.BenchmarkIn)
+
+benchmarkIn.setNumMessagesToGet(50)
+xoutReport.setStreamName("outReport")
+
+# Linking
+cameraNode.out.link(benchmarkIn.input)
+benchmarkIn.report.link(xoutReport.input)
+
+# Connect to device and start pipeline
+with dai.Device(pipeline) as device:
+    # Output queue to receive the report back from the device
+    qOut = device.getOutputQueue(name="outReport")
+    report = qOut.get()
+    print(f"Got {report.fps} FPS, in {report.timeTotal} s for {report.numMessagesReceived} messages.")
+
diff --git a/examples/tmp_s3/benchmarks/ImageManip/manip.py b/examples/tmp_s3/benchmarks/ImageManip/manip.py
new file mode 100644
index 000000000..38680b6c1
--- /dev/null
+++ b/examples/tmp_s3/benchmarks/ImageManip/manip.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+import depthai as dai
+
+
+# Create pipeline
+pipeline = dai.Pipeline()
+
+# xinFrame = pipeline.create(dai.node.XLinkIn)
+xoutReport = pipeline.create(dai.node.XLinkOut)
+benchmarkOut = pipeline.create(dai.node.BenchmarkOut)
+benchmarkIn = pipeline.create(dai.node.BenchmarkIn)
+camRgb = pipeline.create(dai.node.ColorCamera)
+imageManip = pipeline.create(dai.node.ImageManip)
+
+benchmarkIn.setNumMessagesToGet(20)
+
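+# BenchmarkOut keeps re-sending the frames it receives (as fast as the
+# downstream node accepts them, unless an FPS limit is set), and BenchmarkIn
+# reports FPS/latency once it has counted the configured number of messages.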
+camRgb.setBoardSocket(dai.CameraBoardSocket.RGB)
+camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4000X3000)
+camRgb.setVideoSize(800, 600)
+
+# The resize and frame type are set directly on the node's initialConfig
+imageManip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
+imageManip.setMaxOutputFrameSize(416 * 416 * 3)
+imageManip.inputImage.setBlocking(False)
+imageManip.inputImage.setQueueSize(2)
+imageManip.initialConfig.setResize(416, 416)
+
+xoutReport.setStreamName("outReport")
+
+# Linking
+camRgb.video.link(benchmarkOut.input)
+benchmarkOut.out.link(imageManip.inputImage)
+
+imageManip.out.link(benchmarkIn.input)
+benchmarkIn.report.link(xoutReport.input)
+
+# Connect to device and start pipeline
+with dai.Device(pipeline) as device:
+
+    # Output queue to receive the report back from the device
+    qOut = device.getOutputQueue(name="outReport")
+
+    # Block until BenchmarkIn has counted all 20 messages, then print the
+    # report it sends back
+    report = qOut.get()
+    print(f"Got {report.fps} FPS, in {report.timeTotal} s for {report.numMessagesReceived} messages.")
diff --git a/examples/tmp_s3/benchmarks/NeuralNetwork/benchmark_nn_and_parser.py b/examples/tmp_s3/benchmarks/NeuralNetwork/benchmark_nn_and_parser.py
new file mode 100644
index 000000000..f7c946cf9
--- /dev/null
+++ b/examples/tmp_s3/benchmarks/NeuralNetwork/benchmark_nn_and_parser.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python3
+
+from pathlib import Path
+import sys
+import cv2
+import depthai as dai
+import numpy as np
+from time import monotonic
+
+
+BENCH_BOTH = True  # Benchmark both the neural network and the parser; otherwise only the parser is measured
+FPS = 0  # FPS limit; useful to measure latency at a fixed FPS (set to 0 to max out the FPS)
+PATH_INPUT_IMAGE = "/home/matevz/Pictures/croped.bmp"  # Path to the input image
+# Label texts (COCO classes)
+labelMap = [
+    "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train",
+    "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench",
+    "bird", "cat", "dog", "horse", "sheep", "cow", "elephant",
+    "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie",
+    "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat",
+    "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup",
+    "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich",
+    "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake",
+    "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor",
+    "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven",
+    "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors",
+    "teddy bear", "hair drier", "toothbrush"
+]
+
+# Create pipeline
+pipeline = dai.Pipeline()
+
+# Define sources and outputs
+nn = pipeline.create(dai.node.NeuralNetwork)
+det = pipeline.create(dai.node.DetectionParser)
+
+xinFrame = pipeline.create(dai.node.XLinkIn)
+nnOut = pipeline.create(dai.node.XLinkOut)
+xOutReport = pipeline.create(dai.node.XLinkOut)
+xOutReportDet = pipeline.create(dai.node.XLinkOut)
+benchmarkOut = pipeline.create(dai.node.BenchmarkOut)
+benchmarkOut.setFps(FPS)
+benchmarkIn = pipeline.create(dai.node.BenchmarkIn)
+benchmarkInDetection = pipeline.create(dai.node.BenchmarkIn)
+# benchmarkOut.setNumMessagesToSend(1)
+benchmarkIn.setNumMessagesToGet(100)
+benchmarkInDetection.setNumMessagesToGet(100)
+xOutReport.setStreamName("outReport")
+xOutReportDet.setStreamName("outReportDetection")
+
+SIZE_W = 416
+SIZE_H = 416
+xinFrame.setStreamName("inFrame")
+nnOut.setStreamName("nn")
+nnPath = str((Path(__file__).parent / Path('../../../models/yolov6n_416x416_openvino2022.1_vpux.blob')).resolve().absolute())
+nn.setBlobPath(nnPath)
+nn.setNumInferenceThreads(4)
+nn.setNumShavesPerInferenceThread(16)
+
+det.setNNFamily(dai.DetectionNetworkType.YOLO)
+det.setConfidenceThreshold(0.3)
+det.setNumClasses(80)
+det.setCoordinateSize(4)
+
+det.setInputImageSize([SIZE_H, SIZE_W])
+det.setIouThreshold(0.45)
+
+if BENCH_BOTH:
+    # Linking: xinFrame -> benchmarkOut -> nn -> benchmarkIn -> det -> benchmarkInDetection -> nnOut
+    xinFrame.out.link(benchmarkOut.input)
+
+    det.out.link(benchmarkInDetection.input)
+    benchmarkInDetection.passthrough.link(nnOut.input)
+    benchmarkInDetection.report.link(xOutReportDet.input)
+
+    benchmarkOut.out.link(nn.input)
+    nn.out.link(benchmarkIn.input)
+    benchmarkIn.passthrough.link(det.input)
+    benchmarkIn.report.link(xOutReport.input)
+    nn.input.setQueueSize(1)
+    nn.input.setBlocking(True)
+    det.input.setQueueSize(10)
+    det.input.setBlocking(False)
+    nnOut.input.setBlocking(False)
+else:
+    # Linking: xinFrame -> nn -> benchmarkOut -> det -> benchmarkInDetection -> nnOut
+    xinFrame.out.link(nn.input)
+
+    det.out.link(benchmarkInDetection.input)
+    benchmarkInDetection.passthrough.link(nnOut.input)
+    benchmarkInDetection.report.link(xOutReportDet.input)
+
+    nn.out.link(benchmarkOut.input)
+    benchmarkOut.out.link(det.input)
+    # benchmarkIn is not used in this mode, so the "outReport" stream stays
+    # empty; only the parser report is fetched at the end
+    nn.input.setQueueSize(1)
+    det.input.setQueueSize(10)
+    det.input.setBlocking(False)
+    nnOut.input.setBlocking(False)
+
+# Connect to device and start pipeline
+with dai.Device(pipeline) as device:
+    # Input queue will be used to send video frames to the device.
+    qIn = device.getInputQueue(name="inFrame")
+    # Output queue will be used to get nn data from the video frames.
+    qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
+    qOut = device.getOutputQueue(name="outReport")
+    qOutDet = device.getOutputQueue(name="outReportDetection")
+    frame = None
+    detections = []
+
+    def frameNorm(frame, bbox):
+        normVals = np.full(len(bbox), frame.shape[0])
+        normVals[::2] = frame.shape[1]
+        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)
+
+
+    def to_planar(arr: np.ndarray, shape: tuple) -> np.ndarray:
+        return cv2.resize(arr, shape).transpose(2, 0, 1).flatten()
+
+
+    def resize(arr: np.ndarray, shape: tuple) -> np.ndarray:
+        return cv2.resize(arr, shape).flatten()
+
+
+    def displayFrame(name, frame):
+        for detection in detections:
+            bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
+            cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5,
+                        255)
+            cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
+
+            cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)
+        # Show the frame
+        cv2.imshow(name, frame)
+
+    img = dai.ImgFrame()
+    out2 = cv2.imread(PATH_INPUT_IMAGE)  # Insert a picture here
+    frame = out2
+    img.setData(to_planar(out2, (SIZE_W, SIZE_H)))
+    img.setTimestamp(monotonic())
+    img.setWidth(SIZE_W)
+    img.setHeight(SIZE_H)
+    img.setType(dai.ImgFrame.Type.BGR888p)  # to_planar produces planar BGR data
+    qIn.send(img)
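+    # Only one frame is sent; BenchmarkOut on the device keeps re-sending it
+    # downstream, so the host just waits for the reports (and any detections)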
+
+    while True:
+        inDet = qDet.tryGet()
+        report = qOut.tryGet()
+        reportDet = qOutDet.tryGet()
+        if report is not None:
+            print(f"FPS NN is {report.fps}, average latency is {report.averageLatency * 1000} ms")
+
+        if reportDet is not None:
+            print(f"FPS Det is {reportDet.fps}, average latency is {reportDet.averageLatency * 1000} ms")
+
+        if report is not None or reportDet is not None:
+            break
+        if inDet is not None:
+            detections = inDet.detections
+
+        if frame is not None:
+            displayFrame("rgb", frame)
+
+        if cv2.waitKey(1) == ord('q'):
+            break
+    if reportDet is None:
+        reportDet = qOutDet.get()
+        print(f"FPS Det is {reportDet.fps}, average latency is {reportDet.averageLatency * 1000} ms")
+    if report is None and BENCH_BOTH:
+        report = qOut.get()
+        print(f"FPS NN is {report.fps}, average latency is {report.averageLatency * 1000} ms")
diff --git a/examples/tmp_s3/benchmarks/VideoEncoder/bench_encoder_live_camera.py b/examples/tmp_s3/benchmarks/VideoEncoder/bench_encoder_live_camera.py
new file mode 100644
index 000000000..6822f45ae
--- /dev/null
+++ b/examples/tmp_s3/benchmarks/VideoEncoder/bench_encoder_live_camera.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python3
+import depthai as dai
+
+
+# Create pipeline
+pipeline = dai.Pipeline()
+
+cameraNode = pipeline.create(dai.node.ColorCamera)
+xoutReport = pipeline.create(dai.node.XLinkOut)
+benchmarkIn = pipeline.create(dai.node.BenchmarkIn)
+videoEnc = pipeline.create(dai.node.VideoEncoder)
+
+
+videoEnc.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H264_HIGH)
+
+cameraNode.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4000X3000)
+cameraNode.setVideoSize(4000, 3000)
+
+benchmarkIn.setNumMessagesToGet(10)
+
+xoutReport.setStreamName("outReport")
+
+# Linking
+cameraNode.video.link(videoEnc.input)
+# cameraNode.isp.link(videoEnc.input)  # Encoding frames from isp runs at the camera FPS
+videoEnc.bitstream.link(benchmarkIn.input)
+benchmarkIn.report.link(xoutReport.input)
+
+# Connect to device and start pipeline
+with dai.Device(pipeline) as device:
+    # Output queue to receive the report back from the device
+    qOut = device.getOutputQueue(name="outReport")
+    report = qOut.get()
+    print(f"Got {report.fps} FPS, in {report.timeTotal} s for {report.numMessagesReceived} messages.")
diff --git a/examples/tmp_s3/benchmarks/VideoEncoder/bench_encoder_resend_frame.py b/examples/tmp_s3/benchmarks/VideoEncoder/bench_encoder_resend_frame.py
new file mode 100644
index 000000000..8fcce0f6a
--- /dev/null
+++ b/examples/tmp_s3/benchmarks/VideoEncoder/bench_encoder_resend_frame.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python3
+import depthai as dai
+
+# This test is likely very unrealistic, since the video encoder is always getting the same image.
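+# (Identical frames make inter-frame prediction trivially cheap, so read the numbers as a best case.)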
+
+pipeline = dai.Pipeline()
+videoEnc = pipeline.create(dai.node.VideoEncoder)
+videoEnc.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H264_HIGH)
+
+
+def fill_out_pipeline(pipeline, videoEnc):
+    cameraNode = pipeline.create(dai.node.ColorCamera)
+
+    xoutReport = pipeline.create(dai.node.XLinkOut)
+    benchmarkIn = pipeline.create(dai.node.BenchmarkIn)
+    benchmarkOut = pipeline.create(dai.node.BenchmarkOut)
+
+    cameraNode.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4000X3000)
+    cameraNode.setVideoSize(4000, 3000)
+
+    benchmarkOut.input.setBlocking(False)
+    benchmarkIn.setNumMessagesToGet(30)
+    xoutReport.setStreamName("outReport")
+
+    # Linking
+    cameraNode.video.link(benchmarkOut.input)
+    benchmarkOut.out.link(videoEnc.input)
+    videoEnc.bitstream.link(benchmarkIn.input)
+    benchmarkIn.report.link(xoutReport.input)
+
+fill_out_pipeline(pipeline, videoEnc)
+# Connect to device and start pipeline
+with dai.Device(pipeline) as device:
+    # Output queue to receive the report back from the device
+    qOut = device.getOutputQueue(name="outReport")
+    report = qOut.get()
+    print(f"Got {report.fps} FPS, in {report.timeTotal} s for {report.numMessagesReceived} messages.")
\ No newline at end of file
diff --git a/examples/tmp_s3/benchmarks/VideoEncoder/bench_encoders_all.py b/examples/tmp_s3/benchmarks/VideoEncoder/bench_encoders_all.py
new file mode 100644
index 000000000..b95b3df06
--- /dev/null
+++ b/examples/tmp_s3/benchmarks/VideoEncoder/bench_encoders_all.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+import depthai as dai
+import time
+
+color_attributes = ["video", "isp"]
+video_size = (1920, 1080)
+
+cams_to_test = [
+    {
+        "name": "rgb",
+        "mono": False,
+        "socket": dai.CameraBoardSocket.RGB,
+        "resolution": dai.ColorCameraProperties.SensorResolution.THE_4000X3000,
+        "fps": 30,
+    },
+    {
+        "name": "night",
+        "mono": False,
+        "socket": dai.CameraBoardSocket.CAM_D,
+        "resolution": dai.ColorCameraProperties.SensorResolution.THE_4000X3000,
+        "fps": 30,
+    },
+]
+
+
+def add_cam(pipeline, props, colorOut):
+    xoutReport = pipeline.create(dai.node.XLinkOut)
+    benchmarkIn = pipeline.create(dai.node.BenchmarkIn)
+    videoEnc = pipeline.create(dai.node.VideoEncoder)
+    videoEnc.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H264_MAIN)
+    benchmarkIn.setNumMessagesToGet(10)
+    streamName = props["name"] + " - " + colorOut
+    xoutReport.setStreamName(streamName)
+    cameraNode = pipeline.create(dai.node.ColorCamera)
+    # Link whichever color output is being measured (isp/video)
+    if colorOut == "isp":
+        cameraNode.isp.link(videoEnc.input)
+    elif colorOut == "video":
+        cameraNode.setVideoSize(video_size)
+        cameraNode.video.link(videoEnc.input)
+    cameraNode.setResolution(props["resolution"])
+    cameraNode.setFps(props["fps"])
+    cameraNode.setBoardSocket(props["socket"])
+    videoEnc.bitstream.link(benchmarkIn.input)
+    benchmarkIn.report.link(xoutReport.input)
+    return streamName
+
+for color_out in color_attributes:
+    # Create pipeline
+    pipeline = dai.Pipeline()
+    streamNames = []
+    for props in cams_to_test:
+        streamName = add_cam(pipeline, props, color_out)
+        streamNames.append(streamName)
+
+    # Connect to device and start pipeline
+    with dai.Device(pipeline) as device:
+        # Output queues to receive the reports back from the device
+        qOuts = [device.getOutputQueue(name=streamName) for streamName in streamNames]
+        reports = [qOut.get() for qOut in qOuts]
+        for i, report in enumerate(reports):
+            print(f"Stream: {streamNames[i]}")
+            print(f"Got {report.fps} FPS, in {report.timeTotal} seconds for {report.numMessagesReceived} messages.")
+    time.sleep(7)
+    print("----------------------------------------------\n\n\n")
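
-- 
Each script boots the device, blocks until the BenchmarkIn report(s) arrive,
and prints them. Assuming a device is attached and a depthai build that ships
the BenchmarkIn/BenchmarkOut nodes, a run looks like:

    python3 examples/tmp_s3/benchmarks/Camera/bench_mono.py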