From b3e10e4df9ef3a6e22394bb269c8a920df6b6a62 Mon Sep 17 00:00:00 2001
From: Feiyue Chen
Date: Fri, 26 Apr 2024 03:30:06 +0000
Subject: [PATCH] Fixed typing error in README

Added separate patch to enable the external delegate for label_image &&
Update README.md

Type: Documentation

Signed-off-by: Feiyue Chen
---
 README.md                         | 26 +++++++++++++-------------
 patches/label_image_support.patch | 16 ++++++++++++++++
 2 files changed, 29 insertions(+), 13 deletions(-)
 create mode 100644 patches/label_image_support.patch

diff --git a/README.md b/README.md
index f4bd4f8..79d58fa 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,8 @@
 # TfLite-vx-delegate
-TfLite-vx-delegate constructed with TIM-VX as an openvx delegate for tensorflow lite. Before vx-delegate, you may have nnapi-linux version from Verisilicon, we suggest you move to this new delegate because:
+TfLite-vx-delegate is constructed with TIM-VX as an OpenVX delegate for TensorFlow Lite. Before vx-delegate, you may have used the nnapi-linux version from VeriSilicon; we suggest you move to this new delegate because:
 
     1. without nnapi, it's flexible to enable more AI operators.
-    2. vx-delegate is opensourced, and will promised compatible with latest tensorflow release(currently v2.9.0).
+    2. vx-delegate is open-sourced, and is promised to stay compatible with the latest tensorflow release (currently v2.14.0).
 
 # Use tflite-vx-delegate
 ## Prepare source code
@@ -27,7 +27,7 @@ make vx_delegate -j12
 # benchmark_model
 make benchmark_model -j12
 # label_image
-make lable_image -j12
+make label_image -j12
 ```
 If you would like to build with your own vivante driver sdk and tim-vx build, you need do cross-build as
 ```sh
@@ -36,8 +36,8 @@ mkdir build && cd build
 cmake .. -DCMAKE_TOOLCHAIN_FILE= -DEXTERNAL_VIV_SDK=
 # we can also build from a specific ovxlib instead of use default one by set
 # TIM_VX_USE_EXTERNAL_OVXLIB=ON
-# OVXLIB_INC=
-# OVXLIB_LIB=
+# OVXLIB_INC=
+# OVXLIB_LIB=
 ```
 If you would like to build using local version of tensorflow, you can use `FETCHCONTENT_SOURCE_DIR_TENSORFLOW` cmake variable. Point this variable to your tensorflow tree. For additional details on this variable please see the [official cmake documentation](https://cmake.org/cmake/help/latest/module/FetchContent.html#command:fetchcontent_populate)
 
@@ -51,13 +51,13 @@ After cmake execution completes, build and run as usual. Beware that cmake proce
 
 ## Enable external delegate support in benchmark_model/label_image
 
-For tensorflow v2.8.0, addtional patch `pwd`/patches/0001-TensorFlow-V280-Enable-External-Delegate.patch requred to enable enable external delegate in benchmark_model/label_image.
+For tensorflow v2.8.0, the additional patch `pwd`/patches/0001-TensorFlow-V280-Enable-External-Delegate.patch is required to enable the external delegate in benchmark_model/label_image. For higher versions of TensorFlow, benchmark_model already enables the external delegate mechanism, but you still need to apply the patch `pwd`/patches/label_image_support.patch to enable the external delegate in label_image.
 If tensorflow source code downloaded by cmake, you can find it in /_deps/tensorflow-src
 The patch get merged into Tensorflow master branch, no patch required for master branch.
 
-## benchmark_model/Label_image compatible with Tflite+NBG
-With our Acuity Toolkit, you can generate tflite file with compiled NBG(**N**etwork **B**inary **G**raph) as a custom operator. To support this special format, you should build benchmark_model/label_image from our delegate repo not use the offical one. 
+## benchmark_model/label_image compatible with Tflite+NBG
+With our Acuity Toolkit, you can generate a tflite file with a compiled NBG (**N**etwork **B**inary **G**raph) as a custom operator. To support this special format, you should build benchmark_model/label_image from our delegate repo and not use the official one.
 
 ## Run
 ```sh
 # if you would like to run with x86 simulator
@@ -66,9 +66,9 @@ With our Acuity Toolkit, you can generate tflite file with compiled NBG(**N**etw
 export VIVANTE_SDK_DIR=
 # Please copy libtim-vx.so to drivers/ directory
 export LD_LIBRARY_PATH=${VIVANTE_SDK_DIR}/drivers:$LD_LIBRARY_PATH # the "drivers" maybe named as lib
-./benchmark_model --external_delegate_path= --graph=
+./benchmark_model --external_delegate_path= --graph=
 # If you would like to use cache mode which save and load binary graph in local disk
-./benchmark_model --external_delegate_path= \
+./benchmark_model --external_delegate_path= \
     --external_delegate_options='allowed_cache_mode:true;cache_file_path:' \
     --graph=
 ```
@@ -76,7 +76,7 @@ export LD_LIBRARY_PATH=${VIVANTE_SDK_DIR}/drivers:$LD_LIBRARY_PATH # the "driver
 
 ## Test
 Introduced unit test with tensorflow keras api and convert it to tflite with quantized or none-quantized model, Golden generated from CPU implementation of tflite
-[Details for run test](./test/python/READMME.md)
+[Details for run test](./test/python/README.md)
 
 [Model verification script](./test/python/run_model.py) to compare NPU result with CPU result
 
@@ -91,7 +91,7 @@ examples/minimal
 modified based on [offical minimal](https://cs.opensource.google/tensorflow/tensorflow/+/master:tensorflow/lite/examples/minimal/)
 
 ```sh
-minimal
+minimal
 # If you would like to use cache mode which save and load binary graph in local disk
-minimal use_cache_mode
+minimal use_cache_mode
 ```
diff --git a/patches/label_image_support.patch b/patches/label_image_support.patch
new file mode 100644
index 0000000..1f6e52b
--- /dev/null
+++ b/patches/label_image_support.patch
@@ -0,0 +1,16 @@
+diff --git a/tensorflow/lite/examples/label_image/CMakeLists.txt b/tensorflow/lite/examples/label_image/CMakeLists.txt
+index f3edeb40a31..b21fa42ea03 100644
+--- a/tensorflow/lite/examples/label_image/CMakeLists.txt
++++ b/tensorflow/lite/examples/label_image/CMakeLists.txt
+@@ -55,6 +55,11 @@ if(TFLITE_ENABLE_GPU)
+   )
+ endif() # TFLITE_ENABLE_GPU
+
++if(TFLITE_ENABLE_EXTERNAL_DELEGATE)
++  list(APPEND TFLITE_LABEL_IMAGE_SRCS
++    ${TFLITE_SOURCE_DIR}/tools/delegates/external_delegate_provider.cc)
++endif()
++
+ add_executable(label_image
+   EXCLUDE_FROM_ALL
+   ${TFLITE_LABEL_IMAGE_SRCS}
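
For reviewers who want to try this change locally, the sketch below applies the new `patches/label_image_support.patch` to the TensorFlow tree fetched by CMake and rebuilds label_image, following the build steps already described in the README above. It is illustrative only: the `build/` directory name, the relative paths, and the model/image/label file names are assumptions, not part of this patch.

```sh
# Illustrative sketch: assumes TensorFlow sources were fetched by CMake into
# <build dir>/_deps/tensorflow-src, as described in the README above.
cd build/_deps/tensorflow-src
patch -p1 < ../../../patches/label_image_support.patch   # apply the new patch
cd ../..                                                 # back to the build directory
make label_image -j12                                    # rebuild with the external delegate provider
# label_image should then accept the same external delegate flag as benchmark_model
# (model, image, and label files below are placeholders):
./label_image -m model.tflite -i image.bmp -l labels.txt \
    --external_delegate_path=<path to libvx_delegate.so>
```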