From a514d5fbf89ffd2b020204d7126102a125c9ae4a Mon Sep 17 00:00:00 2001
From: Idan Ben Ami <109598548+Idan-BenAmi@users.noreply.github.com>
Date: Wed, 4 Sep 2024 14:53:35 +0300
Subject: [PATCH] Update the text in tutorials for generic timm and torchvision models (#1200)

---
 .../notebooks/imx500_notebooks/README.md      | 31 ++++++++-----------
 ...timm_classification_model_for_imx500.ipynb |  3 +-
 ...sion_classification_model_for_imx500.ipynb |  7 +++--
 3 files changed, 20 insertions(+), 21 deletions(-)

diff --git a/tutorials/notebooks/imx500_notebooks/README.md b/tutorials/notebooks/imx500_notebooks/README.md
index 2abb783b6..681e3de67 100644
--- a/tutorials/notebooks/imx500_notebooks/README.md
+++ b/tutorials/notebooks/imx500_notebooks/README.md
@@ -25,7 +25,7 @@ deployment performance.
     Classification MobilenetV2
-    Keras
+    ipynb (Keras)
     Keras Applications ImageNet
@@ -34,7 +34,7 @@ deployment performance.
     MobileVit
-    PyTorch
+    ipynb (PyTorch)
     Timm mct-model-garden ImageNet
@@ -43,7 +43,7 @@ deployment performance.
     regnety_002.pycls_in1k
-    PyTorch
+    ipynb (PyTorch)
     Timm ImageNet
@@ -52,7 +52,6 @@ deployment performance.
     regnetx_002.pycls_in1k
-    PyTorch
     Timm ImageNet
@@ -61,7 +60,6 @@ deployment performance.
     regnety_004.pycls_in1k
-    PyTorch
     Timm ImageNet
@@ -70,7 +68,7 @@ deployment performance.
     mnasnet1_0
-    PyTorch
+    ipynb (PyTorch)
     torchvision ImageNet
@@ -79,7 +77,6 @@ deployment performance.
     mobilenet_v2
-    PyTorch
     torchvision ImageNet
@@ -88,7 +85,6 @@ deployment performance.
     regnet_y_400mf
-    PyTorch
     torchvision ImageNet
@@ -97,7 +93,6 @@ deployment performance.
     shufflenet_v2_x1_5
-    PyTorch
     torchvision ImageNet
@@ -108,7 +103,7 @@ deployment performance.
     Object Detection YOLOv8n
-    Keras
+    ipynb (Keras)
     Ultralytics mct-model-garden COCO
@@ -117,7 +112,7 @@ deployment performance.
     YOLOv8n
-    PyTorch
+    ipynb (PyTorch)
     Ultralytics mct-model-garden COCO
@@ -126,7 +121,7 @@ deployment performance.
     NanoDet-Plus-m-416
-    Keras
+    ipynb (Keras)
     Nanodet mct-model-garden COCO
@@ -135,7 +130,7 @@ deployment performance.
     EfficientDet-lite0
-    Keras
+    ipynb (Keras)
     efficientdet-pytorch mct-model-garden COCO
@@ -145,7 +140,7 @@ deployment performance.
     Semantic Segmentation Deeplabv3plus
-    Keras
+    ipynb (Keras)
     bonlime mct-model-garden PASCAL VOC
@@ -155,7 +150,7 @@ deployment performance.
     Instance Segmentation YOLOv8n-seg
-    PyTorch
+    ipynb (PyTorch)
     Ultralytics mct-model-garden COCO
@@ -165,7 +160,7 @@ deployment performance.
     Pose Estimation YOLOv8n-pose
-    PyTorch
+    ipynb (PyTorch)
     Ultralytics mct-model-garden COCO
@@ -175,8 +170,8 @@ deployment performance.
     Anomaly Detection Efficient AD
-    PyTorch
-    Ultralytics
+    ipynb (PyTorch)
+    *EfficientAD paper
     mct-model-garden MvTech 98.56
diff --git a/tutorials/notebooks/imx500_notebooks/pytorch/pytorch_timm_classification_model_for_imx500.ipynb b/tutorials/notebooks/imx500_notebooks/pytorch/pytorch_timm_classification_model_for_imx500.ipynb
index 5a02575ba..c7cdc8b8e 100644
--- a/tutorials/notebooks/imx500_notebooks/pytorch/pytorch_timm_classification_model_for_imx500.ipynb
+++ b/tutorials/notebooks/imx500_notebooks/pytorch/pytorch_timm_classification_model_for_imx500.ipynb
@@ -111,7 +111,8 @@
   "source": [
    "## Model Quantization\n",
    "\n",
-   "### Download a Pre-Trained Model \n"
+   "### Download a Pre-Trained Model - Please select a Timm model\n",
+   "The tutorial is pre-configured to download the `mobilenet_v2` model. If you wish to use a different model, please change the model and weights below based on [Timm](https://github.com/huggingface/pytorch-image-models)."
  ]
 },
 {
diff --git a/tutorials/notebooks/imx500_notebooks/pytorch/pytorch_torchvision_classification_model_for_imx500.ipynb b/tutorials/notebooks/imx500_notebooks/pytorch/pytorch_torchvision_classification_model_for_imx500.ipynb
index b62d507fe..309a42195 100644
--- a/tutorials/notebooks/imx500_notebooks/pytorch/pytorch_torchvision_classification_model_for_imx500.ipynb
+++ b/tutorials/notebooks/imx500_notebooks/pytorch/pytorch_torchvision_classification_model_for_imx500.ipynb
@@ -39,7 +39,9 @@
 {
  "metadata": {},
  "cell_type": "markdown",
- "source": "Install MCT (if it’s not already installed). Additionally, in order to use all the necessary utility functions for this tutorial, we also copy [MCT tutorials folder](https://github.com/sony/model_optimization/tree/main/tutorials) and add it to the system path.",
+ "source": [
+  "Install MCT (if it’s not already installed). Additionally, in order to use all the necessary utility functions for this tutorial, we also copy [MCT tutorials folder](https://github.com/sony/model_optimization/tree/main/tutorials) and add it to the system path."
+ ],
  "id": "b1a05efedd4dbc77"
 },
 {
@@ -93,7 +95,8 @@
  "source": [
   "## Model Quantization\n",
   "\n",
-  "### Download a Pre-Trained Model"
+  "### Download a Pre-Trained Model - Please select a Torchvision model\n",
+  "The tutorial is pre-configured to download the `mobilenet_v2` model. If you wish to use a different model, please change the model and weights below based on [torchvision](https://pytorch.org/vision/stable/models.html)."
  ],
  "id": "7059e58ac6efff74"
 },
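For the Timm notebook text touched by this patch, here is a minimal, illustrative sketch of what selecting a different Timm model could look like before handing it to MCT for quantization. The model name `regnety_002.pycls_in1k` and the dummy-input check are assumptions for illustration (any entry from `timm.list_models(pretrained=True)` would do), and `timm.data.resolve_model_data_config` assumes a reasonably recent timm release; none of this is part of the patch itself.

```python
import timm
import torch

# Illustrative model choice -- any classification model from
# timm.list_models(pretrained=True) could be used instead of the
# tutorial's default mobilenet_v2.
model_name = "regnety_002.pycls_in1k"

# Create the model with its pretrained ImageNet weights and switch to eval mode.
model = timm.create_model(model_name, pretrained=True)
model.eval()

# Resolve the preprocessing (input size, normalization) that matches the chosen weights.
data_config = timm.data.resolve_model_data_config(model)
transform = timm.data.create_transform(**data_config, is_training=False)

# Quick sanity check on a random input before passing the model to MCT.
dummy = torch.randn(1, *data_config["input_size"])
with torch.no_grad():
    logits = model(dummy)
print(logits.shape)  # e.g. torch.Size([1, 1000]) for ImageNet classifiers
```

Whatever model is selected, the rest of the notebook flow (representative dataset, post-training quantization, IMX500 export) is intended to stay unchanged.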
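Likewise, for the torchvision notebook, a minimal sketch of swapping the pre-configured `mobilenet_v2` for another torchvision model/weights pair. The `MobileNet_V2_Weights.IMAGENET1K_V2` choice and the dummy-input check are illustrative assumptions, not content from the patch.

```python
import torch
from torchvision.models import mobilenet_v2, MobileNet_V2_Weights
# To use a different model, swap both the constructor and its weights enum, e.g.:
# from torchvision.models import resnet50, ResNet50_Weights

# The tutorial's default: mobilenet_v2 with its ImageNet weights.
weights = MobileNet_V2_Weights.IMAGENET1K_V2
model = mobilenet_v2(weights=weights)
model.eval()

# Each weights enum bundles the preprocessing pipeline it was trained with.
preprocess = weights.transforms()

# Quick sanity check before quantizing with MCT.
dummy = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(dummy)
print(logits.shape)  # torch.Size([1, 1000])
```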