From df6dc9ee0976645df4e4b6cb03234bca174f300b Mon Sep 17 00:00:00 2001
From: Kawaeee
Date: Tue, 4 May 2021 18:12:27 +0700
Subject: [PATCH 1/5] Swap torch,torchvision into cpu version

---
 README.md        | 22 ++++++++++++++++++++--
 requirements.txt |  6 ++++--
 2 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 02de646..7a0bbac 100644
--- a/README.md
+++ b/README.md
@@ -50,10 +50,12 @@
 |Batch Size | 32 |
 |Optimizer | ADAM |
 
-## Reproduction
+## Model Reproduction
 * In order to reproduce the model, it requires our datasets. You can send me an e-mail at kawaekc@gmail.com if you are interested.
-  - Install dependencies
+  - Install dependencies
+  - Remove "+cpu" and the "--find-links" flag in requirements.txt to get CUDA support
+
   ```Bash
   pip install -r requirements.txt
   ```
 
@@ -65,3 +67,19 @@
   ```
 
   - Open and run the notebook for prediction: `predictor.ipynb`
+
+## Streamlit Reproduction
+  - Install dependencies
+
+  ```Bash
+  pip install -r requirements.txt
+  ```
+
+  - Run the Streamlit app
+
+  ```Bash
+  streamlit run streamlit_app,py
+  ```
+
+  - Streamlit web application will be hosted at http://localhost:8501
+  
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 1147ad3..2f36553 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,6 +4,8 @@ numpy==1.20.2
 tqdm==4.60.0
 pillow==8.2.0
 streamlit==0.80.0
-torch==1.8.1
-torchvision==0.9.1
+# [STREAMLIT] Remove "+cpu" and the "--find-links" flag in requirements.txt to get CUDA support
+--find-links https://download.pytorch.org/whl/torch_stable.html
+torch==1.8.1+cpu
+torchvision==0.9.1+cpu
 psutil==5.8.0
\ No newline at end of file

From 3aaa557a63eb6fcb1bb6e11ac40e29642394f28e Mon Sep 17 00:00:00 2001
From: Kawaeee
Date: Tue, 4 May 2021 18:20:42 +0700
Subject: [PATCH 2/5] Update README.md

---
 README.md        | 4 ++--
 streamlit_app.py | 3 ++-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 7a0bbac..f956ad2 100644
--- a/README.md
+++ b/README.md
@@ -54,8 +54,8 @@
 * In order to reproduce the model, it requires our datasets. You can send me an e-mail at kawaekc@gmail.com if you are interested.
   - Install dependencies
-  - Remove "+cpu" and the "--find-links" flag in requirements.txt to get CUDA support
-
+  ###### Remove "+cpu" and the "--find-links" flag in requirements.txt to get CUDA support
+
   ```Bash
   pip install -r requirements.txt
   ```
 
diff --git a/streamlit_app.py b/streamlit_app.py
index 56b654f..b2e7038 100644
--- a/streamlit_app.py
+++ b/streamlit_app.py
@@ -79,6 +79,7 @@
 }
 
 # Model configuration
+# Streamlit server does not provide a GPU, so we will go with CPU!
 processing_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 img_normalizer = transforms.Normalize(
@@ -95,7 +96,7 @@
 )
 
 
-@st.cache(allow_output_mutation=True, max_entries=3, ttl=1800)
+@st.cache(allow_output_mutation=True, max_entries=2, ttl=600)
 def initialize_model(device=processing_device):
     """Retrieves the butt_bread trained model and maps it to the CPU by default, can also specify GPU here."""
     model = models.resnet152(pretrained=False).to(device)

From ed960e6b6b0cb142d3d1a4d8d312ee67abd9f83e Mon Sep 17 00:00:00 2001
From: Kawaeee
Date: Tue, 4 May 2021 18:37:02 +0700
Subject: [PATCH 3/5] add prediction cache

---
 README.md        | 10 +++++-----
 streamlit_app.py |  3 ++-
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index f956ad2..6210a80 100644
--- a/README.md
+++ b/README.md
@@ -54,11 +54,11 @@
 * In order to reproduce the model, it requires our datasets. You can send me an e-mail at kawaekc@gmail.com if you are interested.
   - Install dependencies
-  ###### Remove "+cpu" and the "--find-links" flag in requirements.txt to get CUDA support
-
-  ```Bash
-  pip install -r requirements.txt
-  ```
+  - ```Remove "+cpu" and the "--find-links" flag in requirements.txt to get CUDA support```
+
+  ```Bash
+  pip install -r requirements.txt
+  ```
 
   - Run the train.py python script
diff --git a/streamlit_app.py b/streamlit_app.py
index b2e7038..a2b7b60 100644
--- a/streamlit_app.py
+++ b/streamlit_app.py
@@ -96,7 +96,7 @@
 )
 
 
-@st.cache(allow_output_mutation=True, max_entries=2, ttl=600)
+@st.cache(allow_output_mutation=True, suppress_st_warning=True, max_entries=2, ttl=600)
 def initialize_model(device=processing_device):
     """Retrieves the butt_bread trained model and maps it to the CPU by default, can also specify GPU here."""
     model = models.resnet152(pretrained=False).to(device)
@@ -111,6 +111,7 @@ def initialize_model(device=processing_device):
     return model
 
 
+@st.cache(max_entries=10, ttl=300)
 def predict(img, model):
     """Make a prediction on a single image"""
     input_img = img_transformer(img).float()

From 2b154be8797b3d42ce3e722e31d0cb791b57d582 Mon Sep 17 00:00:00 2001
From: Kawaeee
Date: Tue, 4 May 2021 18:46:56 +0700
Subject: [PATCH 4/5] Clear unused variables after inference

---
 streamlit_app.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/streamlit_app.py b/streamlit_app.py
index a2b7b60..50f3129 100644
--- a/streamlit_app.py
+++ b/streamlit_app.py
@@ -137,6 +137,10 @@
         },
     }
 
+    input_img = None
+    pred_logit_tensor = None
+    pred_probs = None
+
     return json_output
 
 def download_model():
@@ -145,7 +149,7 @@
         print("Downloading butt_bread model !!")
         req = requests.get(model_url_path, allow_redirects=True)
         open("buttbread_resnet152_3.h5", "wb").write(req.content)
-        return True
+        req = None
 
     return True
 
@@ -224,6 +228,8 @@ def health_check():
     st.image(resized_image)
     st.write("Prediction:")
     st.json(prediction)
+    img = None
+    resized_image = None
+    prediction = None
 
-    # Reset model after used
     model = None

From d4ff43c9b36bf6c78da180be7bbdd53e8c55951c Mon Sep 17 00:00:00 2001
From: Kawaeee
Date: Tue, 4 May 2021 18:53:19 +0700
Subject: [PATCH 5/5] Reduce max_entries cache for predict()

---
 README.md        | 2 +-
 streamlit_app.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 6210a80..629a42a 100644
--- a/README.md
+++ b/README.md
@@ -78,7 +78,7 @@
   - Run the Streamlit app
 
   ```Bash
-  streamlit run streamlit_app,py
+  streamlit run streamlit_app.py
   ```
 
   - Streamlit web application will be hosted at http://localhost:8501
diff --git a/streamlit_app.py b/streamlit_app.py
index 50f3129..3842944 100644
--- a/streamlit_app.py
+++ b/streamlit_app.py
@@ -111,7 +111,7 @@ def initialize_model(device=processing_device):
     return model
 
 
-@st.cache(max_entries=10, ttl=300)
+@st.cache(max_entries=5, ttl=300)
 def predict(img, model):
     """Make a prediction on a single image"""
     input_img = img_transformer(img).float()
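
Patch 1 pins `torch==1.8.1+cpu` and `torchvision==0.9.1+cpu` via `--find-links https://download.pytorch.org/whl/torch_stable.html` so the Streamlit host installs the small CPU wheels instead of the much larger CUDA builds. A quick sanity check (not part of the repository) for which build actually got installed, before or after stripping the "+cpu" suffixes, is to print the reported versions and CUDA availability:

```Python
import torch
import torchvision

# CPU-only wheels report a "+cpu" local version suffix, e.g. "1.8.1+cpu" / "0.9.1+cpu".
print(torch.__version__, torchvision.__version__)

# False on the CPU wheels and on Streamlit's hosting; True once a CUDA build
# is installed and a GPU is visible.
print(torch.cuda.is_available())
```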
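Taken together, patches 2, 3, and 5 settle on a two-level caching scheme: the expensive ResNet-152 load is cached with `allow_output_mutation=True` (a PyTorch module mutates its own internal state, which this flag exempts from Streamlit's output-mutation check), while per-image predictions get a small `max_entries` and a short `ttl` so cached results are evicted before they accumulate on memory-limited hosting. The sketch below shows that pattern against the legacy `st.cache` API in Streamlit 0.80; the `load_model`/`classify` names, the 224x224 resize, and the ImageNet normalization constants are illustrative stand-ins for the repository's own functions and transforms, and the prediction cache is keyed on the uploaded bytes rather than on the model object so the hasher never has to deal with the model itself.

```Python
import io

import streamlit as st
import torch
from PIL import Image
from torchvision import models, transforms

# Streamlit's hosted tier has no GPU, so this resolves to "cpu" there.
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Illustrative preprocessing; the repository's actual resize/normalize values may differ.
PREPROCESS = transforms.Compose(
    [
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
)


@st.cache(allow_output_mutation=True, suppress_st_warning=True, max_entries=2, ttl=600)
def load_model(device=DEVICE):
    # Cached so the ResNet-152 weights are loaded once, not on every script rerun.
    # allow_output_mutation=True stops Streamlit from re-hashing (and warning about)
    # the mutable model object; max_entries/ttl keep stale copies from piling up.
    model = models.resnet152(pretrained=False).to(device)
    # Loading the downloaded checkpoint would happen here, e.g.:
    # model.load_state_dict(torch.load("buttbread_resnet152_3.h5", map_location=device))
    model.eval()
    return model


@st.cache(max_entries=5, ttl=300)
def classify(image_bytes: bytes):
    # Keyed on the raw upload bytes, which Streamlit's hasher handles directly;
    # the model comes from the cached loader instead of being passed as an argument.
    model = load_model()
    img = Image.open(io.BytesIO(image_bytes)).convert("RGB")
    batch = PREPROCESS(img).unsqueeze(0).to(DEVICE)
    with torch.no_grad():
        probs = torch.softmax(model(batch), dim=1)
    return probs.squeeze(0).cpu().tolist()
```

At the call site, something like `classify(uploaded_file.getvalue())` returns the cached probabilities whenever the same upload arrives again within the TTL window. `ttl` evicts entries by age and `max_entries` by count, whichever limit is hit first, which is why patch 5 dropping `max_entries` from 10 to 5 directly halves how many cached predictions can be resident at once.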
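Patch 4's `x = None` assignments are a memory measure rather than a functional change: dropping the last reference to the input tensor, the logits, and the downloaded weights blob lets CPython's reference counting reclaim them promptly, which matters on a host with a tight memory ceiling. A standalone sketch of the same idea follows; the function name is hypothetical, and the explicit `gc.collect()` goes beyond what the patches themselves do:

```Python
import gc

import torch


def predict_and_release(model, batch):
    """Run one inference, then drop local references so the intermediate
    tensors can be reclaimed promptly on a memory-constrained host."""
    with torch.no_grad():
        logits = model(batch)
        probs = torch.softmax(logits, dim=1).squeeze(0).cpu().tolist()

    # Same spirit as patch 4's `input_img = None` / `pred_logit_tensor = None`:
    # once no name refers to the tensors, reference counting frees them.
    del logits, batch

    # Not in the patches: an explicit collection pass, occasionally useful when
    # reference cycles keep memory from dropping immediately.
    gc.collect()

    return probs
```

Deleting a local name only helps if the caller is not still holding its own reference, which is why the script-level `img = None`, `prediction = None`, and `model = None` assignments in patch 4 matter most: that scope stays alive for the rest of the Streamlit rerun.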