diff --git a/README.md b/README.md index 02de646..629a42a 100644 --- a/README.md +++ b/README.md @@ -50,13 +50,15 @@ |Batch Size | 32 | |Optimizer | ADAM | -## Reproduction +## Model Reproduction * In order to reproduce the model, it requires our datasets. You can send me an e-mail at kawaekc@gmail.com if you are interested. - - Install dependencies - ```Bash - pip install -r requirements.txt - ``` + - Install dependencies + - ```Remove "+cpu" and the "--find-links" flag in requirements.txt to get CUDA support``` + + ```Bash + pip install -r requirements.txt + ``` - Run the train.py python script @@ -65,3 +67,19 @@ ``` - Open and run the notebook for prediction: `predictor.ipynb` + +## Streamlit Reproduction + - Install dependencies + + ```Bash + pip install -r requirements.txt + ``` + + - Run the Streamlit app + + ```Bash + streamlit run streamlit_app.py + ``` + + - Streamlit web application will be hosted at http://localhost:8501 + \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 1147ad3..2f36553 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,6 +4,8 @@ numpy==1.20.2 tqdm==4.60.0 pillow==8.2.0 streamlit==0.80.0 -torch==1.8.1 -torchvision==0.9.1 +# [STREAMLIT] Remove "+cpu" and the "--find-links" flag in requirements.txt to get CUDA support +--find-links https://download.pytorch.org/whl/torch_stable.html +torch==1.8.1+cpu +torchvision==0.9.1+cpu psutil==5.8.0 \ No newline at end of file diff --git a/streamlit_app.py b/streamlit_app.py index 56b654f..3842944 100644 --- a/streamlit_app.py +++ b/streamlit_app.py @@ -79,6 +79,7 @@ } # Model configuration +# Streamlit server does not provide a GPU, so we will go with CPU! 
processing_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") img_normalizer = transforms.Normalize( @@ -95,7 +96,7 @@ ) -@st.cache(allow_output_mutation=True, max_entries=3, ttl=1800) +@st.cache(allow_output_mutation=True, suppress_st_warning=True, max_entries=2, ttl=600) def initialize_model(device=processing_device): """Retrieves the butt_bread trained model and maps it to the CPU by default, can also specify GPU here.""" model = models.resnet152(pretrained=False).to(device) @@ -110,6 +111,7 @@ def initialize_model(device=processing_device): return model +@st.cache(max_entries=5, ttl=300) def predict(img, model): """Make a prediction on a single image""" input_img = img_transformer(img).float() @@ -135,6 +137,10 @@ def predict(img, model): }, } + input_img = None + pred_logit_tensor = None + pred_probs = None + return json_output def download_model(): @@ -143,7 +149,7 @@ def download_model(): print("Downloading butt_bread model !!") req = requests.get(model_url_path, allow_redirects=True) open("buttbread_resnet152_3.h5", "wb").write(req.content) - return True + req = None return True @@ -222,6 +228,8 @@ def health_check(): st.image(resized_image) st.write("Prediction:") st.json(prediction) + img = None + resized_image = None + prediction = None - # Reset model after used model = None