Merge pull request #1 from staskh/python3.11
Upgrade to PyTorch 2.3 and Python 3.11
staskh authored Jun 28, 2024
2 parents 3ce98fb + 6690695 commit 029425e
Showing 11 changed files with 163 additions and 133 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -127,3 +127,6 @@ dmypy.json

# Pyre type checker
.pyre/
# do not save Results in Git
Results/*
M1_Retinal_Image_quality_EyePACS/test_outside/results_ensemble.csv
27 changes: 27 additions & 0 deletions .vscode/launch.json
@@ -0,0 +1,27 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Python Debugger: Current File with Arguments",
            "type": "debugpy",
            "request": "launch",
            "program": "${file}",
            "console": "integratedTerminal",
            "cwd": "${fileDirname}",
            "justMyCode": true,
            "args": "${command:pickArgs}"
        },
        {
            "name": "Debug",
            "type": "debugpy",
            "request": "launch",
            "program": "${file}",
            "console": "integratedTerminal",
            "justMyCode": true,
            "cwd": "${fileDirname}"
        }
    ]
}
13 changes: 6 additions & 7 deletions LOCAL.md
@@ -2,18 +2,18 @@

### Requirements

1. Linux is preferred. For windows, install [MinGW-w64](https://www.mingw-w64.org/) for using commands below to set enviroment.
1. Linux or Mac is preferred. For Windows, install [MinGW-w64](https://www.mingw-w64.org/) to use the commands below to set up the environment.
2. Anaconda or miniconda installed.
3. python=3.6, cudatoolkit=11.0, torch=1.7, etc. (installation steps below)
4. GPU is essential.
3. python=3.11, torch=2.3, etc. (installation steps below)
4. GPU is essential - NVIDIA (cuda) or M2 (mps).


### Package installation

Step 1: create virtual environment:
```bash
conda update conda
conda create -n automorph python=3.6 -y
conda create -n automorph python=3.11 -y
```

Step 2: Activate virtual environment and clone the code.
@@ -23,16 +23,15 @@ git clone https://github.com/rmaphoh/AutoMorph.git
cd AutoMorph
```

Step 3: install pytorch 1.7 and cudatoolkit 11.0
Step 3: install pytorch 2.3
```bash
conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cudatoolkit=11.0 -c pytorch -y
conda install pytorch==2.3.1 -c pytorch -y
```

Step 4: install other packages:
```bash
pip install --ignore-installed certifi
pip install -r requirement.txt
pip install efficientnet_pytorch
```

### Running
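A minimal sketch (not part of this commit) for sanity-checking the environment created by the LOCAL.md steps above; it assumes only the interpreter and the torch package pinned there:

```python
import sys
import torch

# Confirm the interpreter and PyTorch versions match the LOCAL.md pins.
print(f"Python  {sys.version.split()[0]}")   # expected 3.11.x
print(f"PyTorch {torch.__version__}")        # expected 2.3.x

# Report which accelerator the AutoMorph scripts would pick up.
if torch.cuda.is_available():
    print("CUDA available:", torch.cuda.get_device_name(0))
elif torch.backends.mps.is_available():
    print("MPS (Apple Silicon) available")
else:
    print("No GPU backend found; falling back to CPU")
```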
4 changes: 2 additions & 2 deletions M0_Preprocess/fundus_prep.py
@@ -72,7 +72,7 @@ def _get_radius_by_mask_center(mask,center):
# radius=
index=np.where(mask>0)
d_int=np.sqrt((index[0]-center[0])**2+(index[1]-center[1])**2)
b_count=np.bincount(np.ceil(d_int).astype(np.int))
b_count=np.bincount(np.ceil(d_int).astype(int))
radius=np.where(b_count>b_count.max()*0.995)[0].max()
return radius

@@ -129,7 +129,7 @@ def mask_image(img,mask):
def remove_back_area(img,bbox=None,border=None):
image=img
if border is None:
border=np.array((bbox[0],bbox[0]+bbox[2],bbox[1],bbox[1]+bbox[3],img.shape[0],img.shape[1]),dtype=np.int)
border=np.array((bbox[0],bbox[0]+bbox[2],bbox[1],bbox[1]+bbox[3],img.shape[0],img.shape[1]),dtype=int)
image=image[border[0]:border[1],border[2]:border[3],...]
return image,border

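Context for the `np.int` → `int` changes above (a hedged note, not part of the diff): `np.int` was deprecated in NumPy 1.20 and removed in NumPy 1.24, which is what a Python 3.11 environment typically installs, so the old calls now raise `AttributeError`. The builtin `int` is the drop-in replacement, e.g.:

```python
import numpy as np

d_int = np.array([1.2, 3.7, 2.0])
# np.ceil(d_int).astype(np.int)                    # AttributeError on NumPy >= 1.24
counts = np.bincount(np.ceil(d_int).astype(int))   # works on old and new NumPy
print(counts)
```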
51 changes: 30 additions & 21 deletions M1_Retinal_Image_quality_EyePACS/test_outside.py
@@ -12,19 +12,19 @@
import pandas as pd
import torch.nn as nn
from tqdm import tqdm
from pycm import *
import matplotlib
import matplotlib.pyplot as plt
#from pycm import *
# import matplotlib
# import matplotlib.pyplot as plt
from dataset import BasicDataset_OUT
from torch.utils.data import DataLoader
from model import Resnet101_fl, InceptionV3_fl, Densenet161_fl, Resnext101_32x8d_fl, MobilenetV2_fl, Vgg16_bn_fl, Efficientnet_fl


font = {
'weight' : 'normal',
'size' : 18}
plt.rc('font',family='Times New Roman')
matplotlib.rc('font', **font)
# font = {
# 'weight' : 'normal',
# 'size' : 18}
# plt.rc('font',family='Times New Roman')
# matplotlib.rc('font', **font)

def test_net(model_fl_1,
model_fl_2,
@@ -187,10 +187,19 @@ def get_args():
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
args = get_args()

torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)

#logging.info(f'Using device {device}')

# Check if CUDA is available
if torch.cuda.is_available():
    logging.info("CUDA is available. Using CUDA...")
    device = torch.device("cuda", args.local_rank)
elif torch.backends.mps.is_available():  # Check if MPS is available (for macOS)
    logging.info("MPS is available. Using MPS...")
    device = torch.device("mps")
else:
    logging.info("Neither CUDA nor MPS is available. Using CPU...")
    device = torch.device("cpu")

logging.info(f'Using device {device}')

test_dir = args.test_dir
dataset=args.dataset
@@ -243,31 +243,31 @@ def get_args():
model_fl_7.to(device=device)
model_fl_8.to(device=device)

map_location = {'cuda:%d' % 0: 'cuda:%d' % args.local_rank}
#map_location = {'cuda:%d' % 0: 'cuda:%d' % args.local_rank}
if args.load:
model_fl_1.load_state_dict(
torch.load(checkpoint_path_1, map_location="cuda:0")
torch.load(checkpoint_path_1, map_location=device)
)
model_fl_2.load_state_dict(
torch.load(checkpoint_path_2, map_location="cuda:0")
torch.load(checkpoint_path_2, map_location=device)
)
model_fl_3.load_state_dict(
torch.load(checkpoint_path_3, map_location="cuda:0")
torch.load(checkpoint_path_3, map_location=device)
)
model_fl_4.load_state_dict(
torch.load(checkpoint_path_4, map_location="cuda:0")
torch.load(checkpoint_path_4, map_location=device)
)
model_fl_5.load_state_dict(
torch.load(checkpoint_path_5, map_location="cuda:0")
torch.load(checkpoint_path_5, map_location=device)
)
model_fl_6.load_state_dict(
torch.load(checkpoint_path_6, map_location="cuda:0")
torch.load(checkpoint_path_6, map_location=device)
)
model_fl_7.load_state_dict(
torch.load(checkpoint_path_7, map_location="cuda:0")
torch.load(checkpoint_path_7, map_location=device)
)
model_fl_8.load_state_dict(
torch.load(checkpoint_path_8, map_location="cuda:0")
torch.load(checkpoint_path_8, map_location=device)
)

# faster convolutions, but more memory
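The CUDA → MPS → CPU selection and the `map_location=device` loads added in this file are repeated in the M2 scripts below; one way to keep that logic in a single place is a small helper like this (illustrative only — `pick_device` is a hypothetical name, not something this commit adds):

```python
import logging
import torch

def pick_device(local_rank: int = 0) -> torch.device:
    """Pick the best available backend: CUDA, then Apple MPS, then CPU."""
    if torch.cuda.is_available():
        logging.info("CUDA is available. Using CUDA...")
        return torch.device("cuda", local_rank)
    if torch.backends.mps.is_available():
        logging.info("MPS is available. Using MPS...")
        return torch.device("mps")
    logging.info("Neither CUDA nor MPS is available. Using CPU...")
    return torch.device("cpu")

# Usage mirroring the pattern in this diff:
# device = pick_device(args.local_rank)
# model.load_state_dict(torch.load(checkpoint_path, map_location=device))
```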
62 changes: 36 additions & 26 deletions M2_Artery_vein/test_outside.py
@@ -280,8 +280,18 @@ def get_args():

logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
args = get_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#logging.info(f'Using device {device}')
# Check if CUDA is available
if torch.cuda.is_available():
    logging.info("CUDA is available. Using CUDA...")
    device = torch.device("cuda:0")
elif torch.backends.mps.is_available():  # Check if MPS is available (for macOS)
    logging.info("MPS is available. Using MPS...")
    device = torch.device("mps")
else:
    logging.info("Neither CUDA nor MPS is available. Using CPU...")
    device = torch.device("cpu")

logging.info(f'Using device {device}')

img_size = Define_image_size(args.uniform, args.dataset)
dataset_name = args.dataset
@@ -347,79 +357,79 @@ def get_args():


for i in range(1):
net_G_1.load_state_dict(torch.load( checkpoint_saved_1 + 'CP_best_F1_all.pth'))
net_G_A_1.load_state_dict(torch.load( checkpoint_saved_1 + 'CP_best_F1_A.pth'))
net_G_V_1.load_state_dict(torch.load(checkpoint_saved_1 + 'CP_best_F1_V.pth'))
net_G_1.load_state_dict(torch.load( checkpoint_saved_1 + 'CP_best_F1_all.pth',map_location=device))
net_G_A_1.load_state_dict(torch.load( checkpoint_saved_1 + 'CP_best_F1_A.pth',map_location=device))
net_G_V_1.load_state_dict(torch.load(checkpoint_saved_1 + 'CP_best_F1_V.pth',map_location=device))
net_G_1.eval()
net_G_A_1.eval()
net_G_V_1.eval()
net_G_1.to(device=device)
net_G_A_1.to(device=device)
net_G_V_1.to(device=device)

net_G_2.load_state_dict(torch.load( checkpoint_saved_2 + 'CP_best_F1_all.pth'))
net_G_A_2.load_state_dict(torch.load( checkpoint_saved_2 + 'CP_best_F1_A.pth'))
net_G_V_2.load_state_dict(torch.load(checkpoint_saved_2 + 'CP_best_F1_V.pth'))
net_G_2.load_state_dict(torch.load( checkpoint_saved_2 + 'CP_best_F1_all.pth',map_location=device))
net_G_A_2.load_state_dict(torch.load( checkpoint_saved_2 + 'CP_best_F1_A.pth',map_location=device))
net_G_V_2.load_state_dict(torch.load(checkpoint_saved_2 + 'CP_best_F1_V.pth',map_location=device))
net_G_2.eval()
net_G_A_2.eval()
net_G_V_2.eval()
net_G_2.to(device=device)
net_G_A_2.to(device=device)
net_G_V_2.to(device=device)

net_G_3.load_state_dict(torch.load( checkpoint_saved_3 + 'CP_best_F1_all.pth'))
net_G_A_3.load_state_dict(torch.load( checkpoint_saved_3 + 'CP_best_F1_A.pth'))
net_G_V_3.load_state_dict(torch.load(checkpoint_saved_3 + 'CP_best_F1_V.pth'))
net_G_3.load_state_dict(torch.load( checkpoint_saved_3 + 'CP_best_F1_all.pth',map_location=device))
net_G_A_3.load_state_dict(torch.load( checkpoint_saved_3 + 'CP_best_F1_A.pth',map_location=device))
net_G_V_3.load_state_dict(torch.load(checkpoint_saved_3 + 'CP_best_F1_V.pth',map_location=device))
net_G_3.eval()
net_G_A_3.eval()
net_G_V_3.eval()
net_G_3.to(device=device)
net_G_A_3.to(device=device)
net_G_V_3.to(device=device)

net_G_4.load_state_dict(torch.load( checkpoint_saved_4 + 'CP_best_F1_all.pth'))
net_G_A_4.load_state_dict(torch.load( checkpoint_saved_4 + 'CP_best_F1_A.pth'))
net_G_V_4.load_state_dict(torch.load(checkpoint_saved_4 + 'CP_best_F1_V.pth'))
net_G_4.load_state_dict(torch.load( checkpoint_saved_4 + 'CP_best_F1_all.pth',map_location=device))
net_G_A_4.load_state_dict(torch.load( checkpoint_saved_4 + 'CP_best_F1_A.pth',map_location=device))
net_G_V_4.load_state_dict(torch.load(checkpoint_saved_4 + 'CP_best_F1_V.pth',map_location=device))
net_G_4.eval()
net_G_A_4.eval()
net_G_V_4.eval()
net_G_4.to(device=device)
net_G_A_4.to(device=device)
net_G_V_4.to(device=device)

net_G_5.load_state_dict(torch.load( checkpoint_saved_5 + 'CP_best_F1_all.pth'))
net_G_A_5.load_state_dict(torch.load( checkpoint_saved_5 + 'CP_best_F1_A.pth'))
net_G_V_5.load_state_dict(torch.load(checkpoint_saved_5 + 'CP_best_F1_V.pth'))
net_G_5.load_state_dict(torch.load( checkpoint_saved_5 + 'CP_best_F1_all.pth',map_location=device))
net_G_A_5.load_state_dict(torch.load( checkpoint_saved_5 + 'CP_best_F1_A.pth',map_location=device))
net_G_V_5.load_state_dict(torch.load(checkpoint_saved_5 + 'CP_best_F1_V.pth',map_location=device))
net_G_5.eval()
net_G_A_5.eval()
net_G_V_5.eval()
net_G_5.to(device=device)
net_G_A_5.to(device=device)
net_G_V_5.to(device=device)

net_G_6.load_state_dict(torch.load( checkpoint_saved_6 + 'CP_best_F1_all.pth'))
net_G_A_6.load_state_dict(torch.load( checkpoint_saved_6 + 'CP_best_F1_A.pth'))
net_G_V_6.load_state_dict(torch.load(checkpoint_saved_6 + 'CP_best_F1_V.pth'))
net_G_6.load_state_dict(torch.load( checkpoint_saved_6 + 'CP_best_F1_all.pth',map_location=device))
net_G_A_6.load_state_dict(torch.load( checkpoint_saved_6 + 'CP_best_F1_A.pth',map_location=device))
net_G_V_6.load_state_dict(torch.load(checkpoint_saved_6 + 'CP_best_F1_V.pth',map_location=device))
net_G_6.eval()
net_G_A_6.eval()
net_G_V_6.eval()
net_G_6.to(device=device)
net_G_A_6.to(device=device)
net_G_V_6.to(device=device)

net_G_7.load_state_dict(torch.load( checkpoint_saved_7 + 'CP_best_F1_all.pth'))
net_G_A_7.load_state_dict(torch.load( checkpoint_saved_7 + 'CP_best_F1_A.pth'))
net_G_V_7.load_state_dict(torch.load(checkpoint_saved_7 + 'CP_best_F1_V.pth'))
net_G_7.load_state_dict(torch.load( checkpoint_saved_7 + 'CP_best_F1_all.pth',map_location=device))
net_G_A_7.load_state_dict(torch.load( checkpoint_saved_7 + 'CP_best_F1_A.pth',map_location=device))
net_G_V_7.load_state_dict(torch.load(checkpoint_saved_7 + 'CP_best_F1_V.pth',map_location=device))
net_G_7.eval()
net_G_A_7.eval()
net_G_V_7.eval()
net_G_7.to(device=device)
net_G_A_7.to(device=device)
net_G_V_7.to(device=device)

net_G_8.load_state_dict(torch.load( checkpoint_saved_8 + 'CP_best_F1_all.pth'))
net_G_A_8.load_state_dict(torch.load( checkpoint_saved_8 + 'CP_best_F1_A.pth'))
net_G_V_8.load_state_dict(torch.load(checkpoint_saved_8 + 'CP_best_F1_V.pth'))
net_G_8.load_state_dict(torch.load( checkpoint_saved_8 + 'CP_best_F1_all.pth',map_location=device))
net_G_A_8.load_state_dict(torch.load( checkpoint_saved_8 + 'CP_best_F1_A.pth',map_location=device))
net_G_V_8.load_state_dict(torch.load(checkpoint_saved_8 + 'CP_best_F1_V.pth',map_location=device))
net_G_8.eval()
net_G_A_8.eval()
net_G_V_8.eval()
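Why the `map_location=device` argument matters in these loads (a hedged note, not part of the diff): the ensemble checkpoints were presumably saved from a CUDA process, so the serialized tensors carry `cuda:0` tags and `torch.load` fails on a machine without CUDA unless they are remapped. A minimal illustration — the checkpoint path is just a placeholder:

```python
import torch

device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")

# Without map_location, loading a CUDA-saved file on a CUDA-less machine
# raises a RuntimeError telling you to pass map_location.
state = torch.load("path/to/CP_best_F1_all.pth", map_location=device)
```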
33 changes: 22 additions & 11 deletions M2_Vessel_seg/test_outside_integrated.py
@@ -203,34 +203,34 @@ def test_net(data_path, batch_size, device, dataset_train, dataset_test, image_s
net_10 = Segmenter(input_channels=3, n_filters = 32, n_classes=1, bilinear=False)


net_1.load_state_dict(torch.load(dir_checkpoint_1 + 'G_best_F1_epoch.pth'))
net_1.load_state_dict(torch.load(dir_checkpoint_1 + 'G_best_F1_epoch.pth',map_location=device))
net_1.eval()
net_1.to(device=device)
net_2.load_state_dict(torch.load(dir_checkpoint_2 + 'G_best_F1_epoch.pth'))
net_2.load_state_dict(torch.load(dir_checkpoint_2 + 'G_best_F1_epoch.pth',map_location=device))
net_2.eval()
net_2.to(device=device)
net_3.load_state_dict(torch.load(dir_checkpoint_3 + 'G_best_F1_epoch.pth'))
net_3.load_state_dict(torch.load(dir_checkpoint_3 + 'G_best_F1_epoch.pth',map_location=device))
net_3.eval()
net_3.to(device=device)
net_4.load_state_dict(torch.load(dir_checkpoint_4 + 'G_best_F1_epoch.pth'))
net_4.load_state_dict(torch.load(dir_checkpoint_4 + 'G_best_F1_epoch.pth',map_location=device))
net_4.eval()
net_4.to(device=device)
net_5.load_state_dict(torch.load(dir_checkpoint_5 + 'G_best_F1_epoch.pth'))
net_5.load_state_dict(torch.load(dir_checkpoint_5 + 'G_best_F1_epoch.pth',map_location=device))
net_5.eval()
net_5.to(device=device)
net_6.load_state_dict(torch.load(dir_checkpoint_6 + 'G_best_F1_epoch.pth'))
net_6.load_state_dict(torch.load(dir_checkpoint_6 + 'G_best_F1_epoch.pth',map_location=device))
net_6.eval()
net_6.to(device=device)
net_7.load_state_dict(torch.load(dir_checkpoint_7 + 'G_best_F1_epoch.pth'))
net_7.load_state_dict(torch.load(dir_checkpoint_7 + 'G_best_F1_epoch.pth',map_location=device))
net_7.eval()
net_7.to(device=device)
net_8.load_state_dict(torch.load(dir_checkpoint_8 + 'G_best_F1_epoch.pth'))
net_8.load_state_dict(torch.load(dir_checkpoint_8 + 'G_best_F1_epoch.pth',map_location=device))
net_8.eval()
net_8.to(device=device)
net_9.load_state_dict(torch.load(dir_checkpoint_9 + 'G_best_F1_epoch.pth'))
net_9.load_state_dict(torch.load(dir_checkpoint_9 + 'G_best_F1_epoch.pth',map_location=device))
net_9.eval()
net_9.to(device=device)
net_10.load_state_dict(torch.load(dir_checkpoint_10 + 'G_best_F1_epoch.pth'))
net_10.load_state_dict(torch.load(dir_checkpoint_10 + 'G_best_F1_epoch.pth',map_location=device))
net_10.eval()
net_10.to(device=device)

@@ -285,9 +285,20 @@ def get_args():

logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
args = get_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Check if CUDA is available
if torch.cuda.is_available():
    logging.info("CUDA is available. Using CUDA...")
    device = torch.device("cuda:0")
elif torch.backends.mps.is_available():  # Check if MPS is available (for macOS)
    logging.info("MPS is available. Using MPS...")
    device = torch.device("mps")
else:
    logging.info("Neither CUDA nor MPS is available. Using CPU...")
    device = torch.device("cpu")

logging.info(f'Using device {device}')


image_size = Define_image_size(args.uniform, args.dataset)
lr = args.lr
