[WIP] flownet2-docker comparison #160

Open · wants to merge 1 commit into base: master
3 changes: 2 additions & 1 deletion datasets.py
@@ -8,7 +8,8 @@
 from glob import glob
 import utils.frame_utils as frame_utils

-from scipy.misc import imread, imresize
+from imageio import imread
+

 class StaticRandomCrop(object):
     def __init__(self, image_size, crop_size):
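Note that the old scipy.misc import also brought in imresize, which imageio does not provide, so any remaining imresize call sites in datasets.py need their own replacement. A minimal sketch of one possible substitute, assuming uint8 HxWx3 arrays and using PIL (the helper name is illustrative, not part of this PR):

```python
import numpy as np
from PIL import Image

def imresize_compat(arr, size):
    # size follows scipy.misc.imresize's (height, width) tuple convention;
    # PIL's resize takes (width, height), hence the swap below
    h, w = size
    return np.array(Image.fromarray(arr).resize((w, h), Image.BILINEAR))
```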
4 changes: 2 additions & 2 deletions download_caffe_models.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
-sudo rm -rf flownet2-docker
-sudo git clone https://github.com/lmb-freiburg/flownet2-docker
+#sudo rm -rf flownet2-docker
+#sudo git clone https://github.com/lmb-freiburg/flownet2-docker
 cd flownet2-docker

 sudo sed -i '$ a RUN apt-get update && apt-get install -y python-pip \
8 changes: 4 additions & 4 deletions main.py
@@ -223,8 +223,8 @@ def forward(self, data, target, inference=False ):
     if not os.path.exists(args.save):
         os.makedirs(args.save)

-    train_logger = SummaryWriter(log_dir = os.path.join(args.save, 'train'), comment = 'training')
-    validation_logger = SummaryWriter(log_dir = os.path.join(args.save, 'validation'), comment = 'validation')
+    train_logger = SummaryWriter(logdir = os.path.join(args.save, 'train'), comment = 'training')
+    validation_logger = SummaryWriter(logdir = os.path.join(args.save, 'validation'), comment = 'validation')

     # Dynamically load the optimizer with parameters passed in via "--optimizer_[param]=[value]" arguments
     with tools.TimerBlock("Initializing {} Optimizer".format(args.optimizer)) as block:
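The log_dir to logdir rename only matters because different tensorboardX releases accept different keyword names; which spelling works depends on the installed version. A hedged, version-agnostic sketch (not part of this PR) could look like:

```python
import inspect
from tensorboardX import SummaryWriter

def make_writer(path, comment=''):
    # pick whichever keyword the installed SummaryWriter actually accepts
    params = inspect.signature(SummaryWriter.__init__).parameters
    key = 'logdir' if 'logdir' in params else 'log_dir'
    return SummaryWriter(**{key: path}, comment=comment)
```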
@@ -261,7 +261,7 @@ def train(args, epoch, start_iteration, data_loader, model, optimizer, logger, i

             data, target = [Variable(d) for d in data], [Variable(t) for t in target]
             if args.cuda and args.number_gpus == 1:
-                data, target = [d.cuda(async=True) for d in data], [t.cuda(async=True) for t in target]
+                data, target = [d.cuda(non_blocking=True) for d in data], [t.cuda(non_blocking=True) for t in target]

             optimizer.zero_grad() if not is_validate else None
             losses = model(data[0], target[0])
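For context, async became a reserved keyword in Python 3.7, so Tensor.cuda(async=True) no longer parses; non_blocking is the accepted replacement in current PyTorch, and it only overlaps the host-to-device copy with compute when the source tensor is in pinned memory. A small illustrative sketch (names are not from this PR):

```python
import torch

def to_device(tensors, device='cuda'):
    # non_blocking=True is a no-op unless the CPU tensors are pinned
    return [t.to(device, non_blocking=True) for t in tensors]
```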
@@ -357,7 +357,7 @@ def inference(args, epoch, data_loader, model, offset=0):
         total_loss = 0
         for batch_idx, (data, target) in enumerate(progress):
             if args.cuda:
-                data, target = [d.cuda(async=True) for d in data], [t.cuda(async=True) for t in target]
+                data, target = [d.cuda(non_blocking=True) for d in data], [t.cuda(non_blocking=True) for t in target]
             data, target = [Variable(d) for d in data], [Variable(t) for t in target]

             # when ground-truth flows are not available for inference_dataset,
1 change: 1 addition & 0 deletions networks/FlowNetC.py
@@ -69,6 +69,7 @@ def __init__(self,args, batchNorm=True, div_flow = 20):
         self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')

     def forward(self, x):
+        from pdb import set_trace
         x1 = x[:,0:3,:,:]
         x2 = x[:,3::,:,:]

76 changes: 76 additions & 0 deletions run_pair.py
@@ -0,0 +1,76 @@
+import os
+import torch
+import numpy as np
+import argparse
+
+from models import FlowNet2  # the path depends on where you create this module
+from utils.frame_utils import read_gen  # the path depends on where you create this module
+from PIL import Image
+from math import ceil
+
+from pdb import set_trace
+
+if __name__ == '__main__':
+    # obtain the args needed to construct the FlowNet2 framework
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--fp16', action='store_true', help='Run model in pseudo-fp16 mode (fp16 storage fp32 math).')
+    parser.add_argument("--rgb_max", type=float, default=255.)
+    args = parser.parse_args()
+
+    # initialize the net
+    net = FlowNet2(args).cuda()
+    # load the state_dict
+    state_dict = torch.load("./FlowNet2_checkpoint.pth.tar")
+    net.load_state_dict(state_dict["state_dict"])
+
+    # load the image pair; you can find this operation in dataset.py
+    img1_fn = "./flownet2-docker/data/0000000-imgL.png"
+    img2_fn = "./flownet2-docker/data/0000001-imgL.png"
+    pim1 = read_gen(img1_fn)
+    pim2 = read_gen(img2_fn)
+    # returns a numpy array with shape (h, w, 3)
+
+    img1 = Image.open(img1_fn)
+    img2 = Image.open(img2_fn)
+    assert(img1.size == img2.size)
+    width, height = img1.size
+    divisor = 64.
+    adapted_width = int(ceil(width/divisor) * divisor)
+    adapted_height = int(ceil(height/divisor) * divisor)
+    img1 = img1.resize((adapted_width, adapted_height), Image.BICUBIC)
+    img2 = img2.resize((adapted_width, adapted_height), Image.BICUBIC)
+    pim1 = np.array(img1)
+    pim2 = np.array(img2)
+
+    assert(pim1.shape == pim2.shape)
+    images = [pim1, pim2]
+    images = np.array(images).transpose(3, 0, 1, 2)
+    im = torch.from_numpy(images.astype(np.float32)).unsqueeze(0).cuda()
+
+    # process the image pair to obtain the flow
+    result = net(im).squeeze()
+    data = result.data.cpu().numpy().transpose(1, 2, 0)
+
+    cmp_path = "./flownet2-docker/flow.flo"
+    if os.path.isfile(cmp_path):
+        cmp_data = read_gen(cmp_path)
+        # resize channels individually
+        if width != adapted_width or height != adapted_height:
+            flow_u = Image.fromarray(data[:,:,0]).resize((width, height))
+            flow_v = Image.fromarray(data[:,:,1]).resize((width, height))
+            data = np.stack((flow_u, flow_v), axis=2)
+        print("Doing comparison: ", np.linalg.norm(data - cmp_data))
+
+    # save flow; adapted from scripts/run-flownet.py in the flownet2 Caffe project
+    def writeFlow(name, flow):
+        f = open(name, 'wb')
+        f.write('PIEH'.encode('utf-8'))
+        np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
+        flow = flow.astype(np.float32)
+        flow.tofile(f)
+        f.flush()
+        f.close()
+
+    data = result.data.cpu().numpy().transpose(1, 2, 0)
+    writeFlow("./flow.flo", data)
+    print("wrote flow.flo")
2 changes: 1 addition & 1 deletion utils/frame_utils.py
@@ -1,6 +1,6 @@
 import numpy as np
 from os.path import *
-from scipy.misc import imread
+from imageio import imread
 from . import flow_utils

 def read_gen(file_name):
4 changes: 1 addition & 3 deletions utils/tools.py
@@ -5,14 +5,12 @@
 from os.path import *
 import numpy as np
 from inspect import isclass
-from pytz import timezone
 from datetime import datetime
 import inspect
 import torch

 def datestr():
-    pacific = timezone('US/Pacific')
-    now = datetime.now(pacific)
+    now = datetime.now()
     return '{}{:02}{:02}_{:02}{:02}'.format(now.year, now.month, now.day, now.hour, now.minute)

 def module_to_dict(module, exclude=[]):
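Dropping pytz makes datestr() use the machine's local time instead of US/Pacific. If a fixed timezone is still wanted without the pytz dependency, the standard library's datetime.timezone can provide it; a hedged sketch (not part of this PR):

```python
from datetime import datetime, timezone, timedelta

def datestr_fixed(offset_hours=-8):
    # fixed UTC offset (no DST handling), avoiding the pytz dependency
    now = datetime.now(timezone(timedelta(hours=offset_hours)))
    return '{}{:02}{:02}_{:02}{:02}'.format(now.year, now.month, now.day, now.hour, now.minute)
```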