Commit

First commit

nikostsagk committed Nov 7, 2019
0 parents commit f77b084
Showing 10 changed files with 1,282 additions and 0 deletions.
Binary file added .DS_Store
1 change: 1 addition & 0 deletions .gitattributes
@@ -0,0 +1 @@
*.psd filter=lfs diff=lfs merge=lfs -text
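
(This routes Photoshop *.psd files through Git LFS: they are stored as LFS pointer objects rather than as blobs in the repository itself.)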
75 changes: 75 additions & 0 deletions adam.m
@@ -0,0 +1,75 @@
function [w, state] = adam(w, state, grad, opts, lr)
%ADAM
% Adam solver for use with CNN_TRAIN and CNN_TRAIN_DAG
%
% See [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
% | ([pdf](http://arxiv.org/pdf/1412.6980.pdf)).
%
% If called without any input argument, returns the default options
% structure. Otherwise provide all input arguments.
%
% W is the vector/matrix/tensor of parameters. It can be single/double
% precision and can be a `gpuArray`.
%
% STATE is as defined below, as are the supported OPTS.
%
% GRAD is the gradient of the objective w.r.t. W.
%
% LR is the learning rate, referred to as \alpha by Algorithm 1 in
% [Kingma et al., 2014].
%
% Solver options: (opts.train.solverOpts)
%
% `beta1`:: 0.9
% Decay rate for the 1st moment vector. See Algorithm 1 in
% [Kingma et al., 2014].
%
% `beta2`:: 0.999
% Decay rate for the 2nd moment vector.
%
% `eps`:: 1e-8
% Additive offset applied when dividing by state.v.
%
% STATE is initialized to the number 0 to start with; the first call to
% this function replaces it with the default state, consisting of
%
% `m`:: 0
% First moment vector
%
% `v`:: 0
% Second moment vector
%
% `t`:: 0
% Global iteration number across epochs
%
% This implementation is borrowed from torch optim.adam.

% Copyright (C) 2016 Aravindh Mahendran.
% All rights reserved.
%
% This file is part of the VLFeat library and is made available under
% the terms of the BSD license (see the COPYING file).

if nargin == 0  % return the default solver options
  w = struct('beta1', 0.9, 'beta2', 0.999, 'eps', 1e-8) ;
  return ;
end

if isequal(state, 0)  % start off with state = 0 so as to get the default state
  state = struct('m', 0, 'v', 0, 't', 0) ;
end

% update first moment vector `m`
state.m = opts.beta1 * state.m + (1 - opts.beta1) * grad ;

% update second moment vector `v`
state.v = opts.beta2 * state.v + (1 - opts.beta2) * grad.^2 ;

% update the time step
state.t = state.t + 1 ;

% Fold the bias corrections mhat = m/(1 - beta1^t) and
% vhat = v/(1 - beta2^t) into the step size: the update below then
% matches lr * mhat ./ (sqrt(vhat) + eps), up to where eps is applied.
lr_t = lr * (((1 - opts.beta2^state.t)^0.5) / (1 - opts.beta1^state.t)) ;

% Update `w`
w = w - lr_t * state.m ./ (state.v.^0.5 + opts.eps) ;
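
For reference, a minimal sketch of driving the solver by hand on a toy quadratic objective (hypothetical values; in practice CNN_TRAIN makes these calls):

w = single(randn(3, 1)) ;   % parameters of f(w) = w'*w
state = 0 ;                 % the first call replaces this with the default state
solverOpts = adam() ;       % defaults: beta1 = 0.9, beta2 = 0.999, eps = 1e-8
for iter = 1:100
  grad = 2 * w ;            % gradient of the toy objective
  [w, state] = adam(w, state, grad, solverOpts, 0.001) ;
end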
131 changes: 131 additions & 0 deletions cnn_signature_independent.m
@@ -0,0 +1,131 @@
function [net, stats] = cnn_signature_independent(varargin)

run 'matlab/vl_setupnn.m'


% Parameter defaults.
opts.train.batchSize = 128 ;
opts.train.numEpochs = 30 ;
opts.train.continue = true ;
opts.train.gpus = 1 ;
opts.train.learningRate = 0.001 ;
opts.expDir = fullfile(vl_rootnn, 'data','CEDAR-adam-') ;
opts.dataDir = fullfile(vl_rootnn, 'data', 'D_set.mat');
opts = vl_argparse(opts, varargin) ;
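
% A hypothetical call overriding the defaults above (vl_argparse accepts
% matching name-value pairs; the path here is illustrative only):
%   [net, stats] = cnn_signature_independent('expDir', ...
%       fullfile(vl_rootnn, 'data', 'my-experiment')) ;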

% --------------------------------------------------------------------
% Load data
% --------------------------------------------------------------------

load(opts.dataDir) ;  % loads the variable D (the imdb structure)
imdb = D ;
clear D ;

%---------------------------------------------------------------------
% NETWORK
%---------------------------------------------------------------------

f = 1/100;
net.layers = {} ;
net.layers{end+1} = struct('type', 'conv', ...
                           'weights', {{f*randn(11,11,1,32, 'single'), zeros(1, 32, 'single')}}, ...
                           'stride', 2, ...
                           'pad', 5) ;
net.layers{end+1} = struct('type', 'relu') ;
net.layers{end+1} = struct('type', 'conv', ...
                           'weights', {{f*randn(3,3,32,32, 'single'), zeros(1, 32, 'single')}}, ...
                           'stride', 1, ...
                           'pad', 1) ;
net.layers{end+1} = struct('type', 'relu') ;
net.layers{end+1} = struct('type', 'conv', ...
                           'weights', {{f*randn(3,3,32,64, 'single'), zeros(1, 64, 'single')}}, ...
                           'stride', 1, ...
                           'pad', 1) ;
net.layers{end+1} = struct('type', 'relu') ;
net.layers{end+1} = struct('type', 'pool', ...
                           'method', 'max', ...
                           'pool', [2 2], ...
                           'stride', 2) ;
net.layers{end+1} = struct('type', 'conv', ...
                           'weights', {{f*randn(3,3,64,128, 'single'), zeros(1, 128, 'single')}}, ...
                           'stride', 1, ...
                           'pad', 1) ;
net.layers{end+1} = struct('type', 'relu') ;
net.layers{end+1} = struct('type', 'pool', ...
                           'method', 'max', ...
                           'pool', [2 2], ...
                           'stride', 2) ;
net.layers{end+1} = struct('type', 'conv', ...
                           'weights', {{f*randn(3,3,128,256, 'single'), zeros(1, 256, 'single')}}, ...
                           'stride', 1, ...
                           'pad', 1) ;
net.layers{end+1} = struct('type', 'relu') ;
net.layers{end+1} = struct('type', 'pool', ...
                           'method', 'max', ...
                           'pool', [2 2], ...
                           'stride', 2) ;
net.layers{end+1} = struct('type', 'conv', ...
                           'weights', {{f*randn(3,3,256,512, 'single'), zeros(1, 512, 'single')}}, ...
                           'stride', 1, ...
                           'pad', 1) ;
net.layers{end+1} = struct('type', 'pool', ...
                           'method', 'avg', ...
                           'pool', [5 7], ...
                           'stride', 5, ...
                           'pad', 0) ;
%-------------------------> FEATURE EXTRACTION <---------------------------
net.layers{end+1} = struct('type', 'conv', ...
                           'weights', {{f*randn(1,1,512,45, 'single'), zeros(1, 45, 'single')}}, ...
                           'stride', 1, ...
                           'pad', 0) ;
net.layers{end+1} = struct('type', 'loss') ;
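
% Shape check (derived from the hyperparameters above): an [80 120 1]
% input becomes 40x60 after conv1 (11x11, stride 2, pad 5); the 3x3
% convs preserve size while the three 2x2 max pools halve it to 20x30,
% 10x15, and 5x7; the [5 7] average pool then reduces it to 1x1x512, so
% the final 1x1 conv yields a 1x1x45 score map (one score per class).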

% Fill in any values we didn't specify explicitly
net.meta.inputSize = [80 120 1];
net.meta.trainOpts.learningRate = 0.001; %[0.3*ones(1,20) 0.01*ones(1,20) 0.001*ones(1,20) 0.0001*ones(1,20)];
net.meta.trainOpts.weightDecay = 0.0005;
net.meta.trainOpts.momentum = 0.3;
net.meta.trainOpts.numEpochs = 30;
net.meta.trainOpts.batchSize = 128;

net = vl_simplenn_tidy(net) ;


% --------------------------------------------------------------------
% Train
% --------------------------------------------------------------------

use_gpu = ~isempty(opts.train.gpus) ;

% Start training
[net, stats] = cnn_train(net, imdb, @(imdb, batch) getBatch(imdb, batch, use_gpu), ...
                         'train', find(imdb.images.set == 1), ...
                         'val', find(imdb.images.set == 2), ...
                         opts.train) ;

%---------------------------------------------------------------------
% Visualize the learned filters
%---------------------------------------------------------------------
% figure(2); vl_tshow(net.layers{1}.weights{1}); title('Conv1 filters');
% figure(3); vl_tshow(net.layers{3}.weights{1}); title('Conv2 filters');
% figure(4); vl_tshow(net.layers{5}.weights{1}); title('Conv3 filters');
% figure(5); vl_tshow(net.layers{8}.weights{1}); title('Conv4 filters');
% figure(6); vl_tshow(net.layers{11}.weights{1}); title('Conv5 filters');
% figure(7); vl_tshow(net.layers{14}.weights{1}); title('Conv6 filters');
% figure(8); vl_tshow(net.layers{16}.weights{1}); title('Conv7 filters');


% --------------------------------------------------------------------
function [images, labels] = getBatch(imdb, batch, use_gpu)
% --------------------------------------------------------------------
% This is where we return a given set of images (and their labels) from
% our imdb structure.
% If the dataset were too large to fit in memory, getBatch could load
% images from disk instead (with the indexes given in 'batch'); a sketch
% of such a variant follows below.

images = imdb.images.data(:,:,:,batch) ;
labels = imdb.images.labels(batch) ;

if use_gpu
  images = gpuArray(images) ;
end
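
% --------------------------------------------------------------------
function [images, labels] = getBatchFromDisk(imdb, batch, use_gpu)
% --------------------------------------------------------------------
% A hypothetical disk-backed variant of getBatch, sketched here for
% illustration only (not part of this commit). It assumes a field
% imdb.images.paths holding one grayscale image file per sample,
% already sized to the 80x120 network input.
images = zeros(80, 120, 1, numel(batch), 'single') ;
for i = 1:numel(batch)
  images(:,:,1,i) = single(imread(imdb.images.paths{batch(i)})) ;
end
labels = imdb.images.labels(batch) ;
if use_gpu
  images = gpuArray(images) ;
end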