Skip to content

Commit

Permalink
Improved Conv Alu + Testbenches
Browse files Browse the repository at this point in the history
  • Loading branch information
tqml committed Jun 20, 2020
1 parent cba075a commit 86ee6e4
Show file tree
Hide file tree
Showing 7 changed files with 248 additions and 80 deletions.
106 changes: 72 additions & 34 deletions vivado/NN_IP/EggNet_1.0/concepts/conv_arch.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,14 @@
import numpy as np
import EggNet


def convolution2d(image, kernel, bias):
m, n = kernel.shape
if (m == n):
y, x = image.shape
y = y - m + 1
x = x - m + 1
new_image = np.zeros((y,x))
new_image = np.zeros((y, x))
for i in range(y):
for j in range(x):
new_image[i][j] = np.sum(image[i:i+m, j:j+m]*kernel) + bias
Expand All @@ -15,40 +17,37 @@ def convolution2d(image, kernel, bias):

# Define Input Image Data
I = np.array([
1,4,7,
2,5,8,
3,6,9,
]).reshape((3,3))

1, 4, 7,
2, 5, 8,
3, 6, 9,
]).reshape((3, 3))


# Define Weights
W = np.array([
2,2,2,
2,2,2,
2,2,2,
]).reshape((3,3))

Ipad = np.pad(I, pad_width=(1,1))
Y = convolution2d(Ipad,W,0)
2, 2, 2,
2, 2, 2,
2, 2, 2,
]).reshape((3, 3))

Y1 = np.zeros(shape=(3,5))
Y2 = np.zeros(shape=(3,3))
Ipad = np.pad(I, pad_width=(1, 1))
Y = convolution2d(Ipad, W, 0)

Y2[1,0] = np.sum(I[:,:-1]*W[:,:-1])
Y2[1,1] = np.sum(I*W)
Y2[1,2] = np.sum(I[:,1:]*W[:,1:])
Y1 = np.zeros(shape=(3, 5))
Y2 = np.zeros(shape=(3, 3))

Y2[1, 0] = np.sum(I[:, :-1]*W[:, :-1])
Y2[1, 1] = np.sum(I*W)
Y2[1, 2] = np.sum(I[:, 1:]*W[:, 1:])


w1 = W[:,0]
w2 = W[:,1]
w3 = W[:,2]

i1 = I[:,0]
i2 = I[:,1]
i3 = I[:,2]
w1 = W[:, 0]
w2 = W[:, 1]
w3 = W[:, 2]

i1 = I[:, 0]
i2 = I[:, 1]
i3 = I[:, 2]


a3_1 = 0
Expand All @@ -58,9 +57,9 @@ def convolution2d(image, kernel, bias):
type = 'conv'
# type = 'fc'

for i in range(0,5):
i_patch = Ipad[1:-1,i]
for i in range(0, 5):
i_patch = Ipad[1:-1, i]

# MUL
z1_1 = w1 * i_patch
z1_2 = w2 * i_patch
Expand All @@ -71,10 +70,9 @@ def convolution2d(image, kernel, bias):
z2_2 = np.sum(z1_2)
z2_3 = np.sum(z1_3)


if type == 'conv':
# ACCUM + SHIFT
Y1[1,i] = a3_3 + z2_3
Y1[1, i] = a3_3 + z2_3
a3_3 = a3_2 + z2_2
a3_2 = a3_1 + z2_1
a3_1 = 0
Expand All @@ -85,10 +83,50 @@ def convolution2d(image, kernel, bias):
a3_1 = a3_1 + z2_1
else:
raise Exception()





print(Y)
print(Y1)
print(Y1)


def conv_alu(i_patch, mW, accum):
    """Single pipeline step of the EggNet convolution ALU.

    Multiplies the current input patch with every weight row, reduces each
    row to a partial sum, then shifts those partial sums one slot down the
    accumulator chain (shift-register style, mirroring the hardware).

    Args:
        i_patch (array): Nx1 input patch for this column step.
        mW (matrix): MxN weight matrix, one kernel column per row.
        accum (array): Mx1 running partial sums; mutated in place.

    Returns:
        float: current output y (oldest partial sum plus its final product).
        array: the (mutated) accumulator for the next iteration.
    """
    # MUL phase: broadcast the patch across every weight row.
    products = i_patch * mW

    # ADD phase: collapse each row to a single partial sum.
    row_sums = products.sum(axis=1)

    # ACCUM + SHIFT phase: the last slot completes and becomes the output,
    # the rest move one slot down, and the freed slot is cleared.
    y = accum[-1] + row_sums[-1]
    accum[1:] = accum[:-1] + row_sums[:-1]
    accum[0] = 0
    return y, accum


accum = np.zeros(shape=(6,))
W = np.random.rand(6,3)
for i in range(0, 5):
i_patch = Ipad[1:-1, i]
y, accum = conv_alu(i_patch, W, accum)
print(y)
# print(accum)


Y = convolution2d(Ipad, kernel=W, bias=0)

102 changes: 84 additions & 18 deletions vivado/NN_IP/EggNet_1.0/concepts/nn_conv_kernel.vhd
Original file line number Diff line number Diff line change
Expand Up @@ -2,29 +2,79 @@ library ieee;
use ieee.std_logic_1164.all;
use ieee.numeric_std.all;

-- General ALU Operation of EggNet
--
--
-- 3 General Phases:
-- 1) Multiply
-- 2) Add
-- 3) Accum
--
-- The input can be pipelined/streamed through the ALU. Depending on the number of MUL and ADD
-- operations that can be performed in a single step, the entity is generic adjustable.
--
-- Also the result of the ADD operation can be added and stored to the accumulator on the same line
-- which is useful for general matrix multiplication. It can also be combined with the accumulator
-- on the same line and stored in the accumulator below. This then implements a convolutional
-- operation.
--
--
-- b Bias Input
-- Mul Add Accum +
-- |
-- +-+ v
-- |-| +-+ +++
-- +----> |-| +------> + +-----+ | |
-- | |-| +-+ | +++
-- I x | +-+ | |
-- M | +----->+
-- A +-+ | |
-- G |-| | +-+ v
-- E |-| | |-| +-+ +++
-- |-| +---------> |-| +------> +-+------+ | |
-- P |-| | |-| +-+ | +++
-- A +-+ | +-+ | |
-- T | +---->+
-- C | |
-- H | +-+ v
-- | |-| +-+ +++
-- +----> |-| +-----> +-+------+ | |
-- |-| +-+ | +++
-- +-+ | |
-- +---->+
-- |
-- v
-- Convolution
-- Output y

use work.nnpkg.all;
entity nn_conv_kernel is

generic (
FILTER_WIDTH : natural := 3;
FILTER_HEIGHT : natural := 3;
INPUT_BITS : natural := 8;
CONV_MODE_ENABLE : boolean := true;
WEIGHT_BITS : natural := 8;
OUTPUT_BITS : natural := INPUT_BITS + WEIGHT_BITS
FILTER_WIDTH : natural := 3; -- Filter/Kernel Width
FILTER_HEIGHT : natural := 3; -- Filter/Kernel Height
INPUT_BITS : natural := 8; -- Bits of the input vector x
WEIGHT_BITS : natural := 8; -- Bits used for the weights
OUTPUT_BITS : natural := INPUT_BITS + WEIGHT_BITS
-- OUTPUT_BITS : natural := INPUT_BITS + nlog2(PARALLEL_INPUT) -- prevent overflow
);

port (
clk : in std_logic;
rst : in std_logic;
-- Control signals
clk : in std_logic;
rst : in std_logic;
ctl_alu_type : in nn_alu_type; -- Defines if Convolution Mode or Matrix Mode should be performed

-- Inputs
w_i : in vec2d_t(0 to FILTER_HEIGHT - 1, 0 to FILTER_WIDTH - 1)(WEIGHT_BITS - 1 downto 0);
x_i : in vec1d_t(0 to FILTER_HEIGHT - 1)(INPUT_BITS - 1 downto 0);
b_i : in std_logic_vector(OUTPUT_BITS - 1 downto 0); -- Added bias (convolution only )

y_o : out std_logic_vector(OUTPUT_BITS - 1 downto 0)

-- Outputs
y_o : out std_logic_vector(OUTPUT_BITS - 1 downto 0);
ya_o : out vec1d_t(0 to FILTER_WIDTH - 1)(OUTPUT_BITS - 1 downto 0)
);

end entity nn_conv_kernel;

architecture rtl of nn_conv_kernel is
Expand All @@ -40,14 +90,14 @@ architecture rtl of nn_conv_kernel is
begin

update_weights : process (w_i)
-- # TODO This is a bit ugly and maybe it's not a good idea to have an array in the sensitivity list
begin
for i in 0 to FILTER_WIDTH - 1 loop
for j in 0 to FILTER_HEIGHT - 1 loop
w_n(i)(j) <= w_i(j, i);
end loop;
end loop;
end process; -- update_weights

--- Generate ALU's
gen_alu : for i in 0 to FILTER_WIDTH - 1 generate
nn_alu_0 : entity work.nn_alu
Expand All @@ -66,18 +116,34 @@ begin
);
end generate;

process (clk, rst)
-- ya_o directly reflects the accumulator
ya_o <= a_n;
op_process : process (clk, rst)
begin
if rst = '1' then
a_n <= a_n_ZERO;
z_n <= a_n_ZERO;
elsif rising_edge(clk) then

a_n(0) <= (others => '0');
y_o <= std_logic_vector(unsigned(a_n(FILTER_WIDTH - 1)) + unsigned(z_n(FILTER_WIDTH - 1)));
accum : for i in 1 to FILTER_WIDTH - 1 loop
a_n(i) <= std_logic_vector(unsigned(a_n(i - 1)) + unsigned(z_n(i - 1)));
end loop; -- accum
-- Independent of operation, simply the last output. (to avoid generating a latch)
y_o <= std_logic_vector(unsigned(a_n(FILTER_WIDTH - 1)) + unsigned(z_n(FILTER_WIDTH - 1)));

if ctl_alu_type = ALU_CONV then

                -- Convolutional operation
a_n(0) <= b_i;
accum_conv : for i in 1 to FILTER_WIDTH - 1 loop
a_n(i) <= std_logic_vector(unsigned(a_n(i - 1)) + unsigned(z_n(i - 1)));
end loop; -- accum

else
-- Matrix Multiplication
accum_mat : for i in 0 to FILTER_WIDTH - 1 loop
a_n(i) <= std_logic_vector(unsigned(a_n(i)) + unsigned(z_n(i)));
end loop; -- accum

end if;

end if;
end process;

end architecture;
3 changes: 3 additions & 0 deletions vivado/NN_IP/EggNet_1.0/concepts/nnpkg.vhd
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,9 @@ package nnpkg is
type vec1d_t is array (natural range <>) of std_logic_vector;
type vec2d_t is array (natural range <>, natural range <>) of std_logic_vector;


type nn_alu_type is (ALU_CONV, ALU_FC);

-- LOG2 for natural numbers
function nlog2 (x : natural) return natural;

Expand Down
11 changes: 11 additions & 0 deletions vivado/NN_IP/EggNet_1.0/concepts/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,5 +28,16 @@
lib.add_source_files(ROOT / "*.vhd")



tb_nn_alu = lib.test_bench('tb_nn_alu')
tb_nn_alu.set_sim_option('ghdl.sim_flags', [f'--vcd={ROOT / "tb_nn_alu.vcd"}'])

tb_nn_conv_kernel = lib.test_bench('tb_nn_conv_kernel')
tb_nn_conv_kernel.set_sim_option('ghdl.sim_flags', [
f'--vcd={ROOT / "tb_nn_conv_kernel.vcd"}',
f'--read-wave-opt={ROOT/"tb_nn_conv_kernel_vcd_conf.txt"}'])



if __name__ == "__main__":
VU.main()
2 changes: 1 addition & 1 deletion vivado/NN_IP/EggNet_1.0/concepts/tb_nn_alu.vhd
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ use IEEE.math_real.log2;
library vunit_lib;
context vunit_lib.vunit_context;

library work;

use work.nnpkg.all;

entity tb_nn_alu is
Expand Down
Loading

0 comments on commit 86ee6e4

Please sign in to comment.