
Auxiliary scripts for running experiments with multiple models and security features. #2

Open · wants to merge 11 commits into base: master
4 changes: 2 additions & 2 deletions include/puma.h
@@ -17,8 +17,8 @@ struct CompilerOptions {
     enum GraphPartitioningScheme { GP_ROW_MAJOR, GP_COL_MAJOR, GP_KAHIP, GP_RANDOM };
 
     GraphPartitioningScheme gp_ = GP_ROW_MAJOR;
-    bool coalesceMVMOperations_ = true;
-    bool printDebugInfo_ = false;
+    bool coalesceMVMOperations_ = false;
+    bool printDebugInfo_ = true;
 
 };
70 changes: 39 additions & 31 deletions src/codegen.cpp
@@ -31,25 +31,31 @@ void CodeGenerator::codegen() {
 
     // TODO: Define ABI for laying out the binary
 
-    for(unsigned int pTile = 0; pTile < placer_->getNPTiles(); ++pTile) {
+    unsigned int nTile = placer_->getNPTiles();
+    if (nTile < MIN_N_TILE)
+        nTile = MIN_N_TILE;
+
+    for(unsigned int pTile = 0; pTile < nTile; ++pTile) {
 
         // Generate code for the tile
         std::stringstream fileName;
         fileName << model_->getName() << "-tile" << pTile << ".puma";
         std::ofstream tileCode;
         tileCode.open(fileName.str());
-        std::list<TileOperation*>& tileOperationList = linearizer_->getTileOperationList(pTile);
-        for(TileOperation* tileOp : tileOperationList) {
-            if(SendOperation* send = dynamic_cast<SendOperation*>(tileOp)) {
-                tileCode << codegen(send);
-            } else if(ReceiveOperation* recv = dynamic_cast<ReceiveOperation*>(tileOp)) {
-                tileCode << codegen(recv);
-            } else if(WriteInputOperation* write = dynamic_cast<WriteInputOperation*>(tileOp)) {
-                tileCode << codegen(write);
-            } else if(ReadOutputOperation* read = dynamic_cast<ReadOutputOperation*>(tileOp)) {
-                tileCode << codegen(read);
-            } else {
-                assert(0 && "Unsupported operation for code generation!");
-            }
-        }
+        if (pTile < placer_->getNPTiles()) {
+            std::list<TileOperation*>& tileOperationList = linearizer_->getTileOperationList(pTile);
+            for(TileOperation* tileOp : tileOperationList) {
+                if(SendOperation* send = dynamic_cast<SendOperation*>(tileOp)) {
+                    tileCode << codegen(send);
+                } else if(ReceiveOperation* recv = dynamic_cast<ReceiveOperation*>(tileOp)) {
+                    tileCode << codegen(recv);
+                } else if(WriteInputOperation* write = dynamic_cast<WriteInputOperation*>(tileOp)) {
+                    tileCode << codegen(write);
+                } else if(ReadOutputOperation* read = dynamic_cast<ReadOutputOperation*>(tileOp)) {
+                    tileCode << codegen(read);
+                } else {
+                    assert(0 && "Unsupported operation for code generation!");
+                }
+            }
+        }
         tileCode << "halt()" << std::endl;
@@ -61,24 +67,26 @@ void CodeGenerator::codegen() {
             fileName << model_->getName() << "-tile" << pTile << "-core" << pCore << ".puma";
             std::ofstream coreCode;
             coreCode.open(fileName.str());
-            std::list<CoreOperation*>& coreOperationList = linearizer_->getCoreOperationList(pTile, pCore);
-            for(CoreOperation* coreOp : coreOperationList) {
-                if(MVMOperation* mvm = dynamic_cast<MVMOperation*>(coreOp)) {
-                    coreCode << codegen(mvm);
-                } else if(TrainingMatrixOperation* trainOp = dynamic_cast<TrainingMatrixOperation*>(coreOp)) {
-                    coreCode << codegen(trainOp);
-                } else if(ALUVectorOperation* aluOp = dynamic_cast<ALUVectorOperation*>(coreOp)) {
-                    coreCode << codegen(aluOp);
-                } else if(SetImmediateOperation* seti = dynamic_cast<SetImmediateOperation*>(coreOp)) {
-                    coreCode << codegen(seti);
-                } else if(CopyOperation* copy = dynamic_cast<CopyOperation*>(coreOp)) {
-                    coreCode << codegen(copy);
-                } else if(LoadOperation* load = dynamic_cast<LoadOperation*>(coreOp)) {
-                    coreCode << codegen(load);
-                } else if(StoreOperation* store = dynamic_cast<StoreOperation*>(coreOp)) {
-                    coreCode << codegen(store);
-                } else {
-                    assert(0 && "Unsupported operation for code generation!");
-                }
-            }
+            if (pTile < placer_->getNPTiles()) {
+                std::list<CoreOperation*>& coreOperationList = linearizer_->getCoreOperationList(pTile, pCore);
+                for(CoreOperation* coreOp : coreOperationList) {
+                    if(MVMOperation* mvm = dynamic_cast<MVMOperation*>(coreOp)) {
+                        coreCode << codegen(mvm);
+                    } else if(TrainingMatrixOperation* trainOp = dynamic_cast<TrainingMatrixOperation*>(coreOp)) {
+                        coreCode << codegen(trainOp);
+                    } else if(ALUVectorOperation* aluOp = dynamic_cast<ALUVectorOperation*>(coreOp)) {
+                        coreCode << codegen(aluOp);
+                    } else if(SetImmediateOperation* seti = dynamic_cast<SetImmediateOperation*>(coreOp)) {
+                        coreCode << codegen(seti);
+                    } else if(CopyOperation* copy = dynamic_cast<CopyOperation*>(coreOp)) {
+                        coreCode << codegen(copy);
+                    } else if(LoadOperation* load = dynamic_cast<LoadOperation*>(coreOp)) {
+                        coreCode << codegen(load);
+                    } else if(StoreOperation* store = dynamic_cast<StoreOperation*>(coreOp)) {
+                        coreCode << codegen(store);
+                    } else {
+                        assert(0 && "Unsupported operation for code generation!");
+                    }
+                }
+            }
            coreCode << "hlt()" << std::endl;
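
The guarded loops above always emit at least MIN_N_TILE tile programs; tiles past placer_->getNPTiles() produce files containing nothing but the trailing halt()/hlt(). A minimal Python sketch of the same padding pattern (emit_tile_programs and placed_tiles are illustrative names, not from the codebase):

# Illustrative sketch of the padding pattern above: emit real code for
# placed tiles and a halt-only stub for any extra tiles up to the minimum.
MIN_N_TILE = 0  # value set in src/common.h below

def emit_tile_programs(model_name, placed_tiles):
    n_tile = max(len(placed_tiles), MIN_N_TILE)
    for p_tile in range(n_tile):
        # Tiles beyond the placed ones have no operations to generate.
        ops = placed_tiles[p_tile] if p_tile < len(placed_tiles) else []
        with open("%s-tile%d.puma" % (model_name, p_tile), "w") as f:
            for op in ops:
                f.write(op + "\n")
            f.write("halt()\n")  # every tile program ends with halt()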
6 changes: 3 additions & 3 deletions src/common.h
@@ -13,8 +13,9 @@
 
 /* Constants */
 #define MVMU_DIM 128
-#define N_CONSTANT_MVMUS_PER_CORE 6
-#define N_TRAINING_MVMUS_PER_CORE 2
+#define N_CONSTANT_MVMUS_PER_CORE 2
+#define N_TRAINING_MVMUS_PER_CORE 0
+#define MIN_N_TILE 0
 #define N_CORES_PER_TILE 8
 #define MAX_LOAD_STORE_WIDTH 16
 #define MAX_SEND_RECV_WIDTH 16
@@ -104,4 +105,3 @@ class CodeGenerator;
 class ModelInstanceImpl;
 
 #endif
-
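For scale, a rough reading of these constants, assuming each MVMU stores one MVMU_DIM x MVMU_DIM block of weights (the usual PUMA crossbar configuration):

# Rough per-core/per-tile weight capacity implied by src/common.h
# (assumption: one MVMU holds a MVMU_DIM x MVMU_DIM weight block).
MVMU_DIM = 128
N_CONSTANT_MVMUS_PER_CORE = 2   # was 6 before this change
N_CORES_PER_TILE = 8

weights_per_core = N_CONSTANT_MVMUS_PER_CORE * MVMU_DIM * MVMU_DIM
weights_per_tile = N_CORES_PER_TILE * weights_per_core
print(weights_per_core, weights_per_tile)  # 32768 262144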
44 changes: 44 additions & 0 deletions test/encryptall.py (new file)
from cryptography.fernet import Fernet
import sys, os, shutil

# Hard-coded demo key; Fernet keys are urlsafe-base64-encoded 32-byte values.
key = "0aaTE5OgWdw9nDlhCucpTzL_97-vYRnSamxQafDAUUc="
f = Fernet(key)

done = 0
path = sys.argv[1]
os.chdir(path)

# Recreate a clean crypto/ output directory.
if os.path.exists(path + '/crypto'):
    shutil.rmtree(path + '/crypto')
os.makedirs(path + '/crypto')

# Encrypt every file in every subdirectory (except crypto/ itself),
# mirroring the directory layout under crypto/.
for root, dirs, files in os.walk(path):
    for name in dirs:
        if name == 'crypto':
            continue
        if os.path.exists('crypto/' + name):
            done = 1
            break
        os.makedirs('crypto/' + name)
        path1 = os.path.join(path, name)
        for root1, dirs1, files1 in os.walk(path1):
            for file in files1:
                with open(path1 + '/' + file, 'rb') as myfile:
                    data = myfile.read()
                secretdata = f.encrypt(data)
                # f.encrypt() returns bytes, so the output file must be
                # opened in binary mode.
                with open(path + '/crypto/' + name + '/' + file, 'wb') as secretfile:
                    secretfile.write(secretdata)
    if done == 1:
        break
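A matching decryption sketch, assuming the same hard-coded key and the crypto/ layout produced above (this helper is hypothetical, not part of the PR):

from cryptography.fernet import Fernet
import sys, os

# Hypothetical counterpart to encryptall.py: walk the encrypted tree and
# write decrypted copies, preserving the directory layout.
key = "0aaTE5OgWdw9nDlhCucpTzL_97-vYRnSamxQafDAUUc="  # must match encryptall.py
f = Fernet(key)
src = sys.argv[1]  # the crypto/ directory written by encryptall.py
dst = sys.argv[2]  # destination for the decrypted copies
for root, dirs, files in os.walk(src):
    for name in files:
        rel = os.path.relpath(os.path.join(root, name), src)
        out_path = os.path.join(dst, rel)
        out_dir = os.path.dirname(out_path)
        if out_dir and not os.path.isdir(out_dir):
            os.makedirs(out_dir)
        with open(os.path.join(root, name), 'rb') as enc_file:
            token = enc_file.read()
        with open(out_path, 'wb') as dec_file:
            dec_file.write(f.decrypt(token))  # raises InvalidToken on key mismatch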
72 changes: 72 additions & 0 deletions test/generate-py.sh (new file)
#!/bin/bash
# Copy this file to the compiler test folder where the .puma files are
# generated; it converts them into .npy inputs for the simulator.
# Update the SIMULATOR_PATH value and execute it.

SIMULATOR_PATH="/home/plinio/puma-simulator" # simulator root path

if [[ -z "$SIMULATOR_PATH" ]] ; then
    echo "Error: missing simulator path." >&2
    exit 1
fi

PYTHON=python2

for g in *.puma; do
    echo "$g"

    # Parse the dataset name and tile/core ids from the file name
    # (<model>-tile<T>[-core<C>].puma).
    dataset="$( cut -d '-' -f 1 <<< "$g" )"
    tileid=$(echo "$g" | grep -o -E 'tile[0-9]+' | head -1)

    if [[ $g == *"core"* ]]; then
        coreid="${g: -6}"
        coreid="$( cut -d '.' -f 1 <<< "$coreid" )"
        filename="core_imem$coreid"
    else
        filename='tile_imem'
    fi
    mkdir -p "$dataset/$tileid"
    dir=$dataset/$tileid
    f=$dataset/$tileid/$g

    # Emit a Python script that converts each instruction line into a dict
    # and saves the list of dicts as a .npy file for the simulator.
    echo "" > $f.py
    echo "import sys, os" >> $f.py
    echo "import numpy as np" >> $f.py
    echo "import math" >> $f.py
    echo "sys.path.insert(0, '$SIMULATOR_PATH/include/')" >> $f.py
    echo "sys.path.insert(0, '$SIMULATOR_PATH/src/')" >> $f.py
    echo "sys.path.insert(0, '$SIMULATOR_PATH/')" >> $f.py
    echo "from data_convert import *" >> $f.py
    echo "from instrn_proto import *" >> $f.py
    echo "from tile_instrn_proto import *" >> $f.py
    echo "dict_temp = {}" >> $f.py
    echo "dict_list = []" >> $f.py
    while read line
    do
        echo "i_temp = i_$line" >> $f.py
        echo "dict_list.append(i_temp.copy())" >> $f.py
    done < $g
    echo "filename = '$dir/$filename.npy'" >> $f.py
    echo "np.save(filename, dict_list)" >> $f.py
    $PYTHON $f.py
done

cp input.py $dataset/input.py

echo "$dataset"
cd $dataset
$PYTHON input.py

# Optional post-processing: -c encrypts the generated files, -a adds MACs.
if [[ $1 == *"-c"* ]]; then
    $PYTHON $SIMULATOR_PATH/Security/encrypter.py $2 $PWD
    rm -r tile*
fi

if [[ $1 == *"-a"* ]]; then
    $PYTHON $SIMULATOR_PATH/Security/generateMAC.py $2 $PWD
fi

cd ..
$PYTHON populate.py
11 changes: 11 additions & 0 deletions test/input.py (new file)
import numpy as np

np.random.seed(1)  # seed NumPy's generator so the input is reproducible
num_inputs = 30000

data = np.random.randn(num_inputs)
counter = 100 * np.ones(num_inputs)
valid = np.ones(num_inputs)
inp = {'data': data, 'counter': counter, 'valid': valid}
filename = 'input.npy'
np.save(filename, inp)
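
Reading input.npy back needs a pickle-aware load, since np.save pickles the dictionary rather than storing a plain array; a minimal sketch (on recent NumPy versions):

import numpy as np

# np.save wraps the dict in a 0-d object array; allow_pickle=True lets
# np.load unpickle it, and .item() recovers the original dict.
inp = np.load('input.npy', allow_pickle=True).item()
print(inp['data'].shape, inp['counter'][0], inp['valid'][0])  # (30000,) 100.0 1.0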
96 changes: 96 additions & 0 deletions test/lenet.cpp (new file)
/*
 * Copyright (c) 2019 IMPACT Research Group, University of Illinois.
 * All rights reserved.
 *
 * This file is covered by the LICENSE.txt license file in the root directory.
 *
 */

#include <assert.h>
#include <string>
#include <vector>

#include "puma.h"
#include "conv-layer.h"
#include "fully-connected-layer.h"

void isolated_fully_connected_layer(Model model, std::string layerName, unsigned int in_size, unsigned int out_size) {

    // Input vector
    auto in = InputVector::create(model, "in", in_size);

    // Output vector
    auto out = OutputVector::create(model, "out", out_size);

    // Layer
    out = fully_connected_layer(model, layerName, in_size, out_size, in);

}

int main() {

    Model model = Model::create("lenet");

    // Layer 1 (convolution) configurations
    unsigned int k_size_x1 = 5;
    unsigned int k_size_y1 = 5;
    unsigned int in_size_x1 = 32;
    unsigned int in_size_y1 = 32;
    unsigned int in_channels1 = 1;
    unsigned int out_channels1 = 6;
    unsigned int max_pool_size_x1 = 2;
    unsigned int max_pool_size_y1 = 2;

    // Layer 2 (convolution) configurations
    unsigned int k_size_x2 = 5;
    unsigned int k_size_y2 = 5;
    unsigned int in_size_x2 = in_size_x1/2; // 16
    unsigned int in_size_y2 = in_size_y1/2;
    unsigned int in_channels2 = out_channels1;
    unsigned int out_channels2 = 16;
    unsigned int max_pool_size_x2 = 2;
    unsigned int max_pool_size_y2 = 2;

    // Layer 3 (convolution) configurations
    unsigned int k_size_x3 = 5;
    unsigned int k_size_y3 = 5;
    unsigned int in_size_x3 = in_size_x2/2; // 8
    unsigned int in_size_y3 = in_size_y2/2;
    unsigned int in_channels3 = out_channels2;
    unsigned int out_channels3 = 32;

    // Input
    unsigned int in_size_x = in_size_x2;
    unsigned int in_size_y = in_size_y2;
    unsigned int in_channels = in_channels2;
    auto in_stream = InputImagePixelStream::create(model, "in_stream", in_size_x, in_size_y, in_channels);

    // Output
    unsigned int out_size_x = in_size_x3; // 8
    unsigned int out_size_y = in_size_y3; // 8
    unsigned int out_channels = out_channels2;
    auto out_stream = OutputImagePixelStream::create(model, "out_stream", out_size_x, out_size_y, out_channels);

    // Layer 4 (fully-connected) configurations
    unsigned int in_size4 = 512;
    unsigned int out_size4 = 10;

    // Define network
    // auto out1 = convmax_layer(model, "layer" + std::to_string(1), k_size_x1, k_size_y1, in_size_x1, in_size_y1, in_channels1, out_channels1, max_pool_size_x1, max_pool_size_y1, in_stream);
    // auto out1 = conv_layer(model, "layer" + std::to_string(1), k_size_x1, k_size_y1, in_size_x1, in_size_y1, in_channels1, out_channels1, in_stream);
    auto out2 = convmax_layer(model, "layer" + std::to_string(2), k_size_x2, k_size_y2, in_size_x2, in_size_y2, in_channels2, out_channels2, max_pool_size_x2, max_pool_size_y2, in_stream);
    // auto out3 = conv_layer(model, "layer" + std::to_string(3), k_size_x3, k_size_y3, in_size_x3, in_size_y3, in_channels3, out_channels3, out2);
    // out_stream = out3;
    out_stream = out2;
    // isolated_fully_connected_layer(model, "layer" + std::to_string(4), in_size4, out_size4);

    // Compile
    model.compile();

    // Destroy model
    model.destroy();

    return 0;

}
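As a sanity check on the hard-coded sizes in this file, assuming each convmax layer preserves the spatial size and its 2x2 max-pool then halves it:

# Spatial sizes implied by lenet.cpp (assumption: convolution pads to keep
# the spatial size; each 2x2 max-pool halves it).
in_size_x1 = 32                 # layer 1 input
in_size_x2 = in_size_x1 // 2    # 16: input to the active layer 2
out_size_x = in_size_x2 // 2    # 8: out_stream is 8 x 8 x 16 channels
print(in_size_x2, out_size_x)   # 16 8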