Formatted code according to PEP 8 rules #25

Open · wants to merge 1 commit into base: master
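Review note: the commit applies a small set of pycodestyle rules throughout: E231 (whitespace after commas), E251 (no spaces around keyword-argument equals), E265 (block comments start with '# '), E302 (two blank lines before top-level definitions) and E501 (lines under 80 characters). A minimal illustrative before/after sketch, not code from this repo:

    # before
    def f(x=(1,2), y = 3):
        return x,y

    # after
    def f(x=(1, 2), y=3):
        return x, y
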
3 changes: 2 additions & 1 deletion Classifier/dataset.py
@@ -4,6 +4,7 @@
 # get the data
 label_map = ['Anger', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

+
 def getData(filename):
     # images are 48x48
     # N = 35887
@@ -19,4 +20,4 @@ def getData(filename):
         X.append([int(p) for p in row[1].split()])

     X, Y = np.array(X) / 255.0, np.array(Y)
-    return X, Y
\ No newline at end of file
+    return X, Y
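Review note: both hunks here are pure whitespace: the added blank line satisfies E302 (two blank lines before a top-level def), and the seemingly unchanged return X, Y pair is GitHub's rendering of adding the missing newline at end of file (W292).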
9 changes: 6 additions & 3 deletions Classifier/preprocess.py
@@ -1,13 +1,16 @@
 from dataset import getData
 from utils import balance_class, give_train_test_splits

-def preprocess_data(filename='/content/drive/My Drive/fer2013.csv', image_size=(48, 48)):
+
+def preprocess_data(filename='/content/drive/My Drive/fer2013.csv',
+                    image_size=(48, 48)):
     X, Y = getData(filename)
     num_class = len(set(Y))

     # balance = balance_class(Y)

     N, D = X.shape
     X = X.reshape(N, image_size, 1)

-    return give_train_test_splits(X, Y, test_size=0.1, random_state=0), num_class
+    return give_train_test_splits(X, Y, test_size=0.1,
+                                  random_state=0), num_class
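Review note: the wrapping preserves a pre-existing bug: X.reshape(N, image_size, 1) passes the whole (48, 48) tuple as a single dimension, which NumPy rejects with a TypeError. Assuming the intent is an N x 48 x 48 x 1 tensor, a minimal sketch of the fix:

    X = X.reshape(N, *image_size, 1)  # unpack (48, 48) into two dims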
36 changes: 22 additions & 14 deletions Classifier/train.py
@@ -16,30 +16,36 @@
 IMG_SIZE = (48, 48)
 # NUM_CLASSES = 7

-X_train, X_test, y_train, y_test, NUM_CLASSES = preprocess_data(filename='/content/drive/My Drive/fer2013.csv',
-                                                                image_size=IMG_SIZE)
+X_train, X_test, y_train, y_test,
+NUM_CLASSES = preprocess_data(filename='/content/drive/My Drive/fer2013.csv',
+                              image_size=IMG_SIZE)

 model = Sequential()

 # 1st Convolution layer
-model.add(give_convolution_layer(filters=64, kernel_size=(3,3),
-    padding='same', use_bn=False, dropout_percentage=None, pool_size=(2,2)))
+model.add(give_convolution_layer(filters=64, kernel_size=(3, 3),
+                                 padding='same', use_bn=False,
+                                 dropout_percentage=None, pool_size=(2, 2)))

 # 2nd Convolution layer
-model.add(give_convolution_layer(filters=128, kernel_size=(3,3),
-    padding='same', use_bn=True, dropout_percentage=0.3, pool_size=(2,2)))
+model.add(give_convolution_layer(filters=128, kernel_size=(3, 3),
+                                 padding='same', use_bn=True,
+                                 dropout_percentage=0.3, pool_size=(2, 2)))

 # 3rd Convolution layer
-model.add(give_convolution_layer(filters=256, kernel_size=(3,3),
-    padding='same', use_bn=True, dropout_percentage=0.3, pool_size=(2,2)))
+model.add(give_convolution_layer(filters=256, kernel_size=(3, 3),
+                                 padding='same', use_bn=True,
+                                 dropout_percentage=0.3, pool_size=(2, 2)))

 # 4th Convolution layer
-model.add(give_convolution_layer(filters=512, kernel_size=(3,3),
-    padding='same', use_bn=True, dropout_percentage=0.3, pool_size=(2,2)))
+model.add(give_convolution_layer(filters=512, kernel_size=(3, 3),
+                                 padding='same', use_bn=True,
+                                 dropout_percentage=0.3, pool_size=(2, 2)))

 # 5th Convolution layer
-model.add(give_convolution_layer(filters=1024, kernel_size=(3,3),
-    padding='same', use_bn=True, dropout_percentage=0.3, pool_size=(2,2)))
+model.add(give_convolution_layer(filters=1024, kernel_size=(3, 3),
+                                 padding='same', use_bn=True,
+                                 dropout_percentage=0.3, pool_size=(2, 2)))

 # Flattening
 model.add(Flatten())
@@ -50,10 +56,12 @@
 model.add(Dropout(0.2))

 # Last layer
-model.add(Dense(NUM_CLASSES, activation='softmax', kernel_initializer='glorot_normal'))
+model.add(Dense(NUM_CLASSES, activation='softmax',
+                kernel_initializer='glorot_normal'))

 # Compile model
-model.compile(optimizer=Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=[categorical_accuracy])
+model.compile(optimizer=Adam(learning_rate=0.0001),
+              loss='categorical_crossentropy', metrics=[categorical_accuracy])

 # Print model summary
 print(model.summary())
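Review note: the rewrapped assignment at the top of the first hunk splits the tuple target across lines without parentheses, which Python cannot parse (SyntaxError). A PEP 8-compliant wrap that stays valid, as a sketch:

    (X_train, X_test, y_train, y_test, NUM_CLASSES) = preprocess_data(
        filename='/content/drive/My Drive/fer2013.csv', image_size=IMG_SIZE)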
18 changes: 13 additions & 5 deletions Classifier/utils.py
@@ -1,26 +1,34 @@
 from sklearn.model_selection import train_test_split

 # To see number of training data point available for each label
+
+
 def balance_class(Y):
     num_class = set(Y)
     count_class = {}
     for i in range(len(num_class)):
         count_class[i] = sum([1 for y in Y if y == i])
     return count_class

+
 def give_train_test_splits(X, Y, test_size=0.1, random_state):
     # Split in training set : validation set : testing set in 80:10:10
-    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=random_state)
+    X_train, X_test, y_train, y_test = train_test_split(
+        X, Y, test_size=test_size, random_state=random_state)
     y_train = (np.arange(num_class) == y_train[:, None]).astype(np.float32)
     y_test = (np.arange(num_class) == y_test[:, None]).astype(np.float32)
     return X_train, X_test, y_train, y_test

+
-def give_convolution_layer(filters, kernel_size=(3,3), padding='same', use_bn=True, dropout_percentage=None, pool_size=None):
-    sequential_model.add(Conv2D(filters, kernel_size, padding='same', activation='relu'))
+def give_convolution_layer(filters, kernel_size=(3, 3), padding='same',
+                           use_bn=True, dropout_percentage=None,
+                           pool_size=None):
+    sequential_model.add(
+        Conv2D(filters, kernel_size, padding='same', activation='relu'))
     if use_bn:
         model.add(BatchNormalization())
-    if pool_size is not None:
+    if pool_size is not None:
         model.add(MaxPooling2D(pool_size=pool_size))
     if dropout_percentage is not None:
         model.add(Dropout(dropout))
-    return sequential_model
\ No newline at end of file
+    return sequential_model
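Review note: several pre-existing problems in this file survive the reformatting: give_train_test_splits places the non-default random_state after a default parameter (a SyntaxError) and uses np and num_class without defining them, while give_convolution_layer mutates undefined sequential_model/model globals, hard-codes 'same' instead of its padding argument, references an undefined dropout, and never imports the Keras layers it uses. A corrected sketch, assuming the helper is meant to build and return a reusable block (the random_state=0 default is my assumption):

    import numpy as np
    from keras.layers import (BatchNormalization, Conv2D, Dropout,
                              MaxPooling2D)
    from keras.models import Sequential
    from sklearn.model_selection import train_test_split


    def give_train_test_splits(X, Y, test_size=0.1, random_state=0):
        num_class = len(set(Y))
        X_train, X_test, y_train, y_test = train_test_split(
            X, Y, test_size=test_size, random_state=random_state)
        # one-hot encode the integer labels
        y_train = (np.arange(num_class) == y_train[:, None]).astype(np.float32)
        y_test = (np.arange(num_class) == y_test[:, None]).astype(np.float32)
        return X_train, X_test, y_train, y_test


    def give_convolution_layer(filters, kernel_size=(3, 3), padding='same',
                               use_bn=True, dropout_percentage=None,
                               pool_size=None):
        # build a self-contained block rather than mutating globals
        block = Sequential()
        block.add(Conv2D(filters, kernel_size, padding=padding,
                         activation='relu'))
        if use_bn:
            block.add(BatchNormalization())
        if pool_size is not None:
            block.add(MaxPooling2D(pool_size=pool_size))
        if dropout_percentage is not None:
            block.add(Dropout(dropout_percentage))
        return block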
30 changes: 16 additions & 14 deletions model template/models.py
@@ -8,39 +8,41 @@


 # importing libraries
-from keras.layers import Dense , Dropout ,Flatten , MaxPooling2D
+from keras.layers import Dense, Dropout, Flatten, MaxPooling2D
 from keras.models import Model

 # define model
 # importing MobileNet_v2 for higher accuracy
 from keras.applications import MobileNetV2
-mobile = MobileNetV2(input_shape=(224,224,3),include_top=False,weights='imagenet')
+mobile = MobileNetV2(input_shape=(224, 224, 3),
+                     include_top=False, weights='imagenet')

-#print(mobile.summary())
+# print(mobile.summary())

 # layer should not be change
 for layer in mobile.layers:
-    layer.trainable = False
+    layer.trainable = False


 # Make output layer of mobilenet
 op_layer = mobile.output
-op_layer = MaxPooling2D(pool_size=(6,6))(op_layer)
+op_layer = MaxPooling2D(pool_size=(6, 6))(op_layer)
 op_layer = Flatten()(op_layer)
-op_layer = Dense(128,activation='relu')(op_layer)
+op_layer = Dense(128, activation='relu')(op_layer)
 op_layer = Dropout((0.5))(op_layer)
-op_layer = Dense(2,activation= 'softmax')(op_layer)
+op_layer = Dense(2, activation='softmax')(op_layer)

 # Define model input and output
-model = Model(inputs = mobile.input , outputs = op_layer)
+model = Model(inputs=mobile.input, outputs=op_layer)

 # compiling model
-model.compile(optimizer = 'adam',
-              loss = 'binary_crossentropy',
-              metrics = ['acc'])
+model.compile(optimizer='adam',
+              loss='binary_crossentropy',
+              metrics=['acc'])

 # defining a new model as feature extractor for svm and xgboost
-model_new = Model(inputs = mobile.input , outputs = op_layer)
+model_new = Model(inputs=mobile.input, outputs=op_layer)

-#compiling model
-model_new.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['acc'])
+# compiling model
+model_new.compile(optimizer='adam',
+                  loss='categorical_crossentropy', metrics=['acc'])
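Review note, beyond formatting: model_new is built from the same op_layer as model, so it still ends in the 2-class softmax instead of exposing features, and a 2-unit softmax head normally pairs with categorical_crossentropy rather than binary_crossentropy. If the goal really is a feature extractor for the SVM/XGBoost stage, one hedged way to rewrite the head and cut the graph at the 128-d embedding (the layer name 'features' is my invention):

    features = Dense(128, activation='relu', name='features')(op_layer)
    head = Dropout(0.5)(features)
    head = Dense(2, activation='softmax')(head)

    model = Model(inputs=mobile.input, outputs=head)
    # stop before the softmax head; no compile needed just to predict
    model_new = Model(inputs=mobile.input,
                      outputs=model.get_layer('features').output)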
27 changes: 13 additions & 14 deletions model template/preprocessing.py
@@ -32,18 +32,17 @@
 # loop over the image paths
 for imagePath in imagePaths:

-    # extract the class label from the filename
-    label = imagePath.split(os.path.sep)[-2]
-    # load the input image (150x150) and preprocess it
-    image = load_img(imagePath, target_size=(224, 224))
-    image = img_to_array(image)/255.
-
-
-    #image = preprocess_input(image)
-
-    # update the data and labels lists, respectively
-    data.append(image)
-    labels.append(label)
+    # extract the class label from the filename
+    label = imagePath.split(os.path.sep)[-2]
+    # load the input image (150x150) and preprocess it
+    image = load_img(imagePath, target_size=(224, 224))
+    image = img_to_array(image)/255.
+
+    # image = preprocess_input(image)
+
+    # update the data and labels lists, respectively
+    data.append(image)
+    labels.append(label)

 # convert the data and labels to NumPy arrays
 data = np.array(data, dtype="float32")
@@ -55,5 +54,5 @@
 label_value = to_categorical(labels)

 # store data and labels in memory address
-np.save('data address',data)
-np.save('labels address',labels)
+np.save('data address', data)
+np.save('labels address', labels)
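Review note: two leftovers the formatter cannot fix: the comment still says 150x150 while target_size is (224, 224) (MobileNetV2's expected input), and the final lines save the raw string labels under placeholder paths even though training.py later argmaxes one-hot targets, which suggests label_value was meant. A sketch under those assumptions (filenames are hypothetical):

    np.save('data.npy', data)
    np.save('labels.npy', label_value)  # the one-hot array, not raw strings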
70 changes: 37 additions & 33 deletions model template/training.py
@@ -7,6 +7,10 @@
 """

 # import the necessary packages
+from xgboost import XGBClassifier
+from sklearn.svm import SVC
+from sklearn.metrics import classification_report
+from sklearn.metrics import confusion_matrix
 from keras.preprocessing.image import ImageDataGenerator
 from sklearn.model_selection import train_test_split
 from keras.models import Model
@@ -22,57 +26,58 @@
 # partition the data into training and testing splits using 75% of
 # the data for training and the remaining 25% for testing
 (trainX, testX, trainY, testY) = train_test_split(data, labels,
-    test_size=0.20, stratify=labels, random_state=42,shuffle = True)
+                                                  test_size=0.20,
+                                                  stratify=labels,
+                                                  random_state=42,
+                                                  shuffle=True)

 # initialize data generators
-aug_train = ImageDataGenerator(rescale= 1.0/255.,
-    rotation_range=20,
-    zoom_range=0.15,
-    width_shift_range=0.2,
-    height_shift_range=0.2,
-    shear_range=0.15,
-    horizontal_flip=True,
-    fill_mode="nearest")
+aug_train = ImageDataGenerator(rescale=1.0/255.,
+                               rotation_range=20,
+                               zoom_range=0.15,
+                               width_shift_range=0.2,
+                               height_shift_range=0.2,
+                               shear_range=0.15,
+                               horizontal_flip=True,
+                               fill_mode="nearest")

-aug_test = ImageDataGenerator(rescale= 1.0/255.)
+aug_test = ImageDataGenerator(rescale=1.0/255.)

 # initialize batch size and epochs
 BS = 32
 EPOCHS = 50

 # train model
 hist = model.fit_generator(steps_per_epoch=len(trainX)//BS,
-    generator=aug_train.flow(trainX, trainY, batch_size=BS),
-    validation_data= (testX, testY),
+                           generator=aug_train.flow(
+                               trainX, trainY, batch_size=BS),
+                           validation_data=(testX, testY),
                            validation_steps=len(testX)//BS,
                            epochs=EPOCHS)

 # plotting training and testing graph
 # print accuracy and loss graph
 import matplotlib.pyplot as plt
 plt.plot(hist.history["acc"])
 plt.plot(hist.history['val_acc'])
 plt.plot(hist.history['loss'])
 plt.plot(hist.history['val_loss'])
 plt.title("model accuracy")
 plt.ylabel("Accuracy")
 plt.xlabel("Epoch")
-plt.legend(["Accuracy","Validation Accuracy","loss","Validation Loss"])
+plt.legend(["Accuracy", "Validation Accuracy", "loss", "Validation Loss"])
 plt.show()

 # storing model architecture for future use
 model.save('model_name')

 # printing confusion matrix
-from sklearn.metrics import confusion_matrix
 y_pred = model.predict(testX)
-y_p = np.argmax(y_pred,axis=1)
-y_true = np.argmax(testY,axis=1)
-print(confusion_matrix(y_true,y_p))
+y_p = np.argmax(y_pred, axis=1)
+y_true = np.argmax(testY, axis=1)
+print(confusion_matrix(y_true, y_p))

 # print classification report
-from sklearn.metrics import classification_report
-print(classification_report(y_true,y_p))
+print(classification_report(y_true, y_p))


 # increasing accuracy using cnn as feature extractor and svm as classifier
@@ -81,20 +86,19 @@
 test_new = model_new.predict(testX)

 # load and training svm
-from sklearn.svm import SVC
 svm = SVC(kernel='rbf')
-svm.fit(train_new,np.argmax(trainY,axis=1))
-svm_train = svm.score(train_new,np.argmax(trainY,axis=1))
-print('training accuracy of svm: ',svm_train)
-svm_score = svm.score(test_new,np.argmax(testY,axis=1))
-print('testing accuracy of svm: ',svm_score)
+svm.fit(train_new, np.argmax(trainY, axis=1))
+svm_train = svm.score(train_new, np.argmax(trainY, axis=1))
+print('training accuracy of svm: ', svm_train)
+svm_score = svm.score(test_new, np.argmax(testY, axis=1))
+print('testing accuracy of svm: ', svm_score)


-# increasing accuracy using cnn as feature extractor and xgboost as boosting technics
-from xgboost import XGBClassifier
+# increasing accuracy using cnn as feature extractor
+# and xgboost as boosting technics
 xg = XGBClassifier()
-xg.fit(train_new,np.argmax(trainY,axis=1))
-xg_train = xg.score(train_new,np.argmax(trainY,axis=1))
-print('training accuracy of xgboost: ',xg_train)
-xg_score = svm.score(test_new,np.argmax(testY,axis=1))
-print('testing accuracy of xgboost: ',xg_score)
+xg.fit(train_new, np.argmax(trainY, axis=1))
+xg_train = xg.score(train_new, np.argmax(trainY, axis=1))
+print('training accuracy of xgboost: ', xg_train)
+xg_score = svm.score(test_new, np.argmax(testY, axis=1))
+print('testing accuracy of xgboost: ', xg_score)
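Review note: two behavioural issues hide behind the formatting here: the images were already divided by 255 during preprocessing, yet both generators rescale by 1/255 again while validation_data=(testX, testY) bypasses aug_test entirely, so training and validation inputs end up on different scales; and the final score line reuses the SVM while printing it as the XGBoost accuracy. The latter fix as a one-liner:

    xg_score = xg.score(test_new, np.argmax(testY, axis=1))  # was svm.score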
24 changes: 13 additions & 11 deletions testServer.py
@@ -1,18 +1,20 @@
-from flask import Flask,request,jsonify
-import numpy as np
+from flask import Flask, request, jsonify
+import numpy as np
 import pickle

-app= Flask(__name__)
+app = Flask(__name__)

+
-@app.route('/',methods=['POST','GET'])
+@app.route('/', methods=['POST', 'GET'])
 def testVals():
-    serverValues= request.get_data()
-    ##print (serverValues)
-    ##serverValues2= dict(serverValues)
-    ##video = serverValues2['video']
+    serverValues = request.get_data()
+    # print (serverValues)
+    # serverValues2= dict(serverValues)
+    # video = serverValues2['video']
     print(serverValues)
-    with open('/home/animesh/Desktop/videoPick3','wb') as f:
-        pickle.dump(serverValues,f)
+    with open('/home/animesh/Desktop/videoPick3', 'wb') as f:
+        pickle.dump(serverValues, f)
     return 'success'

+
-app.run(host='0.0.0.0',port=8080,debug=True)
+app.run(host='0.0.0.0', port=8080, debug=True)
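Review note: numpy is imported but never used in this file, and app.run at module level fires on any import of the module. The conventional guard:

    if __name__ == '__main__':
        app.run(host='0.0.0.0', port=8080, debug=True)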