From ccd9adf36b278d500f6c9c096885f9ecb57dc566 Mon Sep 17 00:00:00 2001 From: AdamMiltonBarker Date: Sun, 22 Aug 2021 19:39:39 +0200 Subject: [PATCH] Resolves #4 Fix formatting --- classifier.py | 146 +++--- modules/AbstractClassifier.py | 92 ++-- modules/AbstractData.py | 136 +++-- modules/AbstractModel.py | 270 +++++----- modules/AbstractServer.py | 38 +- modules/augmentation.py | 84 ++-- modules/data.py | 194 ++++---- modules/engine.py | 305 ++++++------ modules/helpers.py | 90 ++-- modules/model.py | 904 +++++++++++++++++----------------- modules/server.py | 62 +-- 11 files changed, 1154 insertions(+), 1167 deletions(-) diff --git a/classifier.py b/classifier.py index 6a4aa1a..03712d4 100644 --- a/classifier.py +++ b/classifier.py @@ -44,76 +44,76 @@ class classifier(AbstractClassifier): - """ ALL Jetson Nano Classifier + """ ALL Jetson Nano Classifier - Represents a AI classifier that processes data - using the ALL Jetson Nano Classifier model. - """ + Represents a AI classifier that processes data + using the ALL Jetson Nano Classifier model. + """ - def train(self): - """ Creates & trains the model. """ + def train(self): + """ Creates & trains the model. """ - self.model.prepare_data() - self.model.prepare_network() - self.model.train() - self.model.evaluate() + self.model.prepare_data() + self.model.prepare_network() + self.model.train() + self.model.evaluate() - def init_model(self): - """ Initializes the model class """ + def init_model(self): + """ Initializes the model class """ - self.model = model(self.helpers) + self.model = model(self.helpers) - def load_model(self): - """ Loads the trained model """ + def load_model(self): + """ Loads the trained model """ - self.model.load() + self.model.load() - def load_model_tfrt(self): - """ Loads the trained TFRT model """ + def load_model_tfrt(self): + """ Loads the trained TFRT model """ - self.model.load_tfrt() + self.model.load_tfrt() - def inference(self): - """ Classifies test data locally """ + def inference(self): + """ Classifies test data locally """ - self.load_model() - self.model.test() + self.load_model() + self.model.test() - def server(self): - """ Starts the API server """ + def server(self): + """ Starts the API server """ - self.load_model() - self.server = server(self.helpers, self.model, - self.model_type) - self.server.start() + self.load_model() + self.server = server(self.helpers, self.model, + self.model_type) + self.server.start() - def inference_http(self): - """ Classifies test data via HTTP requests """ + def inference_http(self): + """ Classifies test data via HTTP requests """ - self.model.test_http() + self.model.test_http() - def inference_tfrt(self): - """ Classifies test data via HTTP requests """ + def inference_tfrt(self): + """ Classifies test data via HTTP requests """ - self.load_model_tfrt() - self.model.test_tfrt() + self.load_model_tfrt() + self.model.test_tfrt() - def init_engine(self): - """ Initizializes the engine class """ + def init_engine(self): + """ Initizializes the engine class """ - from modules.engine import engine + from modules.engine import engine - self.engine = engine(self.helpers) + self.engine = engine(self.helpers) - def inference_tensorrt(self): - """ Classifies test data via HTTP requests """ + def inference_tensorrt(self): + """ Classifies test data via HTTP requests """ - self.engine.load_engine() - self.engine.test() + self.engine.load_engine() + self.engine.test() - def signal_handler(self, signal, frame): - self.helpers.logger.info("Disconnecting") - sys.exit(1) + 
def signal_handler(self, signal, frame): + self.helpers.logger.info("Disconnecting") + sys.exit(1) classifier = classifier() @@ -121,39 +121,39 @@ def signal_handler(self, signal, frame): def main(): - if len(sys.argv) < 2: - print("You must provide an argument") - exit() - elif sys.argv[1] not in classifier.helpers.confs["agent"]["params"]: - print("Mode not supported! server, train or inference") - exit() + if len(sys.argv) < 2: + print("You must provide an argument") + exit() + elif sys.argv[1] not in classifier.helpers.confs["agent"]["params"]: + print("Mode not supported! server, train or inference") + exit() - mode = sys.argv[1] + mode = sys.argv[1] - if mode == "train": - classifier.init_model() - classifier.train() + if mode == "train": + classifier.init_model() + classifier.train() - elif mode == "classify": - classifier.init_model() - classifier.inference() + elif mode == "classify": + classifier.init_model() + classifier.inference() - elif mode == "server": - classifier.init_model() - classifier.server() + elif mode == "server": + classifier.init_model() + classifier.server() - elif mode == "classify_http": - classifier.init_model() - classifier.inference_http() + elif mode == "classify_http": + classifier.init_model() + classifier.inference_http() - elif mode == "classify_tfrt": - classifier.init_model() - classifier.inference_tfrt() + elif mode == "classify_tfrt": + classifier.init_model() + classifier.inference_tfrt() - elif mode == "classify_tensorrt": - classifier.init_engine() - classifier.inference_tensorrt() + elif mode == "classify_tensorrt": + classifier.init_engine() + classifier.inference_tensorrt() if __name__ == "__main__": - main() + main() diff --git a/modules/AbstractClassifier.py b/modules/AbstractClassifier.py index 095ede0..9366a8f 100644 --- a/modules/AbstractClassifier.py +++ b/modules/AbstractClassifier.py @@ -40,49 +40,49 @@ class AbstractClassifier(ABC): - """ Abstract class representing an AI Classifier. - - Represents an AI Classifier. AI Classifiers process data using AI - models. Based on HIAS AI Agents for future compatibility with - the HIAS Network. - """ - - def __init__(self): - """ Initializes the AbstractClassifier object. """ - super().__init__() - - self.helpers = helpers("Classifier") - self.confs = self.helpers.confs - self.model_type = None - - self.helpers.logger.info("Classifier initialization complete.") - - @abstractmethod - def init_model(self): - """ Loads the model class """ - pass - - @abstractmethod - def train(self): - """ Creates & trains the model. """ - pass - - @abstractmethod - def load_model(self): - """ Loads the AI model """ - pass - - @abstractmethod - def inference(self): - """ Loads model and classifies test data """ - pass - - @abstractmethod - def server(self): - """ Loads the API server """ - pass - - @abstractmethod - def inference_http(self): - """ Classifies test data via HTTP requests """ - pass \ No newline at end of file + """ Abstract class representing an AI Classifier. + + Represents an AI Classifier. AI Classifiers process data using AI + models. Based on HIAS AI Agents for future compatibility with + the HIAS Network. + """ + + def __init__(self): + """ Initializes the AbstractClassifier object. 
""" + super().__init__() + + self.helpers = helpers("Classifier") + self.confs = self.helpers.confs + self.model_type = None + + self.helpers.logger.info("Classifier initialization complete.") + + @abstractmethod + def init_model(self): + """ Loads the model class """ + pass + + @abstractmethod + def train(self): + """ Creates & trains the model. """ + pass + + @abstractmethod + def load_model(self): + """ Loads the AI model """ + pass + + @abstractmethod + def inference(self): + """ Loads model and classifies test data """ + pass + + @abstractmethod + def server(self): + """ Loads the API server """ + pass + + @abstractmethod + def inference_http(self): + """ Classifies test data via HTTP requests """ + pass \ No newline at end of file diff --git a/modules/AbstractData.py b/modules/AbstractData.py index a4a427e..77204bc 100644 --- a/modules/AbstractData.py +++ b/modules/AbstractData.py @@ -35,7 +35,6 @@ import cv2 import pathlib import random -import os from numpy.random import seed @@ -43,73 +42,68 @@ class AbstractData(ABC): - """ AI Model Data Abstract Class. - - Provides the AI Model with the required required data - processing functionality. - """ - - def __init__(self, helpers): - "Initializes the AbstractData object." - super().__init__() - - self.helpers = helpers - self.confs = self.helpers.confs - - self.seed = self.confs["data"]["seed"] - self.dim = self.confs["data"]["dim"] - - seed(self.seed) - random.seed(self.seed) - - self.data = [] - self.labels = [] - - self.helpers.logger.info("Data class initialization complete.") - - def remove_testing(self): - """ Removes the testing images from the dataset. """ - - for img in self.confs["data"]["test_data"]: - original = "model/data/train/"+img - destination = "model/data/test/"+img - - if not os.path.isfile(original): - self.helpers.logger.error("Original " + destination + " does not exist, please ensure all data is in the model/data/train directory") - exit() - - pathlib.Path(original).rename(destination) - self.helpers.logger.info(original + " moved to " + destination) - cv2.imwrite(destination, cv2.resize(cv2.imread(destination), - (self.dim, self.dim))) - self.helpers.logger.info("Resized " + destination) - - @abstractmethod - def process(self): - """ Processes the images. """ - pass - - @abstractmethod - def encode_labels(self): - """ One Hot Encodes the labels. """ - pass - - @abstractmethod - def convert_data(self): - """ Converts the training data to a numpy array. """ - pass - - @abstractmethod - def shuffle(self): - """ Shuffles the data and labels. """ - pass - - @abstractmethod - def get_split(self): - """ Splits the data and labels creating training and validation datasets. """ - pass - - @abstractmethod - def resize(self, path, dim): - """ Resizes an image to the provided dimensions (dim). """ - pass + """ AI Model Data Abstract Class. + + Provides the AI Model with the required required data + processing functionality. + """ + + def __init__(self, helpers): + "Initializes the AbstractData object." + super().__init__() + + self.helpers = helpers + self.confs = self.helpers.confs + + self.seed = self.confs["data"]["seed"] + self.dim = self.confs["data"]["dim"] + + seed(self.seed) + random.seed(self.seed) + + self.data = [] + self.labels = [] + + self.helpers.logger.info("Data class initialization complete.") + + def remove_testing(self): + """ Removes the testing images from the dataset. 
""" + + for img in self.confs["data"]["test_data"]: + original = "model/data/train/"+img + destination = "model/data/test/"+img + pathlib.Path(original).rename(destination) + self.helpers.logger.info(original + " moved to " + destination) + cv2.imwrite(destination, cv2.resize(cv2.imread(destination), + (self.dim, self.dim))) + self.helpers.logger.info("Resized " + destination) + + @abstractmethod + def process(self): + """ Processes the images. """ + pass + + @abstractmethod + def encode_labels(self): + """ One Hot Encodes the labels. """ + pass + + @abstractmethod + def convert_data(self): + """ Converts the training data to a numpy array. """ + pass + + @abstractmethod + def shuffle(self): + """ Shuffles the data and labels. """ + pass + + @abstractmethod + def get_split(self): + """ Splits the data and labels creating training and validation datasets. """ + pass + + @abstractmethod + def resize(self, path, dim): + """ Resizes an image to the provided dimensions (dim). """ + pass diff --git a/modules/AbstractModel.py b/modules/AbstractModel.py index 2054c9c..3260875 100644 --- a/modules/AbstractModel.py +++ b/modules/AbstractModel.py @@ -41,138 +41,138 @@ from modules.data import data class AbstractModel(ABC): - """ AI Model abstract class """ - - def __init__(self, helpers): - """ Initializes the AbstractModel object. """ - super().__init__() - - self.helpers = helpers - self.confs = self.helpers.confs - - os.environ["KMP_BLOCKTIME"] = "1" - os.environ["KMP_SETTINGS"] = "1" - os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0" - os.environ["OMP_NUM_THREADS"] = str( - self.confs["agent"]["cores"]) - - self.data = data(self.helpers) - - self.data_labels = self.confs["data"]["labels"] - - self.testing_dir = self.confs["data"]["test"] - self.valid = self.confs["data"]["valid_types"] - self.seed = self.confs["data"]["seed"] - - random.seed(self.seed) - seed(self.seed) - - self.weights_file_path = self.confs["model"]["weights"] - self.json_model_path = self.confs["model"]["model"] - self.saved_model_path = self.confs["model"]["saved"] - self.tfrt_model_path = self.confs["model"]["tfrt"] - self.onnx_model_path = self.confs["model"]["onnx"] - self.tensorrt_model_path = self.confs["model"]["tensorrt"] - - self.helpers.logger.info("Model class initialization complete.") - - @abstractmethod - def prepare_data(self): - """ Prepares the model data """ - pass - - @abstractmethod - def prepare_network(self): - """ Builds the network """ - pass - - @abstractmethod - def train(self): - """ Trains the model """ - pass - - @abstractmethod - def save_model_as_json(self): - """ Saves the model as JSON """ - pass - - @abstractmethod - def save_weights(self): - """ Saves the model weights """ - pass - - @abstractmethod - def evaluate(self): - """ Evaluates the model """ - pass - - @abstractmethod - def plot_accuracy(self): - """ Plots the accuracy. """ - pass - - @abstractmethod - def plot_loss(self): - """ Plots the loss. """ - pass - - @abstractmethod - def plot_auc(self): - """ Plots the AUC curve. """ - pass - - @abstractmethod - def plot_precision(self): - """ Plots the precision. """ - pass - - @abstractmethod - def plot_recall(self): - """ Plots the recall. """ - pass - - @abstractmethod - def confusion_matrix(self): - """ Prints/displays the confusion matrix. """ - pass - - @abstractmethod - def figures_of_merit(self): - """ Calculates/prints the figures of merit. """ - pass - - @abstractmethod - def predictions(self): - """ Makes predictions on the train & test sets. 
""" - pass - - @abstractmethod - def predict(self, img): - """ Gets a prediction for an image. """ - pass - - @abstractmethod - def reshape(self, img): - """ Reshapes an image. """ - pass - - @abstractmethod - def test(self): - """Local test mode - - Loops through the test directory and classifies the images. - """ - pass - - @abstractmethod - def http_request(self): - """ Sends image to the inference API endpoint. """ - pass - - @abstractmethod - def test_http(self): - """Server test mode - - Loops through the test directory and sends the images to the classification server. - """ - pass \ No newline at end of file + """ AI Model abstract class """ + + def __init__(self, helpers): + """ Initializes the AbstractModel object. """ + super().__init__() + + self.helpers = helpers + self.confs = self.helpers.confs + + os.environ["KMP_BLOCKTIME"] = "1" + os.environ["KMP_SETTINGS"] = "1" + os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0" + os.environ["OMP_NUM_THREADS"] = str( + self.confs["agent"]["cores"]) + + self.data = data(self.helpers) + + self.data_labels = self.confs["data"]["labels"] + + self.testing_dir = self.confs["data"]["test"] + self.valid = self.confs["data"]["valid_types"] + self.seed = self.confs["data"]["seed"] + + random.seed(self.seed) + seed(self.seed) + + self.weights_file_path = self.confs["model"]["weights"] + self.json_model_path = self.confs["model"]["model"] + self.saved_model_path = self.confs["model"]["saved"] + self.tfrt_model_path = self.confs["model"]["tfrt"] + self.onnx_model_path = self.confs["model"]["onnx"] + self.tensorrt_model_path = self.confs["model"]["tensorrt"] + + self.helpers.logger.info("Model class initialization complete.") + + @abstractmethod + def prepare_data(self): + """ Prepares the model data """ + pass + + @abstractmethod + def prepare_network(self): + """ Builds the network """ + pass + + @abstractmethod + def train(self): + """ Trains the model """ + pass + + @abstractmethod + def save_model_as_json(self): + """ Saves the model as JSON """ + pass + + @abstractmethod + def save_weights(self): + """ Saves the model weights """ + pass + + @abstractmethod + def evaluate(self): + """ Evaluates the model """ + pass + + @abstractmethod + def plot_accuracy(self): + """ Plots the accuracy. """ + pass + + @abstractmethod + def plot_loss(self): + """ Plots the loss. """ + pass + + @abstractmethod + def plot_auc(self): + """ Plots the AUC curve. """ + pass + + @abstractmethod + def plot_precision(self): + """ Plots the precision. """ + pass + + @abstractmethod + def plot_recall(self): + """ Plots the recall. """ + pass + + @abstractmethod + def confusion_matrix(self): + """ Prints/displays the confusion matrix. """ + pass + + @abstractmethod + def figures_of_merit(self): + """ Calculates/prints the figures of merit. """ + pass + + @abstractmethod + def predictions(self): + """ Makes predictions on the train & test sets. """ + pass + + @abstractmethod + def predict(self, img): + """ Gets a prediction for an image. """ + pass + + @abstractmethod + def reshape(self, img): + """ Reshapes an image. """ + pass + + @abstractmethod + def test(self): + """Local test mode + + Loops through the test directory and classifies the images. + """ + pass + + @abstractmethod + def http_request(self): + """ Sends image to the inference API endpoint. """ + pass + + @abstractmethod + def test_http(self): + """Server test mode + + Loops through the test directory and sends the images to the classification server. 
+ """ + pass \ No newline at end of file diff --git a/modules/AbstractServer.py b/modules/AbstractServer.py index b0f326e..40c1181 100644 --- a/modules/AbstractServer.py +++ b/modules/AbstractServer.py @@ -34,30 +34,30 @@ from abc import ABC, abstractmethod class AbstractServer(ABC): - """ Server/API abstract class. + """ Server/API abstract class. - Abstract class for the classifier server/API. - """ + Abstract class for the classifier server/API. + """ - def __init__(self, helpers, model, model_type): - "Initializes the AbstractServer object." - super().__init__() + def __init__(self, helpers, model, model_type): + "Initializes the AbstractServer object." + super().__init__() - self.helpers = helpers - self.confs = self.helpers.confs + self.helpers = helpers + self.confs = self.helpers.confs - self.model = model - self.model_type = model_type + self.model = model + self.model_type = model_type - self.helpers.logger.info("Server initialization complete.") + self.helpers.logger.info("Server initialization complete.") - @abstractmethod - def predict(self, req): - """ Classifies an image sent via HTTP. """ - pass + @abstractmethod + def predict(self, req): + """ Classifies an image sent via HTTP. """ + pass - @abstractmethod - def start(self, img_path): - """ Sends image to the inference API endpoint. """ - pass + @abstractmethod + def start(self, img_path): + """ Sends image to the inference API endpoint. """ + pass diff --git a/modules/augmentation.py b/modules/augmentation.py index 8764d28..8b3ffe2 100644 --- a/modules/augmentation.py +++ b/modules/augmentation.py @@ -36,67 +36,67 @@ from skimage import transform as tm class augmentation(): - """ HIAS AI Model Data Augmentation Class + """ HIAS AI Model Data Augmentation Class - Provides data augmentation methods. - """ + Provides data augmentation methods. + """ - def __init__(self, helpers): - """ Initializes the class. """ + def __init__(self, helpers): + """ Initializes the class. """ - self.helpers = helpers + self.helpers = helpers - self.seed = self.helpers.confs["data"]["seed"] - seed(self.seed) + self.seed = self.helpers.confs["data"]["seed"] + seed(self.seed) - self.helpers.logger.info("Augmentation class initialization complete.") + self.helpers.logger.info("Augmentation class initialization complete.") - def grayscale(self, data): - """ Creates a grayscale copy. """ + def grayscale(self, data): + """ Creates a grayscale copy. """ - gray = cv2.cvtColor(data, cv2.COLOR_BGR2GRAY) - return np.dstack([gray, gray, gray]).astype(np.float32)/255. + gray = cv2.cvtColor(data, cv2.COLOR_BGR2GRAY) + return np.dstack([gray, gray, gray]).astype(np.float32)/255. - def equalize_hist(self, data): - """ Creates a histogram equalized copy. """ + def equalize_hist(self, data): + """ Creates a histogram equalized copy. """ - img_to_yuv = cv2.cvtColor(data, cv2.COLOR_BGR2YUV) - img_to_yuv[:, :, 0] = cv2.equalizeHist(img_to_yuv[:, :, 0]) - hist_equalization_result = cv2.cvtColor(img_to_yuv, cv2.COLOR_YUV2BGR) - return hist_equalization_result.astype(np.float32)/255. + img_to_yuv = cv2.cvtColor(data, cv2.COLOR_BGR2YUV) + img_to_yuv[:, :, 0] = cv2.equalizeHist(img_to_yuv[:, :, 0]) + hist_equalization_result = cv2.cvtColor(img_to_yuv, cv2.COLOR_YUV2BGR) + return hist_equalization_result.astype(np.float32)/255. - def reflection(self, data): - """ Creates a reflected copy. """ + def reflection(self, data): + """ Creates a reflected copy. """ - return cv2.flip(data, 0).astype(np.float32)/255., cv2.flip(data, 1).astype(np.float32)/255. 
+ return cv2.flip(data, 0).astype(np.float32)/255., cv2.flip(data, 1).astype(np.float32)/255. - def gaussian(self, data): - """ Creates a gaussian blurred copy. """ + def gaussian(self, data): + """ Creates a gaussian blurred copy. """ - return ndimage.gaussian_filter(data, sigma=5.11).astype(np.float32)/255. + return ndimage.gaussian_filter(data, sigma=5.11).astype(np.float32)/255. - def translate(self, data): - """ Creates transformed copy. """ + def translate(self, data): + """ Creates transformed copy. """ - cols, rows, chs = data.shape + cols, rows, chs = data.shape - return cv2.warpAffine(data, np.float32([[1, 0, 84], [0, 1, 56]]), (rows, cols), - borderMode=cv2.BORDER_CONSTANT, borderValue=(144, 159, 162)).astype(np.float32)/255. + return cv2.warpAffine(data, np.float32([[1, 0, 84], [0, 1, 56]]), (rows, cols), + borderMode=cv2.BORDER_CONSTANT, borderValue=(144, 159, 162)).astype(np.float32)/255. - def rotation(self, data): - """ Creates a rotated copy. """ + def rotation(self, data): + """ Creates a rotated copy. """ - cols, rows, chs = data.shape + cols, rows, chs = data.shape - rand_deg = random.randint(-180, 180) - matrix = cv2.getRotationMatrix2D((cols/2, rows/2), rand_deg, 0.70) - rotated = cv2.warpAffine(data, matrix, (rows, cols), borderMode=cv2.BORDER_CONSTANT, - borderValue=(144, 159, 162)) + rand_deg = random.randint(-180, 180) + matrix = cv2.getRotationMatrix2D((cols/2, rows/2), rand_deg, 0.70) + rotated = cv2.warpAffine(data, matrix, (rows, cols), borderMode=cv2.BORDER_CONSTANT, + borderValue=(144, 159, 162)) - return rotated.astype(np.float32)/255. + return rotated.astype(np.float32)/255. - def shear(self, data): - """ Creates a histogram equalized copy. """ + def shear(self, data): + """ Creates a histogram equalized copy. """ - at = tm.AffineTransform(shear=0.5) - return tm.warp(data, inverse_map=at) + at = tm.AffineTransform(shear=0.5) + return tm.warp(data, inverse_map=at) diff --git a/modules/data.py b/modules/data.py index c6f96d8..ac9b63c 100644 --- a/modules/data.py +++ b/modules/data.py @@ -43,135 +43,135 @@ from modules.augmentation import augmentation class data(AbstractData): - """ AI Model Data Class. + """ AI Model Data Class. - Provides the AI Model with the required required data - processing functionality. - """ + Provides the AI Model with the required required data + processing functionality. + """ - def process(self): - """ Processes the images. """ + def process(self): + """ Processes the images. """ - aug = augmentation(self.helpers) + aug = augmentation(self.helpers) - data_dir = pathlib.Path(self.confs["data"]["train_dir"]) - data = list(data_dir.glob( - '*' + self.confs["data"]["file_type"])) + data_dir = pathlib.Path(self.confs["data"]["train_dir"]) + data = list(data_dir.glob( + '*' + self.confs["data"]["file_type"])) - count = 0 - neg_count = 0 - pos_count = 0 + count = 0 + neg_count = 0 + pos_count = 0 - augmented_data = [] - self.labels = [] - temp = [] + augmented_data = [] + self.labels = [] + temp = [] - for rimage in data: - fpath = str(rimage) - fname = os.path.basename(rimage) - label = 0 if "_0" in fname else 1 + for rimage in data: + fpath = str(rimage) + fname = os.path.basename(rimage) + label = 0 if "_0" in fname else 1 - # Resize Image - image = self.resize(fpath, self.dim) + # Resize Image + image = self.resize(fpath, self.dim) - if image.shape[2] == 1: - image = np.dstack( - [image, image, image]) + if image.shape[2] == 1: + image = np.dstack( + [image, image, image]) - temp.append(image.astype(np.float32)/255.) 
+ temp.append(image.astype(np.float32)/255.) - self.data.append(image.astype(np.float32)/255.) - self.labels.append(label) + self.data.append(image.astype(np.float32)/255.) + self.labels.append(label) - # Grayscale - self.data.append(aug.grayscale(image)) - self.labels.append(label) + # Grayscale + self.data.append(aug.grayscale(image)) + self.labels.append(label) - # Histogram Equalization - self.data.append(aug.equalize_hist(image)) - self.labels.append(label) + # Histogram Equalization + self.data.append(aug.equalize_hist(image)) + self.labels.append(label) - # Reflection - horizontal, vertical = aug.reflection(image) - self.data.append(horizontal) - self.labels.append(label) - self.data.append(vertical) - self.labels.append(label) + # Reflection + horizontal, vertical = aug.reflection(image) + self.data.append(horizontal) + self.labels.append(label) + self.data.append(vertical) + self.labels.append(label) - # Gaussian Blur - self.data.append(aug.gaussian(image)) - self.labels.append(label) + # Gaussian Blur + self.data.append(aug.gaussian(image)) + self.labels.append(label) - # Translation - self.data.append(aug.translate(image)) - self.labels.append(label) + # Translation + self.data.append(aug.translate(image)) + self.labels.append(label) - # Shear - self.data.append(aug.shear(image)) - self.labels.append(label) + # Shear + self.data.append(aug.shear(image)) + self.labels.append(label) - # Rotation - for i in range(0, self.helpers.confs["data"]["rotations"]): - self.data.append(aug.rotation(image)) - self.labels.append(label) - if "_0" in fname: - neg_count += 1 - else: - pos_count += 1 - count += 1 + # Rotation + for i in range(0, self.helpers.confs["data"]["rotations"]): + self.data.append(aug.rotation(image)) + self.labels.append(label) + if "_0" in fname: + neg_count += 1 + else: + pos_count += 1 + count += 1 - if "_0" in fname: - neg_count += 8 - else: - pos_count += 8 - count += 8 + if "_0" in fname: + neg_count += 8 + else: + pos_count += 8 + count += 8 - self.shuffle() - self.convert_data() - self.encode_labels() + self.shuffle() + self.convert_data() + self.encode_labels() - self.helpers.logger.info("Augmented data size: " + str(count)) - self.helpers.logger.info("Negative data size: " + str(neg_count)) - self.helpers.logger.info("Positive data size: " + str(pos_count)) - self.helpers.logger.info("Augmented data shape: " + str(self.data.shape)) - self.helpers.logger.info("Labels shape: " + str(self.labels.shape)) + self.helpers.logger.info("Augmented data size: " + str(count)) + self.helpers.logger.info("Negative data size: " + str(neg_count)) + self.helpers.logger.info("Positive data size: " + str(pos_count)) + self.helpers.logger.info("Augmented data shape: " + str(self.data.shape)) + self.helpers.logger.info("Labels shape: " + str(self.labels.shape)) - self.X_train_arr = np.asarray(temp) + self.X_train_arr = np.asarray(temp) - self.get_split() + self.get_split() - def convert_data(self): - """ Converts the training data to a numpy array. """ + def convert_data(self): + """ Converts the training data to a numpy array. """ - self.data = np.array(self.data) + self.data = np.array(self.data) - def encode_labels(self): - """ One Hot Encodes the labels. """ + def encode_labels(self): + """ One Hot Encodes the labels. 
""" - encoder = OneHotEncoder(categories='auto') + encoder = OneHotEncoder(categories='auto') - self.labels = np.reshape(self.labels, (-1, 1)) - self.labels = encoder.fit_transform(self.labels).toarray() + self.labels = np.reshape(self.labels, (-1, 1)) + self.labels = encoder.fit_transform(self.labels).toarray() - def shuffle(self): - """ Shuffles the data and labels. """ + def shuffle(self): + """ Shuffles the data and labels. """ - self.data, self.labels = shuffle( - self.data, self.labels, random_state=self.seed) + self.data, self.labels = shuffle( + self.data, self.labels, random_state=self.seed) - def get_split(self): - """ Splits the data and labels creating training and validation datasets. """ + def get_split(self): + """ Splits the data and labels creating training and validation datasets. """ - self.X_train, self.X_test, self.y_train, self.y_test = train_test_split( - self.data, self.labels, test_size=self.helpers.confs["data"]["split"], - random_state=self.seed) + self.X_train, self.X_test, self.y_train, self.y_test = train_test_split( + self.data, self.labels, test_size=self.helpers.confs["data"]["split"], + random_state=self.seed) - self.helpers.logger.info("Training data: " + str(self.X_train.shape)) - self.helpers.logger.info("Training labels: " + str(self.y_train.shape)) - self.helpers.logger.info("Validation data: " + str(self.X_test.shape)) - self.helpers.logger.info("Validation labels: " + str(self.y_test.shape)) + self.helpers.logger.info("Training data: " + str(self.X_train.shape)) + self.helpers.logger.info("Training labels: " + str(self.y_train.shape)) + self.helpers.logger.info("Validation data: " + str(self.X_test.shape)) + self.helpers.logger.info("Validation labels: " + str(self.y_test.shape)) - def resize(self, path, dim): - """ Resizes an image to the provided dimensions (dim). """ + def resize(self, path, dim): + """ Resizes an image to the provided dimensions (dim). """ - return cv2.resize(cv2.imread(path), (dim, dim)) + return cv2.resize(cv2.imread(path), (dim, dim)) diff --git a/modules/engine.py b/modules/engine.py index 3d22c6f..2a2e074 100644 --- a/modules/engine.py +++ b/modules/engine.py @@ -1,17 +1,13 @@ """ TensorRT engine - Provides the TensorRT engine functionality. - Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files(the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and / or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE @@ -19,10 +15,8 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - Contributors: - Adam Milton-Barker - Reference: https://github.com/jkjung-avt/keras_imagenet/ """ @@ -37,163 +31,162 @@ class engine(): - """ TensorRT engine """ - - def __init__(self, helpers): - """ Initializes the TensorRT engine class. 
""" + """ TensorRT engine """ - self.helpers = helpers - self.confs = helpers.confs - self.onnx_model_path = self.confs["model"]["onnx"] - self.tensorrt_model_path = self.confs["model"]["tensorrt"] - self.testing_dir = self.confs["data"]["test"] - self.valid = self.confs["data"]["valid_types"] - self.labels = self.confs["data"]["labels"] + def __init__(self, helpers): + """ Initializes the TensorRT engine class. """ - if not os.path.isfile(self.tensorrt_model_path): - self.save_engine(self.build_engine()) - self.helpers.logger.info("TensorRT model generated.") + self.helpers = helpers + self.confs = helpers.confs + self.onnx_model_path = self.confs["model"]["onnx"] + self.tensorrt_model_path = self.confs["model"]["tensorrt"] + self.testing_dir = self.confs["data"]["test"] + self.valid = self.confs["data"]["valid_types"] + self.labels = self.confs["data"]["labels"] - self.helpers.logger.info("Engine class initialization complete.") + if not os.path.isfile(self.tensorrt_model_path): + self.save_engine(self.build_engine()) + self.helpers.logger.info("TensorRT model generated.") - def build_engine(self): - """ Builds the TensorRT engine. """ + self.helpers.logger.info("Engine class initialization complete.") - with trt.Builder(TRT_LOGGER) as builder, builder.create_network(*EXPLICIT_BATCH) \ - as network, trt.OnnxParser(network, TRT_LOGGER) as parser: - builder.max_workspace_size = 1 << 30 - builder.max_batch_size = 1 - builder.fp16_mode = False - with open(self.onnx_model_path, 'rb') as model: - if not parser.parse(model.read()): - self.helpers.logger.info("ERROR: Failed to parse the ONNX file.") - for error in range(parser.num_errors): - self.helpers.logger.info(parser.get_error(error)) - return None - shape = list(network.get_input(0).shape) - shape[0] = 1 - network.get_input(0).shape = shape - return builder.build_cuda_engine(network) + def build_engine(self): + """ Builds the TensorRT engine. """ - self.helpers.logger.info("Engine build complete.") + with trt.Builder(TRT_LOGGER) as builder, builder.create_network(*EXPLICIT_BATCH) \ + as network, trt.OnnxParser(network, TRT_LOGGER) as parser: + builder.max_workspace_size = 1 << 30 + builder.max_batch_size = 1 + builder.fp16_mode = False + with open(self.onnx_model_path, 'rb') as model: + if not parser.parse(model.read()): + self.helpers.logger.info("ERROR: Failed to parse the ONNX file.") + for error in range(parser.num_errors): + self.helpers.logger.info(parser.get_error(error)) + return None + shape = list(network.get_input(0).shape) + shape[0] = 1 + network.get_input(0).shape = shape + return builder.build_cuda_engine(network) - def save_engine(self, engine): - """ Saves the TensorRT engine. """ + self.helpers.logger.info("Engine build complete.") - with open(self.tensorrt_model_path, 'wb') as f: - f.write(engine.serialize()) + def save_engine(self, engine): + """ Saves the TensorRT engine. """ - self.helpers.logger.info("Engine save complete.") - - def load_engine(self): - """ Loads the TensorRT engine. 
""" - - with open(self.tensorrt_model_path, 'rb') as f: - engine_data = f.read() - self.engine = trt.Runtime(TRT_LOGGER).deserialize_cuda_engine(engine_data) - - self.helpers.logger.info("Engine load complete.") - - def init_trt_buffers(self, cuda): - """ Initialize host buffers and cuda buffers for the engine.""" - - size = trt.volume((1, 100, 100, 3)) * self.engine.max_batch_size - host_input = cuda.pagelocked_empty(size, np.float32) - cuda_input = cuda.mem_alloc(host_input.nbytes) - size = trt.volume((1, 2)) * self.engine.max_batch_size - host_output = cuda.pagelocked_empty(size, np.float32) - cuda_output = cuda.mem_alloc(host_output.nbytes) - return host_input, cuda_input, host_output, cuda_output - - self.helpers.logger.info("Engine buffers initialized.") - - def predict(self, img): - """ Inference the image with TensorRT engine.""" - - import pycuda.autoinit - import pycuda.driver as cuda - - with open(self.tensorrt_model_path, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime: - engine = runtime.deserialize_cuda_engine(f.read()) - - host_input, cuda_input, host_output, cuda_output = self.init_trt_buffers( - cuda) - stream = cuda.Stream() - - context = self.engine.create_execution_context() - context.set_binding_shape(0, (1, 100, 100, 3)) - - np.copyto(host_input, img.ravel()) - cuda.memcpy_htod_async(cuda_input, host_input, stream) - - context.execute_async_v2(bindings=[int(cuda_input), int(cuda_output)], - stream_handle=stream.handle) - - cuda.memcpy_dtoh_async(host_output, cuda_output, stream) - stream.synchronize() - - return host_output - - def reshape(self, img): - """ Reshapes an image. """ - - dx, dy, dz = img.shape - input_data = img.reshape((-1, dx, dy, dz)) - input_data = input_data / 255.0 - - return input_data - - def test(self): - """TensorRT test mode - - Loops through the test directory and classifies the images - using the TensorRT model. - """ - - files = 0 - tp = 0 - fp = 0 - tn = 0 - fn = 0 - totaltime = 0 - - for testFile in os.listdir(self.testing_dir): - if os.path.splitext(testFile)[1] in self.valid: - files += 1 - fileName = self.testing_dir + "/" + testFile - - img = cv2.imread(fileName).astype(np.float32) - self.helpers.logger.info("Loaded test image " + fileName) - - img = cv2.resize(img, (100,100)) - img = self.reshape(img) - - start = time.time() - predictions = self.predict(img) - predictions = predictions.argsort()[::-1] - prediction = self.labels[predictions[0]] - end = time.time() - benchmark = end - start - totaltime += benchmark + with open(self.tensorrt_model_path, 'wb') as f: + f.write(engine.serialize()) + + self.helpers.logger.info("Engine save complete.") + + def load_engine(self): + """ Loads the TensorRT engine. 
""" + + with open(self.tensorrt_model_path, 'rb') as f: + engine_data = f.read() + self.engine = trt.Runtime(TRT_LOGGER).deserialize_cuda_engine(engine_data) + + self.helpers.logger.info("Engine load complete.") + + def init_trt_buffers(self, cuda): + """ Initialize host buffers and cuda buffers for the engine.""" + + size = trt.volume((1, 100, 100, 3)) * self.engine.max_batch_size + host_input = cuda.pagelocked_empty(size, np.float32) + cuda_input = cuda.mem_alloc(host_input.nbytes) + size = trt.volume((1, 2)) * self.engine.max_batch_size + host_output = cuda.pagelocked_empty(size, np.float32) + cuda_output = cuda.mem_alloc(host_output.nbytes) + return host_input, cuda_input, host_output, cuda_output + + self.helpers.logger.info("Engine buffers initialized.") + + def predict(self, img): + """ Inference the image with TensorRT engine.""" + + import pycuda.autoinit + import pycuda.driver as cuda + + with open(self.tensorrt_model_path, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime: + engine = runtime.deserialize_cuda_engine(f.read()) + + host_input, cuda_input, host_output, cuda_output = self.init_trt_buffers( + cuda) + stream = cuda.Stream() + + context = self.engine.create_execution_context() + context.set_binding_shape(0, (1, 100, 100, 3)) + + np.copyto(host_input, img.ravel()) + cuda.memcpy_htod_async(cuda_input, host_input, stream) + + context.execute_async_v2(bindings=[int(cuda_input), int(cuda_output)], + stream_handle=stream.handle) + + cuda.memcpy_dtoh_async(host_output, cuda_output, stream) + stream.synchronize() + + return host_output + + def reshape(self, img): + """ Reshapes an image. """ + + dx, dy, dz = img.shape + input_data = img.reshape((-1, dx, dy, dz)) + input_data = input_data / 255.0 + + return input_data + + def test(self): + """TensorRT test mode + Loops through the test directory and classifies the images + using the TensorRT model. + """ + + files = 0 + tp = 0 + fp = 0 + tn = 0 + fn = 0 + totaltime = 0 + + for testFile in os.listdir(self.testing_dir): + if os.path.splitext(testFile)[1] in self.valid: + files += 1 + fileName = self.testing_dir + "/" + testFile + + img = cv2.imread(fileName).astype(np.float32) + self.helpers.logger.info("Loaded test image " + fileName) + + img = cv2.resize(img, (100,100)) + img = self.reshape(img) + + start = time.time() + predictions = self.predict(img) + predictions = predictions.argsort()[::-1] + prediction = self.labels[predictions[0]] + end = time.time() + benchmark = end - start + totaltime += benchmark - msg = "" - if prediction == 1 and "_1." in testFile: - tp += 1 - msg = "Acute Lymphoblastic Leukemia correctly detected (True Positive) in " + str(benchmark) + " seconds." - elif prediction == 1 and "_0." in testFile: - fp += 1 - msg = "Acute Lymphoblastic Leukemia incorrectly detected (False Positive) in " + str(benchmark) + " seconds." - elif prediction == 0 and "_0." in testFile: - tn += 1 - msg = "Acute Lymphoblastic Leukemia correctly not detected (True Negative) in " + str(benchmark) + " seconds." - elif prediction == 0 and "_1." in testFile: - fn += 1 - msg = "Acute Lymphoblastic Leukemia incorrectly not detected (False Negative) in " + str(benchmark) + " seconds." 
- self.helpers.logger.info(msg) - - self.helpers.logger.info("Images Classified: " + str(files)) - self.helpers.logger.info("True Positives: " + str(tp)) - self.helpers.logger.info("False Positives: " + str(fp)) - self.helpers.logger.info("True Negatives: " + str(tn)) - self.helpers.logger.info("False Negatives: " + str(fn)) - self.helpers.logger.info("Total Time Taken: " + str(totaltime)) \ No newline at end of file + msg = "" + if prediction == 1 and "_1." in testFile: + tp += 1 + msg = "Acute Lymphoblastic Leukemia correctly detected (True Positive) in " + str(benchmark) + " seconds." + elif prediction == 1 and "_0." in testFile: + fp += 1 + msg = "Acute Lymphoblastic Leukemia incorrectly detected (False Positive) in " + str(benchmark) + " seconds." + elif prediction == 0 and "_0." in testFile: + tn += 1 + msg = "Acute Lymphoblastic Leukemia correctly not detected (True Negative) in " + str(benchmark) + " seconds." + elif prediction == 0 and "_1." in testFile: + fn += 1 + msg = "Acute Lymphoblastic Leukemia incorrectly not detected (False Negative) in " + str(benchmark) + " seconds." + self.helpers.logger.info(msg) + + self.helpers.logger.info("Images Classified: " + str(files)) + self.helpers.logger.info("True Positives: " + str(tp)) + self.helpers.logger.info("False Positives: " + str(fp)) + self.helpers.logger.info("True Negatives: " + str(tn)) + self.helpers.logger.info("False Negatives: " + str(fn)) + self.helpers.logger.info("Total Time Taken: " + str(totaltime)) \ No newline at end of file diff --git a/modules/helpers.py b/modules/helpers.py index b3a2db0..fb6e554 100644 --- a/modules/helpers.py +++ b/modules/helpers.py @@ -37,63 +37,63 @@ class helpers(): - """ Helper Class + """ Helper Class - Configuration and logging functions. - """ + Configuration and logging functions. + """ - def __init__(self, ltype, log=True): - """ Initializes the Helpers Class. """ + def __init__(self, ltype, log=True): + """ Initializes the Helpers Class. 
""" - # Loads system configs - self.confs = {} - self.load_confs() + # Loads system configs + self.confs = {} + self.load_confs() - # Sets system logging - self.logger = logging.getLogger(ltype) - self.logger.setLevel(logging.INFO) + # Sets system logging + self.logger = logging.getLogger(ltype) + self.logger.setLevel(logging.INFO) - formatter = logging.Formatter( - '%(asctime)s - %(name)s - %(levelname)s - %(message)s') + formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s') - allLogHandler = handlers.TimedRotatingFileHandler( - os.path.dirname(os.path.abspath(__file__)) + '/../logs/all.log', when='H', interval=1, backupCount=0) - allLogHandler.setLevel(logging.INFO) - allLogHandler.setFormatter(formatter) + allLogHandler = handlers.TimedRotatingFileHandler( + os.path.dirname(os.path.abspath(__file__)) + '/../logs/all.log', when='H', interval=1, backupCount=0) + allLogHandler.setLevel(logging.INFO) + allLogHandler.setFormatter(formatter) - errorLogHandler = handlers.TimedRotatingFileHandler( - os.path.dirname(os.path.abspath(__file__)) + '/../logs/error.log', when='H', interval=1, backupCount=0) - errorLogHandler.setLevel(logging.ERROR) - errorLogHandler.setFormatter(formatter) + errorLogHandler = handlers.TimedRotatingFileHandler( + os.path.dirname(os.path.abspath(__file__)) + '/../logs/error.log', when='H', interval=1, backupCount=0) + errorLogHandler.setLevel(logging.ERROR) + errorLogHandler.setFormatter(formatter) - warningLogHandler = handlers.TimedRotatingFileHandler( - os.path.dirname(os.path.abspath(__file__)) + '/../logs/warning.log', when='H', interval=1, backupCount=0) - warningLogHandler.setLevel(logging.WARNING) - warningLogHandler.setFormatter(formatter) + warningLogHandler = handlers.TimedRotatingFileHandler( + os.path.dirname(os.path.abspath(__file__)) + '/../logs/warning.log', when='H', interval=1, backupCount=0) + warningLogHandler.setLevel(logging.WARNING) + warningLogHandler.setFormatter(formatter) - consoleHandler = logging.StreamHandler(sys.stdout) - consoleHandler.setFormatter(formatter) + consoleHandler = logging.StreamHandler(sys.stdout) + consoleHandler.setFormatter(formatter) - self.logger.addHandler(allLogHandler) - self.logger.addHandler(errorLogHandler) - self.logger.addHandler(warningLogHandler) - self.logger.addHandler(consoleHandler) + self.logger.addHandler(allLogHandler) + self.logger.addHandler(errorLogHandler) + self.logger.addHandler(warningLogHandler) + self.logger.addHandler(consoleHandler) - if log is True: - self.logger.info("Helpers class initialization complete.") + if log is True: + self.logger.info("Helpers class initialization complete.") - def load_confs(self): - """ Load the configuration. """ + def load_confs(self): + """ Load the configuration. """ - with open(os.path.dirname(os.path.abspath(__file__)) + '/../configuration/config.json') as confs: - self.confs = json.loads(confs.read()) + with open(os.path.dirname(os.path.abspath(__file__)) + '/../configuration/config.json') as confs: + self.confs = json.loads(confs.read()) - def get_ip_addr(self): - """ Load the configuration. """ + def get_ip_addr(self): + """ Load the configuration. 
""" - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - s.connect(("8.8.8.8", 80)) - ipaddr = s.getsockname()[0] - s.close() - - return ipaddr \ No newline at end of file + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.connect(("8.8.8.8", 80)) + ipaddr = s.getsockname()[0] + s.close() + + return ipaddr \ No newline at end of file diff --git a/modules/model.py b/modules/model.py index fd81816..c0abea2 100644 --- a/modules/model.py +++ b/modules/model.py @@ -54,462 +54,462 @@ class model(AbstractModel): - """ AI Model class """ + """ AI Model class """ - def prepare_data(self): - """ Creates/sorts dataset. """ + def prepare_data(self): + """ Creates/sorts dataset. """ - self.data.remove_testing() - self.data.process() + self.data.remove_testing() + self.data.process() - self.helpers.logger.info("Data preperation complete.") + self.helpers.logger.info("Data preperation complete.") - def prepare_network(self): - """ Builds the network. """ + def prepare_network(self): + """ Builds the network. """ - self.tf_model = tf.keras.models.Sequential([ - tf.keras.layers.InputLayer(input_shape=(self.data.X_train.shape[1:])), - tf.keras.layers.AveragePooling2D( - pool_size=(2, 2), strides=None, padding='valid'), - tf.keras.layers.Conv2D(30, (5, 5), strides=1, - padding="valid", activation='relu'), - tf.keras.layers.DepthwiseConv2D(30, (1, 1), - padding="valid", activation='relu'), - tf.keras.layers.Flatten(), - tf.keras.layers.Dense(2), - tf.keras.layers.Activation('softmax', name='softmax') - ], - "AllJetsonNano") - self.tf_model.summary() + self.tf_model = tf.keras.models.Sequential([ + tf.keras.layers.InputLayer(input_shape=(self.data.X_train.shape[1:])), + tf.keras.layers.AveragePooling2D( + pool_size=(2, 2), strides=None, padding='valid'), + tf.keras.layers.Conv2D(30, (5, 5), strides=1, + padding="valid", activation='relu'), + tf.keras.layers.DepthwiseConv2D(30, (1, 1), + padding="valid", activation='relu'), + tf.keras.layers.Flatten(), + tf.keras.layers.Dense(2), + tf.keras.layers.Activation('softmax', name='softmax') + ], + "AllJetsonNano") + self.tf_model.summary() - self.helpers.logger.info("Network initialization complete.") + self.helpers.logger.info("Network initialization complete.") - def train(self): - """ Trains the model - - Compiles and fits the model. 
- """ - - self.helpers.logger.info("Using Adam Optimizer.") - optimizer = tf.keras.optimizers.Adam(learning_rate=self.confs["train"]["learning_rate_adam"], - decay = self.confs["train"]["decay_adam"]) - - self.helpers.logger.info("Using Early Stopping.") - callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', - patience=3, - verbose=0, - mode='auto', - restore_best_weights=True) - - self.tf_model.compile(optimizer=optimizer, - loss='binary_crossentropy', - metrics=[tf.keras.metrics.BinaryAccuracy(name='acc'), - tf.keras.metrics.Precision(name='precision'), - tf.keras.metrics.Recall(name='recall'), - tf.keras.metrics.AUC(name='auc') ]) - - self.history = self.tf_model.fit(self.data.X_train, self.data.y_train, - validation_data=(self.data.X_test, self.data.y_test), - validation_steps=self.confs["train"]["val_steps"], - epochs=self.confs["train"]["epochs"], callbacks=[callback]) - - print(self.history) - print("") - - self.save_model_as_json() - self.save_weights() - self.convert_to_tfrt() - self.convert_to_onnx() - - def save_model_as_json(self): - """ Saves the model as JSON """ - - with open(self.json_model_path, "w") as file: - file.write(self.tf_model.to_json()) - - self.tf_model.save(self.saved_model_path) - - self.helpers.logger.info("Model JSON saved " + self.json_model_path) - - def save_weights(self): - """ Saves the model weights """ - - self.tf_model.save_weights(self.weights_file_path) - self.helpers.logger.info("Weights saved " + self.weights_file_path) - - def convert_to_tfrt(self): - """ Converts the model to TFRT format """ - - converter = trt.TrtGraphConverterV2( - input_saved_model_dir=self.saved_model_path) - converter.convert() - converter.save(self.tfrt_model_path) - - def convert_to_onnx(self): - """ Converts the model to ONNX format """ - - os.system('python3 -m tf2onnx.convert --saved-model ' \ - + self.saved_model_path \ - + ' --output ' + self.onnx_model_path \ - + ' --tag serve --signature_def serving_default') - - def predictions(self): - """ Gets a prediction for an image. """ - - self.train_preds = self.tf_model.predict(self.data.X_train) - self.test_preds = self.tf_model.predict(self.data.X_test) - - def evaluate(self): - """ Evaluates the model """ - - self.predictions() - - metrics = self.tf_model.evaluate( - self.data.X_test, self.data.y_test, verbose=0) - for name, value in zip(self.tf_model.metrics_names, metrics): - self.helpers.logger.info("Metrics: " + name + " " + str(value)) - print() - - self.plot_accuracy() - self.plot_loss() - self.plot_auc() - self.plot_precision() - self.plot_recall() - self.confusion_matrix() - self.figures_of_merit() - - def plot_accuracy(self): - """ Plots the accuracy. """ - - plt.plot(self.history.history['acc']) - plt.plot(self.history.history['val_acc']) - plt.title('Model Accuracy') - plt.ylabel('Accuracy') - plt.xlabel('Epoch') - plt.ylim((0, 1)) - plt.legend(['Train', 'Validate'], loc='upper left') - plt.savefig('model/plots/accuracy.png') - plt.show() - plt.clf() - - def plot_loss(self): - """ Plots the loss. """ - - plt.plot(self.history.history['loss']) - plt.plot(self.history.history['val_loss']) - plt.title('Model Loss') - plt.ylabel('loss') - plt.xlabel('Epoch') - plt.legend(['Train', 'Validate'], loc='upper left') - plt.savefig('model/plots/loss.png') - plt.show() - plt.clf() - - def plot_auc(self): - """ Plots the AUC. 
""" - - plt.plot(self.history.history['auc']) - plt.plot(self.history.history['val_auc']) - plt.title('Model AUC') - plt.ylabel('AUC') - plt.xlabel('Epoch') - plt.legend(['Train', 'Validate'], loc='upper left') - plt.savefig('model/plots/auc.png') - plt.show() - plt.clf() - - def plot_precision(self): - """ Plots the precision. """ - - plt.plot(self.history.history['precision']) - plt.plot(self.history.history['val_precision']) - plt.title('Model Precision') - plt.ylabel('Precision') - plt.xlabel('Epoch') - plt.legend(['Train', 'Validate'], loc='upper left') - plt.savefig('model/plots/precision.png') - plt.show() - plt.clf() - - def plot_recall(self): - """ Plots the recall. """ - - plt.plot(self.history.history['recall']) - plt.plot(self.history.history['val_recall']) - plt.title('Model Recall') - plt.ylabel('Recall') - plt.xlabel('Epoch') - plt.legend(['Train', 'Validate'], loc='upper left') - plt.savefig('model/plots/recall.png') - plt.show() - plt.clf() - - def confusion_matrix(self): - """ Plots the confusion matrix. """ - - self.matrix = confusion_matrix(self.data.y_test.argmax(axis=1), - self.test_preds.argmax(axis=1)) - - self.helpers.logger.info("Confusion Matrix: " + str(self.matrix)) - print("") - - plot_confusion_matrix(conf_mat=self.matrix) - plt.savefig('model/plots/confusion-matrix.png') - plt.show() - plt.clf() - - def figures_of_merit(self): - """ Calculates/prints the figures of merit. - - https://homes.di.unimi.it/scotti/all/ - """ - - test_len = len(self.data.X_test) - - TP = self.matrix[1][1] - TN = self.matrix[0][0] - FP = self.matrix[0][1] - FN = self.matrix[1][0] - - TPP = (TP * 100)/test_len - FPP = (FP * 100)/test_len - FNP = (FN * 100)/test_len - TNP = (TN * 100)/test_len - - specificity = TN/(TN+FP) - - misc = FP + FN - miscp = (misc * 100)/test_len - - self.helpers.logger.info( - "True Positives: " + str(TP) + "(" + str(TPP) + "%)") - self.helpers.logger.info( - "False Positives: " + str(FP) + "(" + str(FPP) + "%)") - self.helpers.logger.info( - "True Negatives: " + str(TN) + "(" + str(TNP) + "%)") - self.helpers.logger.info( - "False Negatives: " + str(FN) + "(" + str(FNP) + "%)") - - self.helpers.logger.info("Specificity: " + str(specificity)) - self.helpers.logger.info("Misclassification: " + - str(misc) + "(" + str(miscp) + "%)") - - def load(self): - """ Loads the model """ - - with open(self.json_model_path) as file: - m_json = file.read() - - self.tf_model = tf.keras.models.model_from_json(m_json) - self.tf_model.load_weights(self.weights_file_path) - - self.helpers.logger.info("Model loaded ") - - self.tf_model.summary() - - def predict(self, img): - """ Gets a prediction for an image. """ - - predictions = self.tf_model.predict(img) - prediction = np.argmax(predictions, axis=-1) - - return prediction - - def reshape(self, img): - """ Reshapes an image. """ - - dx, dy, dz = img.shape - input_data = img.reshape((-1, dx, dy, dz)) - input_data = input_data / 255.0 - - return input_data - - def test(self): - """ Test mode - - Loops through the test directory and classifies the images. 
- """ - - files = 0 - tp = 0 - fp = 0 - tn = 0 - fn = 0 - totaltime = 0 - - for testFile in os.listdir(self.testing_dir): - if os.path.splitext(testFile)[1] in self.valid: - files += 1 - fileName = self.testing_dir + "/" + testFile - - img = cv2.imread(fileName).astype(np.float32) - self.helpers.logger.info("Loaded test image " + fileName) - - img = cv2.resize(img, (self.data.dim, self.data.dim)) - img = self.reshape(img) - - start = time.time() - prediction = self.predict(img) - end = time.time() - benchmark = end - start - totaltime += benchmark - - msg = "" - if prediction == 1 and "_1." in testFile: - tp += 1 - msg = "Acute Lymphoblastic Leukemia correctly detected (True Positive) in " + str(benchmark) + " seconds." - elif prediction == 1 and "_0." in testFile: - fp += 1 - msg = "Acute Lymphoblastic Leukemia incorrectly detected (False Positive) in " + str(benchmark) + " seconds." - elif prediction == 0 and "_0." in testFile: - tn += 1 - msg = "Acute Lymphoblastic Leukemia correctly not detected (True Negative) in " + str(benchmark) + " seconds." - elif prediction == 0 and "_1." in testFile: - fn += 1 - msg = "Acute Lymphoblastic Leukemia incorrectly not detected (False Negative) in " + str(benchmark) + " seconds." - self.helpers.logger.info(msg) - - self.helpers.logger.info("Images Classified: " + str(files)) - self.helpers.logger.info("True Positives: " + str(tp)) - self.helpers.logger.info("False Positives: " + str(fp)) - self.helpers.logger.info("True Negatives: " + str(tn)) - self.helpers.logger.info("False Negatives: " + str(fn)) - self.helpers.logger.info("Total Time Taken: " + str(totaltime)) - - def http_request(self, img_path): - """ Sends image to the inference API endpoint. """ - - self.helpers.logger.info("Sending request for: " + img_path) - - _, img_encoded = cv2.imencode('.jpg', cv2.imread(img_path)) - response = requests.post(self.addr, data=img_encoded.tostring(), headers=self.headers) - response = json.loads(response.text) - - return response - - def test_http(self): - """Server test mode - - Loops through the test directory and sends the images to the - classification server. - """ - - totaltime = 0 - files = 0 - - tp = 0 - fp = 0 - tn = 0 - fn = 0 - - self.addr = "http://" + self.helpers.get_ip_addr() + \ - ':'+str(self.helpers.confs["agent"]["port"]) + '/Inference' - self.headers = {'content-type': 'image/jpeg'} - - for testFile in os.listdir(self.testing_dir): - if os.path.splitext(testFile)[1] in self.valid: - - start = time.time() - prediction = self.http_request(self.testing_dir + "/" + testFile) - end = time.time() - benchmark = end - start - totaltime += benchmark - - msg = "" - status = "" - outcome = "" - - if prediction["Diagnosis"] == "Positive" and "_1." in testFile: - tp += 1 - status = "correctly" - outcome = "(True Positive)" - elif prediction["Diagnosis"] == "Positive" and "_0." in testFile: - fp += 1 - status = "incorrectly" - outcome = "(False Positive)" - elif prediction["Diagnosis"] == "Negative" and "_0." in testFile: - tn += 1 - status = "correctly" - outcome = "(True Negative)" - elif prediction["Diagnosis"] == "Negative" and "_1." 
in testFile: - fn += 1 - status = "incorrectly" - outcome = "(False Negative)" - - files += 1 - self.helpers.logger.info("Acute Lymphoblastic Leukemia " + status + - " detected " + outcome + " in " + str(benchmark) + " seconds.") - - self.helpers.logger.info("Images Classified: " + str(files)) - self.helpers.logger.info("True Positives: " + str(tp)) - self.helpers.logger.info("False Positives: " + str(fp)) - self.helpers.logger.info("True Negatives: " + str(tn)) - self.helpers.logger.info("False Negatives: " + str(fn)) - self.helpers.logger.info("Total Time Taken: " + str(totaltime)) - - def load_tfrt(self): - """ Loads the tfrt model """ - - self.tfrt_model = tf.saved_model.load(self.tfrt_model_path) - - def predict_tfrt(self, img): - """ Gets a prediction for an image. """ - - inference = self.tfrt_model.signatures["serving_default"] - prediction = inference(tf.constant(img, dtype=float))['softmax'] - prediction = self.data_labels[int(tf.argmax(prediction, axis=1))] - - return prediction - - def test_tfrt(self): - """TFRT test mode - - Loops through the test directory and classifies the images - usin the TFRT model. - """ - - files = 0 - tp = 0 - fp = 0 - tn = 0 - fn = 0 - totaltime = 0 - - for testFile in os.listdir(self.testing_dir): - if os.path.splitext(testFile)[1] in self.valid: - files += 1 - fileName = self.testing_dir + "/" + testFile - - img = cv2.imread(fileName).astype(np.float32) - self.helpers.logger.info("Loaded test image " + fileName) - - img = cv2.resize(img, (self.data.dim, self.data.dim)) - img = self.reshape(img) - - start = time.time() - prediction = self.predict_tfrt(img) - end = time.time() - benchmark = end - start - totaltime += benchmark - - msg = "" - if prediction == 1 and "_1." in testFile: - tp += 1 - msg = "Acute Lymphoblastic Leukemia correctly detected (True Positive) in " + str(benchmark) + " seconds." - elif prediction == 1 and "_0." in testFile: - fp += 1 - msg = "Acute Lymphoblastic Leukemia incorrectly detected (False Positive) in " + str(benchmark) + " seconds." - elif prediction == 0 and "_0." in testFile: - tn += 1 - msg = "Acute Lymphoblastic Leukemia correctly not detected (True Negative) in " + str(benchmark) + " seconds." - elif prediction == 0 and "_1." in testFile: - fn += 1 - msg = "Acute Lymphoblastic Leukemia incorrectly not detected (False Negative) in " + str(benchmark) + " seconds." - self.helpers.logger.info(msg) - - self.helpers.logger.info("Images Classified: " + str(files)) - self.helpers.logger.info("True Positives: " + str(tp)) - self.helpers.logger.info("False Positives: " + str(fp)) - self.helpers.logger.info("True Negatives: " + str(tn)) - self.helpers.logger.info("False Negatives: " + str(fn)) - self.helpers.logger.info("Total Time Taken: " + str(totaltime)) \ No newline at end of file + def train(self): + """ Trains the model + + Compiles and fits the model. 
+ """ + + self.helpers.logger.info("Using Adam Optimizer.") + optimizer = tf.keras.optimizers.Adam(learning_rate=self.confs["train"]["learning_rate_adam"], + decay = self.confs["train"]["decay_adam"]) + + self.helpers.logger.info("Using Early Stopping.") + callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', + patience=3, + verbose=0, + mode='auto', + restore_best_weights=True) + + self.tf_model.compile(optimizer=optimizer, + loss='binary_crossentropy', + metrics=[tf.keras.metrics.BinaryAccuracy(name='acc'), + tf.keras.metrics.Precision(name='precision'), + tf.keras.metrics.Recall(name='recall'), + tf.keras.metrics.AUC(name='auc') ]) + + self.history = self.tf_model.fit(self.data.X_train, self.data.y_train, + validation_data=(self.data.X_test, self.data.y_test), + validation_steps=self.confs["train"]["val_steps"], + epochs=self.confs["train"]["epochs"], callbacks=[callback]) + + print(self.history) + print("") + + self.tf_model.save(self.saved_model_path) + + self.save_model_as_json() + self.save_weights() + self.convert_to_tfrt() + self.convert_to_onnx() + + def save_model_as_json(self): + """ Saves the model as JSON """ + + with open(self.json_model_path, "w") as file: + file.write(self.tf_model.to_json()) + + self.helpers.logger.info("Model JSON saved " + self.json_model_path) + + def save_weights(self): + """ Saves the model weights """ + + self.tf_model.save_weights(self.weights_file_path) + self.helpers.logger.info("Weights saved " + self.weights_file_path) + + def convert_to_tfrt(self): + """ Converts the model to TFRT format """ + + converter = trt.TrtGraphConverterV2( + input_saved_model_dir=self.saved_model_path) + converter.convert() + converter.save(self.tfrt_model_path) + + def convert_to_onnx(self): + """ Converts the model to ONNX format """ + + os.system('python3 -m tf2onnx.convert --saved-model ' \ + + self.saved_model_path \ + + ' --output ' + self.onnx_model_path \ + + ' --tag serve --signature_def serving_default') + + def predictions(self): + """ Gets a prediction for an image. """ + + self.train_preds = self.tf_model.predict(self.data.X_train) + self.test_preds = self.tf_model.predict(self.data.X_test) + + def evaluate(self): + """ Evaluates the model """ + + self.predictions() + + metrics = self.tf_model.evaluate( + self.data.X_test, self.data.y_test, verbose=0) + for name, value in zip(self.tf_model.metrics_names, metrics): + self.helpers.logger.info("Metrics: " + name + " " + str(value)) + print() + + self.plot_accuracy() + self.plot_loss() + self.plot_auc() + self.plot_precision() + self.plot_recall() + self.confusion_matrix() + self.figures_of_merit() + + def plot_accuracy(self): + """ Plots the accuracy. """ + + plt.plot(self.history.history['acc']) + plt.plot(self.history.history['val_acc']) + plt.title('Model Accuracy') + plt.ylabel('Accuracy') + plt.xlabel('Epoch') + plt.ylim((0, 1)) + plt.legend(['Train', 'Validate'], loc='upper left') + plt.savefig('model/plots/accuracy.png') + plt.show() + plt.clf() + + def plot_loss(self): + """ Plots the loss. """ + + plt.plot(self.history.history['loss']) + plt.plot(self.history.history['val_loss']) + plt.title('Model Loss') + plt.ylabel('loss') + plt.xlabel('Epoch') + plt.legend(['Train', 'Validate'], loc='upper left') + plt.savefig('model/plots/loss.png') + plt.show() + plt.clf() + + def plot_auc(self): + """ Plots the AUC. 
""" + + plt.plot(self.history.history['auc']) + plt.plot(self.history.history['val_auc']) + plt.title('Model AUC') + plt.ylabel('AUC') + plt.xlabel('Epoch') + plt.legend(['Train', 'Validate'], loc='upper left') + plt.savefig('model/plots/auc.png') + plt.show() + plt.clf() + + def plot_precision(self): + """ Plots the precision. """ + + plt.plot(self.history.history['precision']) + plt.plot(self.history.history['val_precision']) + plt.title('Model Precision') + plt.ylabel('Precision') + plt.xlabel('Epoch') + plt.legend(['Train', 'Validate'], loc='upper left') + plt.savefig('model/plots/precision.png') + plt.show() + plt.clf() + + def plot_recall(self): + """ Plots the recall. """ + + plt.plot(self.history.history['recall']) + plt.plot(self.history.history['val_recall']) + plt.title('Model Recall') + plt.ylabel('Recall') + plt.xlabel('Epoch') + plt.legend(['Train', 'Validate'], loc='upper left') + plt.savefig('model/plots/recall.png') + plt.show() + plt.clf() + + def confusion_matrix(self): + """ Plots the confusion matrix. """ + + self.matrix = confusion_matrix(self.data.y_test.argmax(axis=1), + self.test_preds.argmax(axis=1)) + + self.helpers.logger.info("Confusion Matrix: " + str(self.matrix)) + print("") + + plot_confusion_matrix(conf_mat=self.matrix) + plt.savefig('model/plots/confusion-matrix.png') + plt.show() + plt.clf() + + def figures_of_merit(self): + """ Calculates/prints the figures of merit. + + https://homes.di.unimi.it/scotti/all/ + """ + + test_len = len(self.data.X_test) + + TP = self.matrix[1][1] + TN = self.matrix[0][0] + FP = self.matrix[0][1] + FN = self.matrix[1][0] + + TPP = (TP * 100)/test_len + FPP = (FP * 100)/test_len + FNP = (FN * 100)/test_len + TNP = (TN * 100)/test_len + + specificity = TN/(TN+FP) + + misc = FP + FN + miscp = (misc * 100)/test_len + + self.helpers.logger.info( + "True Positives: " + str(TP) + "(" + str(TPP) + "%)") + self.helpers.logger.info( + "False Positives: " + str(FP) + "(" + str(FPP) + "%)") + self.helpers.logger.info( + "True Negatives: " + str(TN) + "(" + str(TNP) + "%)") + self.helpers.logger.info( + "False Negatives: " + str(FN) + "(" + str(FNP) + "%)") + + self.helpers.logger.info("Specificity: " + str(specificity)) + self.helpers.logger.info("Misclassification: " + + str(misc) + "(" + str(miscp) + "%)") + + def load(self): + """ Loads the model """ + + with open(self.json_model_path) as file: + m_json = file.read() + + self.tf_model = tf.keras.models.model_from_json(m_json) + self.tf_model.load_weights(self.weights_file_path) + + self.helpers.logger.info("Model loaded ") + + self.tf_model.summary() + + def predict(self, img): + """ Gets a prediction for an image. """ + + predictions = self.tf_model.predict(img) + prediction = np.argmax(predictions, axis=-1) + + return prediction + + def reshape(self, img): + """ Reshapes an image. """ + + dx, dy, dz = img.shape + input_data = img.reshape((-1, dx, dy, dz)) + input_data = input_data / 255.0 + + return input_data + + def test(self): + """ Test mode + + Loops through the test directory and classifies the images. 
+ """ + + files = 0 + tp = 0 + fp = 0 + tn = 0 + fn = 0 + totaltime = 0 + + for testFile in os.listdir(self.testing_dir): + if os.path.splitext(testFile)[1] in self.valid: + files += 1 + fileName = self.testing_dir + "/" + testFile + + img = cv2.imread(fileName).astype(np.float32) + self.helpers.logger.info("Loaded test image " + fileName) + + img = cv2.resize(img, (self.data.dim, self.data.dim)) + img = self.reshape(img) + + start = time.time() + prediction = self.predict(img) + end = time.time() + benchmark = end - start + totaltime += benchmark + + msg = "" + if prediction == 1 and "_1." in testFile: + tp += 1 + msg = "Acute Lymphoblastic Leukemia correctly detected (True Positive) in " + str(benchmark) + " seconds." + elif prediction == 1 and "_0." in testFile: + fp += 1 + msg = "Acute Lymphoblastic Leukemia incorrectly detected (False Positive) in " + str(benchmark) + " seconds." + elif prediction == 0 and "_0." in testFile: + tn += 1 + msg = "Acute Lymphoblastic Leukemia correctly not detected (True Negative) in " + str(benchmark) + " seconds." + elif prediction == 0 and "_1." in testFile: + fn += 1 + msg = "Acute Lymphoblastic Leukemia incorrectly not detected (False Negative) in " + str(benchmark) + " seconds." + self.helpers.logger.info(msg) + + self.helpers.logger.info("Images Classified: " + str(files)) + self.helpers.logger.info("True Positives: " + str(tp)) + self.helpers.logger.info("False Positives: " + str(fp)) + self.helpers.logger.info("True Negatives: " + str(tn)) + self.helpers.logger.info("False Negatives: " + str(fn)) + self.helpers.logger.info("Total Time Taken: " + str(totaltime)) + + def http_request(self, img_path): + """ Sends image to the inference API endpoint. """ + + self.helpers.logger.info("Sending request for: " + img_path) + + _, img_encoded = cv2.imencode('.jpg', cv2.imread(img_path)) + response = requests.post(self.addr, data=img_encoded.tostring(), headers=self.headers) + response = json.loads(response.text) + + return response + + def test_http(self): + """Server test mode + + Loops through the test directory and sends the images to the + classification server. + """ + + totaltime = 0 + files = 0 + + tp = 0 + fp = 0 + tn = 0 + fn = 0 + + self.addr = "http://" + self.helpers.get_ip_addr() + \ + ':'+str(self.helpers.confs["agent"]["port"]) + '/Inference' + self.headers = {'content-type': 'image/jpeg'} + + for testFile in os.listdir(self.testing_dir): + if os.path.splitext(testFile)[1] in self.valid: + + start = time.time() + prediction = self.http_request(self.testing_dir + "/" + testFile) + end = time.time() + benchmark = end - start + totaltime += benchmark + + msg = "" + status = "" + outcome = "" + + if prediction["Diagnosis"] == "Positive" and "_1." in testFile: + tp += 1 + status = "correctly" + outcome = "(True Positive)" + elif prediction["Diagnosis"] == "Positive" and "_0." in testFile: + fp += 1 + status = "incorrectly" + outcome = "(False Positive)" + elif prediction["Diagnosis"] == "Negative" and "_0." in testFile: + tn += 1 + status = "correctly" + outcome = "(True Negative)" + elif prediction["Diagnosis"] == "Negative" and "_1." 
in testFile:
+ fn += 1
+ status = "incorrectly"
+ outcome = "(False Negative)"
+
+ files += 1
+ self.helpers.logger.info("Acute Lymphoblastic Leukemia " + status +
+ " detected " + outcome + " in " + str(benchmark) + " seconds.")
+
+ self.helpers.logger.info("Images Classified: " + str(files))
+ self.helpers.logger.info("True Positives: " + str(tp))
+ self.helpers.logger.info("False Positives: " + str(fp))
+ self.helpers.logger.info("True Negatives: " + str(tn))
+ self.helpers.logger.info("False Negatives: " + str(fn))
+ self.helpers.logger.info("Total Time Taken: " + str(totaltime))
+
+ def load_tfrt(self):
+ """ Loads the tfrt model """
+
+ self.tfrt_model = tf.saved_model.load(self.tfrt_model_path)
+
+ def predict_tfrt(self, img):
+ """ Gets a prediction for an image. """
+
+ inference = self.tfrt_model.signatures["serving_default"]
+ prediction = inference(tf.constant(img, dtype=float))['softmax']
+ prediction = self.data_labels[int(tf.argmax(prediction, axis=1))]
+
+ return prediction
+
+ def test_tfrt(self):
+ """TFRT test mode
+
+ Loops through the test directory and classifies the images
+ using the TFRT model.
+ """
+
+ files = 0
+ tp = 0
+ fp = 0
+ tn = 0
+ fn = 0
+ totaltime = 0
+
+ for testFile in os.listdir(self.testing_dir):
+ if os.path.splitext(testFile)[1] in self.valid:
+ files += 1
+ fileName = self.testing_dir + "/" + testFile
+
+ img = cv2.imread(fileName).astype(np.float32)
+ self.helpers.logger.info("Loaded test image " + fileName)
+
+ img = cv2.resize(img, (self.data.dim, self.data.dim))
+ img = self.reshape(img)
+
+ start = time.time()
+ prediction = self.predict_tfrt(img)
+ end = time.time()
+ benchmark = end - start
+ totaltime += benchmark
+
+ msg = ""
+ if prediction == 1 and "_1." in testFile:
+ tp += 1
+ msg = "Acute Lymphoblastic Leukemia correctly detected (True Positive) in " + str(benchmark) + " seconds."
+ elif prediction == 1 and "_0." in testFile:
+ fp += 1
+ msg = "Acute Lymphoblastic Leukemia incorrectly detected (False Positive) in " + str(benchmark) + " seconds."
+ elif prediction == 0 and "_0." in testFile:
+ tn += 1
+ msg = "Acute Lymphoblastic Leukemia correctly not detected (True Negative) in " + str(benchmark) + " seconds."
+ elif prediction == 0 and "_1." in testFile:
+ fn += 1
+ msg = "Acute Lymphoblastic Leukemia incorrectly not detected (False Negative) in " + str(benchmark) + " seconds."
+ self.helpers.logger.info(msg)
+
+ self.helpers.logger.info("Images Classified: " + str(files))
+ self.helpers.logger.info("True Positives: " + str(tp))
+ self.helpers.logger.info("False Positives: " + str(fp))
+ self.helpers.logger.info("True Negatives: " + str(tn))
+ self.helpers.logger.info("False Negatives: " + str(fn))
+ self.helpers.logger.info("Total Time Taken: " + str(totaltime))
\ No newline at end of file
diff --git a/modules/server.py b/modules/server.py
index b38cb17..c2c0f5a 100644
--- a/modules/server.py
+++ b/modules/server.py
@@ -47,47 +47,47 @@ from modules.AbstractServer import AbstractServer
 class server(AbstractServer):
- """ Server/API class.
+ """ Server/API class.
- Class for the classifier server/API.
- """
+ Class for the classifier server/API.
+ """
- def predict(self, req):
- """ Classifies an image sent via HTTP. """
+ def predict(self, req):
+ """ Classifies an image sent via HTTP. """
""" - img = np.fromstring(req.data, np.uint8) - img = cv2.imdecode(img, cv2.IMREAD_COLOR) + img = np.fromstring(req.data, np.uint8) + img = cv2.imdecode(img, cv2.IMREAD_COLOR) - img = cv2.resize(img, (self.model.data.dim, - self.model.data.dim)) - img = self.model.reshape(img) + img = cv2.resize(img, (self.model.data.dim, + self.model.data.dim)) + img = self.model.reshape(img) - return self.model.predict(img) + return self.model.predict(img) - def start(self): - """ Starts the server. """ + def start(self): + """ Starts the server. """ - app = Flask("AllJetsonNano") + app = Flask("AllJetsonNano") - @app.route('/Inference', methods=['POST']) - def Inference(): - """ Responds to HTTP POST requests. """ + @app.route('/Inference', methods=['POST']) + def Inference(): + """ Responds to HTTP POST requests. """ - prediction = self.predict(request) + prediction = self.predict(request) - if prediction == 1: - message = "Acute Lymphoblastic Leukemia detected!" - diagnosis = "Positive" - elif prediction == 0: - message = "Acute Lymphoblastic Leukemia not detected!" - diagnosis = "Negative" + if prediction == 1: + message = "Acute Lymphoblastic Leukemia detected!" + diagnosis = "Positive" + elif prediction == 0: + message = "Acute Lymphoblastic Leukemia not detected!" + diagnosis = "Negative" - resp = jsonpickle.encode({ - 'Response': 'OK', - 'Message': message, - 'Diagnosis': diagnosis - }) + resp = jsonpickle.encode({ + 'Response': 'OK', + 'Message': message, + 'Diagnosis': diagnosis + }) - return Response(response=resp, status=200, mimetype="application/json") + return Response(response=resp, status=200, mimetype="application/json") - app.run(host=self.helpers.get_ip_addr(), port=self.helpers.confs["agent"]["port"]) \ No newline at end of file + app.run(host=self.helpers.get_ip_addr(), port=self.helpers.confs["agent"]["port"]) \ No newline at end of file