Skip to content

Commit

Permalink
Improve nebullvm logging (#141)
Browse files Browse the repository at this point in the history
* improved logs

* upgraded version numbers

* fixed links in the Speedster README & added a save/load example to all notebooks
  • Loading branch information
valeriosofi authored Jan 9, 2023
1 parent edabce1 commit 9362fc7
Show file tree
Hide file tree
Showing 38 changed files with 567 additions and 129 deletions.
8 changes: 4 additions & 4 deletions apps/accelerate/speedster/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ python -m nebullvm.installers.auto_installer --backends all --compilers all
> on your environment before proceeding to the next step, please install it from this
> [link](https://pytorch.org/get-started/locally/).
For more details on the installation step, please visit [Installation](https://docs.nebuly.com/speedster/installation).
For more details on the installation step, please visit [Installation](https://docs.nebuly.com/modules/speedster/installation).


# API quick view
Expand Down Expand Up @@ -116,8 +116,8 @@ Don't forget to leave a star ⭐ to support the project and happy acceleration


<p align="center">
<a href="https://docs.nebuly.com/speedster/installation">Installation</a> •
<a href="https://docs.nebuly.com/speedster/get-started">Get started</a> •
<a href="https://docs.nebuly.com/modules/speedster/installation">Installation</a> •
<a href="https://docs.nebuly.com/modules/speedster/getting-started">Get started</a> •
<a href="https://github.com/nebuly-ai/nebullvm/tree/main/notebooks">Notebooks</a> •
<a href="https://docs.nebuly.com/speedster/benchmarks">Benchmarks</a>
<a href="https://docs.nebuly.com/modules/speedster/benchmarks">Benchmarks</a>
</p>
2 changes: 1 addition & 1 deletion apps/accelerate/speedster/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@

setup(
name="speedster",
version="0.0.2",
version="0.1.0",
packages=find_packages(),
install_requires=REQUIREMENTS,
long_description=long_description,
Expand Down
35 changes: 20 additions & 15 deletions apps/accelerate/speedster/speedster/api/functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,14 @@
Optional,
)

from loguru import logger
from nebullvm.optional_modules.tensorflow import tensorflow as tf
from nebullvm.optional_modules.torch import torch
from nebullvm.tools.base import Device
from nebullvm.tools.logger import debug_mode_enabled, LoggingContext
from nebullvm.tools.utils import gpu_is_available
from speedster.root_op import SpeedsterRootOp

logger = logging.getLogger("nebullvm_logger")
from speedster.root_op import SpeedsterRootOp


def _check_device(device: Optional[str]) -> Device:
Expand Down Expand Up @@ -143,18 +144,22 @@ def optimize_model(
"""
root_op = SpeedsterRootOp()
device = _check_device(device)
root_op.to(device).execute(
model=model,
input_data=input_data,
metric_drop_ths=metric_drop_ths,
metric=metric,
optimization_time=optimization_time,
dynamic_info=dynamic_info,
config_file=config_file,
ignore_compilers=ignore_compilers,
ignore_compressors=ignore_compressors,
store_latencies=store_latencies,
**kwargs,
)

disable_log = True if not debug_mode_enabled() else False

with LoggingContext(logging.getLogger(), disabled=disable_log):
root_op.to(device).execute(
model=model,
input_data=input_data,
metric_drop_ths=metric_drop_ths,
metric=metric,
optimization_time=optimization_time,
dynamic_info=dynamic_info,
config_file=config_file,
ignore_compilers=ignore_compilers,
ignore_compressors=ignore_compressors,
store_latencies=store_latencies,
**kwargs,
)

return root_op.get_result()
5 changes: 2 additions & 3 deletions nebullvm/api/functions.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
import logging
from typing import (
Union,
Iterable,
Expand All @@ -9,13 +8,13 @@
Optional,
)

from loguru import logger

from nebullvm.optional_modules.tensorflow import tensorflow as tf
from nebullvm.optional_modules.torch import torch
from nebullvm.tools.base import Device
from nebullvm.tools.utils import gpu_is_available

logger = logging.getLogger("nebullvm_logger")


def _check_device(device: Optional[str]) -> Device:
if device is None:
Expand Down
2 changes: 1 addition & 1 deletion nebullvm/config.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from nebullvm.optional_modules.torch import torch


VERSION = "0.6.1"
VERSION = "0.7.0"
LEARNER_METADATA_FILENAME = "metadata.json"
ONNX_OPSET_VERSION = 13
NEBULLVM_DEBUG_FILE = "nebullvm_debug.json"
Expand Down
5 changes: 2 additions & 3 deletions nebullvm/installers/auto_installer.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
import argparse
import logging
from typing import List, Union

from loguru import logger

from nebullvm.config import (
ONNX_MODULES,
TENSORFLOW_MODULES,
Expand All @@ -16,8 +17,6 @@
)


logger = logging.getLogger("nebullvm_logger")

SUPPORTED_BACKENDS = [
"torch-full",
"torch-base",
Expand Down
4 changes: 1 addition & 3 deletions nebullvm/installers/installers.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
import logging
import os
import platform
import subprocess
Expand All @@ -8,6 +7,7 @@
from typing import List, Union

import cpuinfo
from loguru import logger

from nebullvm.config import (
LIBRARIES_GPU,
Expand All @@ -25,8 +25,6 @@
check_module_version,
)

logger = logging.getLogger("nebullvm_logger")


def get_cpu_arch():
arch = cpuinfo.get_cpu_info()["arch"].lower()
Expand Down
5 changes: 2 additions & 3 deletions nebullvm/operations/base.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,12 @@
import abc
import logging
from typing import Dict, Any, Optional, Union

from loguru import logger

from nebullvm.tools.base import Device
from nebullvm.tools.feedback_collector import FeedbackCollector
from nebullvm.tools.utils import gpu_is_available

logger = logging.getLogger("nebullvm_logger")


def _check_device(device: Optional[str]) -> Device:
if device is None:
Expand Down
5 changes: 2 additions & 3 deletions nebullvm/operations/conversions/pytorch.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import logging
from pathlib import Path

from loguru import logger

from nebullvm.config import ONNX_OPSET_VERSION
from nebullvm.optional_modules.torch import torch, Module
from nebullvm.tools.base import ModelParams, Device
Expand All @@ -10,8 +11,6 @@
create_model_inputs_torch,
)

logger = logging.getLogger("nebullvm_logger")


def convert_torch_to_onnx(
torch_model: Module,
Expand Down
5 changes: 2 additions & 3 deletions nebullvm/operations/conversions/tensorflow.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,16 @@
import logging
import subprocess
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Union

from loguru import logger

from nebullvm.config import ONNX_OPSET_VERSION
from nebullvm.optional_modules.tensorflow import tensorflow as tf, tf2onnx
from nebullvm.optional_modules.onnx import onnx
from nebullvm.tools.base import ModelParams
from nebullvm.tools.huggingface import TensorFlowTransformerWrapper

logger = logging.getLogger("nebullvm_logger")


def convert_tf_to_onnx(
model: Union[tf.Module, tf.keras.Model],
Expand Down
4 changes: 1 addition & 3 deletions nebullvm/operations/inference_learners/deepsparse.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
import logging
import os
import shutil
from abc import ABC
from pathlib import Path
from typing import Union, List, Generator, Tuple, Dict, Type

import numpy as np
from loguru import logger

from nebullvm.config import ONNX_FILENAMES
from nebullvm.operations.inference_learners.base import (
Expand All @@ -18,8 +18,6 @@
from nebullvm.tools.base import ModelParams, DeepLearningFramework
from nebullvm.tools.transformations import MultiStageTransformation

logger = logging.getLogger("nebullvm_logger")


class DeepSparseInferenceLearner(BaseInferenceLearner, ABC):
"""Model optimized on CPU using DeepSparse. DeepSparse is an engine
Expand Down
5 changes: 2 additions & 3 deletions nebullvm/operations/inference_learners/neural_compressor.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
import logging
import pickle
from abc import ABC
from pathlib import Path
from typing import Union, Tuple, Dict, Type

from loguru import logger

from nebullvm.operations.inference_learners.base import (
BaseInferenceLearner,
LearnerMetadata,
Expand All @@ -29,8 +30,6 @@
from nebullvm.tools.transformations import MultiStageTransformation
from nebullvm.tools.utils import check_module_version

logger = logging.getLogger("nebullvm_logger")


class NeuralCompressorInferenceLearner(BaseInferenceLearner, ABC):
"""Model optimized on CPU using IntelNeuralCompressor.
Expand Down
4 changes: 1 addition & 3 deletions nebullvm/operations/inference_learners/onnx.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
import logging
import multiprocessing
import os
import shutil
Expand All @@ -8,6 +7,7 @@

import cpuinfo
import numpy as np
from loguru import logger

from nebullvm.config import (
ONNX_FILENAMES,
Expand All @@ -27,8 +27,6 @@
from nebullvm.tools.base import DeepLearningFramework, Device, ModelParams
from nebullvm.tools.transformations import MultiStageTransformation

logger = logging.getLogger("nebullvm_logger")


def _running_on_intel_cpu(use_gpu):
if use_gpu:
Expand Down
4 changes: 1 addition & 3 deletions nebullvm/operations/inference_learners/openvino.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
import json
import logging
import shutil
from abc import ABC
from pathlib import Path
from typing import Dict, Union, Type, Generator, Tuple, List, Optional

import numpy as np
from loguru import logger

from nebullvm.config import OPENVINO_FILENAMES
from nebullvm.operations.inference_learners.base import (
Expand All @@ -27,8 +27,6 @@
from nebullvm.tools.data import DataManager
from nebullvm.tools.transformations import MultiStageTransformation

logger = logging.getLogger("nebullvm_logger")


class OpenVinoInferenceLearner(BaseInferenceLearner, ABC):
"""Model optimized using OpenVINO.
Expand Down
4 changes: 1 addition & 3 deletions nebullvm/operations/inference_learners/tensor_rt.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
import json
import logging
import os
from abc import ABC
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any, Union, Dict, Type, List, Tuple, Generator, Optional

import numpy as np
from loguru import logger

from nebullvm.config import NVIDIA_FILENAMES
from nebullvm.operations.inference_learners.base import (
Expand All @@ -26,8 +26,6 @@
VerifyContiguity,
)

logger = logging.getLogger("nebullvm_logger")


class ONNXTensorRTInferenceLearner(BaseInferenceLearner, ABC):
"""Model optimized using TensorRT.
Expand Down
4 changes: 1 addition & 3 deletions nebullvm/operations/measures/utils.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
import logging
import time
from typing import Tuple, List, Union, Any

import numpy as np
from loguru import logger

from nebullvm.config import ONNX_PROVIDERS
from nebullvm.operations.inference_learners.base import BaseInferenceLearner
Expand All @@ -16,8 +16,6 @@
get_output_names,
)

logger = logging.getLogger("nebullvm_logger")


def compute_torch_latency(
xs: List[Tuple[torch.Tensor]],
Expand Down
13 changes: 0 additions & 13 deletions nebullvm/operations/optimizations/compilers/onnxruntime.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,6 @@
)
from nebullvm.tools.base import QuantizationType
from nebullvm.tools.data import DataManager
from nebullvm.tools.logger import (
debug_mode_enabled,
save_root_logger_state,
raise_logger_level,
load_root_logger_state,
)
from nebullvm.tools.transformations import MultiStageTransformation


Expand Down Expand Up @@ -80,18 +74,11 @@ def execute(
QUANTIZATION_DATA_NUM
)

if not debug_mode_enabled():
logger_state = save_root_logger_state()
raise_logger_level()

if quantization_type is not None:
model = self._quantize_model(
model, train_input_data, quantization_type, input_tfms
)

if not debug_mode_enabled():
load_root_logger_state(logger_state)

self.compiled_model = self._compile_model(model)

def _compile_model(self, model: Union[str, Path]):
Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
import copy
import logging
from typing import List, Tuple, Union

from loguru import logger

from nebullvm.optional_modules.torch import (
torch,
Module,
Expand All @@ -21,8 +22,6 @@
)
from nebullvm.tools.utils import check_module_version

logger = logging.getLogger("nebullvm_logger")


class _QuantWrapper(Module):
def __init__(self, model: Module):
Expand Down
Original file line number Diff line number Diff line change
@@ -1,9 +1,7 @@
import logging
from loguru import logger

from nebullvm.tools.base import QuantizationType

logger = logging.getLogger("nebullvm_logger")


def check_quantization(
quantization_type: QuantizationType, perf_loss_ths: float
Expand Down
Loading

0 comments on commit 9362fc7

Please sign in to comment.