hannah.conf.optimizer

SGDConf Objects

@dataclass
class SGDConf()

lr

_RequiredParameter

MADGRADConf Objects

@dataclass
class MADGRADConf()

lr

_RequiredParameter

hannah.conf

hannah.conf.scheduler

OneCycleLRConf Objects

@dataclass
class OneCycleLRConf()

Config for OneCycleLR; total steps are configured from the module.

hannah.conf.nas

hannah.models.honk

hannah.models.honk.model

truncated_normal

def truncated_normal(tensor, std_dev=0.01)

Arguments:

- tensor:
- std_dev - (Default value = 0.01)

SpeechResModel Objects

class SpeechResModel(nn.Module)

forward

def forward(x)

Arguments:

- x:

SpeechModel Objects

class SpeechModel(nn.Module)

forward

def forward(x)

Arguments:

- x:

hannah.logo

hannah.test_linear_classifier

hannah.tools.objectdetection_eval

eval_train

def eval_train(config, module, test=True)

Arguments:

- config:
- module:
- test - (Default value = True)

eval_steps

def eval_steps(config, module, hparams, checkpoint)

Arguments:

- config:
- module:
- hparams:
- checkpoint:

eval_checkpoint

def eval_checkpoint(config: DictConfig, checkpoint)

Arguments:

- config - DictConfig:
- checkpoint:

eval

def eval(config: DictConfig)

Arguments:

- config - DictConfig:

main

@hydra.main(config_name="objectdetection_eval",
            config_path="../conf",
            version_base="1.2")
def main(config: DictConfig)

Arguments:

- config - DictConfig:

hannah.tools.characterize

main

@hydra.main(config_name="characterize",
            config_path="../conf",
            version_base="1.2")
def main(config: DictConfig)

Arguments:

- config - DictConfig:

hannah.models.functional_net_test.expressions

padding_expression

def padding_expression(kernel_size, stride, dilation=1)

Symbolically calculate padding such that for a given kernel_size, stride and dilation
the padding is such that the output dimension is kept the same (stride=1) or halved (stride=2).
Note: If the input dimension is 1 and stride = 2, the calculated padding will result in
an output with also dimension 1.

Parameters

kernel_size : Union[int, Expression]
stride : Union[int, Expression]
dilation : Union[int, Expression], optional
    description, by default 1

Returns

Expression
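As a quick numeric illustration of the rule above (a minimal sketch with plain integers; the real helper returns a symbolic Expression, and the closed form below is an assumption based on the usual "same"/"halve" padding formula):

```python
# Hypothetical integer-only version of padding_expression, for illustration.
# Assumes the standard closed form: pad = ((kernel_size - 1) * dilation) // 2.
def padding_int(kernel_size: int, stride: int, dilation: int = 1) -> int:
    return ((kernel_size - 1) * dilation) // 2

# kernel_size=3, stride=1, dilation=1 -> pad=1, output length stays n
# kernel_size=3, stride=2, dilation=1 -> pad=1, output length becomes ceil(n / 2)
print(padding_int(3, 1), padding_int(3, 2), padding_int(5, 1, dilation=2))  # 1 1 4
```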

hannah.models.functional_net_test.models

hannah.models.vad

hannah.models.vad.models

BottleneckVad Objects

class BottleneckVad(nn.Module)

forward

def forward(x)

Arguments:

- x:

num_flat_features

def num_flat_features(x)

Arguments:

- x:

SmallVad Objects

class SmallVad(nn.Module)

forward

def forward(x)

Arguments:

- x:

num_flat_features

def num_flat_features(x)

Arguments:

- x:

SimpleVad Objects

class SimpleVad(nn.Module)

forward

def forward(x)

Arguments:

- x:

num_flat_features

def num_flat_features(x)

Arguments:

- x:

BottleneckVadModel Objects

class BottleneckVadModel(nn.Module)

forward

def forward(x)

Arguments:

- x:

SimpleVadModel Objects

class SimpleVadModel(nn.Module)

forward

def forward(x)

Arguments:

- x:

SmallVadModel Objects

class SmallVadModel(nn.Module)

forward

def forward(x)

Arguments:

- x:

hannah.tools.train

hannah.tools

hannah.tools.eval

eval_checkpoint

def eval_checkpoint(config: DictConfig, checkpoint) -> None

Arguments:

- config - DictConfig:
- checkpoint:

eval

def eval(config: DictConfig) -> Optional[bool]

Arguments:

- config - DictConfig:

main

@hydra.main(config_name="eval", config_path="conf", version_base="1.2")
def main(config: DictConfig)

Arguments:

- config - DictConfig:

hannah.utils.utils

log_execution_env_state

def log_execution_env_state() -> None

Log information about the execution environment.

git_version

def git_version(short=True)

Return the current git sha

Arguments:

- short bool - If True, return the short (7 character) version of the SHA

Returns:

- str - The current git SHA

fullname

def fullname(o) -> Any

Get the full classname of an object including surrounding packages/modules/namespaces

extract_from_download_cache

def extract_from_download_cache(filename,
                                url,
                                cached_files,
                                target_cache,
                                target_folder,
                                target_test_folder="",
                                clear_download=False,
                                no_exist_check=False) -> None

Extracts the given file from the cache, or downloads it first from url

Arguments:

- filename str - name of the file to download or extract
- url str - possible url to download the file
- cached_files (list(str)) - cached files in download cache
- target_cache str - path to the folder to cache file if download necessary
- target_folder str - path where to extract file
- target_test_folder str, optional - folder to check if data are already there
- clear_download bool - clear download after usage
- no_exist_check bool - disables the check if folder exists
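A hypothetical call, to make the parameters above concrete (URL and paths are placeholders, not a real dataset):

```python
# Sketch only: fetch-and-extract a dataset archive through the download cache.
extract_from_download_cache(
    filename="speech_commands_v0.02.tar.gz",                  # archive to fetch/extract
    url="https://example.org/speech_commands_v0.02.tar.gz",   # placeholder URL
    cached_files=["other_dataset.tar.gz"],                    # archives already cached
    target_cache="/data/cache",                               # where downloads are cached
    target_folder="/data/speech_commands",                    # where the archive is extracted
    target_test_folder="/data/speech_commands/testing",       # skip work if already present
    clear_download=True,                                      # delete the archive afterwards
)
```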

set_deterministic

@contextmanager
def set_deterministic(mode, warn_only=False)

A contextmanager to set deterministic algorithms
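A short usage sketch (assuming the context manager forwards `mode` to torch's deterministic-algorithms switch, which is an assumption about the implementation):

```python
# Sketch: deterministic kernels inside the block, previous setting restored after.
with set_deterministic(True, warn_only=True):
    y = model(x)  # `model` and `x` stand in for any module and input batch
```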

hannah.utils

hannah.utils.imports

lazy_import

def lazy_import(module_name, callback=None)

Returns a proxy module object that will lazily import the given module the first time it is used.

Example usage::

    # Lazy version of `import tensorflow as tf`
    tf = lazy_import("tensorflow")

    # Other commands

    # Now the module is loaded
    tf.__version__

Arguments:

- module_name - the fully-qualified module name to import
- callback None - a callback function to call before importing the module

Returns:

a proxy module object that will be lazily imported when first used

LazyModule Objects

class LazyModule(types.ModuleType)

Proxy module that lazily imports the underlying module the first time it is actually used.

Arguments:

- module_name - the fully-qualified module name to import
- callback None - a callback function to call before importing the module

hannah.utils.tuple

hannah.utils.logger

JSONLogger Objects

class JSONLogger(Logger)

name

@property
def name() -> str

Gets the name of the experiment.

Returns:

The name of the experiment.

version

@property
def version() -> Union[int, str]

Gets the version of the experiment.

Returns:

The version of the experiment if it is specified, else the next version.

root_dir

@property
def root_dir() -> str

Gets the save directory where the versioned JSON experiments are saved.

log_dir

@property
def log_dir() -> str

The log directory for this run.

By default, it is named 'version_${self.version}' but it can be overridden by passing a string value for the
constructor's version parameter instead of None or an int.

experiment

@property
@rank_zero_experiment
def experiment() -> "_ExperimentWriter"

Actual ExperimentWriter object. To use ExperimentWriter features anywhere in your code, do the following.

Example::

    self.logger.experiment.some_experiment_writer_function()

_ExperimentWriter Objects

class _ExperimentWriter()

Experiment writer for CSVLogger.

Arguments:

- log_dir - Directory for the experiment logs

log_metrics

def log_metrics(metrics_dict: Dict[str, float],
                step: Optional[int] = None) -> None

Record metrics.

save

def save() -> None

Save recorded metrics into files.

hannah.models

hannah.models.simple1d

hannah.models.kakao_resnet

hannah.models.lstm

hannah.models.lstm.models

LSTMModel Objects

class LSTMModel(nn.Module)

Simple LSTM model.

forward

def forward(x)

Arguments:

- x:

hannah.models._vendor

hannah.models._vendor.focalnet

Mlp Objects

class Mlp(nn.Module)

forward

def forward(x)

Arguments:

- x:

FocalModulation Objects

class FocalModulation(nn.Module)

forward

def forward(x)

Arguments:

- x - input features with shape of (B, H, W, C)

extra_repr

def extra_repr() -> str

flops

def flops(N)

calculate flops for 1 window with token length of N

Arguments:

- N:

FocalNetBlock Objects

class FocalNetBlock(nn.Module)

Focal Modulation Network Block.

Arguments:

- dim(int) - Number of input channels.
- input_resolution(tuple[int]) - Input resolution.
- mlp_ratio(float) - Ratio of mlp hidden dim to embedding dim.
- drop(float) - Dropout rate. Default: 0.0
- drop_path(float) - Stochastic depth rate. Default: 0.0
- act_layer(nn.Module) - Activation layer. Default: nn.GELU
- norm_layer(nn.Module) - Normalization layer. Default: nn.LayerNorm
- focal_level(int) - Number of focal levels.
- focal_window(int) - Focal window size at first focal level
- use_layerscale(bool) - Whether to use layerscale
- layerscale_value(float) - Initial layerscale value
- use_postln(bool) - Whether to use layernorm after modulation

forward

def forward(x)

Arguments:

- x:

extra_repr

def extra_repr() -> str

flops

def flops()

BasicLayer Objects

class BasicLayer(nn.Module)

A basic Focal Transformer layer for one stage.

Arguments:

- dim(int) - Number of input channels.
- input_resolution(tuple[int]) - Input resolution.
- depth(int) - Number of blocks.
- window_size(int) - Local window size.
- mlp_ratio(float) - Ratio of mlp hidden dim to embedding dim.
- qkv_bias(bool) - If True, add a learnable bias to query, key, value. Default: True
- qk_scale(float | None) - Override default qk scale of head_dim ** -0.5 if set.
- drop(float) - Dropout rate. Default: 0.0
- drop_path(float | tuple[float]) - Stochastic depth rate. Default: 0.0
- norm_layer(nn.Module) - Normalization layer. Default: nn.LayerNorm
- downsample(nn.Module | None) - Downsample layer at the end of the layer. Default: None
- use_checkpoint(bool) - Whether to use checkpointing to save memory. Default: False.
- focal_level(int) - Number of focal levels
- focal_window(int) - Focal window size at first focal level
- use_layerscale(bool) - Whether to use layerscale
- layerscale_value(float) - Initial layerscale value
- use_postln(bool) - Whether to use layernorm after modulation

forward

def forward(x, H, W)

Arguments:

- x:
- H:
- W:

extra_repr

def extra_repr() -> str

flops

def flops()

PatchEmbed Objects

class PatchEmbed(nn.Module)

Image to Patch Embedding

Arguments:

- img_size(int) - Image size. Default: 224.
- patch_size(int) - Patch token size. Default: 4.
- in_chans(int) - Number of input image channels. Default: 3.
- embed_dim(int) - Number of linear projection output channels. Default: 96.
- norm_layer(nn.Module) - Normalization layer. Default: None

forward

def forward(x)

Arguments:

- x:

flops

def flops()

FocalNet Objects

class FocalNet(nn.Module)

Focal Modulation Networks (FocalNets)

Arguments:

- img_size(int | tuple(int)) - Input image size. Default: 224
- patch_size(int | tuple(int)) - Patch size. Default: 4
- in_chans(int) - Number of input image channels. Default: 3
- num_classes(int) - Number of classes for classification head. Default: 1000
- embed_dim(int) - Patch embedding dimension. Default: 96
- depths(tuple(int)) - Depth of each Focal Transformer layer.
- mlp_ratio(float) - Ratio of mlp hidden dim to embedding dim. Default: 4
- drop_rate(float) - Dropout rate. Default: 0
- drop_path_rate(float) - Stochastic depth rate. Default: 0.1
- norm_layer(nn.Module) - Normalization layer. Default: nn.LayerNorm.
- patch_norm(bool) - If True, add normalization after patch embedding. Default: True
- use_checkpoint(bool) - Whether to use checkpointing to save memory. Default: False
- focal_levels(list) - How many focal levels at all stages. Note that this excludes the finest-grain level. Default: [1, 1, 1, 1]
- focal_windows(list) - The focal window size at all stages. Default: [7, 5, 3, 1]
- use_conv_embed(bool) - Whether to use convolutional embedding. We noted that using convolutional embedding usually improves the performance, but we do not use it by default. Default: False
- use_layerscale(bool) - Whether to use layerscale proposed in CaiT. Default: False
- layerscale_value(float) - Value for layer scale. Default: 1e-4
- use_postln(bool) - Whether to use layernorm after modulation (it helps stabilize training of large models)

no_weight_decay

@torch.jit.ignore
def no_weight_decay()

no_weight_decay_keywords

@torch.jit.ignore
def no_weight_decay_keywords()

forward_features

def forward_features(x)

Arguments:

- x:

forward

def forward(x)

Arguments:

- x:

flops

def flops()

build_transforms

def build_transforms(img_size, center_crop=False)

Arguments:

- img_size:
- center_crop - (Default value = False)

build_transforms4display

def build_transforms4display(img_size, center_crop=False)

Arguments:

- img_size:
- center_crop - (Default value = False)

focalnet_tiny_srf

@register_model
def focalnet_tiny_srf(pretrained=False, **kwargs)

Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_small_srf

@register_model
def focalnet_small_srf(pretrained=False, **kwargs)

Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_base_srf

@register_model
def focalnet_base_srf(pretrained=False, **kwargs)

Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_tiny_lrf

@register_model
def focalnet_tiny_lrf(pretrained=False, **kwargs)

Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_small_lrf

@register_model
def focalnet_small_lrf(pretrained=False, **kwargs)

Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_base_lrf

@register_model
def focalnet_base_lrf(pretrained=False, **kwargs)

Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_tiny_iso_16

@register_model
def focalnet_tiny_iso_16(pretrained=False, **kwargs)

Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_small_iso_16

@register_model
def focalnet_small_iso_16(pretrained=False, **kwargs)

Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_base_iso_16

@register_model
def focalnet_base_iso_16(pretrained=False, **kwargs)

Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_large_fl3

@register_model
def focalnet_large_fl3(pretrained=False, **kwargs)

Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_large_fl4

@register_model
def focalnet_large_fl4(pretrained=False, **kwargs)

Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_xlarge_fl3

@register_model
def focalnet_xlarge_fl3(pretrained=False, **kwargs)

Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_xlarge_fl4

@register_model
def focalnet_xlarge_fl4(pretrained=False, **kwargs)

Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_huge_fl3

@register_model
def focalnet_huge_fl3(pretrained=False, **kwargs)

Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_huge_fl4

@register_model
def focalnet_huge_fl4(pretrained=False, **kwargs)

Arguments:

- pretrained - (Default value = False)
- **kwargs:

hannah.utils.dvclive

hannah.modules.metrics

Error Objects

class Error()

Computes Error = 1 - Accuracy_

.. math::

    \text{Error} = 1 - \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)

Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.

This module is a simple wrapper to get the task specific versions of this metric, which is done by setting the
task argument to either 'binary', 'multiclass' or 'multilabel'. See the documentation of
:mod:`BinaryError`, :mod:`MulticlassError` and :mod:`MultilabelError` for the specific details of
each argument influence and examples.

plot_confusion_matrix

def plot_confusion_matrix(cf,
                          group_names=None,
                          categories="auto",
                          count=True,
                          percent=True,
                          cbar=True,
                          xyticks=True,
                          xyplotlabels=True,
                          sum_stats=True,
                          figsize=None,
                          cmap="Blues",
                          title=None)

This function will make a pretty plot of an sklearn Confusion Matrix cm using a Seaborn heatmap visualization.

Arguments

cf: confusion matrix to be passed in

group_names: List of strings that represent the labels row by row to be shown in each square.

categories: List of strings containing the categories to be displayed on the x,y axis. Default is 'auto'

count: If True, show the raw number in the confusion matrix. Default is True.

percent: If True, show the proportions for each category. Default is True.

cbar: If True, show the color bar. The cbar values are based off the values in the confusion matrix.
      Default is True.

xyticks: If True, show x and y ticks. Default is True.

xyplotlabels: If True, show 'True Label' and 'Predicted Label' on the figure. Default is True.

sum_stats: If True, display summary statistics below the figure. Default is True.

figsize: Tuple representing the figure size. Default will be the matplotlib rcParams value.

cmap: Colormap of the values displayed from matplotlib.pyplot.cm. Default is 'Blues'
      See http://matplotlib.org/examples/color/colormaps_reference.html

title: Title for the heatmap. Default is None.
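A usage sketch for plot_confusion_matrix (the confusion matrix below is made up for illustration):

```python
import numpy as np

# 2x2 confusion matrix for a binary task: rows = true labels, cols = predictions.
cf = np.array([[50, 10],
               [5, 35]])

plot_confusion_matrix(
    cf,
    group_names=["TN", "FP", "FN", "TP"],  # one label per square, row by row
    categories=["negative", "positive"],   # axis tick labels
    cmap="Blues",
    title="VAD confusion matrix",          # hypothetical title
)
```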

hannah.modules.classifier

hannah.modules

hannah.modules.augmentation.bordersearch

hannah.modules.augmentation

hannah.modules.augmentation.transforms.kornia_transforms

hannah.modules.augmentation.transforms

hannah.modules.augmentation.transforms.registry

hannah.modules.augmentation.batch_augmentation

BatchAugmentationPipeline Objects

class BatchAugmentationPipeline(nn.Module)

__init__

def __init__(transforms={})

Augmentation pipeline especially for self supervised learning

Arguments:

- replica int - number of replicated different random augmentations
- transforms dict - configuration of transforms

forward

@torch.no_grad()
def forward(x) -> torch.Tensor

Perform Augmentations

Arguments:

- x torch.Tensor - a torch.Tensor representing the augmentation pipeline

Returns:

Tuple[torch.Tensor, torch.Tensor]; Batch augmented with replica different random augmentations
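A hedged usage sketch (the transform name and its parameters are placeholders; the real keys come from the kornia transform registry above):

```python
import torch

# Hypothetical transform configuration; valid keys/parameters depend on the registry.
transforms = {
    "RandomGaussianNoise": {"mean": 0.0, "std": 0.05},
}

pipeline = BatchAugmentationPipeline(transforms=transforms)

batch = torch.randn(8, 1, 40, 101)  # (N, C, H, W) spectrogram-like batch
augmented = pipeline(batch)         # randomly augmented copies of the batch
```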

hannah.modules.augmentation.augmentation

hannah.modules.config_utils

dump_config

def dump_config(output_dir, config)

Dumps the configuration to json format

Creates file config.json in output_dir

Parameters

output_dir : str
    Output directory
config : dict
    Configuration to dump

save_model

def save_model(output_dir, model)

Creates serialization of the model for later inference, evaluation

Creates the following files:

- model.pt: Serialized version of network parameters in pytorch
- model.json: Serialized version of network parameters in json format
- model.onnx: full model including parameters in onnx format

Parameters

output_dir : str
    Directory to put serialized models
model : LightningModule
    Model to serialize

hannah.modules.base

ClassifierModule Objects

class ClassifierModule(LightningModule, ABC)

total_training_steps

def total_training_steps() -> int

Total training steps inferred from datamodule and devices.

hannah.modules.angle_classifier

hannah.modules.object_detection

hannah.modules.vision.anomaly_detection

AnomalyDetectionModule Objects

class AnomalyDetectionModule(VisionBaseModule)

on_test_end

def on_test_end()

    wd_dir = os.getcwd()
    score, largest_train_error = self.compute_anomaly_score()
    train_errors = self.normalized_train_errors
    plt.hist(train_errors.detach().cpu().numpy(), bins=100)
    plt.axvline(score, linestyle="dashed")
    plt.title("Normalized train reconstruction errors")
    plt.savefig(wd_dir + "/normalized_train_errors.png")

    test = (
        torch.tensor(self.test_losses, device=self.device)
        / torch.max(torch.stack(self.train_losses), dim=0).values
    )
    plt.hist(test.detach().cpu().numpy(), bins=100)
    plt.title("Normalized test reconstruction errors")
    plt.savefig(wd_dir + "/normalized_test_errors.png")
    print("Anomaly score", score)
    print(
        "Largest train error",
        torch.max(torch.stack(self.train_losses), dim=0).values,
    )

hannah.modules.vision.anomaly_score

    class AnomalyScore(CatMetric):
        def __init__(self, percentile, nan_strategy="warn", **kwargs):
            super().__init__(nan_strategy=nan_strategy, **kwargs)
            self.percentile = percentile

        def compute(self):
            anomaly_score = None
            train_losses = super().compute()
            if train_losses:
                normalized_train_errors = torch.stack(train_losses) / (
                    torch.max(torch.stack(train_losses), dim=0).values
                )
                anomaly_score = np.percentile(
                    normalized_train_errors.cpu().numpy(), self.percentile
                )
            return anomaly_score

hannah.modules.vision

hannah.modules.vision.loss

hannah.modules.vision.base

hannah.modules.vision.image_classifier

hannah.visualization

hannah.train

hannah.trainer.cross_validation

hannah.trainer

hannah.nn.quantized

hannah.nn.qat

Implementations of torch.nn.intrinsics qat with an optional
quantize bias parameter.

Qconfigs can support an optional bias quantization function which should be returned by
qconfig.bias() else biases will be quantized with qconfig.activation()

_ConvBnNd Objects

class _ConvBnNd(nn.modules.conv._ConvNd, _ConvForwardMixin)

train

def train(mode: bool = True) -> Any

Batchnorm's training behavior is using the self.training flag. Prevent
changing it if BN is frozen. This makes sure that calling model.train()
on a model with a frozen BN will behave properly.

from_float

@classmethod
def from_float(cls, mod)

Create a qat module from a float module or qparams_dict

Args: mod a float module, either produced by torch.quantization utilities
or directly from user

ConvBn1d Objects

class ConvBn1d(_ConvBnNd)

A ConvBn1d module is a module fused from Conv1d and BatchNorm1d,
attached with FakeQuantize modules for weight,
used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv1d` and
:class:`torch.nn.BatchNorm1d`.
Similar to :class:`torch.nn.Conv1d`, with FakeQuantize modules initialized
to default.

Attributes:

- freeze_bn:
- weight_fake_quant - fake quant module for weight

ConvBnReLU1d Objects

class ConvBnReLU1d(ConvBn1d)

A ConvBnReLU1d module is a module fused from Conv1d, BatchNorm1d and ReLU,
attached with FakeQuantize modules for weight,
used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv1d` and
:class:`torch.nn.BatchNorm1d` and :class:`torch.nn.ReLU`.
Similar to torch.nn.Conv1d, with FakeQuantize modules initialized to
default.

Attributes:

- weight_fake_quant - fake quant module for weight

ConvBn2d Objects

class ConvBn2d(_ConvBnNd)

A ConvBn2d module is a module fused from Conv2d and BatchNorm2d,
attached with FakeQuantize modules for weight,
used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv2d` and
:class:`torch.nn.BatchNorm2d`.
Similar to :class:`torch.nn.Conv2d`, with FakeQuantize modules initialized
to default.

Attributes:

- freeze_bn:
- weight_fake_quant - fake quant module for weight

ConvBnReLU2d Objects

class ConvBnReLU2d(ConvBn2d)

A ConvBnReLU2d module is a module fused from Conv2d, BatchNorm2d and ReLU,
attached with FakeQuantize modules for weight,
used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv2d` and
:class:`torch.nn.BatchNorm2d` and :class:`torch.nn.ReLU`.
Similar to torch.nn.Conv2d, with FakeQuantize modules initialized to
default.

Attributes:

- weight_fake_quant - fake quant module for weight

ConvReLU2d Objects

class ConvReLU2d(nn.Conv2d, _ConvForwardMixin)

A ConvReLU2d module is a fused module of Conv2d and ReLU, attached with
FakeQuantize modules for weight for
quantization aware training.
We combined the interface of :class:`~torch.nn.Conv2d` and
:class:`~torch.nn.BatchNorm2d`.

Attributes:

- weight_fake_quant - fake quant module for weight

ConvReLU1d Objects

class ConvReLU1d(nn.Conv1d, _ConvForwardMixin)

A ConvReLU1d module is a fused module of Conv1d and ReLU, attached with
FakeQuantize modules for quantization aware training

Conv1d Objects

class Conv1d(nn.Conv1d, _ConvForwardMixin)

A Conv1d module is a Conv1d module, attached with
FakeQuantize modules for weight for
quantization aware training.

Attributes:

- weight_fake_quant - fake quant module for weight
- bias_fake_quant - fake quant module for bias
- activation_post_process - fake_quant_module for activations

Conv2d Objects

class Conv2d(nn.Conv2d, _ConvForwardMixin)

A Conv2d module is a Conv2d module, attached with
FakeQuantize modules for weight for
quantization aware training.

Attributes:

- weight_fake_quant - fake quant module for weight
- bias_fake_quant - fake quant module for bias
- activation_post_process - fake_quant_module for activations

Linear Objects

class Linear(nn.Linear)

A linear module attached with FakeQuantize modules for weight,
used for quantization aware training.

We adopt the same interface as torch.nn.Linear, please see
https://pytorch.org/docs/stable/nn.html#torch.nn.Linear
for documentation.

Similar to torch.nn.Linear, with FakeQuantize modules initialized to
default.

Attributes:

- weight - fake quant module for weight

from_float

@classmethod
def from_float(cls, mod)

Create a qat module from a float module or qparams_dict

Args: mod a float module, either produced by torch.quantization utilities
or directly from user

LinearReLU Objects

class LinearReLU(nn.Linear)

A linear module attached with FakeQuantize modules and ReLU for weight,
used for quantization aware training.

We adopt the same interface as torch.nn.Linear, please see
https://pytorch.org/docs/stable/nn.html#torch.nn.Linear
for documentation.

Similar to torch.nn.Linear, with FakeQuantize modules initialized to
default.

Attributes:

- weight - fake quant module for weight

from_float

@classmethod
def from_float(cls, mod)

Create a qat module from a float module or qparams_dict

Args: mod a float module, either produced by torch.quantization utilities
or directly from user

Identity Objects

class Identity(nn.Identity)

An identity module attached with FakeQuantize modules for weight,
used for quantization aware training.

We adopt the same interface as torch.nn.Identity, please see
https://pytorch.org/docs/stable/nn.html#torch.nn.Identity
for documentation.

Similar to torch.nn.Identity, with FakeQuantize modules initialized to
default.

from_float

@classmethod
def from_float(cls, mod)

Create a qat module from a float module or qparams_dict

Args: mod a float module, either produced by torch.quantization utilities
or directly from user
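A hedged sketch of the from_float flow, following the standard torch QAT convention (whether hannah's qconfigs are built exactly like this is an assumption):

```python
import torch
import torch.nn as nn

# Float module to convert; the qconfig attribute tells from_float how to
# fake-quantize weights (and, per the note above, optionally biases via qconfig.bias()).
float_conv = nn.Conv1d(16, 32, kernel_size=3)
float_conv.qconfig = torch.quantization.get_default_qat_qconfig("fbgemm")

qat_conv = Conv1d.from_float(float_conv)  # qat module with FakeQuantize attached
```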

hannah.models._vendor.resnet_mc_dropout

PyTorch ResNet

This started as a copy of https://github.com/pytorch/vision 'resnet.py' (BSD-3-Clause) with
additional dropout and dynamic global avg/max pool.

ResNeXt, SE-ResNeXt, SENet, and MXNet Gluon stem/downsample variants, tiered stems added by Ross Wightman

Copyright 2019, Ross Wightman

ResNet Objects

class ResNet(nn.Module)

ResNet / ResNeXt / SE-ResNeXt / SE-Net

This class implements all variants of ResNet, ResNeXt, SE-ResNeXt, and SENet that
  * have > 1 stride in the 3x3 conv layer of bottleneck
  * have conv-bn-act ordering

This ResNet impl supports a number of stem and downsample options based on the v1c, v1d, v1e, and v1s
variants included in the MXNet Gluon ResNetV1b model. The C and D variants are also discussed in the
'Bag of Tricks' paper: https://arxiv.org/pdf/1812.01187. The B variant is equivalent to torchvision default.

ResNet variants (the same modifications can be used in SE/ResNeXt models as well):
  * normal, b - 7x7 stem, stem_width = 64, same as torchvision ResNet, NVIDIA ResNet 'v1.5', Gluon v1b
  * c - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64)
  * d - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64), average pool in downsample
  * e - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128), average pool in downsample
  * s - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128)
  * t - 3 layer deep 3x3 stem, stem width = 32 (24, 48, 64), average pool in downsample
  * tn - 3 layer deep 3x3 stem, stem width = 32 (24, 32, 64), average pool in downsample

ResNeXt
  * normal - 7x7 stem, stem_width = 64, standard cardinality and base widths
  * same c, d, e, s variants as ResNet can be enabled

SE-ResNeXt
  * normal - 7x7 stem, stem_width = 64
  * same c, d, e, s variants as ResNet can be enabled

SENet-154 - 3 layer deep 3x3 stem (same as v1c-v1s), stem_width = 64, cardinality=64,
reduction by 2 on width of first bottleneck convolution, 3x3 downsample convs after first block

__init__

def __init__(block,
             layers,
             num_classes=1000,
             in_chans=3,
             output_stride=32,
             global_pool="avg",
             cardinality=1,
             base_width=64,
             stem_width=64,
             stem_type="",
             replace_stem_pool=False,
             block_reduce_first=1,
             down_kernel_size=1,
             avg_down=False,
             act_layer=nn.ReLU,
             norm_layer=nn.BatchNorm2d,
             aa_layer=None,
             drop_rate=0.0,
             drop_path_rate=0.0,
             drop_block_rate=0.0,
             zero_init_last=True,
             block_args=None)

Arguments:

- block nn.Module - class for the residual block. Options are BasicBlock, Bottleneck.
- layers (List[int]) - number of layers in each block
- num_classes int - number of classification classes (default 1000)
- in_chans int - number of input (color) channels. (default 3)
- output_stride int - output stride of the network, 32, 16, or 8. (default 32)
- global_pool str - Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' (default 'avg')
- cardinality int - number of convolution groups for 3x3 conv in Bottleneck. (default 1)
- base_width int - bottleneck channels factor. planes * base_width / 64 * cardinality (default 64)
- stem_width int - number of channels in stem convolutions (default 64)
- stem_type str - The type of stem (default ''):
  - '', default - a single 7x7 conv with a width of stem_width
  - 'deep' - three 3x3 convolution layers of widths stem_width, stem_width, stem_width * 2
  - 'deep_tiered' - three 3x3 conv layers of widths stem_width//4 * 3, stem_width, stem_width * 2
- replace_stem_pool bool - replace stem max-pooling layer with a 3x3 stride-2 convolution
- block_reduce_first int - Reduction factor for first convolution output width of residual blocks,
  1 for all archs except senets, where 2 (default 1)
- down_kernel_size int - kernel size of residual block downsample path,
  1x1 for most, 3x3 for senets (default: 1)
- avg_down bool - use avg pooling for projection skip connection between stages/downsample (default False)
- act_layer str, nn.Module - activation layer
- norm_layer str, nn.Module - normalization layer
- aa_layer nn.Module - anti-aliasing layer
- drop_rate float - Dropout probability before classifier, for training (default 0.)
- drop_path_rate float - Stochastic depth drop-path rate (default 0.)
- drop_block_rate float - Drop block rate (default 0.)
- zero_init_last bool - zero-init the last weight in residual path (usually last BN affine weight)
- block_args dict - Extra kwargs to pass through to block module

resnet10t_mc_dropout

@register_model
def resnet10t_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-10-T model.

resnet14t_mc_dropout

@register_model
def resnet14t_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-14-T model.

resnet18_mc_dropout

@register_model
def resnet18_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-18 model.

resnet18d_mc_dropout

@register_model
def resnet18d_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-18-D model.

resnet34_mc_dropout

@register_model
def resnet34_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-34 model.

resnet34d_mc_dropout

@register_model
def resnet34d_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-34-D model.

resnet26_mc_dropout

@register_model
def resnet26_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-26 model.

resnet26t_mc_dropout

@register_model
def resnet26t_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-26-T model.

resnet26d_mc_dropout

@register_model
def resnet26d_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-26-D model.

resnet50_mc_dropout

@register_model
def resnet50_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-50 model.

resnet50d_mc_dropout

@register_model
def resnet50d_mc_dropout(pretrained=False, **kwargs) -> ResNet

Constructs a ResNet-50-D model.

resnet50t_mc_dropout

@register_model
def resnet50t_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-50-T model.

resnet101_mc_dropout

@register_model
def resnet101_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-101 model.

resnet101d_mc_dropout

@register_model
def resnet101d_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-101-D model.

resnet152_mc_dropout

@register_model
def resnet152_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-152 model.

resnet152d_mc_dropout

@register_model
def resnet152d_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-152-D model.

resnet200_mc_dropout

@register_model
def resnet200_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-200 model.

resnet200d_mc_dropout

@register_model
def resnet200d_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-200-D model.

tv_resnet34_mc_dropout

@register_model
def tv_resnet34_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-34 model with original Torchvision weights.

tv_resnet50_mc_dropout

@register_model
def tv_resnet50_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-50 model with original Torchvision weights.

tv_resnet101_mc_dropout

@register_model
def tv_resnet101_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-101 model w/ Torchvision pretrained weights.

tv_resnet152_mc_dropout

@register_model
def tv_resnet152_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-152 model w/ Torchvision pretrained weights.

wide_resnet50_2_mc_dropout

@register_model
def wide_resnet50_2_mc_dropout(pretrained=False, **kwargs)

Constructs a Wide ResNet-50-2 model.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.

wide_resnet101_2_mc_dropout

@register_model
def wide_resnet101_2_mc_dropout(pretrained=False, **kwargs)

Constructs a Wide ResNet-101-2 model.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same.

resnet50_gn_mc_dropout

@register_model
def resnet50_gn_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-50 model w/ GroupNorm

resnext50_32x4d_mc_dropout

@register_model
def resnext50_32x4d_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNeXt50-32x4d model.

resnext50d_32x4d_mc_dropout

@register_model
def resnext50d_32x4d_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNeXt50d-32x4d model. ResNext50 w/ deep stem & avg pool downsample

resnext101_32x4d_mc_dropout

@register_model
def resnext101_32x4d_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNeXt-101 32x4d model.

resnext101_32x8d_mc_dropout

@register_model
def resnext101_32x8d_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNeXt-101 32x8d model.

resnext101_64x4d_mc_dropout

@register_model
def resnext101_64x4d_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNeXt101-64x4d model.

tv_resnext50_32x4d_mc_dropout

@register_model
def tv_resnext50_32x4d_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNeXt50-32x4d model with original Torchvision weights.

ig_resnext101_32x8d_mc_dropout

@register_model
def ig_resnext101_32x8d_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNeXt-101 32x8 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>_
Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/

ig_resnext101_32x16d_mc_dropout

@register_model
def ig_resnext101_32x16d_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNeXt-101 32x16 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>_
Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/

ig_resnext101_32x32d_mc_dropout

@register_model
def ig_resnext101_32x32d_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNeXt-101 32x32 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>_
Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/

ig_resnext101_32x48d_mc_dropout

@register_model
def ig_resnext101_32x48d_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNeXt-101 32x48 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>_
Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/

ssl_resnet18_mc_dropout

@register_model
def ssl_resnet18_mc_dropout(pretrained=False, **kwargs)

Constructs a semi-supervised ResNet-18 model pre-trained on YFCC100M dataset and finetuned on ImageNet
"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/

ssl_resnet50_mc_dropout

@register_model
def ssl_resnet50_mc_dropout(pretrained=False, **kwargs)

Constructs a semi-supervised ResNet-50 model pre-trained on YFCC100M dataset and finetuned on ImageNet
"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/

ssl_resnext50_32x4d_mc_dropout

@register_model
def ssl_resnext50_32x4d_mc_dropout(pretrained=False, **kwargs)

Constructs a semi-supervised ResNeXt-50 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet
"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/

ssl_resnext101_32x4d_mc_dropout

@register_model
def ssl_resnext101_32x4d_mc_dropout(pretrained=False, **kwargs)

Constructs a semi-supervised ResNeXt-101 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet
"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/

ssl_resnext101_32x8d_mc_dropout

@register_model
def ssl_resnext101_32x8d_mc_dropout(pretrained=False, **kwargs)

Constructs a semi-supervised ResNeXt-101 32x8 model pre-trained on YFCC100M dataset and finetuned on ImageNet
"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
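The *_mc_dropout variants keep dropout active at inference time for Monte-Carlo uncertainty estimates. A hedged sketch of that inference pattern (standard MC dropout; the exact evaluation loop hannah uses may differ):

```python
import torch

model = resnet18_mc_dropout(pretrained=False, num_classes=10)
model.eval()
for m in model.modules():          # re-enable only the dropout layers
    if isinstance(m, torch.nn.Dropout):
        m.train()

x = torch.randn(4, 3, 224, 224)
with torch.no_grad():
    samples = torch.stack([model(x).softmax(-1) for _ in range(20)])
mean, var = samples.mean(0), samples.var(0)  # predictive mean and uncertainty
```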
hannah.ssl.hard_labeling

HardLabeling Objects

class HardLabeling()

training_step

def training_step(unlabeled_data: torch.Tensor,
                  trainer: pl.Trainer,
                  pl_module: pl.LightningModule,
                  batch_idx: int = -1) -> torch.Tensor

Calculate pseudo label loss from unlabeled data.

get_dropout_layers

def get_dropout_layers()

Returns all model layers of class dropout or dropblock.

compute_loss

def compute_loss(inputs, logits, targets, loss_fn=None)

Helper function to compute loss, possibly with consistency
regularization by augmentations (FixMatch).

negative_cre_loss

def negative_cre_loss(logits, targets)

Cross Entropy Loss for negative learning which requires a multi-
class and multi-label loss function.
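A hedged sketch of what a negative-learning cross entropy can look like (complementary labels mark classes a sample does *not* belong to; whether hannah's implementation matches this exact form is an assumption):

```python
import torch

def negative_cre_loss_sketch(logits: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    """logits: (N, C); targets: (N, C) multi-hot complementary labels."""
    probs = torch.softmax(logits, dim=-1)
    # Push probability away from the complementary classes: -log(1 - p_k).
    return -(targets * torch.log1p(-probs + 1e-7)).sum(-1).mean()
```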

hannah.ssl

hannah.models.convnet

hannah.models.convnet.models

padding_expression

def padding_expression(kernel_size, stride, dilation=1)

Symbolically calculate padding such that for a given kernel_size, stride and dilation
the padding is such that the output dimension is kept the same (stride=1) or halved (stride=2).
Note: If the input dimension is 1 and stride = 2, the calculated padding will result in
an output with also dimension 1.

Parameters

kernel_size : Union[int, Expression]
stride : Union[int, Expression]
dilation : Union[int, Expression], optional
    description, by default 1

Returns

Expression

hannah.models.functional_net_test.expressions

padding_expression

def padding_expression(kernel_size, stride, dilation=1)

Symbolically calculate padding such that for a given kernel_size, stride and dilation
the padding is such that the output dimension is kept the same (stride=1) or halved (stride=2).
Note: If the input dimension is 1 and stride = 2, the calculated padding will result in
an output with also dimension 1.

Parameters

kernel_size : Union[int, Expression]
stride : Union[int, Expression]
dilation : Union[int, Expression], optional
    description, by default 1

Returns

Expression

hannah.models.functional_net_test.models

hannah.models.ai8x

hannah.models.ai8x.models

A search space based on the cifar 10 NASNet search space for ai85x devices from: htt

hannah.models.mobilenet.operators

hannah.models.mobilenet.expressions

padding_expression

def padding_expression(kernel_size, stride, dilation=1)

Symbolically calculate padding such that for a given kernel_size, stride and dilation
the padding is such that the output dimension is kept the same (stride=1) or halved (stride=2).
Note: If the input dimension is 1 and stride = 2, the calculated padding will result in
an output with also dimension 1.

Parameters

kernel_size : Union[int, Expression]
stride : Union[int, Expression]
dilation : Union[int, Expression], optional
    description, by default 1

Returns

Expression

hannah.models.mobilenet.models

hannah.models.ekut

hannah.models.ekut.models

conv_bn

def conv_bn(inp, oup, stride)

Arguments:

- inp:
- oup:
- stride:

conv_1x1_bn

def conv_1x1_bn(inp, oup)

Arguments:

- inp:
- oup:

InvertedResidual Objects

class InvertedResidual(nn.Module)

forward

def forward(x)

Arguments:

- x:

RawSpeechModel Objects

class RawSpeechModel(nn.Module)

Speech Recognition on RAW Data using Wolfgang Fuhls Networks

forward

def forward(x)

Arguments:

- x:

RawSpeechModelInvertedResidual Objects

class RawSpeechModelInvertedResidual(nn.Module)

forward

def forward(x)

Arguments:

- x:

hannah.models.utils

next_power_of2

def next_power_of2(x)

Arguments:

- x:

hannah.models

hannah.models.factory.quantized

Import from new location for backwards compatibility

hannah.models.factory

hannah.models.factory.network

ConvNet Objects

class ConvNet(nn.Module)

forward

def forward(x)

Arguments:

- x:

hannah.models.factory.reduction

ReductionBlockAdd Objects

class ReductionBlockAdd(nn.Module)

Reduction block that sums over its inputs

forward

def forward(x: Tensor) -> Tensor

Arguments:

- x - Tensor:

ReductionBlockConcat Objects

class ReductionBlockConcat(nn.Module)

Reduction block that concatenates its inputs

forward

def forward(x: Tensor) -> Tensor

Arguments:

- x - Tensor:

hannah.models.factory.qat

Import from new location for backwards compatibility

hannah.models.factory.qconfig

Import from new location for backwards compatibility

hannah.models.factory.act

DummyActivation Objects

class DummyActivation(nn.Identity)

Dummy class that is instantiated to mark a missing activation.

This can be used to mark requantization of activations for convolutional layers without
activation functions.

hannah.models.factory.factory

A neural network model factory

It allows us to construct quantized and unquantized versions of the same network,
and to explore implementation alternatives using a common neural network construction
interface.

NormConfig Objects

@dataclass
class NormConfig()

BNConfig Objects

@dataclass
class BNConfig(NormConfig)

ActConfig Objects

@dataclass
class ActConfig()

ELUConfig Objects

@dataclass
class ELUConfig(ActConfig)

HardtanhConfig Objects

@dataclass
class HardtanhConfig(ActConfig)

MinorBlockConfig Objects

@dataclass
class MinorBlockConfig()

target

target Operation

parallel

execute block in parallel with preceding block

out_channels

number of output channels

kernel_size

kernel size of this Operation (if applicable)

stride

stride for this operation use

padding

use padding for this operation (padding will always try to keep input dimensions / stride)

dilation

dilation factor to use for this operation

groups

number of groups for this operation

norm

normalization to use (true uses networks default configs)

act

activation to use (true uses default configs)

upsampling

Upsampling factor for mbconv layers

bias

use bias for this operation

out_quant

use output quantization for this operation

MajorBlockConfig Objects

@dataclass
class MajorBlockConfig()

stride

Union[None, int, Tuple[int, ...], Tuple[int, ...]]

last

Indicates whether this block is the last reduction block

LinearConfig Objects

@dataclass
class LinearConfig()

norm

Union[bool, NormConfig]

act

Union[bool, ActConfig]

NetworkConfig Objects

@dataclass
class NetworkConfig()

NetworkFactory Objects

class NetworkFactory()

act

def act(config: ActConfig) -> nn.Module

Arguments:

- config - ActConfig:

conv2d

def conv2d(input_shape: Tuple[int, ...],
           out_channels: int,
           kernel_size: Union[int, Tuple[int, ...]],
           stride: Union[int, Tuple[int, ...]] = 1,
           padding: Union[int, Tuple[int, ...], bool] = True,
           dilation: Union[int, Tuple[int, ...]] = 0,
           groups: int = 1,
           norm: Union[BNConfig, bool] = False,
           act: Union[ActConfig, bool] = False,
           bias: bool = False) -> None

Arguments:

- input_shape - Tuple[int, ...]:
- out_channels - int:
- kernel_size - Union[int, Tuple[int, ...]]:
- stride - Union[int, Tuple[int, ...]]: (Default value = 1)
- padding - Union[int, Tuple[int, ...], bool]: (Default value = True)
- dilation - Union[int, Tuple[int, ...]]: (Default value = 0)
- groups - int: (Default value = 1)
- norm - Union[BNConfig, bool]: (Default value = False)
- act - Union[ActConfig, bool]: (Default value = False)
- bias - bool: (Default value = False)

mbconv1d

def mbconv1d(input_shape: Tuple[int, ...],
             out_channels: int,
             kernel_size: int,
             dilation: int = 1,
             stride: int = 1,
             padding: Union[int, bool] = True,
             bias=False,
             upsampling: float = 1.0,
             groups: int = 1,
             norm: Union[BNConfig, bool] = False,
             act: Union[ActConfig, bool] = False)

Arguments:

- input_shape - Tuple[int, ...]:
- out_channels - int:
- kernel_size - int:
- dilation - int: (Default value = 1)
- stride - int: (Default value = 1)
- padding - Union[int, bool]: (Default value = True)
- bias - (Default value = False)
- upsampling - float: (Default value = 1.0)
- groups - int: (Default value = 1)
- norm - Union[BNConfig, bool]: (Default value = False)
- act - Union[ActConfig, bool]: (Default value = False)

conv1d

def conv1d(input_shape: Tuple[int, ...],
           out_channels: int,
           kernel_size: int,
           stride: int = 1,
           bias: bool = False,
           padding: Union[int, bool] = True,
           dilation: int = 1,
           groups: int = 1,
           norm: Union[BNConfig, bool] = False,
           act: Union[ActConfig, bool] = False,
           out_quant: bool = True) -> Tuple[Tuple[int, ...], nn.Module]

Arguments:

- input_shape - Tuple[int, ...]:
- out_channels - int:
- kernel_size - int:
- stride - int: (Default value = 1)
- bias - bool: (Default value = False)
- padding - Union[int, bool]: (Default value = True)
- dilation - int: (Default value = 1)
- groups - int: (Default value = 1)
- norm - Union[BNConfig, bool]: (Default value = False)
- act - Union[ActConfig, bool]: (Default value = False)
- out_quant - bool: (Default value = True)

minor

def minor(input_shape, config: MinorBlockConfig, major_stride=None)

Arguments:

- input_shape:
- config - MinorBlockConfig:
- major_stride - (Default value = None)

forward

def forward(input_shape: Tuple[int, ...], config: MajorBlockConfig)

Create a forward neural network block without parallelism

If parallel is set to [True, False, True, False]

Input: ------->|---> parallel: False ---> parallel: False ---> | --> output

Arguments:

- input_shape - Tuple[int, ...]:
- config - MajorBlockConfig:

residual

def residual(input_shape: Tuple[int, ...], config: MajorBlockConfig)

Create a neural network block with residual parallelism

If parallel is set to [True, False, True, False]

               |---> parallel: True  ---> parallel: True  ---> |
    Input: --->|                                               +--->
               |---> parallel: False ---> parallel: False ---> |

If the major block does change the output dimensions compared to the input
and one of the branches does not contain any layers, we infer
1x1 conv of maximum group size (gcd(input_channels, output_channels)) to do the
downsampling.

Arguments:

- input_shape - Tuple[int, ...]:
- config - MajorBlockConfig:

input

def input(in_channels: int, config: MajorBlockConfig)

Create a neural network block with input parallelism

If parallel is set to [True, False, True, False]

               |---> parallel: True  ---> |
               |---> parallel: True  ---> + ------------------> |
    Input: --->|                                                +--->
               |---> parallel: False ---> parallel: False --->  |

If there are no parallel branches in the network, the major block is
a standard feed forward layer.

Arguments:

- in_channels - int:
- config - MajorBlockConfig:

full

def full(in_channels: int, config: MajorBlockConfig)

Create a neural network block with full parallelism

If parallel is set to [True, False, True, False]

               |---> parallel: True ------------------------------->|
    Input: --->|                                                    +--->
               |                            |--> parallel: False -->|
               |---> parallel: False ----> +
               |                            |--> parallel: True --->|

If there are no parallel blocks the block is a standard feed forward network.

Arguments:

- in_channels - int:
- config - MajorBlockConfig:

major

def major(input_shape, config: MajorBlockConfig)

Arguments:

- input_shape:
- config - MajorBlockConfig:

linear

def linear(input_shape, config: LinearConfig)

Arguments:

- input_shape:
- config - LinearConfig:

identity

def identity() -> Identity

network

def network(input_shape, labels: int, network_config: Union[ListConfig,
                                                            DictConfig])

Arguments:

- input_shape:
- labels - int:
- network_config - Union[ListConfig, DictConfig]:

create_cnn

def create_cnn(input_shape: Sequence[int],
               labels: int,
               name: str,
               conv: Optional[List[MajorBlockConfig]] = None,
               linear: Optional[List[LinearConfig]] = None,
               norm: Optional[NormConfig] = None,
               act: Optional[ActConfig] = None,
               qconfig: Any = None,
               dropout: float = 0.5)

Arguments:

- input_shape - Sequence[int]:
- labels - int:
- name - str:
- conv - Optional[List[MajorBlockConfig]]: (Default value = None)
- linear - Optional[List[LinearConfig]]: (Default value = None)
- norm - Optional[NormConfig]: (Default value = None)
- act - Optional[ActConfig]: (Default value = None)
- qconfig - Any: (Default value = None)
- dropout - float: (Default value = 0.5)
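A hedged construction sketch for create_cnn (the block configuration is a made-up minimal example; real configurations normally come from hydra config files):

```python
# Sketch: build a small unquantized CNN through the factory.
input_shape = (1, 40, 101)          # (batch, channels, time) - assumed layout
conv_blocks = [MajorBlockConfig()]  # default-constructed, illustration only
linear_blocks = [LinearConfig()]

model = create_cnn(
    input_shape=input_shape,
    labels=12,                      # number of output classes
    name="factory-demo",            # hypothetical model name
    conv=conv_blocks,
    linear=linear_blocks,
    dropout=0.5,
)
```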

+

hannah.models.factory.rounding

+

Import from new loacation for backwards compatibility

+

+

hannah.models.factory.pooling

+

+

ApproximateGlobalAveragePooling1D Objects

+
class ApproximateGlobalAveragePooling1D(nn.Module)
+
+ +

A global average pooling layer, that divides by the next power of 2 instead of true number of elements

forward

def forward(x)

Arguments:

x:

ApproximateGlobalAveragePooling2D Objects

class ApproximateGlobalAveragePooling2D(nn.Module)

A global average pooling layer that divides by the next power of 2 instead of the true number of elements.

forward

def forward(x)

Arguments:

x:

hannah.models.kakao_resnet

hannah.models.tc

hannah.models.tc.models

create_act

def create_act(act, clipping_value)

Arguments:

act:
clipping_value:

ApproximateGlobalAveragePooling1D Objects

class ApproximateGlobalAveragePooling1D(nn.Module)

forward

def forward(x)

Arguments:

x:

TCResidualBlock Objects

class TCResidualBlock(nn.Module)

forward

def forward(x)

Arguments:

x:

TCResNetModel Objects

class TCResNetModel(nn.Module)

forward

def forward(x)
 
Arguments:

x:

ExitWrapperBlock Objects

class ExitWrapperBlock(nn.Module)
 
forward

def forward(x)
 
Arguments:

x:

BranchyTCResNetModel Objects

class BranchyTCResNetModel(TCResNetModel)
 
on_val

def on_val()
 
on_val_end

def on_val_end()
 
on_test

def on_test()
 
on_test_end

def on_test_end()
 
reset_stats

def reset_stats()
 
print_stats

def print_stats()
 
forward

def forward(x)
 
Arguments:

x:

get_loss_function

def get_loss_function()
 
hannah.models.simple1d

hannah.models.sinc

hannah.models.sinc.models

GDSConv Objects

class GDSConv(nn.Module)
 
forward

def forward(x)
 
Arguments:

x:

GDSConvBlock Objects

class GDSConvBlock(nn.Module)
 
forward

def forward(x)
 
Arguments:

x:

SincNet Objects

class SincNet(nn.Module)
 
forward

def forward(x)
 
Arguments:

x:

hannah.models.resnet.operators

hannah.models.resnet

hannah.models.resnet.expressions

padding_expression

def padding_expression(kernel_size, stride, dilation=1)
 
Symbolically calculate padding such that, for a given kernel_size, stride, and dilation,
the output dimension is kept the same (stride=1) or halved (stride=2).
Note: If the input dimension is 1 and stride = 2, the calculated padding will result in
an output that also has dimension 1.

Parameters

kernel_size : Union[int, Expression]
stride : Union[int, Expression]
dilation : Union[int, Expression], optional
    description, by default 1

Returns

Expression
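For the plain integer case, the rule above reduces to padding = dilation * (kernel_size - 1) // 2; the small check below is illustrative and uses the standard convolution output-size formula.

```python
def int_padding(kernel_size: int, stride: int, dilation: int = 1) -> int:
    # Keeps out_dim == in_dim for stride 1 and roughly halves it for
    # stride 2, matching the rule described above (integer case only).
    return dilation * (kernel_size - 1) // 2


def out_dim(in_dim, kernel, stride, pad, dilation=1):
    # Standard convolution output-size formula.
    return (in_dim + 2 * pad - dilation * (kernel - 1) - 1) // stride + 1


assert out_dim(101, kernel=3, stride=1, pad=int_padding(3, 1)) == 101
assert out_dim(101, kernel=3, stride=2, pad=int_padding(3, 2)) == 51
assert out_dim(1, kernel=3, stride=2, pad=int_padding(3, 2)) == 1  # edge case
```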

hannah.models.resnet.models_lazy

padding_expression

def padding_expression(kernel_size, stride, dilation=1)
 
Symbolically calculate padding such that, for a given kernel_size, stride, and dilation,
the output dimension is kept the same (stride=1) or halved (stride=2).
Note: If the input dimension is 1 and stride = 2, the calculated padding will result in
an output that also has dimension 1.

Parameters

kernel_size : Union[int, Expression]
stride : Union[int, Expression]
dilation : Union[int, Expression], optional
    description, by default 1

Returns

Expression

hannah.models.resnet.models

hannah.models.resnet.blocks

hannah.models.embedded_vision_net.utils

hannah.models.embedded_vision_net.operators

hannah.models.embedded_vision_net.expressions

hannah.models.embedded_vision_net.parameters

hannah.models.embedded_vision_net.models

hannah.models.embedded_vision_net.blocks

hannah.models.conv_vit.operators

hannah.models.conv_vit.models

hannah.models.conv_vit.blocks

hannah.models.conv_vit.attention

hannah.models._vendor

hannah.models._vendor.focalnet

Mlp Objects

class Mlp(nn.Module)
 
forward

def forward(x)
 
Arguments:

x:

FocalModulation Objects

class FocalModulation(nn.Module)
 
forward

def forward(x)
 
Arguments:

- x - input features with shape (B, H, W, C)

extra_repr

def extra_repr() -> str
 
flops

def flops(N)
 
Calculate FLOPs for one window with a token length of N.

Arguments:

N:

FocalNetBlock Objects

class FocalNetBlock(nn.Module)
 
Focal Modulation Network Block.

Arguments:

- dim(int) - Number of input channels.
- input_resolution(tuple[int]) - Input resolution.
- mlp_ratio(float) - Ratio of mlp hidden dim to embedding dim.
- drop(float) - Dropout rate. Default: 0.0
- drop_path(float) - Stochastic depth rate. Default: 0.0
- act_layer(nn.Module) - Activation layer. Default: nn.GELU
- norm_layer(nn.Module) - Normalization layer. Default: nn.LayerNorm
- focal_level(int) - Number of focal levels.
- focal_window(int) - Focal window size at first focal level
- use_layerscale(bool) - Whether to use layerscale
- layerscale_value(float) - Initial layerscale value
- use_postln(bool) - Whether to use layernorm after modulation

forward

def forward(x)
 
Arguments:

x:

extra_repr

def extra_repr() -> str
 
flops

def flops()
 
BasicLayer Objects

class BasicLayer(nn.Module)
 
A basic Focal Transformer layer for one stage.

Arguments:

- dim(int) - Number of input channels.
- input_resolution(tuple[int]) - Input resolution.
- depth(int) - Number of blocks.
- window_size(int) - Local window size.
- mlp_ratio(float) - Ratio of mlp hidden dim to embedding dim.
- qkv_bias(bool) - If True, add a learnable bias to query, key, value. Default: True
- qk_scale(float | None) - Override default qk scale of head_dim ** -0.5 if set.
- drop(float) - Dropout rate. Default: 0.0
- drop_path(float | tuple[float]) - Stochastic depth rate. Default: 0.0
- norm_layer(nn.Module) - Normalization layer. Default: nn.LayerNorm
- downsample(nn.Module | None) - Downsample layer at the end of the layer. Default: None
- use_checkpoint(bool) - Whether to use checkpointing to save memory. Default: False.
- focal_level(int) - Number of focal levels
- focal_window(int) - Focal window size at first focal level
- use_layerscale(bool) - Whether to use layerscale
- layerscale_value(float) - Initial layerscale value
- use_postln(bool) - Whether to use layernorm after modulation

forward

def forward(x, H, W)
 
Arguments:

x:
H:
W:

extra_repr

def extra_repr() -> str
 
flops

def flops()
 
+

+

PatchEmbed Objects

+
class PatchEmbed(nn.Module)
 
Image to Patch Embedding

Arguments:

- img_size(int) - Image size. Default: 224.
- patch_size(int) - Patch token size. Default: 4.
- in_chans(int) - Number of input image channels. Default: 3.
- embed_dim(int) - Number of linear projection output channels. Default: 96.
- norm_layer(nn.Module) - Normalization layer. Default: None
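As a sketch of what patch embedding computes (not the vendored implementation itself): a convolution with kernel size and stride equal to patch_size maps an image to a grid of patch tokens.

```python
import torch
import torch.nn as nn

# Illustrative patch embedding: a conv with kernel == stride == patch_size
# turns (B, 3, 224, 224) into (224/4) * (224/4) = 56 * 56 patch tokens.
patch_size, in_chans, embed_dim = 4, 3, 96
proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

x = torch.randn(2, 3, 224, 224)
tokens = proj(x).flatten(2).transpose(1, 2)  # (B, 56*56, 96)
print(tokens.shape)  # torch.Size([2, 3136, 96])
```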

forward

def forward(x)
 
Arguments:

x:

flops

def flops()
 
FocalNet Objects

class FocalNet(nn.Module)
 
Focal Modulation Networks (FocalNets)

Arguments:

- img_size(int | tuple(int)) - Input image size. Default: 224
- patch_size(int | tuple(int)) - Patch size. Default: 4
- in_chans(int) - Number of input image channels. Default: 3
- num_classes(int) - Number of classes for classification head. Default: 1000
- embed_dim(int) - Patch embedding dimension. Default: 96
- depths(tuple(int)) - Depth of each Focal Transformer layer.
- mlp_ratio(float) - Ratio of mlp hidden dim to embedding dim. Default: 4
- drop_rate(float) - Dropout rate. Default: 0
- drop_path_rate(float) - Stochastic depth rate. Default: 0.1
- norm_layer(nn.Module) - Normalization layer. Default: nn.LayerNorm.
- patch_norm(bool) - If True, add normalization after patch embedding. Default: True
- use_checkpoint(bool) - Whether to use checkpointing to save memory. Default: False
- focal_levels(list) - How many focal levels at all stages. Note that this excludes the finest-grain level. Default: [1, 1, 1, 1]
- focal_windows(list) - The focal window size at all stages. Default: [7, 5, 3, 1]
- use_conv_embed(bool) - Whether to use convolutional embedding. We noted that using convolutional embedding usually improves the performance, but we do not use it by default. Default: False
- use_layerscale(bool) - Whether to use layerscale proposed in CaiT. Default: False
- layerscale_value(float) - Value for layer scale. Default: 1e-4
- use_postln(bool) - Whether to use layernorm after modulation (it helps stabilize training of large models)

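Given the arguments above, a small configuration could be instantiated as sketched below; the chosen values are illustrative and do not correspond to one of the registered presets.

```python
# Sketch: instantiate the vendored FocalNet with an illustrative config.
from hannah.models._vendor.focalnet import FocalNet

model = FocalNet(
    img_size=224,
    patch_size=4,
    embed_dim=96,
    depths=(2, 2, 6, 2),          # example stage depths
    focal_levels=[2, 2, 2, 2],    # example focal levels per stage
    focal_windows=[3, 3, 3, 3],   # example focal window sizes
    num_classes=1000,
)
```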
no_weight_decay

@torch.jit.ignore
def no_weight_decay()
 
no_weight_decay_keywords

@torch.jit.ignore
def no_weight_decay_keywords()
 
forward_features

def forward_features(x)
 
Arguments:

x:

forward

def forward(x)
 
Arguments:

x:

flops

def flops()
 
build_transforms

def build_transforms(img_size, center_crop=False)
 
Arguments:

- img_size:
- center_crop - (Default value = False)

build_transforms4display

def build_transforms4display(img_size, center_crop=False)
 
Arguments:

- img_size:
- center_crop - (Default value = False)
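For orientation, a plausible shape for such a transform builder is sketched below using torchvision; the actual resize policy and normalization constants of the vendored code may differ.

```python
from torchvision import transforms


def build_transforms_sketch(img_size: int, center_crop: bool = False):
    # Illustrative only: resize (optionally center-crop) and normalize
    # with ImageNet statistics, a common default for ImageNet backbones.
    ops = [transforms.Resize(img_size)]
    if center_crop:
        ops.append(transforms.CenterCrop(img_size))
    ops += [
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ]
    return transforms.Compose(ops)
```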

focalnet_tiny_srf

@register_model
def focalnet_tiny_srf(pretrained=False, **kwargs)
 

Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_small_srf

@register_model

def focalnet_small_srf(pretrained=False, **kwargs)
 
Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_base_srf

@register_model
def focalnet_base_srf(pretrained=False, **kwargs)
 

Arguments:

- pretrained - (Default value = False)
- **kwargs:
focalnet_tiny_lrf

@register_model
def focalnet_tiny_lrf(pretrained=False, **kwargs)
 
Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_small_lrf

@register_model
def focalnet_small_lrf(pretrained=False, **kwargs)
 

Arguments:

- pretrained - (Default value = False)
- **kwargs:
focalnet_base_lrf

@register_model
def focalnet_base_lrf(pretrained=False, **kwargs)
 
Arguments:

- pretrained - (Default value = False)
- **kwargs:
focalnet_tiny_iso_16

@register_model
def focalnet_tiny_iso_16(pretrained=False, **kwargs)
 

Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_small_iso_16

@register_model
def focalnet_small_iso_16(pretrained=False, **kwargs)
 
Arguments:

- pretrained - (Default value = False)
- **kwargs:

focalnet_base_iso_16

@register_model
def focalnet_base_iso_16(pretrained=False, **kwargs)
 

Arguments:

- pretrained - (Default value = False)
- **kwargs:
focalnet_large_fl3

@register_model
def focalnet_large_fl3(pretrained=False, **kwargs)
 

Arguments:

- pretrained - (Default value = False)
- **kwargs:
focalnet_large_fl4

@register_model
def focalnet_large_fl4(pretrained=False, **kwargs)
 

Arguments:

- pretrained - (Default value = False)
- **kwargs:
focalnet_xlarge_fl3

@register_model
def focalnet_xlarge_fl3(pretrained=False, **kwargs)
 

Arguments:

- pretrained - (Default value = False)
- **kwargs:
focalnet_xlarge_fl4

@register_model
def focalnet_xlarge_fl4(pretrained=False, **kwargs)
 
Arguments:

- pretrained - (Default value = False)
- **kwargs:
focalnet_huge_fl3

@register_model
def focalnet_huge_fl3(pretrained=False, **kwargs)
 

Arguments:

- pretrained - (Default value = False)
- **kwargs:
focalnet_huge_fl4

@register_model
def focalnet_huge_fl4(pretrained=False, **kwargs)
 
Arguments:

- pretrained - (Default value = False)
- **kwargs:
hannah.models._vendor.resnet_mc_dropout

PyTorch ResNet

This started as a copy of https://github.com/pytorch/vision 'resnet.py' (BSD-3-Clause) with
additional dropout and dynamic global avg/max pool.

ResNeXt, SE-ResNeXt, SENet, and MXNet Gluon stem/downsample variants, tiered stems added by Ross Wightman

Copyright 2019, Ross Wightman

ResNet Objects

class ResNet(nn.Module)
ResNet / ResNeXt / SE-ResNeXt / SE-Net

This class implements all variants of ResNet, ResNeXt, SE-ResNeXt, and SENet that
  * have > 1 stride in the 3x3 conv layer of bottleneck
  * have conv-bn-act ordering

This ResNet impl supports a number of stem and downsample options based on the v1c, v1d, v1e, and v1s
variants included in the MXNet Gluon ResNetV1b model. The C and D variants are also discussed in the
'Bag of Tricks' paper: https://arxiv.org/pdf/1812.01187. The B variant is equivalent to torchvision default.

ResNet variants (the same modifications can be used in SE/ResNeXt models as well):
  * normal, b - 7x7 stem, stem_width = 64, same as torchvision ResNet, NVIDIA ResNet 'v1.5', Gluon v1b
  * c - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64)
  * d - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64), average pool in downsample
  * e - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128), average pool in downsample
  * s - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128)
  * t - 3 layer deep 3x3 stem, stem width = 32 (24, 48, 64), average pool in downsample
  * tn - 3 layer deep 3x3 stem, stem width = 32 (24, 32, 64), average pool in downsample

ResNeXt
  * normal - 7x7 stem, stem_width = 64, standard cardinality and base widths
  * same c, d, e, s variants as ResNet can be enabled

SE-ResNeXt
  * normal - 7x7 stem, stem_width = 64
  * same c, d, e, s variants as ResNet can be enabled

SENet-154 - 3 layer deep 3x3 stem (same as v1c-v1s), stem_width = 64, cardinality = 64,
reduction by 2 on width of first bottleneck convolution, 3x3 downsample convs after first block

__init__

def __init__(block,
             layers,
             num_classes=1000,
             in_chans=3,
             output_stride=32,
             global_pool="avg",
             cardinality=1,
             base_width=64,
             stem_width=64,
             stem_type="",
             replace_stem_pool=False,
             block_reduce_first=1,
             down_kernel_size=1,
             avg_down=False,
             act_layer=nn.ReLU,
             norm_layer=nn.BatchNorm2d,
             aa_layer=None,
             drop_rate=0.0,
             drop_path_rate=0.0,
             drop_block_rate=0.0,
             zero_init_last=True,
             block_args=None)
 

Arguments:

- block (nn.Module) - class for the residual block. Options are BasicBlock, Bottleneck.
- layers (List[int]) - number of layers in each block
- num_classes (int) - number of classification classes (default 1000)
- in_chans (int) - number of input (color) channels. (default 3)
- output_stride (int) - output stride of the network, 32, 16, or 8. (default 32)
- global_pool (str) - Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' (default 'avg')
- cardinality (int) - number of convolution groups for 3x3 conv in Bottleneck. (default 1)
- base_width (int) - bottleneck channels factor. planes * base_width / 64 * cardinality (default 64)
- stem_width (int) - number of channels in stem convolutions (default 64)
- stem_type (str) - The type of stem (default ''):
  - '', default - a single 7x7 conv with a width of stem_width
  - 'deep' - three 3x3 convolution layers of widths stem_width, stem_width, stem_width * 2
  - 'deep_tiered' - three 3x3 conv layers of widths stem_width//4 * 3, stem_width, stem_width * 2
- replace_stem_pool (bool) - replace stem max-pooling layer with a 3x3 stride-2 convolution
- block_reduce_first (int) - Reduction factor for first convolution output width of residual blocks,
  1 for all archs except senets, where 2 (default 1)
- down_kernel_size (int) - kernel size of residual block downsample path,
  1x1 for most, 3x3 for senets (default: 1)
- avg_down (bool) - use avg pooling for projection skip connection between stages/downsample (default False)
- act_layer (str, nn.Module) - activation layer
- norm_layer (str, nn.Module) - normalization layer
- aa_layer (nn.Module) - anti-aliasing layer
- drop_rate (float) - Dropout probability before classifier, for training (default 0.)
- drop_path_rate (float) - Stochastic depth drop-path rate (default 0.)
- drop_block_rate (float) - Drop block rate (default 0.)
- zero_init_last (bool) - zero-init the last weight in residual path (usually last BN affine weight)
- block_args (dict) - Extra kwargs to pass through to block module
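Given the arguments documented above, a ResNet-18-shaped instantiation might look as follows; importing BasicBlock from the same module is an assumption based on the upstream code this file derives from.

```python
# Sketch: building a ResNet-18-shaped network from the vendored class.
# The BasicBlock import location is an assumption, not verified here.
from hannah.models._vendor.resnet_mc_dropout import BasicBlock, ResNet

model = ResNet(
    block=BasicBlock,
    layers=[2, 2, 2, 2],   # ResNet-18 layout
    num_classes=1000,
    stem_type="",          # single 7x7 stem ('b' variant)
    drop_rate=0.2,         # dropout before the classifier
)
```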

+

resnet10t_mc_dropout

+
@register_model
+def resnet10t_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-10-T model.

resnet14t_mc_dropout

@register_model
def resnet14t_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-14-T model.

resnet18_mc_dropout

@register_model
def resnet18_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-18 model.
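The _mc_dropout suffix refers to Monte Carlo dropout: dropout is kept active at inference time and several stochastic forward passes are averaged to estimate predictive uncertainty. A generic sketch of that procedure (not tied to this module's exact API):

```python
import torch


def mc_dropout_predict(model, x, num_samples=20):
    """Illustrative MC-dropout inference: dropout stays on, and the
    prediction is the mean over several stochastic forward passes."""
    model.eval()
    # Re-enable only the dropout layers after eval() switched them off.
    for m in model.modules():
        if isinstance(m, torch.nn.Dropout):
            m.train()
    with torch.no_grad():
        samples = torch.stack(
            [model(x).softmax(-1) for _ in range(num_samples)]
        )
    return samples.mean(0), samples.var(0)  # predictive mean and variance
```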

resnet18d_mc_dropout

@register_model
def resnet18d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-18-D model.

resnet34_mc_dropout

@register_model
def resnet34_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-34 model.

resnet34d_mc_dropout

@register_model
def resnet34d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-34-D model.

resnet26_mc_dropout

@register_model
def resnet26_mc_dropout(pretrained=False, **kwargs)

Constructs a ResNet-26 model.

resnet26t_mc_dropout

@register_model
def resnet26t_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-26-T model.

resnet26d_mc_dropout

@register_model
def resnet26d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-26-D model.

resnet50_mc_dropout

@register_model
def resnet50_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-50 model.

resnet50d_mc_dropout

@register_model
def resnet50d_mc_dropout(pretrained=False, **kwargs) -> ResNet
 
Constructs a ResNet-50-D model.

resnet50t_mc_dropout

@register_model
def resnet50t_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-50-T model.

resnet101_mc_dropout

@register_model
def resnet101_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-101 model.

resnet101d_mc_dropout

@register_model
def resnet101d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-101-D model.

resnet152_mc_dropout

@register_model
def resnet152_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-152 model.

resnet152d_mc_dropout

@register_model
def resnet152d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-152-D model.

resnet200_mc_dropout

@register_model
def resnet200_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-200 model.

resnet200d_mc_dropout

@register_model
def resnet200d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-200-D model.

tv_resnet34_mc_dropout

@register_model
def tv_resnet34_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-34 model with original Torchvision weights.

tv_resnet50_mc_dropout

@register_model
def tv_resnet50_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-50 model with original Torchvision weights.

tv_resnet101_mc_dropout

@register_model
def tv_resnet101_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-101 model w/ Torchvision pretrained weights.

tv_resnet152_mc_dropout

@register_model
def tv_resnet152_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-152 model w/ Torchvision pretrained weights.

wide_resnet50_2_mc_dropout

@register_model
def wide_resnet50_2_mc_dropout(pretrained=False, **kwargs)
 
Constructs a Wide ResNet-50-2 model.
The model is the same as ResNet except that the number of bottleneck channels
is twice as large in every block. The number of channels in outer 1x1
convolutions is the same, e.g. the last block in ResNet-50 has 2048-512-2048
channels, while in Wide ResNet-50-2 it has 2048-1024-2048.

wide_resnet101_2_mc_dropout

@register_model
def wide_resnet101_2_mc_dropout(pretrained=False, **kwargs)
 
Constructs a Wide ResNet-101-2 model.
The model is the same as ResNet except that the number of bottleneck channels
is twice as large in every block. The number of channels in outer 1x1
convolutions is the same.

resnet50_gn_mc_dropout

@register_model
def resnet50_gn_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-50 model w/ GroupNorm.

resnext50_32x4d_mc_dropout

@register_model
def resnext50_32x4d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNeXt50-32x4d model.

resnext50d_32x4d_mc_dropout

@register_model
def resnext50d_32x4d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNeXt50d-32x4d model: ResNeXt50 w/ deep stem & avg pool downsample.

resnext101_32x4d_mc_dropout

@register_model
def resnext101_32x4d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNeXt-101 32x4d model.

resnext101_32x8d_mc_dropout

@register_model
def resnext101_32x8d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNeXt-101 32x8d model.

resnext101_64x4d_mc_dropout

@register_model
def resnext101_64x4d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNeXt101-64x4d model.

tv_resnext50_32x4d_mc_dropout

@register_model
def tv_resnext50_32x4d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNeXt50-32x4d model with original Torchvision weights.

ig_resnext101_32x8d_mc_dropout

@register_model
def ig_resnext101_32x8d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNeXt-101 32x8 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>_
Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/

ig_resnext101_32x16d_mc_dropout

@register_model
def ig_resnext101_32x16d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNeXt-101 32x16 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>_
Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/

ig_resnext101_32x32d_mc_dropout

@register_model
def ig_resnext101_32x32d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNeXt-101 32x32 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>_
Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/

ig_resnext101_32x48d_mc_dropout

@register_model
def ig_resnext101_32x48d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNeXt-101 32x48 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>_
Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/

ssl_resnet18_mc_dropout

@register_model
def ssl_resnet18_mc_dropout(pretrained=False, **kwargs)
 
Constructs a semi-supervised ResNet-18 model pre-trained on the YFCC100M dataset and finetuned on ImageNet
"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/

ssl_resnet50_mc_dropout

@register_model
def ssl_resnet50_mc_dropout(pretrained=False, **kwargs)
 
Constructs a semi-supervised ResNet-50 model pre-trained on the YFCC100M dataset and finetuned on ImageNet
"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/

ssl_resnext50_32x4d_mc_dropout

@register_model
def ssl_resnext50_32x4d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a semi-supervised ResNeXt-50 32x4 model pre-trained on the YFCC100M dataset and finetuned on ImageNet
"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/

ssl_resnext101_32x4d_mc_dropout

@register_model
def ssl_resnext101_32x4d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a semi-supervised ResNeXt-101 32x4 model pre-trained on the YFCC100M dataset and finetuned on ImageNet
"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/

ssl_resnext101_32x8d_mc_dropout

@register_model
def ssl_resnext101_32x8d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a semi-supervised ResNeXt-101 32x8 model pre-trained on the YFCC100M dataset and finetuned on ImageNet
"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/

ssl_resnext101_32x16d_mc_dropout

@register_model
def ssl_resnext101_32x16d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a semi-supervised ResNeXt-101 32x16 model pre-trained on the YFCC100M dataset and finetuned on ImageNet
"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/

swsl_resnet18_mc_dropout

@register_model
def swsl_resnet18_mc_dropout(pretrained=False, **kwargs)
 
Constructs a semi-weakly supervised ResNet-18 model pre-trained on a 1B weakly supervised
image dataset and finetuned on ImageNet.
"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/

swsl_resnet50_mc_dropout

@register_model
def swsl_resnet50_mc_dropout(pretrained=False, **kwargs)
 
Constructs a semi-weakly supervised ResNet-50 model pre-trained on a 1B weakly supervised
image dataset and finetuned on ImageNet.
"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/

swsl_resnext50_32x4d_mc_dropout

@register_model
def swsl_resnext50_32x4d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a semi-weakly supervised ResNeXt-50 32x4 model pre-trained on a 1B weakly supervised
image dataset and finetuned on ImageNet.
"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/

swsl_resnext101_32x4d_mc_dropout

@register_model
def swsl_resnext101_32x4d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a semi-weakly supervised ResNeXt-101 32x4 model pre-trained on a 1B weakly supervised
image dataset and finetuned on ImageNet.
"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/

swsl_resnext101_32x8d_mc_dropout

@register_model
def swsl_resnext101_32x8d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a semi-weakly supervised ResNeXt-101 32x8 model pre-trained on a 1B weakly supervised
image dataset and finetuned on ImageNet.
"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/

swsl_resnext101_32x16d_mc_dropout

@register_model
def swsl_resnext101_32x16d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a semi-weakly supervised ResNeXt-101 32x16 model pre-trained on a 1B weakly supervised
image dataset and finetuned on ImageNet.
"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>_
Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/

ecaresnet26t_mc_dropout

@register_model
def ecaresnet26t_mc_dropout(pretrained=False, **kwargs)
 
Constructs an ECA-ResNeXt-26-T model.
This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
in the deep stem and ECA attn.

ecaresnet50d_mc_dropout

@register_model
def ecaresnet50d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-50-D model with ECA.

ecaresnet50d_pruned_mc_dropout

@register_model
def ecaresnet50d_pruned_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-50-D model pruned with ECA.
The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf

ecaresnet50t_mc_dropout

@register_model
def ecaresnet50t_mc_dropout(pretrained=False, **kwargs)
 
Constructs an ECA-ResNet-50-T model.
Like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem and ECA attn.

ecaresnetlight_mc_dropout

@register_model
def ecaresnetlight_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-50-D light model with ECA.

ecaresnet101d_mc_dropout

@register_model
def ecaresnet101d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-101-D model with ECA.

ecaresnet101d_pruned_mc_dropout

@register_model
def ecaresnet101d_pruned_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-101-D model pruned with ECA.
The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf

ecaresnet200d_mc_dropout

@register_model
def ecaresnet200d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-200-D model with ECA.

ecaresnet269d_mc_dropout

@register_model
def ecaresnet269d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-269-D model with ECA.

ecaresnext26t_32x4d_mc_dropout

@register_model
def ecaresnext26t_32x4d_mc_dropout(pretrained=False, **kwargs)
 
Constructs an ECA-ResNeXt-26-T model.
This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
in the deep stem. This model replaces the SE module with the ECA module.

ecaresnext50t_32x4d_mc_dropout

@register_model
def ecaresnext50t_32x4d_mc_dropout(pretrained=False, **kwargs)
 
Constructs an ECA-ResNeXt-50-T model.
This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
in the deep stem. This model replaces the SE module with the ECA module.

seresnet200d_mc_dropout

@register_model
def seresnet200d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-200-D model with SE attn.

seresnet269d_mc_dropout

@register_model
def seresnet269d_mc_dropout(pretrained=False, **kwargs)
 
Constructs a ResNet-269-D model with SE attn.

seresnext26d_32x4d_mc_dropout

@register_model
def seresnext26d_32x4d_mc_dropout(pretrained=False, **kwargs)
 
+

Constructs a SE-ResNeXt-26-D model. This is technically a 28 layer ResNet, using the 'D' modifier from Gluon / bag-of-tricks for a combination of deep stem and avg_pool in downsample.

+

+

seresnext26t_32x4d_mc_dropout

+
@register_model
+def seresnext26t_32x4d_mc_dropout(pretrained=False, **kwargs)
 
-

Arguments:

-
  • input_shape - Tuple[int, ...]:
  • out_channels - int:
  • kernel_size - int:
  • dilation - int: (Default value = 1)
  • stride - int: (Default value = 1)
  • padding - Union[int, bool]: (Default value = True)
  • bias - (Default value = False)
  • upsampling - float: (Default value = 1.0)
  • groups - int: (Default value = 1)
  • norm - Union[BNConfig, bool]: (Default value = False)
  • act - Union[ActConfig, bool]: (Default value = False)
-

-

conv1d

-
def conv1d(input_shape: Tuple[int, ...],
-           out_channels: int,
-           kernel_size: int,
-           stride: int = 1,
-           bias: bool = False,
-           padding: Union[int, bool] = True,
-           dilation: int = 1,
-           groups: int = 1,
-           norm: Union[BNConfig, bool] = False,
-           act: Union[ActConfig, bool] = False,
-           out_quant: bool = True) -> Tuple[Tuple[int, ...], nn.Module]
+

Constructs a SE-ResNeXt-26-T model. This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem.

+

+

seresnext26tn_32x4d_mc_dropout

+
@register_model
+def seresnext26tn_32x4d_mc_dropout(pretrained=False, **kwargs)
+
+ +

Constructs a SE-ResNeXt-26-T model. NOTE: I deprecated previous 't' model defs and replaced 't' with 'tn'; this was the only 'tn' model of note, so keeping this def for backwards compat with any uses out there. Old 't' model is lost.

+

+

resnetblur18_mc_dropout

+
@register_model
+def resnetblur18_mc_dropout(pretrained=False, **kwargs)
 
-

Arguments:

-
  • input_shape - Tuple[int, ...]:
  • out_channels - int:
  • kernel_size - int:
  • stride - int: (Default value = 1)
  • bias - bool: (Default value = False)
  • padding - Union[int, bool]: (Default value = True)
  • dilation - int: (Default value = 1)
  • groups - int: (Default value = 1)
  • norm - Union[BNConfig, bool]: (Default value = False)
  • act - Union[ActConfig, bool]: (Default value = False)
  • out_quant - bool: (Default value = True)
-

-

minor

-
def minor(input_shape, config: MinorBlockConfig, major_stride=None)
+

Constructs a ResNet-18 model with blur anti-aliasing

+

+

resnetblur50_mc_dropout

+
@register_model
+def resnetblur50_mc_dropout(pretrained=False, **kwargs)
 
-

Arguments:

-

input_shape:
- config - MinorBlockConfig:
- major_stride - (Default value = None)

-

-

forward

-
def forward(input_shape: Tuple[int, ...], config: MajorBlockConfig)
+

Constructs a ResNet-50 model with blur anti-aliasing

+

+

resnetblur50d_mc_dropout

+
@register_model
+def resnetblur50d_mc_dropout(pretrained=False, **kwargs)
 
-

Create a forward neural network block without parallelism

-

If parallel is set to [True, False, True, False]

-

Input: ------->|---> parallel: False ---> parallel: False ---> | --> output

-

Arguments:

-
  • input_shape - Tuple[int, ...]:
  • config - MajorBlockConfig:
  • -
-

-

residual

-
def residual(input_shape: Tuple[int, ...], config: MajorBlockConfig)
+

Constructs a ResNet-50-D model with blur anti-aliasing

+

+

resnetblur101d_mc_dropout

+
@register_model
+def resnetblur101d_mc_dropout(pretrained=False, **kwargs)
 
-

Create a neural network block with residual parallelism

-

If parallel is set to [True, False, True, False]

        |---> parallel: True ---> parallel: True --->   |
Input: -|                                               +--->
        |---> parallel: False ---> parallel: False ---> |

-

If the major block changes the output dimensions compared to the input and one of the branches does not contain any layers, we infer a 1x1 conv of maximum group size (gcd(input_channels, output_channels)) to do the downsampling; see the sketch after the argument list.

-

Arguments:

-
  • input_shape - Tuple[int, ...]:
  • config - MajorBlockConfig:
  • -
-
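A minimal sketch of how such a grouped 1x1 downsampling convolution can be derived (plain PyTorch; the channel counts are hypothetical):

import math

import torch.nn as nn

def grouped_downsample(in_channels: int, out_channels: int, stride: int) -> nn.Conv2d:
    # Maximum group size that evenly divides both channel counts.
    groups = math.gcd(in_channels, out_channels)
    return nn.Conv2d(in_channels, out_channels, kernel_size=1,
                     stride=stride, groups=groups, bias=False)

downsample = grouped_downsample(24, 36, stride=2)  # groups = gcd(24, 36) = 12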

-

input

-
def input(in_channels: int, config: MajorBlockConfig)
+

Constructs a ResNet-101-D model with blur anti-aliasing

+

+

resnetaa34d_mc_dropout

+
@register_model
+def resnetaa34d_mc_dropout(pretrained=False, **kwargs)
 
-

Create a neural network block with input parallelism

-

If parallel is set to [True, False, True, False]

        |---> parallel: True --->|
        |---> parallel: True --->+ ------------------>  |
Input: -|                                               +--->
        |---> parallel: False ---> parallel: False ---> |

-

If there are no parallel branches in the network, the major block is a standard feed-forward layer.

-

Arguments:

-
  • in_channels - int:
  • config - MajorBlockConfig:
  • -
-

-

full

-
def full(in_channels: int, config: MajorBlockConfig)
+

Constructs a ResNet-34-D model w/ avgpool anti-aliasing

+

+

resnetaa50_mc_dropout

+
@register_model
+def resnetaa50_mc_dropout(pretrained=False, **kwargs)
 
-

Create a neural network block with full parallelism

-

If parallel is set to [True, False, True, False]

        |---> parallel: True ---------------------------------->|
Input: -|                                                       +--->
        |                     |--> parallel: False --->|        |
        |---> parallel: False |                        +------->|
                              |--> parallel: True ---->|

-

If there are no parallel blocks, the block is a standard feed-forward network.

-

Arguments:

-
  • in_channels - int:
  • config - MajorBlockConfig:
  • -
-

-

major

-
def major(input_shape, config: MajorBlockConfig)
+

Constructs a ResNet-50 model with avgpool anti-aliasing

+

+

resnetaa50d_mc_dropout

+
@register_model
+def resnetaa50d_mc_dropout(pretrained=False, **kwargs)
 
-

Arguments:

-

input_shape:
- config - MajorBlockConfig:

-

-

linear

-
def linear(input_shape, config: LinearConfig)
+

Constructs a ResNet-50-D model with avgpool anti-aliasing

+

+

resnetaa101d_mc_dropout

+
@register_model
+def resnetaa101d_mc_dropout(pretrained=False, **kwargs)
 
-

Arguments:

-

input_shape:
- config - LinearConfig:

-

-

identity

-
def identity() -> Identity
+

Constructs a ResNet-101-D model with avgpool anti-aliasing

+

+

seresnetaa50d_mc_dropout

+
@register_model
+def seresnetaa50d_mc_dropout(pretrained=False, **kwargs)
 
-

-

network

-
def network(input_shape, labels: int, network_config: Union[ListConfig,
-                                                            DictConfig])
+

Constructs a SE-ResNet-50-D model with avgpool anti-aliasing

+

+

seresnextaa101d_32x8d_mc_dropout

+
@register_model
+def seresnextaa101d_32x8d_mc_dropout(pretrained=False, **kwargs)
 
-

Arguments:

-

input_shape:
- labels - int:
- network_config - NetworkConfig:

-

-

create_cnn

-
def create_cnn(input_shape: Sequence[int],
-               labels: int,
-               name: str,
-               conv: Optional[List[MajorBlockConfig]] = None,
-               linear: Optional[List[LinearConfig]] = None,
-               norm: Optional[NormConfig] = None,
-               act: Optional[ActConfig] = None,
-               qconfig: Any = None,
-               dropout: float = 0.5)
+

Constructs a SE-ResNeXt-101-D 32x8d model with avgpool anti-aliasing

+

+

resnetrs50_mc_dropout

+
@register_model
+def resnetrs50_mc_dropout(pretrained=False, **kwargs)
 
-

Arguments:

-
  • input_shape - Sequence[int]:
  • labels - int:
  • name - str:
  • conv - Optional[List[MajorBlockConfig]]: (Default value = None)
  • linear - Optional[List[LinearConfig]]: (Default value = None)
  • norm - Optional[NormConfig]: (Default value = None)
  • act - Optional[ActConfig]: (Default value = None)
  • qconfig - Any: (Default value = None)
  • dropout - float: (Default value = 0.5)
  • -
-

-

hannah.models.factory.qat

-

Import from new location for backwards compatibility

-

-

hannah.models.factory.reduction

-

-

ReductionBlockAdd Objects

-
class ReductionBlockAdd(nn.Module)
+

Constructs a ResNet-RS-50 model. Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579. Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs

+

+

resnetrs101_mc_dropout

+
@register_model
+def resnetrs101_mc_dropout(pretrained=False, **kwargs)
+
+ +

Constructs a ResNet-RS-101 model. Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579. Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs

+

+

resnetrs152_mc_dropout

+
@register_model
+def resnetrs152_mc_dropout(pretrained=False, **kwargs)
+
+ +

Constructs a ResNet-RS-152 model. Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579. Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs

+

+

resnetrs200_mc_dropout

+
@register_model
+def resnetrs200_mc_dropout(pretrained=False, **kwargs)
+
+ +

Constructs a ResNet-RS-200 model. Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579. Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs

+

+

resnetrs270_mc_dropout

+
@register_model
+def resnetrs270_mc_dropout(pretrained=False, **kwargs)
 
-

Reduction block that sums over its inputs
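A minimal sketch of the idea (plain PyTorch; the actual block may differ in how it receives its branch inputs):

import torch
import torch.nn as nn

class SumReduction(nn.Module):
    # Sums the outputs of several branches element-wise.
    def __init__(self, branches):
        super().__init__()
        self.branches = nn.ModuleList(branches)

    def forward(self, x):
        outputs = [branch(x) for branch in self.branches]
        return torch.stack(outputs, dim=0).sum(dim=0)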

-

-

forward

-
def forward(x: Tensor) -> Tensor
+

Constructs a ResNet-RS-270 model. Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579. Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs

+

+

resnetrs350_mc_dropout

+
@register_model
+def resnetrs350_mc_dropout(pretrained=False, **kwargs)
 
-

Arguments:

-
  • x - Tensor:
  • -
-

-

ReductionBlockConcat Objects

-
class ReductionBlockConcat(nn.Module)
+

Constructs a ResNet-RS-350 model. Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579. Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs

+

+

resnetrs420_mc_dropout

+
@register_model
+def resnetrs420_mc_dropout(pretrained=False, **kwargs)
 
-

Reduction block that concatenates its inputs

-

-

forward

-
def forward(x: Tensor) -> Tensor
+

Constructs a ResNet-RS-420 model. Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579. Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs

+

+

hannah.models.wavenet

+

+

hannah.models.wavenet.models

+

+

Conv Objects

+
class Conv(nn.Module)
 
-

Arguments:

-
  • x - Tensor:
  • -
-

-

hannah.models.factory.act

-

-

DummyActivation Objects

-
class DummyActivation(nn.Identity)
+

A convolution with the option to be causal and use xavier initialization

+

+

forward

+
def forward(signal)
 
-

Dummy class that is instantiated to mark a missing activation.

-

This can be used to mark requantization of activations for convolutional layers without activation functions.

Arguments:

-

-

hannah.models.factory.pooling

-

-

ApproximateGlobalAveragePooling1D Objects

-
class ApproximateGlobalAveragePooling1D(nn.Module)
+

signal:

+

+

WaveNet Objects

+
class WaveNet(nn.Module)
 
-

A global average pooling layer that divides by the next power of 2 instead of the true number of elements
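A minimal sketch of the idea, assuming plain PyTorch (the real layer may differ in details):

import torch
import torch.nn as nn

class ApproxGlobalAvgPool1d(nn.Module):
    # Sums over the time axis and divides by the next power of 2 instead of
    # the true length; a power-of-2 divisor is a cheap shift in fixed-point
    # hardware, at the cost of a slightly biased average.
    def __init__(self, length: int):
        super().__init__()
        self.divisor = 1 << (length - 1).bit_length()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x.sum(dim=-1) / self.divisor

pool = ApproxGlobalAvgPool1d(length=101)   # divisor = 128
y = pool(torch.randn(1, 8, 101))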

-

-

forward

-
def forward(x)
+

+

forward

+
def forward(input_data)
 

Arguments:

-

x:

-

-

ApproximateGlobalAveragePooling2D Objects

-
class ApproximateGlobalAveragePooling2D(nn.Module)
+

input_data:

+

+

hannah.models.honk

+

+

hannah.models.honk.model

+

+

truncated_normal

+
def truncated_normal(tensor, std_dev=0.01)
 
-

A global average pooling layer that divides by the next power of 2 instead of the true number of elements

-

-

forward

+

Arguments:

+

tensor: +- std_dev - (Default value = 0.01)

+

+

SpeechResModel Objects

+
class SpeechResModel(nn.Module)
+
+ +

+

forward

def forward(x)
 

Arguments:

x:

-

-

hannah.models.factory.quantized

-

Import from new location for backwards compatibility

-

-

hannah.models.factory.rounding

-

Import from new location for backwards compatibility

-

-

hannah.models.utils

-

-

next_power_of2

-
def next_power_of2(x)
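For illustration, a common bit-twiddling way to compute this (a sketch; the shipped implementation may differ):

def next_power_of2(x: int) -> int:
    # Smallest power of two >= x (returns 1 for x <= 1).
    return 1 if x <= 1 else 1 << (x - 1).bit_length()

assert next_power_of2(100) == 128
assert next_power_of2(128) == 128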
+

+

SpeechModel Objects

+
class SpeechModel(nn.Module)
+
+ +

+

forward

+
def forward(x)
 

Arguments:


DefaultAnomalyDetector Objects

-

forward

+

forward

def forward(x)
 

DefaultClassifierHead Objects

-

forward

+

forward

def forward(x: torch.Tensor) -> torch.Tensor
 

DefaultProjectionHead Objects

Default projection head for semi supervised classification learning

-

forward

+

forward

def forward(x: torch.Tensor) -> torch.Tensor
 

DefaultDecoderHead Objects

-

__init__

+

__init__

def __init__(latent_shape, input_shape)
 

__init__

  • input_shape(Tuple) - Shape (CxHxW) of the reconstructed image
  • -

    forward

    +

    forward

    def forward(x)
     

    TimmModel Objects

    -

    forward

    +

    forward

    def forward(x: torch.Tensor) -> torch.Tensor
     

    forward

  • x - torch.Tensor:
  • -

    -

    hannah.models.conv_vit.blocks

    -

    -

    hannah.models.conv_vit.operators

    -

    -

    hannah.models.conv_vit.attention

    -

    -

    hannah.models.conv_vit.models

    -

    -

    hannah.optim

    -

    -

    hannah.optim.RAdam

    -

    -

    hannah.optim.madgrad

    -

    -

    MADGRAD Objects

    -
    class MADGRAD(torch.optim.Optimizer)
    -
    - -

MADGRAD: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic Optimization (https://arxiv.org/abs/2101.11075). MADGRAD is a general-purpose optimizer that can be used in place of SGD or Adam and may converge faster and generalize better. Currently GPU-only. Typically, the same learning rate schedule that is used for SGD or Adam may be used. The overall learning rate is not comparable to either method and should be determined by a hyper-parameter sweep. MADGRAD requires less weight decay than other methods, often as little as zero. Momentum values used for SGD or Adam's beta1 should work here also. On sparse problems both weight_decay and momentum should be set to 0.

    -

    Arguments:

    -

    params (iterable): - Iterable of parameters to optimize or dicts defining parameter groups. - lr (float): - Learning rate (default: 1e-2). - momentum (float): - Momentum value in the range [0,1) (default: 0.9). - weight_decay (float): - Weight decay, i.e. a L2 penalty (default: 0). - eps (float): - Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6).

    -

    -

    step

    -
    def step(closure: Optional[Callable[[], float]] = None) -> Optional[float]
    -
    - -

    Performs a single optimization step.

    -

    Arguments:

    -
      -
    • closure callable, optional - A closure that reevaluates the model - and returns the loss.
    • -
    -

    -

    hannah.quantization.qconfig

    -

    -

    hannah.quantization.callback

    -

    -

    hannah.quantization.rounding

    -

    -

    round_downward

    -
    def round_downward(x: Tensor) -> Tensor
    -
    - -

Round to nearest downward

    -

    -

    round_upward

    -
    def round_upward(x: Tensor) -> Tensor
    -
    - -

Round to nearest upward

    -

    -

    round_odd

    -
    def round_odd(x: Tensor) -> Tensor
    -
    - -

    Round to nearest odd

    -

    -

    round_even

    -
    def round_even(x: Tensor) -> Tensor
    -
    - -

    Round to nearest even

    -

    -

    round_zero

    -
    def round_zero(x: Tensor) -> Tensor
    -
    - -

    Round towards zero

    -

    -

    round_infinity

    -
    def round_infinity(x: Tensor) -> Tensor
    -
    - -

    Round toward infinity

    -

    -

    truncate_up

    -
    def truncate_up(x: Tensor) -> Tensor
    -
    - -

    Always round up to next integer

    -

    -

    truncate_down

    -
    def truncate_down(x: Tensor) -> Tensor
    -
    - -

    Always round down to next integer

    -

    -

    truncate_infinity

    -
    def truncate_infinity(x: Tensor) -> Tensor
    -
    - -

    Always round to next integer in direction infinity

    -

    -

    truncate_zero

    -
    def truncate_zero(x: Tensor) -> Tensor
    +

    +

    hannah.models.hello

    +

    +

    hannah.models.hello.models

    +

    +

    DSConv2d Objects

    +
    class DSConv2d(nn.Module)
     
    -

    Always round to next integer in direction of Zero

    -

    -

    round_stochastic

    -
    def round_stochastic(x: Tensor) -> Tensor
    +

    +

    forward

    +
    def forward(x)
     
    -

    Round stochastically

    -

    -

    hannah.modules.augmentation.transforms

    -

    -

    hannah.modules.augmentation.transforms.registry

    -

    -

    hannah.modules.augmentation.transforms.kornia_transforms

    -

    -

    hannah.modules.augmentation

    -

    -

    hannah.modules.augmentation.bordersearch

    -

    -

    hannah.modules.augmentation.batch_augmentation

    -

    -

    BatchAugmentationPipeline Objects

    -
    class BatchAugmentationPipeline(nn.Module)
    +

    Arguments:

    +

    x:

    +

    +

    DSCNNSpeechModel Objects

    +
    class DSCNNSpeechModel(nn.Module)
     
    -

    -

    __init__

    -
    def __init__(transforms={})
    +

    +

    forward

    +
    def forward(x)
     
    -

Augmentation pipeline, especially for self-supervised learning

    Arguments:

    -
      -
    • replica int - number of replicated different random augmentations
    • -
    • transforms dict - configuration of transforms
    • -
    -

    -

    forward

    -
    @torch.no_grad()
    -def forward(x) -> torch.Tensor
    +

    x:

    +

    +

    DNNSpeechModel Objects

    +
    class DNNSpeechModel(nn.Module)
     
    -

    Perform Augmentations

    -

    Arguments:

    -
      -
  • x torch.Tensor - a torch.Tensor to be run through the augmentation pipeline
    • -
    -

    Returns:

    -

    Tuple[torch.Tensor, torch.Tensor]; Batch augmented with replica different random augmentations
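A hedged usage sketch (the transform configuration shown is hypothetical; available transforms depend on the registry):

import torch

pipeline = BatchAugmentationPipeline(
    transforms={"RandomHorizontalFlip": {"p": 0.5}}  # hypothetical entry
)
batch = torch.randn(8, 3, 64, 64)
augmented = pipeline(batch)  # forward runs under torch.no_grad()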

    -

    -

    hannah.modules.augmentation.augmentation

    -

    -

    hannah.modules

    -

    -

    hannah.modules.vision

    -

    -

    hannah.modules.vision.base

    -

    -

    hannah.modules.vision.loss

    -

    -

    hannah.modules.vision.anomaly_score

    -

class AnomalyScore(CatMetric):
    def __init__(self, percentile, nan_strategy="warn", **kwargs):
        super().__init__(nan_strategy=nan_strategy, **kwargs)
        self.percentile = percentile

    def compute(self):
        anomaly_score = None
        train_losses = super().compute()
        if train_losses:
            normalized_train_errors = torch.stack(train_losses) / (
                torch.max(torch.stack(train_losses), dim=0).values
            )
            anomaly_score = np.percentile(
                normalized_train_errors.cpu().numpy(), self.percentile
            )
        return anomaly_score

    -

    -

    hannah.modules.vision.anomaly_detection

    -

    -

    AnomalyDetectionModule Objects

    -
    class AnomalyDetectionModule(VisionBaseModule)
    +

    +

    forward

    +
    def forward(x)
     
    -

    -

    on_test_end

    -
    def on_test_end()
    +

    Arguments:

    +

    x:

    +

    +

    hannah.models.objectdetection

    +

    +

    hannah.models.objectdetection.loss

    +

    +

    bbox_iou

    +
    def bbox_iou(box1,
    +             box2,
    +             x1y1x2y2=True,
    +             GIoU=False,
    +             DIoU=False,
    +             CIoU=False,
    +             eps=1e-7)
     
    -

wd_dir = os.getcwd()
score, largest_train_error = self.compute_anomaly_score()
train_errors = self.normalized_train_errors
plt.hist(train_errors.detach().cpu().numpy(), bins=100)
plt.axvline(score, linestyle="dashed")
plt.title("Normalized train reconstruction errors")
plt.savefig(wd_dir + "/normalized_train_errors.png")

test = (
    torch.tensor(self.test_losses, device=self.device)
    / torch.max(torch.stack(self.train_losses), dim=0).values
)
plt.hist(test.detach().cpu().numpy(), bins=100)
plt.title("Normalized test reconstruction errors")
plt.savefig(wd_dir + "/normalized_test_errors.png")
print("Anomaly score", score)
print(
    "Largest train error",
    torch.max(torch.stack(self.train_losses), dim=0).values,
)

    -

    -

    hannah.modules.vision.image_classifier

    -

    -

    hannah.modules.config_utils

    -

    -

    dump_config

    -
    def dump_config(output_dir, config)
    +

    Arguments:

    +

  • box1:
  • box2:
  • x1y1x2y2 - (Default value = True)
  • GIoU - (Default value = False)
  • DIoU - (Default value = False)
  • CIoU - (Default value = False)
  • eps - (Default value = 1e-7)

    +
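A hedged usage sketch (boxes given in x1, y1, x2, y2 format):

import torch

box1 = torch.tensor([0.0, 0.0, 10.0, 10.0])
box2 = torch.tensor([[5.0, 5.0, 15.0, 15.0]])

iou = bbox_iou(box1, box2, x1y1x2y2=True)              # plain IoU
ciou = bbox_iou(box1, box2, x1y1x2y2=True, CIoU=True)  # complete IoU variant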

    +

    is_parallel

    +
    def is_parallel(model)
     
    -

    Dumps the configuration to json format

    -

    Creates file config.json in output_dir

    -

    Parameters

    -

output_dir : str
    Output directory
config : dict
    Configuration to dump

    -

    -

    save_model

    -
    def save_model(output_dir, model)
    +

    Arguments:

    +

    model:

    +

    +

    BCEBlurWithLogitsLoss Objects

    +
    class BCEBlurWithLogitsLoss(nn.Module)
     
    -

    Creates serialization of the model for later inference, evaluation

    -

    Creates the following files:

    +

    Arguments:

      -
    • model.pt: Serialized version of network parameters in pytorch
    • -
    • model.json: Serialized version of network parameters in json format
    • -
  • model.onnx: full model including parameters in onnx format
    • +
    • eps - (Default value = 0.1)
    • +
  • see https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 - smooth_BCE returns positive and negative label smoothing BCE targets: 1.0 - 0.5 * eps and 0.5 * eps
    -

    Parameters

    -

output_dir : str
    Directory to put serialized models
model : LightningModule
    Model to serialize

    -

    -

    hannah.modules.object_detection

    -

    -

    hannah.modules.base

    -

    -

    ClassifierModule Objects

    -
    class ClassifierModule(LightningModule, ABC)
    -
    - -

    -

    total_training_steps

    -
    def total_training_steps() -> int
    +

    +

    forward

    +
    def forward(pred, true)
     
    -

    Total training steps inferred from datamodule and devices.

    -

    -

    hannah.modules.classifier

    -

    -

    hannah.modules.angle_classifier

    -

    -

    hannah.modules.metrics

    -

    -

    Error Objects

    -
    class Error()
    +

    Arguments:

    +

    pred: + true:

    +

    +

    FocalLoss Objects

    +
    class FocalLoss(nn.Module)
     
    -

Computes Error = 1 - Accuracy

    -

.. math:: \text{Error} = 1 - \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)

    -

    Where :math:y is a tensor of target values, and :math:\hat{y} is a tensor of predictions.

    -

This module is a simple wrapper to get the task-specific versions of this metric, which is done by setting the task argument to either 'binary', 'multiclass' or 'multilabel'. See the documentation of :mod:BinaryError, :mod:MulticlassError and :mod:MultilabelError for the specific details of each argument's influence and examples.
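A minimal sketch of the underlying computation (plain torch; the real class dispatches to the task-specific metrics):

import torch

def error_rate(preds: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    # Error = 1 - Accuracy for hard label predictions.
    return 1.0 - (preds == target).float().mean()

preds = torch.tensor([0, 1, 2, 2])
target = torch.tensor([0, 1, 1, 2])
print(error_rate(preds, target))  # tensor(0.2500)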

    -

    -

    plot_confusion_matrix

    -
    def plot_confusion_matrix(cf,
    -                          group_names=None,
    -                          categories="auto",
    -                          count=True,
    -                          percent=True,
    -                          cbar=True,
    -                          xyticks=True,
    -                          xyplotlabels=True,
    -                          sum_stats=True,
    -                          figsize=None,
    -                          cmap="Blues",
    -                          title=None)
    +

    +

    forward

    +
    def forward(pred, true)
     
    -

    This function will make a pretty plot of an sklearn Confusion Matrix cm using a Seaborn heatmap visualization.

    -

    Arguments

    -

    cf: confusion matrix to be passed in

    -

    group_names: List of strings that represent the labels row by row to be shown in each square.

    -

    categories: List of strings containing the categories to be displayed on the x,y axis. Default is 'auto'

    -

    count: If True, show the raw number in the confusion matrix. Default is True.

    -

percent: If True, show the proportions for each category. Default is True.

    -

    cbar: If True, show the color bar. The cbar values are based off the values in the confusion matrix. - Default is True.

    -

    xyticks: If True, show x and y ticks. Default is True.

    -

    xyplotlabels: If True, show 'True Label' and 'Predicted Label' on the figure. Default is True.

    -

    sum_stats: If True, display summary statistics below the figure. Default is True.

    -

    figsize: Tuple representing the figure size. Default will be the matplotlib rcParams value.

    -

    cmap: Colormap of the values displayed from matplotlib.pyplot.cm. Default is 'Blues' - See http://matplotlib.org/examples/color/colormaps_reference.html

    -

    title: Title for the heatmap. Default is None.

    -

    -

    hannah.callbacks.summaries

    -

    -

    walk_model

    -
    def walk_model(model, dummy_input)
    +

    Arguments:

    +

    pred: + true:

    +

    +

    QFocalLoss Objects

    +
    class QFocalLoss(nn.Module)
    +
    + +

    +

    forward

    +
    def forward(pred, true)
     
    -

    Adapted from IntelLabs Distiller

    Arguments:

    -

    model: - dummy_input:

    -

    -

    MacSummaryCallback Objects

    -
    class MacSummaryCallback(Callback)
    +

    pred: + true:

    +

    +

    ComputeLoss Objects

    +
    class ComputeLoss()
     
    -

    -

    predict

    -
    def predict(pl_module, input=input)
    +

    +

    build_targets

    +
    def build_targets(p, targets)
     

    Arguments:

    -

    pl_module:

    -

    -

    on_train_start

    -
    @rank_zero_only
    -def on_train_start(trainer, pl_module)
    +

    p: + targets:

    +

    +

    hannah.models.objectdetection.models

    +

    +

    FasterRCNN Objects

    +
    class FasterRCNN(torch.nn.Module)
    +
    + +

    +

    forward

    +
    def forward(x, y=None)
     

    Arguments:

    -

    trainer: - pl_module:

    -

    -

    on_test_end

    -
    @rank_zero_only
    -def on_test_end(trainer, pl_module)
    +

    x: +- y - (Default value = None)

    +

    +

    transformOutput

    +
    def transformOutput(cocoGt, output, x, y)
     

    Arguments:

    -

    trainer: - pl_module:

    -

    -

    on_validation_epoch_end

    -
    @rank_zero_only
    -def on_validation_epoch_end(trainer, pl_module)
    +

    cocoGt: + output: + x: + y:

    +

    +

    UltralyticsYolo Objects

    +
    class UltralyticsYolo(torch.nn.Module)
    +
    + +

    +

    forward

    +
    def forward(x, y=None)
     

    Arguments:

    -

    trainer: - pl_module:

    -

    -

    estimate

    -
    def estimate(pl_module, input=None)
    +

    x: +- y - (Default value = None)

    +

    +

    train

    +
    def train(mode=True)
     
    -

    Generate Summary Metrics for neural network

    Arguments:

      -
    • pl_module(pytorch_lightning.LightningModule) - pytorch lightning module to summarize
    • +
    • mode - (Default value = True)
    -

    Returns:

    -

    dict[str, float]: Dict of MetricName => Metric Value

    -

    -

    prod

    -
    def prod(seq)
    +

    +

    transformOutput

    +
    def transformOutput(cocoGt, output, x, y)
     

    Arguments:

    -

    seq:

    -

    -

    hannah.callbacks

    -

    -

    hannah.callbacks.clustering

    -

    -

    clustering

    -
    def clustering(params, inertia, cluster)
    +

    cocoGt: + output: + x: + y:

    +

    +

    hannah.models.vad

    +

    +

    hannah.models.vad.models

    +

    +

    BottleneckVad Objects

    +
    class BottleneckVad(nn.Module)
    +
    + +

    +

    forward

    +
    def forward(x)
     

    Arguments:

    -

    params: - inertia: - cluster:

    -

    -

    kMeans Objects

    -
    class kMeans(Callback)
    +

    x:

    +

    +

    num_flat_features

    +
    def num_flat_features(x)
     
    -

    -

    on_test_epoch_start

    -
    def on_test_epoch_start(trainer, pl_module)
    +

    Arguments:

    +

    x:

    +

    +

    SmallVad Objects

    +
    class SmallVad(nn.Module)
    +
    + +

    +

    forward

    +
    def forward(x)
     

    Arguments:

    -

    trainer: - pl_module:

    -

    -

    on_train_epoch_end

    -
    def on_train_epoch_end(trainer, pl_module)
    +

    x:

    +

    +

    num_flat_features

    +
    def num_flat_features(x)
     

    Arguments:

    -

    trainer: - pl_module:

    -

    -

    hannah.callbacks.svd_compress

    -

    -

    SVD Objects

    -
    class SVD(Callback)
    +

    x:

    +

    +

    SimpleVad Objects

    +
    class SimpleVad(nn.Module)
     
    -

    -

    on_train_epoch_start

    -
    def on_train_epoch_start(trainer, pl_module)
    +

    +

    forward

    +
    def forward(x)
     

    Arguments:

    -

    trainer: - pl_module:

    -

    -

    hannah.callbacks.prediction_logger

    -

    -

    hannah.callbacks.pruning

    -

    -

    PruningAmountScheduler Objects

    -
    class PruningAmountScheduler()
    +

    x:

    +

    +

    num_flat_features

    +
    def num_flat_features(x)
     
    -

    -

    FilteredPruning Objects

    -
    class FilteredPruning(ModelPruning)
    +

    Arguments:

    +

    x:

    +

    +

    BottleneckVadModel Objects

    +
    class BottleneckVadModel(nn.Module)
     
    -

    -

    setup

    -
    def setup(trainer: Trainer, pl_module: LightningModule, stage: str)
    +

    +

    forward

    +
    def forward(x)
     

    Arguments:

    -

    trainer: - pl_module:

    -

    -

    filter_parameters_to_prune

    -
    def filter_parameters_to_prune(parameters_to_prune=None)
    +

    x:

    +

    +

    SimpleVadModel Objects

    +
    class SimpleVadModel(nn.Module)
    +
    + +

    +

    forward

    +
    def forward(x)
     
    -

    Filter out unprunable parameters

    Arguments:

    -
      -
    • parameters_to_prune - (Default value = None)
    • -
    -

    -

    on_test_end

    -
    def on_test_end(trainer, pl_module) -> None
    +

    x:

    +

    +

    SmallVadModel Objects

    +
    class SmallVadModel(nn.Module)
    +
    + +

    +

    forward

    +
    def forward(x)
    +
    + +

    Arguments:

    +

    x:

    +

    +

    hannah.models.lstm

    +

    +

    hannah.models.lstm.models

    +

    +

    LSTMModel Objects

    +
    class LSTMModel(nn.Module)
    +
    + +

    Simple LSTM model.

    +

    +

    forward

    +
    def forward(x)
    +
    + +

    Arguments:

    +

    x:

    +

    +

    hannah.quantization.callback

    +

    +

    hannah.quantization.qconfig

    +

    +

    hannah.quantization.rounding

    +

    +

    round_downward

    +
    def round_downward(x: Tensor) -> Tensor
    +
    + +

Round to nearest downward

    +

    +

    round_upward

    +
    def round_upward(x: Tensor) -> Tensor
     
    -

    Arguments:

    -

    trainer: - pl_module:

    -

    -

    hannah.callbacks.backends

    -

    -

    hannah.callbacks.optimization

    -

    -

    HydraOptCallback Objects

    -
    class HydraOptCallback(Callback)
    +

Round to nearest upward

    +

    +

    round_odd

    +
    def round_odd(x: Tensor) -> Tensor
     
    -

    -

    on_test_end

    -
    def on_test_end(trainer, pl_module)
    +

    Round to nearest odd

    +

    +

    round_even

    +
    def round_even(x: Tensor) -> Tensor
     
    -

    Arguments:

    -

    trainer: - pl_module:

    -

    -

    on_validation_end

    -
    def on_validation_end(trainer, pl_module)
    +

    Round to nearest even

    +

    +

    round_zero

    +
    def round_zero(x: Tensor) -> Tensor
     
    -

    Arguments:

    -

    trainer: - pl_module:

    -

    -

    test_result

    -
    def test_result()
    +

    Round towards zero

    +

    +

    round_infinity

    +
    def round_infinity(x: Tensor) -> Tensor
     
    -

    -

    val_result

    -
    def val_result()
    +

    Round toward infinity

    +

    +

    truncate_up

    +
    def truncate_up(x: Tensor) -> Tensor
     
    -

    -

    result

    -
    def result(dict=False)
    +

    Always round up to next integer

    +

    +

    truncate_down

    +
    def truncate_down(x: Tensor) -> Tensor
     
    -

    Arguments:

    -
      -
    • dict - (Default value = False)
    • -
    -

    -

    curves

    -
    def curves(dict=False)
    +

    Always round down to next integer

    +

    +

    truncate_infinity

    +
    def truncate_infinity(x: Tensor) -> Tensor
     
    -

    Arguments:

    -
      -
    • dict - (Default value = False)
    • -
    -

    -

    hannah.callbacks.dump_layers

    -

    -

    TestDumperCallback Objects

    -
    class TestDumperCallback(Callback)
    +

    Always round to next integer in direction infinity

    +

    +

    truncate_zero

    +
    def truncate_zero(x: Tensor) -> Tensor
     
    -

    -

    on_test_start

    -
    def on_test_start(pl_trainer, pl_model)
    +

    Always round to next integer in direction of Zero

    +

    +

    round_stochastic

    +
    def round_stochastic(x: Tensor) -> Tensor
     
    -

    Arguments:

    -

    pl_trainer: - pl_model:

    -

    -

    hannah.callbacks.backbone_finetuning

    -

    -

    hannah.callbacks.fine_tuning

    -

    -

    hannah.test_linear_classifier

    -

    -

    hannah.tools.objectdetection_eval

    -

    -

    eval_train

    -
    def eval_train(config, module, test=True)
    +

    Round stochastically
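A minimal sketch of stochastic rounding, assuming plain PyTorch (the shipped implementation may differ):

import torch

def round_stochastic(x: torch.Tensor) -> torch.Tensor:
    floor = torch.floor(x)
    frac = x - floor
    # Round up with probability equal to the fractional part, so the
    # result is unbiased in expectation.
    return floor + (torch.rand_like(x) < frac).float()

x = torch.full((10000,), 0.25)
print(round_stochastic(x).mean())  # close to 0.25 in expectation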

    +

    +

    hannah.features

    +

    +

    MFCC Objects

    +
    class MFCC(torchaudio.transforms.MFCC)
     
    -

    Arguments:

    -
      -
    • config - param module:
    • -
    • test - Default value = True) - module:
    • -
    -

    -

    eval_steps

    -
    def eval_steps(config, module, hparams, checkpoint)
    +

A simple wrapper around torchaudio's MFCC; melkwargs are given as direct named arguments instead of a dictionary

    +

    +

    SincConv Objects

    +
    class SincConv(nn.Module)
     
    +

    Sinc convolution:

    Arguments:

    +
      -
    • config - param module:
    • -
    • hparams - param checkpoint: - module: - checkpoint:
    • +
  • in_channels - No. of input channels (must be 1)
  • out_channels - No. of filters (default 40)
  • sample_rate - sampling rate (default 32000)
  • kernel_size - filter length (default 101)
    -
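A hedged usage sketch (parameter values follow the documented defaults; the exact constructor signature is an assumption):

import torch

sinc = SincConv(in_channels=1, out_channels=40, sample_rate=32000, kernel_size=101)
waveform = torch.randn(4, 1, 32000)  # (batch, channels, samples), 1 s of audio
features = sinc(waveform)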

    -

    eval_checkpoint

    -
    def eval_checkpoint(config: DictConfig, checkpoint)
    +

    +

    LogSpectrogram Objects

    +
    class LogSpectrogram(torch.nn.Module)
     
    +

Create a spectrogram from an audio signal.

    Arguments:

      -
    • config - DictConfig: - checkpoint:
    • -
    • config - DictConfig:
    • -
    • config - DictConfig:
    • +
    • n_fft int, optional - Size of FFT, creates n_fft // 2 + 1 bins. (Default: 400)
    • +
    • win_length int or None, optional - Window size. (Default: n_fft)
    • +
    • hop_length int or None, optional - Length of hop between STFT windows. (Default: win_length // 2)
    • +
    • pad int, optional - Two sided padding of signal. (Default: 0)
    • +
    • window_fn Callable[..., Tensor], optional - A function to create a window tensor + that is applied/multiplied to each frame/window. (Default: torch.hann_window)
    • +
    • power float or None, optional - Exponent for the magnitude spectrogram, + (must be > 0) e.g., 1 for energy, 2 for power, etc. + If None, then the complex spectrum is returned instead. (Default: 2)
    • +
    • normalized bool, optional - Whether to normalize by magnitude after stft. (Default: False)
    • +
    • wkwargs dict or None, optional - Arguments for window function. (Default: None)
    • +
    • center bool, optional - whether to pad :attr:waveform on both sides so + that the :math:t-th frame is centered at time :math:t \times \text{hop\_length}.
    • +
    • Default - True
    • +
    • pad_mode string, optional - controls the padding method used when + :attr:center is True. Default: "reflect"
    • +
    • onesided bool, optional - controls whether to return half of results to + avoid redundancy Default: True
    -

    -

    eval

    -
    def eval(config: DictConfig)
    +

    +

    forward

    +
    def forward(waveform: torch.Tensor) -> torch.Tensor
     

    Arguments:

      -
    • config - DictConfig:
    • -
    • config - DictConfig:
    • -
    • config - DictConfig:
    • +
    • waveform Tensor - Tensor of audio of dimension (..., time).
    -

    -

    main

    -
    @hydra.main(config_name="objectdetection_eval",
    -            config_path="../conf",
    -            version_base="1.2")
    -def main(config: DictConfig)
    -
    - -

    Arguments:

    +

    Returns:

      -
    • config - DictConfig:
    • -
    • config - DictConfig:
    • -
    • config - DictConfig:
    • +
    • Tensor - Dimension (..., freq, time), where freq is + n_fft // 2 + 1 where n_fft is the number of + Fourier bins, and time is the number of window hops (n_frame).
    -
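A hedged usage sketch (constructor arguments mirror the documented defaults):

import torch

spec = LogSpectrogram(n_fft=400)  # hypothetical instantiation
waveform = torch.randn(1, 16000)  # (..., time)
log_spec = spec(waveform)         # (..., freq, time), freq = n_fft // 2 + 1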

    -

    hannah.tools

    -

    -

    hannah.tools.eval

    -

    -

    eval_checkpoint

    -
    def eval_checkpoint(config: DictConfig, checkpoint) -> None
    +

    +

    hannah.normalizer

    +

    +

    FixedPointNormalizer Objects

    +
    class FixedPointNormalizer(nn.Module)
     
    -

    Arguments:

    -
      -
    • config - DictConfig: - checkpoint:
    • -
    • config - DictConfig:
    • -
    • config - DictConfig:
    • -
    -

    -

    eval

    -
    def eval(config: DictConfig) -> Optional[bool]
    +

    Simple feature normalizer for fixed point models

    +

    +

    AdaptiveFixedPointNormalizer Objects

    +
    class AdaptiveFixedPointNormalizer(nn.Module)
    +
    + +

    Simple feature normalizer for fixed point models

    +

    +

    hannah.optim

    +

    +

    hannah.optim.RAdam

    +

    +

    hannah.optim.madgrad

    +

    +

    MADGRAD Objects

    +
    class MADGRAD(torch.optim.Optimizer)
     
    +

MADGRAD: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic Optimization (https://arxiv.org/abs/2101.11075). MADGRAD is a general-purpose optimizer that can be used in place of SGD or Adam and may converge faster and generalize better. Currently GPU-only. Typically, the same learning rate schedule that is used for SGD or Adam may be used. The overall learning rate is not comparable to either method and should be determined by a hyper-parameter sweep. MADGRAD requires less weight decay than other methods, often as little as zero. Momentum values used for SGD or Adam's beta1 should work here also. On sparse problems both weight_decay and momentum should be set to 0.

    Arguments:

    -
      -
    • config - DictConfig:
    • -
    • config - DictConfig:
    • -
    • config - DictConfig:
    • -
    -

    -

    main

    -
    @hydra.main(config_name="eval", config_path="conf", version_base="1.2")
    -def main(config: DictConfig)
    +

    params (iterable): + Iterable of parameters to optimize or dicts defining parameter groups. + lr (float): + Learning rate (default: 1e-2). + momentum (float): + Momentum value in the range [0,1) (default: 0.9). + weight_decay (float): + Weight decay, i.e. a L2 penalty (default: 0). + eps (float): + Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6).

    +
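A hedged usage sketch (hyper-parameter values are the documented defaults):

import torch

from hannah.optim.madgrad import MADGRAD

model = torch.nn.Linear(10, 2)
optimizer = MADGRAD(model.parameters(), lr=1e-2, momentum=0.9, weight_decay=0)

loss = model(torch.randn(4, 10)).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()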

    +

    step

    +
    def step(closure: Optional[Callable[[], float]] = None) -> Optional[float]
     
    +

    Performs a single optimization step.

    Arguments:

      -
    • config - DictConfig:
    • -
    • config - DictConfig:
    • -
    • config - DictConfig:
    • +
    • closure callable, optional - A closure that reevaluates the model + and returns the loss.
    -

    -

    hannah.tools.train

    -

    -

    hannah.tools.characterize

    -

    -

    main

    -
    @hydra.main(config_name="characterize",
    -            config_path="../conf",
    -            version_base="1.2")
    -def main(config: DictConfig)
    +

    +

    hannah.backends.torch_mobile

    +

    +

    TorchMobileBackend Objects

    +
    class TorchMobileBackend(InferenceBackendBase)
    +
    + +

    Inference backend for torch mobile

    +

    +

    hannah.backends.utils

    +

    +

    symbolic_batch_dim

    +
    def symbolic_batch_dim(model) -> None
     
    +

Make the batch dimension symbolic for onnx models

    Arguments:

      -
    • config - DictConfig:
    • -
    • config - DictConfig:
    • -
    • config - DictConfig:
    • +
    • model - onnx model
    -
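A minimal sketch of what this typically does, assuming the onnx package (the shipped helper may differ):

import onnx

def symbolic_batch_dim(model: onnx.ModelProto) -> None:
    # Replace the fixed batch dimension of every graph input with a
    # symbolic name so the exported model accepts any batch size.
    for graph_input in model.graph.input:
        dims = graph_input.type.tensor_type.shape.dim
        if len(dims) > 0:
            dims[0].dim_param = "N"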

    -

    hannah.ssl

    -

    -

    hannah.ssl.hard_labeling

    -

    -

    HardLabeling Objects

    -
    class HardLabeling()
    -
    - -

    -

    training_step

    -
    def training_step(unlabeled_data: torch.Tensor,
    -                  trainer: pl.Trainer,
    -                  pl_module: pl.LightningModule,
    -                  batch_idx: int = -1) -> torch.Tensor
    -
    - -

    Calculate pseudo label loss from unlabeled data.

    -

    -

    get_dropout_layers

    -
    def get_dropout_layers()
    -
    - -

    Returns all model layers of class dropout or dropblock.

    -

    -

    compute_loss

    -
    def compute_loss(inputs, logits, targets, loss_fn=None)
    -
    - -

Helper function to compute loss, possibly with consistency regularization by augmentations (FixMatch).

    -

    -

    negative_cre_loss

    -
    def negative_cre_loss(logits, targets)
    -
    - -

Cross Entropy Loss for negative learning, which requires a multi-class and multi-label loss function.

    -

    -

    hannah.datasets.fake1d

    -

    -

    hannah.datasets.activity

    -

    -

    Data3D Objects

    -
    class Data3D()
    +

    +

    hannah.backends

    +

    +

    hannah.backends.onnxrt

    +

    +

    OnnxruntimeBackend Objects

    +
    class OnnxruntimeBackend(InferenceBackendBase)
     
    -

    3D-Data

    -

    -

    PAMPAP2_IMUData Objects

    -
    class PAMPAP2_IMUData()
    +

Inference backend for onnxruntime

    +

    +

    hannah.backends.base

    +

    +

    ProfilingResult Objects

    +
    class ProfilingResult(NamedTuple)
     
    -

An IMU set defined by:
- temperature (°C)
- 3D-acceleration data (m s⁻²), scale: ±16g, resolution: 13-bit
- 3D-acceleration data (m s⁻²), scale: ±6g, resolution: 13-bit
- 3D-gyroscope data (rad/s)
- 3D-magnetometer data (μT)
- orientation (invalid in this data collection)

    -

    -

    PAMAP2_DataPoint Objects

    -
    class PAMAP2_DataPoint()
    +

    Result of a profiling run

    +

    Attributes:

    +
      +
    • outputs - the outputs of the model on the given input batch
    • +
    • metrics - a dictionary containing the combined metrics obtained from the profiling run
    • +
    • profile - the raw profile in a backend-specific format
    • +
    +

    +

    AbstractBackend Objects

    +
    class AbstractBackend(ABC)
     
    -

    A temporal datapoint in the dataset

    -

    -

    PAMAP2_DataChunk Objects

    -
    class PAMAP2_DataChunk()
    +

    +

    prepare

    +
    @abstractmethod
    +def prepare(module: ClassifierModule)
     
    -

A DataChunk is an item of the pytorch dataset

    -

    -

    PAMAP2_Dataset Objects

    -
    class PAMAP2_Dataset(AbstractDataset)
    +

    Prepare the model for execution on the target device

    +

    Arguments:

    +
      +
    • module - the classifier module to be exported
    • +
    +

    +

    run

    +
    @abstractmethod
    +def run(*inputs) -> Union[torch.Tensor, Sequence[torch.Tensor]]
     
    -

Class for the PAMAP2 activity dataset
https://archive.ics.uci.edu/ml/datasets/pamap2+physical+activity+monitoring

    -

    -

    hannah.datasets.DatasetSplit

    -

    -

    hannah.datasets.physio

    -

    -

    AtrialFibrillationDataset Objects

    -
    class AtrialFibrillationDataset(PhysioDataset)
    +

    Run a batch on the target device

    +

    Arguments:

    +
      +
    • +

inputs - a list of torch tensors representing the inputs to be run on the target device; each tensor represents a whole batched input, so for models taking 1 parameter, the list will contain 1 tensor of shape (batch_size, *input_shape)

      +
    • +
    • +

      Returns - the output(s) of the model as a torch tensor or a Sequence of torch tensors for models producing multiple outputs

      +
    • +
    +

    +

    profile

    +
    @abstractmethod
    +def profile(*inputs: torch.Tensor) -> ProfilingResult
     
    -

    Atrial Fibrillation Database (https://physionet.org/content/afdb/1.0.0/)

    -

    -

    hannah.datasets.eeg_chb

    -

    -

    EEGDataset Objects

    -
    class EEGDataset(AbstractDataset)
    +

    Do a profiling run on the target device

    +

    Arguments:

    +
      +
    • +

inputs - a list of torch tensors representing the inputs to be run on the target device; each tensor represents a whole batched input, so for models taking 1 parameter, the list will contain 1 tensor of shape (batch_size, *input_shape)

      +
    • +
    • +

      Returns - a ProfilingResult object containing the outputs of the model, the metrics obtained from the profiling run and the raw profile in a backend-specific format

      +
    • +
    +

    +

    available

    +
    @classmethod
    +@abstractmethod
    +def available(cls) -> bool
     
    -

    -

    class_names

    -
    @property
    -def class_names() -> List[str]
    +

    Check if the backend is available

    +

    Returns: True if the backend is available, False otherwise

    +

    +

    export

    +
    def export() -> None
     
    -

    Returns the names of the classes in the classification dataset

    -

    -

    class_counts

    -
    @property
    -def class_counts() -> Optional[Dict[int, int]]
    +

    Export the model through the target backend

    +

    +

    InferenceBackendBase Objects

    +
    class InferenceBackendBase(AbstractBackend)
     
    -

    Returns the number of items in each class of the dataset

    -

    If this is not applicable to a dataset type e.g. ASR, Semantic Segmentation, -it may return None

    -

    -

    size

    -
    def size() -> List[int]
    +

    Base class for backends, it is only here for backwards compatibility reasons, use AbstractBackend instead

    +

    +

    hannah.backends.tensorrt

    +

    +

    TensorRTBackend Objects

    +
    class TensorRTBackend(InferenceBackendBase)
     
    -

    Returns dimension of output without batch dimension

    -

    -

    hannah.datasets

    -

    -

    hannah.datasets.vision

    -

    -

    hannah.datasets.vision.kvasir

    -

    -

    hannah.datasets.vision.ri_capsule

    -

Rhode Island gastroenterology video capsule endoscopy dataset

    -

https://www.nature.com/articles/s41597-022-01726-3
https://github.com/acharoen/Rhode-Island-GI-VCE-Technical-Validation

    -

    -

    split_train_set

    -
    def split_train_set(csv_file: pathlib.Path, drop_rate: float)
    +

    +

    output_spec

    +
    def output_spec()
     
    -

    Split train set in two and save as separate csv files.

    -

    -

    hannah.datasets.vision.fake

    -

    -

    hannah.datasets.vision.base

    -

    -

    TorchvisionDatasetBase Objects

    -
    class TorchvisionDatasetBase(VisionDatasetBase)
    +

    Get the specs for the output tensor of the network. Useful to prepare memory allocations.

    +

    Returns:

    +

    Two items, the shape of the output tensor and its (numpy) datatype.

    +

    +

    hannah.conf.optimizer

    +

    +

    SGDConf Objects

    +
    @dataclass
    +class SGDConf()
     
    -

    Wrapper around torchvision classification datasets

    -

    -

    ImageDatasetBase Objects

    -
    class ImageDatasetBase(VisionDatasetBase)
    +

    +

    lr

    +

    _RequiredParameter

    +

    +

    MADGRADConf Objects

    +
    @dataclass
    +class MADGRADConf()
     
    -

    -

    __init__

    -
    def __init__(X, y, classes, bbox=None, transform=None)
    +

    +

    lr

    +

    _RequiredParameter

    +

    +

    hannah.conf

    +

    +

    hannah.conf.scheduler

    +

    +

    OneCycleLRConf Objects

    +
    @dataclass
    +class OneCycleLRConf()
     
    -

    Initialize vision dataset

    -

    Arguments:

    -
      -
    • X List[str] - List of paths to image files
    • -
    • y List[str] - Class id of corresponding image
    • -
    • classes List[str] - List of class names, names are ordered by numeric class id
    • -
    • bbox Dict[str] - Dict with filename as keys, bbox coordinates as numpy arrays
    • -
    • transform Callable[image,image], optional - Optional transformation/augmentation of input images. Defaults to None.
    • -
    -

    -

    hannah.datasets.vision.dresden_capsule

    -

    Rhode island gastroenterology video capsule endoscopy dataset

    -

    https://www.nature.com/articles/s41597-022-01726-3 -https://github.com/acharoen/Rhode-Island-GI-VCE-Technical-Validation

    -

    -

    hannah.datasets.vision.kvasir_unlabeled

    -

    -

    KvasirCapsuleUnlabeled Objects

    -
    class KvasirCapsuleUnlabeled(AbstractDataset)
    +

    Config for one cycle lr total steps are configured from module

    +

    +

    hannah.conf.nas

    +

    +

    hannah.datasets.physio

    +

    +

    AtrialFibrillationDataset Objects

    +
    class AtrialFibrillationDataset(PhysioDataset)
     
    -

    Dataset representing unlabelled videos

    -

    -

    sequential

    -
    @property
    -def sequential() -> bool
    +

    Atrial Fibrillation Database (https://physionet.org/content/afdb/1.0.0/)

    +

    +

    hannah.datasets.emergency

    +

    +

    EmergencySirenDataset Objects

    +
    class EmergencySirenDataset(AbstractDataset)
     
    -

    Returns true if this dataset should only be iterated sequentially

    -

    -

    max_workers

    -
    @property
    -def max_workers() -> int
    +

    Emergency Dataset

    +

    +

    hannah.datasets.utils

    +

    +

    hannah.datasets.utils.cache

    +

    +

    hannah.datasets.utils.md5

    +

    +

    hannah.datasets.NoiseDataset

    +

    +

    hannah.datasets.Kitti

    +

    +

    Kitti Objects

    +
    class Kitti(AbstractDataset)
     
    -

Returns the maximum number of workers usable for this dataset

    -

    -

    hannah.datasets.vision.utils

    -

    -

    hannah.datasets.vision.utils.naneye

    -

    -

    read_naneye

    -
    def read_naneye(data_file: Union[str, Path])
    +

    +

    splits

    +
    @classmethod
    +def splits(cls, config)
     
    -

Read a NanEye raw image and decode the Bayer pattern

    -

    Arguments:

    -
      -
    • data_file Union[str, Path] - path to the datafile
    • -
    -

    Returns:

    -
      -
    • np.ndarray - uint8 array of decoded image data
    • -
    -

    -

    hannah.datasets.vision.cifar

    +

Splits the dataset into training, development and test set and returns the three sets as a List

    +

    +

    hannah.datasets.directional

    +

    +

    DirectionalDataset Objects

    +
    class DirectionalDataset(AbstractDataset)
    +
    + +

    Directional Dataset

    +

    +

    hannah.datasets

    hannah.datasets.base


    AbstractDataset Objects

    -

    prepare

    +

    prepare

    @classmethod
     @abstractmethod
     def prepare(cls, config: Dict[str, Any]) -> None

    prepare

  • config Dict[Any] - The dataset configuration
  • -

    splits

    +

    splits

    @classmethod
     @abstractmethod
     def splits(

    splits

  • config [type] - [description]
  • -

    class_names

    +

    class_names

    @property
     @abstractmethod
     def class_names() -> List[str]

    class_names

    Returns the names of the classes in the classification dataset

    -

    class_counts

    +

    class_counts

    @property
     @abstractmethod
     def class_counts() -> Optional[Dict[int, int]]

    __len__

    Returns number of samples in dataset

    -

    size

    +

    size

    def size() -> List[int]
     

    weights

    Class weights for weighted sampling

    -

    sequential

    +

    sequential

    @property
     def sequential() -> bool
     

    Returns true if this dataset should only be iterated sequentially

    -

    max_workers

    +

    max_workers

    @property
     def max_workers() -> int
     

    SpeechCommandsDataset Objects

    dataset

    SpeechHotwordDataset Objects

    -
    class SpeechHotwordDataset(SpeechDataset)
    -
    - -

    Dataset Class for Hotword dataset e.g. Hey Snips!

    -

    -

    splits

    -
    @classmethod
    -def splits(cls, config)
    -
    - -

Splits the dataset into training, development and test set and returns the three sets as a List

    -

    -

    VadDataset Objects

    -
    class VadDataset(SpeechDataset)
    -
    - -

    -

    splits

    -
    @classmethod
    -def splits(cls, config)
    -
    - -

Splits the dataset into training, development and test set and returns the three sets as a List

    -

    -

    hannah.datasets.Downsample

    -

    -

    hannah.datasets.directional

    -

    -

    DirectionalDataset Objects

    -
    class DirectionalDataset(AbstractDataset)
    -
    - -

    Directional Dataset

    -

    -

    hannah.datasets.emergency

    -

    -

    EmergencySirenDataset Objects

    -
    class EmergencySirenDataset(AbstractDataset)
    -
    - -

    Emergency Dataset

    -

    -

    hannah.datasets.collate

    -

    -

    vision_collate_fn

    -
    def vision_collate_fn(batch)
    -
    - -

    Function that takes in a batch of data and puts the elements within the batch -into a tensor with an additional outer dimension - batch size. The exact output type can be -a :class:torch.Tensor, a Sequence of :class:torch.Tensor, a -Collection of :class:torch.Tensor, or left unchanged, depending on the input type. -This is used as the default function for collation for vision tasks -batch_size or batch_sampler is defined in :class:~torch.utils.data.DataLoader.

    -

    Here is the general input type (based on the type of the element within the batch) to output type mapping:

    -
      -
    • :class:torch.Tensor -> :class:torch.Tensor (with an added outer dimension batch size)
    • -
    • NumPy Arrays -> :class:torch.Tensor
    • -
    • float -> :class:torch.Tensor
    • -
    • int -> :class:torch.Tensor
    • -
    • str -> str (unchanged)
    • -
    • bytes -> bytes (unchanged)
    • -
    • Mapping[K, V_i] -> Mapping[K, vision_collate([V_1, V_2, ...])]
    • -
    • NamedTuple[V1_i, V2_i, ...] -> NamedTuple[vision_collate([V1_1, V1_2, ...]), -vision_collate([V2_1, V2_2, ...]), ...]
    • -
    • Sequence[V1_i, V2_i, ...] -> Sequence[vision_collate([V1_1, V1_2, ...]), -vision_collate([V2_1, V2_2, ...]), ...]
    • -
    -

    Arguments:

    -
      -
    • batch - a single batch to be collated
    • -
    -

    Examples:

    -
    -
    -
    -

    Example with a batch of ints:

    -

    vision_collate([0, 1, 2, 3]) - tensor([0, 1, 2, 3])

    -

    Example with a batch of strs:

    -

    vision_collate(['a', 'b', 'c']) - ['a', 'b', 'c']

    -

    Example with Map inside the batch:

    -

    vision_collate([{'A': 0, 'B': 1}, {'A': 100, 'B': 100}]) -- {'A' - tensor([ 0, 100]), 'B': tensor([ 1, 100])}

    -

    Example with NamedTuple inside the batch:

    -

    Point = namedtuple('Point', ['x', 'y']) -vision_collate([Point(0, 0), Point(1, 1)]) - Point(x=tensor([0, 1]), y=tensor([0, 1]))

    -

    Example with Tuple inside the batch:

    -

    vision_collate([(0, 1), (2, 3)]) - [tensor([0, 2]), tensor([1, 3])]

    -

    Example with List inside the batch:

    -

    vision_collate([[0, 1], [2, 3]]) - [tensor([0, 2]), tensor([1, 3])]

    -
    -
    -
    -

    -

    ctc_collate_fn

    -
    def ctc_collate_fn(data)
    -
    - -

    Creates mini-batch tensors from the list of tuples (src_seq, trg_seq). -We should build a custom collate_fn rather than using default collate_fn, -because merging sequences (including padding) is not supported in default. -Sequences are padded to the maximum length of mini-batch sequences (dynamic padding).

    -

    Arguments:

    -
      -
    • data - list of tuple (src_seq, src_length, trg_seq, trg_length).
    • -
    • src_seq: torch tensor of shape (x,?); variable length.
    • -
    • src length: torch tenso of shape 1x1
    • -
    • trg_seq: torch tensor of shape (?); variable length.
    • -
    • trg_length: torch_tensor of shape (1x1)
    • -
    • Returns - tuple of four torch tensors
    • -
    • src_seqs - torch tensor of shape (batch_size, x, padded_length).
    • -
    • src_lengths - torch_tensor of shape (batch_size); valid length for each padded source sequence.
    • -
    • trg_seqs - torch tensor of shape (batch_size, x, padded_length).
    • -
    • trg_lengths - torch tensor of shape (batch_size); valid length for each padded target sequence.
    • -
    -

    -

    hannah.datasets.NoiseDataset

    -

    -

    hannah.datasets.eeg_tusz

    -

    -

    EEGDataset Objects

    -
    class EEGDataset(AbstractDataset)
    -
    - -

    -

    class_names

    -
    @property
    -def class_names() -> List[str]
    +
    class SpeechHotwordDataset(SpeechDataset)
     
    -

    Returns the names of the classes in the classification dataset

    -

    -

    class_counts

    -
    @property
    -def class_counts() -> Optional[Dict[int, int]]
    +

    Dataset Class for Hotword dataset e.g. Hey Snips!

    +

    +

    splits

    +
    @classmethod
    +def splits(cls, config)
     
    -

    Returns the number of items in each class of the dataset

    -

    If this is not applicable to a dataset type e.g. ASR, Semantic Segmentation, -it may return None

    -

    -

    size

    -
    def size() -> List[int]
    +

    Splits the dataset in training, devlopment and test set and returns +the three sets as List

    +

    +

    VadDataset Objects

    +
    class VadDataset(SpeechDataset)
     
    -

    Returns dimension of output without batch dimension

    -

    -

    hannah.datasets.utils

    -

    -

    hannah.datasets.utils.cache

    -

    -

    hannah.datasets.utils.md5

    +

    +

    splits

    +
    @classmethod
    +def splits(cls, config)
    +
    + +

    Splits the dataset in training, devlopment and test set and returns +the three sets as List

    +

    +

    hannah.datasets.fake1d

    hannah.datasets.pickle_set


    loader

    Return the data loader for the dataset

prepare

    def prepare(config)
     

    Prepare the dataset

splits

    def splits(config)
     

    Return the dataset splits

class_names

    @property
     def class_names()
     

    Return the class names

class_counts

    @property
     def class_counts()
     

    __len__

    Return the length of the dataset

max_workers

    @property
     def max_workers()
     

    EEGRTDataset Objects

class_names

    @property
     def class_names() -> List[str]
     

    Returns the names of the classes in the classification dataset

class_counts

    @property
     def class_counts() -> Optional[Dict[int, int]]
     

    class_counts

    If this is not applicable to a dataset type e.g. ASR, Semantic Segmentation, it may return None

size

    def size() -> List[int]
     

    Returns dimension of output without batch dimension

hannah.datasets.eeg_tusz

EEGDataset Objects

class EEGDataset(AbstractDataset)

class_names

@property
def class_names() -> List[str]

Returns the names of the classes in the classification dataset

class_counts

@property
def class_counts() -> Optional[Dict[int, int]]

Returns the number of items in each class of the dataset

If this is not applicable to a dataset type e.g. ASR, Semantic Segmentation, it may return None

size

def size() -> List[int]

Returns dimension of output without batch dimension

hannah.datasets.activity

Data3D Objects

class Data3D()

3D-Data

PAMPAP2_IMUData Objects

class PAMPAP2_IMUData()

An IMU set defined by:
temperature (°C)
3D-acceleration data (ms⁻²), scale: ±16g, resolution: 13-bit
3D-acceleration data (ms⁻²), scale: ±6g, resolution: 13-bit
3D-gyroscope data (rad/s)
3D-magnetometer data (μT)
orientation (invalid in this data collection)

PAMAP2_DataPoint Objects

class PAMAP2_DataPoint()

A temporal datapoint in the dataset

PAMAP2_DataChunk Objects

class PAMAP2_DataChunk()

A DataChunk is an item of the pytorch dataset

PAMAP2_Dataset Objects

class PAMAP2_Dataset(AbstractDataset)

Class for the PAMAP2 activity dataset
https://archive.ics.uci.edu/ml/datasets/pamap2+physical+activity+monitoring

hannah.datasets.collate

vision_collate_fn

def vision_collate_fn(batch)
Function that takes in a batch of data and puts the elements within the batch into a tensor with an additional outer dimension - batch size. The exact output type can be a :class:torch.Tensor, a Sequence of :class:torch.Tensor, a Collection of :class:torch.Tensor, or left unchanged, depending on the input type. This is used as the default function for collation for vision tasks when batch_size or batch_sampler is defined in :class:~torch.utils.data.DataLoader.

Here is the general input type (based on the type of the element within the batch) to output type mapping:

• :class:torch.Tensor -> :class:torch.Tensor (with an added outer dimension batch size)
• NumPy Arrays -> :class:torch.Tensor
• float -> :class:torch.Tensor
• int -> :class:torch.Tensor
• str -> str (unchanged)
• bytes -> bytes (unchanged)
• Mapping[K, V_i] -> Mapping[K, vision_collate([V_1, V_2, ...])]
• NamedTuple[V1_i, V2_i, ...] -> NamedTuple[vision_collate([V1_1, V1_2, ...]), vision_collate([V2_1, V2_2, ...]), ...]
• Sequence[V1_i, V2_i, ...] -> Sequence[vision_collate([V1_1, V1_2, ...]), vision_collate([V2_1, V2_2, ...]), ...]

Arguments:

• batch - a single batch to be collated

Examples:

Example with a batch of ints:

vision_collate([0, 1, 2, 3]) -> tensor([0, 1, 2, 3])

Example with a batch of strs:

vision_collate(['a', 'b', 'c']) -> ['a', 'b', 'c']

Example with Map inside the batch:

vision_collate([{'A': 0, 'B': 1}, {'A': 100, 'B': 100}]) -> {'A': tensor([0, 100]), 'B': tensor([1, 100])}

Example with NamedTuple inside the batch:

Point = namedtuple('Point', ['x', 'y'])
vision_collate([Point(0, 0), Point(1, 1)]) -> Point(x=tensor([0, 1]), y=tensor([0, 1]))

Example with Tuple inside the batch:

vision_collate([(0, 1), (2, 3)]) -> [tensor([0, 2]), tensor([1, 3])]

Example with List inside the batch:

vision_collate([[0, 1], [2, 3]]) -> [tensor([0, 2]), tensor([1, 3])]

ctc_collate_fn

def ctc_collate_fn(data)
     
Creates mini-batch tensors from the list of tuples (src_seq, trg_seq). We should build a custom collate_fn rather than using the default collate_fn, because merging sequences (including padding) is not supported by default. Sequences are padded to the maximum length of the mini-batch sequences (dynamic padding).

Arguments:

• data - list of tuples (src_seq, src_length, trg_seq, trg_length):
• src_seq: torch tensor of shape (x, ?); variable length.
• src_length: torch tensor of shape (1, 1).
• trg_seq: torch tensor of shape (?); variable length.
• trg_length: torch tensor of shape (1, 1).

Returns: tuple of four torch tensors

• src_seqs - torch tensor of shape (batch_size, x, padded_length).
• src_lengths - torch tensor of shape (batch_size); valid length for each padded source sequence.
• trg_seqs - torch tensor of shape (batch_size, x, padded_length).
• trg_lengths - torch tensor of shape (batch_size); valid length for each padded target sequence.
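To make the dynamic-padding idea concrete, here is a minimal sketch of such a collate function for 1D sequences. It is not the hannah implementation (real batches carry an extra feature axis, and the helper name is made up):

```python
import torch

def pad_collate_sketch(data):
    """Minimal dynamic-padding collate sketch (hypothetical helper)."""
    # Sort by source length, longest first, as packed RNN inputs expect.
    data.sort(key=lambda item: item[1], reverse=True)
    src_seqs, src_lengths, trg_seqs, trg_lengths = zip(*data)

    def pad(seqs):
        # Pad variable-length 1D tensors up to the longest one in the batch.
        max_len = max(s.size(-1) for s in seqs)
        out = torch.zeros(len(seqs), max_len, dtype=seqs[0].dtype)
        for i, s in enumerate(seqs):
            out[i, : s.size(-1)] = s
        return out

    return (pad(src_seqs), torch.tensor(src_lengths),
            pad(trg_seqs), torch.tensor(trg_lengths))
```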

hannah.datasets.eeg_chb

EEGDataset Objects

class EEGDataset(AbstractDataset)

class_names

@property
def class_names() -> List[str]

Returns the names of the classes in the classification dataset

class_counts

@property
def class_counts() -> Optional[Dict[int, int]]

Returns the number of items in each class of the dataset

If this is not applicable to a dataset type e.g. ASR, Semantic Segmentation, it may return None

size

def size() -> List[int]

Returns dimension of output without batch dimension

hannah.datasets.vision.ri_capsule

Rhode Island gastroenterology video capsule endoscopy dataset

https://www.nature.com/articles/s41597-022-01726-3
https://github.com/acharoen/Rhode-Island-GI-VCE-Technical-Validation

split_train_set

def split_train_set(csv_file: pathlib.Path, drop_rate: float)

Split the train set in two and save the parts as separate csv files.

hannah.datasets.vision.cifar

hannah.datasets.vision.utils.naneye

read_naneye

def read_naneye(data_file: Union[str, Path])

Read a naneye raw image and decode the bayer pattern

Arguments:

• data_file Union[str, Path] - path to the datafile

Returns:

• np.ndarray - uint8 array of decoded image data
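As a rough illustration of Bayer-pattern decoding, the sketch below demosaics an RGGB mosaic by averaging 2x2 blocks; the actual naneye sensor layout, file header handling, and bit depth are assumptions not reproduced here:

```python
import numpy as np

def decode_bayer_rggb_sketch(raw: np.ndarray) -> np.ndarray:
    """Toy demosaic of an assumed RGGB Bayer mosaic by 2x2 blocks."""
    r = raw[0::2, 0::2].astype(np.uint16)                       # red sites
    g = (raw[0::2, 1::2].astype(np.uint16) + raw[1::2, 0::2]) // 2  # two greens
    b = raw[1::2, 1::2].astype(np.uint16)                       # blue sites
    return np.stack([r, g, b], axis=-1).astype(np.uint8)
```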
hannah.datasets.vision.utils

hannah.datasets.vision.fake

hannah.datasets.vision

hannah.datasets.vision.base

TorchvisionDatasetBase Objects

class TorchvisionDatasetBase(VisionDatasetBase)

Wrapper around torchvision classification datasets

ImageDatasetBase Objects

class ImageDatasetBase(VisionDatasetBase)

__init__

def __init__(X, y, classes, bbox=None, transform=None)

Initialize vision dataset

Arguments:

• X List[str] - List of paths to image files
• y List[str] - Class id of corresponding image
• classes List[str] - List of class names, names are ordered by numeric class id
• bbox Dict[str] - Dict with filename as keys, bbox coordinates as numpy arrays
• transform Callable[image,image], optional - Optional transformation/augmentation of input images. Defaults to None.
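A hypothetical construction of such a dataset (the paths, labels, and import path are made up for the example):

```python
# Illustrative only: file names, labels and the import path are assumptions.
from hannah.datasets.vision.base import ImageDatasetBase  # assumed path
from torchvision import transforms

X = ["images/cat_001.png", "images/dog_001.png"]  # paths to image files
y = [0, 1]                                        # numeric class ids
classes = ["cat", "dog"]                          # index == class id
augment = transforms.Compose([transforms.RandomHorizontalFlip()])

dataset = ImageDatasetBase(X, y, classes, transform=augment)
```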

hannah.datasets.vision.kvasir

hannah.datasets.vision.dresden_capsule

Rhode Island gastroenterology video capsule endoscopy dataset

https://www.nature.com/articles/s41597-022-01726-3
https://github.com/acharoen/Rhode-Island-GI-VCE-Technical-Validation

hannah.datasets.vision.kvasir_unlabeled

KvasirCapsuleUnlabeled Objects

class KvasirCapsuleUnlabeled(AbstractDataset)

Dataset representing unlabelled videos

sequential

@property
def sequential() -> bool

Returns true if this dataset should only be iterated sequentially

max_workers

@property
def max_workers() -> int

Returns the maximum number of workers useable for this dataset

hannah.datasets.DatasetSplit

hannah.datasets.Downsample

hannah.callbacks.backbone_finetuning

hannah.callbacks.dump_layers

TestDumperCallback Objects

class TestDumperCallback(Callback)

on_test_start

def on_test_start(pl_trainer, pl_model)

Arguments:

pl_trainer:
pl_model:

hannah.callbacks.svd_compress

SVD Objects

class SVD(Callback)

on_train_epoch_start

def on_train_epoch_start(trainer, pl_module)

Arguments:

trainer:
pl_module:

hannah.callbacks.prediction_logger

hannah.callbacks

hannah.callbacks.fine_tuning

hannah.callbacks.backends

hannah.callbacks.pruning

PruningAmountScheduler Objects

class PruningAmountScheduler()

FilteredPruning Objects

class FilteredPruning(ModelPruning)

setup

def setup(trainer: Trainer, pl_module: LightningModule, stage: str)

Arguments:

trainer:
pl_module:

filter_parameters_to_prune

def filter_parameters_to_prune(parameters_to_prune=None)

Filter out unprunable parameters

Arguments:

• parameters_to_prune - (Default value = None)

on_test_end

def on_test_end(trainer, pl_module) -> None

Arguments:

trainer:
pl_module:

hannah.callbacks.summaries

walk_model

def walk_model(model, dummy_input)

Adapted from IntelLabs Distiller

Arguments:

model:
dummy_input:

MacSummaryCallback Objects

class MacSummaryCallback(Callback)

predict

def predict(pl_module, input=input)

Arguments:

pl_module:

on_train_start

@rank_zero_only
def on_train_start(trainer, pl_module)

Arguments:

trainer:
pl_module:

on_test_end

@rank_zero_only
def on_test_end(trainer, pl_module)

Arguments:

trainer:
pl_module:

on_validation_epoch_end

@rank_zero_only
def on_validation_epoch_end(trainer, pl_module)

Arguments:

trainer:
pl_module:

estimate

def estimate(pl_module, input=None)

Generate Summary Metrics for neural network

Arguments:

• pl_module(pytorch_lightning.LightningModule) - pytorch lightning module to summarize

Returns:

dict[str, float]: Dict of MetricName => Metric Value
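For intuition, here is a sketch of how such a summary dict can be produced with forward hooks; it mirrors the kind of result estimate returns but is not the hannah implementation (the function name and metric keys are made up):

```python
import torch
import torch.nn as nn

def estimate_macs_sketch(model: nn.Module, example: torch.Tensor) -> dict:
    """Rough per-sample MAC/weight count via forward hooks (sketch)."""
    totals = {"total_macs": 0, "total_weights": 0}

    def hook(module, inputs, output):
        if isinstance(module, nn.Conv2d):
            out_elems = output.numel() / output.shape[0]  # per-sample outputs
            k = module.kernel_size[0] * module.kernel_size[1]
            totals["total_macs"] += int(out_elems * k * module.in_channels
                                        / module.groups)
            totals["total_weights"] += module.weight.numel()
        elif isinstance(module, nn.Linear):
            totals["total_macs"] += module.in_features * module.out_features
            totals["total_weights"] += module.weight.numel()

    handles = [m.register_forward_hook(hook) for m in model.modules()]
    with torch.no_grad():
        model(example)
    for h in handles:
        h.remove()
    return totals

# e.g. estimate_macs_sketch(my_model, torch.randn(1, 3, 224, 224))
```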

prod

def prod(seq)

Arguments:

seq:

hannah.callbacks.clustering

clustering

def clustering(params, inertia, cluster)

Arguments:

params:
inertia:
cluster:

kMeans Objects

class kMeans(Callback)

on_test_epoch_start

def on_test_epoch_start(trainer, pl_module)

Arguments:

trainer:
pl_module:

on_train_epoch_end

def on_train_epoch_end(trainer, pl_module)

Arguments:

trainer:
pl_module:

hannah.callbacks.optimization

HydraOptCallback Objects

class HydraOptCallback(Callback)

on_test_end

def on_test_end(trainer, pl_module)

Arguments:

trainer:
pl_module:

on_validation_end

def on_validation_end(trainer, pl_module)

Arguments:

trainer:
pl_module:

test_result

def test_result()

val_result

def val_result()

result

def result(dict=False)

Arguments:

• dict - (Default value = False)

curves

def curves(dict=False)

Arguments:

• dict - (Default value = False)

hannah.nas.hardware_description.description

hannah.nas.hardware_description

hannah.nas.hardware_description.memory_type

hannah.nas.hardware_description.device

hannah.nas.hardware_description.ultratrail

hannah.nas.dataflow.optional_op

hannah.nas.dataflow.ops

hannah.nas.dataflow.ops.add

hannah.nas.dataflow.ops.identity

hannah.nas.dataflow.ops.batch_nom

hannah.nas.dataflow.ops.dropout

hannah.nas.dataflow.ops.concat

hannah.nas.dataflow.ops.sum

hannah.nas.dataflow.ops.linear

hannah.nas.dataflow.ops.conv2d

hannah.nas.dataflow.ops.relu

hannah.nas.dataflow.ops.pooling

hannah.nas.dataflow.tensor_expression

    hannah.nas.dataflow.transformations.graph_tranformer

    hannah.nas.dataflow.axis_type


    AxisTuple Objects

    Used to have the axis dict as a parametrized object

hannah.nas.dataflow.analysis.dataflow_analysis

hannah.nas.dataflow.repeat

hannah.nas.dataflow.register_ops

hannah.nas.dataflow

hannah.nas.dataflow.registry

hannah.nas.dataflow.tensor_type

hannah.nas.dataflow.quantization_type

hannah.nas.dataflow.op_type

hannah.nas.dataflow.dataflow_utils

hannah.nas.dataflow.data_type

hannah.nas.dataflow.scoping_utils

hannah.nas.dataflow.compression_type

    hannah.nas.dataflow.dataflow_graph


    collect_users

    If a node_b is NOT in collect_users(node_a), this means that node_b is either BEFORE node_a in the graph OR it is in a parallel branch.

Parameters

    node : type description

Returns

    type description


    find_first_input

    back to its first input. NOTE: The traversal is via OPERANDS and not OUTPUT, meaning that e.g. weight Tensors that are included in Ops in a DFG are not returned

Parameters

    node : type description

Returns

    type description

hannah.nas.dataflow.tensor

hannah.nas.parameters.iterators

hannah.nas.parameters

hannah.nas.parameters.lazy

hannah.nas.parameters.parametrize

hannah.nas.parameters.parameters

hannah.nas.expressions.shapes

hannah.nas.expressions.logic

hannah.nas.expressions.utils

hannah.nas.expressions.metrics

hannah.nas.expressions

hannah.nas.expressions.arithmetic

hannah.nas.expressions.conditions

hannah.nas.expressions.op

hannah.nas.expressions.choice

hannah.nas.expressions.types

hannah.nas.expressions.placeholder

hannah.nas.core.expression

hannah.nas.core

hannah.nas.core.parametrized

hannah.nas.utils

is_pareto

def is_pareto(costs, maximise=False)

Arguments:

• costs: An (n_points, n_costs) array

Returns:

A (n_points, ) boolean array, indicating whether each point is Pareto efficient
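A common reference formulation of this predicate (not necessarily hannah's exact code) looks like this:

```python
import numpy as np

def is_pareto_sketch(costs: np.ndarray, maximise: bool = False) -> np.ndarray:
    """Boolean Pareto-efficiency mask for an (n_points, n_costs) array."""
    signed = -costs if maximise else costs      # reduce to minimisation
    efficient = np.ones(signed.shape[0], dtype=bool)
    for i, c in enumerate(signed):
        if efficient[i]:
            # keep only points strictly better than c in at least one cost...
            efficient[efficient] = np.any(signed[efficient] < c, axis=1)
            efficient[i] = True                 # ...plus point i itself
    return efficient

# is_pareto_sketch(np.array([[1, 2], [2, 1], [2, 2]])) -> [True, True, False]
```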

hannah.nas.functional_operators.shapes

padding_expression

def padding_expression(kernel_size, stride, dilation=1)

Symbolically calculate padding such that for a given kernel_size, stride and dilation the padding is such that the output dimension is kept the same (stride=1) or halved (stride=2). Note: If the input dimension is 1 and stride = 2, the calculated padding will result in an output that also has dimension 1.

Parameters

kernel_size : Union[int, Expression]
stride : Union[int, Expression]
dilation : Union[int, Expression], optional
    description, by default 1

Returns

Expression
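For concrete integer shapes the expression reduces to dilation * (kernel_size - 1) // 2; a small sketch with made-up sizes:

```python
import torch

def padding_int(kernel_size: int, stride: int, dilation: int = 1) -> int:
    """Concrete integer analogue of padding_expression (sketch): keeps the
    spatial size for stride=1 and halves it for stride=2."""
    return (dilation * (kernel_size - 1)) // 2

x = torch.randn(1, 8, 32, 32)
conv = torch.nn.Conv2d(8, 8, kernel_size=3, stride=2,
                       padding=padding_int(3, 2))
assert conv(x).shape[-1] == 16  # 32 halved by stride 2
```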

hannah.nas.functional_operators.torch_conversion

hannah.nas.functional_operators.operators

self_attention2d

@torch.fx.wrap
def self_attention2d(q, k, v, num_heads, d_model, *, id)

Arguments:

• q - Tensor, shape [B, h*d, H, W]
• k - Tensor, shape [B, h*d, H, W]
• v - Tensor, shape [B, h*d, H, W]

relu_linear_attention

@torch.fx.wrap
def relu_linear_attention(q, k, v, num_heads, d_model, *, id)

Adapted from EfficientViT.

Arguments:

• q - Tensor, shape [B, h*d, H, W]
• k - Tensor, shape [B, h*d, H, W]
• v - Tensor, shape [B, h*d, H, W]

ReluLinearAttention Objects

@parametrize
class ReluLinearAttention(Op)

    Adapted from EfficientViT
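For reference, a sketch of ReLU linear attention over [B, h*d, H, W] tensors in the EfficientViT style; this is a textbook formulation (with a hypothetical name and a head_dim parameter instead of d_model), not necessarily the exact hannah code:

```python
import torch

def relu_linear_attention_sketch(q, k, v, num_heads, head_dim):
    """ReLU linear attention sketch for [B, heads*head_dim, H, W] tensors."""
    b, _, h, w = q.shape

    def split(t):
        # [B, heads*d, H, W] -> [B, heads, N, d] with N = H*W
        return t.reshape(b, num_heads, head_dim, h * w).transpose(-1, -2)

    q = torch.relu(split(q))
    k = torch.relu(split(k))
    v = split(v)
    # Associate (k^T v) first: O(N * d^2) instead of O(N^2 * d).
    kv = k.transpose(-1, -2) @ v                                # [B, heads, d, d]
    num = q @ kv                                                # [B, heads, N, d]
    den = q @ k.transpose(-1, -2).sum(-1, keepdim=True) + 1e-6  # [B, heads, N, 1]
    out = num / den
    return out.transpose(-1, -2).reshape(b, num_heads * head_dim, h, w)
```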

hannah.nas.functional_operators

hannah.nas.functional_operators.executor

hannah.nas.functional_operators.lazy

hannah.nas.functional_operators.visualizer

hannah.nas.functional_operators.op

scope

def scope(function)
     
Decorator defining a scope in a search space. The id of every subcomponent (operators or lower-hierarchy scopes) enclosed in a function decorated with this will be prefixed with the name of the function, creating a hierarchical scope.

search_space

def search_space(function)

Decorator to define a search space. For correct scoping, a search space containing functional ops must be enclosed by a function decorated with @search_space.
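A schematic of how the two decorators combine (the import path is an assumption and the op bodies are elided):

```python
# Illustrative only: the functional-op constructors are elided with "...".
from hannah.nas.functional_operators.op import scope, search_space  # assumed

@scope
def conv_block(x):
    # everything created here gets ids prefixed with "conv_block"
    ...

@search_space
def my_space(x):
    # the whole space lives inside one @search_space function
    x = conv_block(x)
    x = conv_block(x)  # second instance gets a distinct scoped id
    return x
```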


    Bypass Objects

class Bypass(Op)

    Alternative Identity()

hannah.nas.ops

hannah.nas.parametrization

ScalarParameterState Objects

@dataclass
class ScalarParameterState(ParameterState)

sigma

variance of sampling parameter

    hannah.nas

    hannah.nas.eval

hannah.nas.eval.extract

hannah.nas.eval.__main__

hannah.nas.eval.plot

    hannah.nas.eval.prepare


    prepare_summary

  • base_dir str - base directory paths in data mapping are interpreted relative to base directory
  • force bool - force reconstructing of cached results ("data.pkl")
hannah.nas.spaces.mobilenet.mobilenet

hannah.nas.spaces.darts.darts_space

hannah.nas.fx.tracer

InliningTracer Objects

class InliningTracer(SearchSpaceTracer)

Inline all search space functions into the graph.

This generates a standard pytorch.fx graph module, replacing the search space's parametrizable functions with their equivalent torch.functional forms.

    hannah.nas.search.model_trainer.simple_model_trainer

    hannah.nas.search.utils


    hannah.nas.search.search_old


    AgingEvolution Objects

    Aging Evolution based multi objective optimization

next_parameters

    def next_parameters()
     

    Returns a list of current tasks

tell_result

    def tell_result(parameters, metrics)
     

    Tell the result of a task

    -

    -

    hannah.nas.__main__

    -

    -

    hannah.nas.export

    -

    -

    hannah.nas.export.onnx

    -

    -

    eval

    -
    def eval(exp_tree: Any) -> Any
    -
    - -

    Recursively evaluate expressions organized as a pytree

    -

    -

    hannah.nas.hardware_description

    -

    -

    hannah.nas.hardware_description.ultratrail

    -

    -

    hannah.nas.hardware_description.device

    -

    -

    hannah.nas.hardware_description.memory_type

    -

    -

    hannah.nas.hardware_description.description

    -

    -

    hannah.nas.expressions

    -

    -

    hannah.nas.expressions.shapes

    -

    -

    hannah.nas.expressions.conditions

    -

    -

    hannah.nas.expressions.logic

    -

    -

    hannah.nas.expressions.arithmetic

    -

    -

    hannah.nas.expressions.op

    -

    -

    hannah.nas.expressions.types

    -

    -

    hannah.nas.expressions.placeholder

    -

    -

    hannah.nas.expressions.utils

    -

    -

    hannah.nas.expressions.metrics

    -

    -

    hannah.nas.expressions.choice

    -

    -

    hannah.nas.core

    -

    -

    hannah.nas.core.expression

    -

    -

    hannah.nas.core.parametrized

    -

    -

    hannah.nas.ops

    -

    -

    hannah.nas.utils

    -

    -

    is_pareto

    -
    def is_pareto(costs, maximise=False)
    -
    - -

    Arguments:

    -
      -
    • costs: An (n_points, n_costs) array
    • -
    -

    Returns:

    -

    A (n_points, ) boolean array, indicating whether each point is Pareto efficient

    -
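A minimal usage sketch (the cost values are hypothetical; assumes numpy input as described above):

import numpy as np
from hannah.nas.utils import is_pareto

costs = np.array([[1.0, 2.0], [2.0, 1.0], [2.0, 2.0]])  # (n_points, n_costs)
mask = is_pareto(costs)  # minimization by default
# mask == [True, True, False]: the third point is dominated on both costs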

    -

    hannah.utils.logger

    -

    -

    JSONLogger Objects

    -
    class JSONLogger(Logger)
    -
    - -

    -

    name

    -
    @property
    -def name() -> str
    +

    +

    hannah.nas.search.sampler.base_sampler

    +

    +

    Sampler Objects

    +
    class Sampler(ABC)
     
    -

    Gets the name of the experiment.

    -

    Returns:

    -

    The name of the experiment.

    -

    -

    version

    -
    @property
    -def version() -> Union[int, str]
    +

    +

    tell_result

    +
    def tell_result(parameters, metrics)
     
    -

    Gets the version of the experiment.

    -

    Returns:

    -

    The version of the experiment if it is specified, else the next version.

    -

    -

    root_dir

    -
    @property
    -def root_dir() -> str
    +

    Tell the result of a task

    +

    +

    hannah.nas.search.sampler.mutator

    +

    +

    hannah.nas.search.sampler.random_sampler

    +

    +

    hannah.nas.search.sampler.aging_evolution

    +

    +

    AgingEvolutionSampler Objects

    +
    class AgingEvolutionSampler(Sampler)
     
    -

    Gets the save directory where the versioned JSON experiments are saved.

    -

    -

    log_dir

    -
    @property
    -def log_dir() -> str
    +

Aging Evolution based multi-objective optimization

    +

    +

    next_parameters

    +
    def next_parameters()
     
    -

    The log directory for this run.

    -

By default, it is named 'version_${self.version}' but it can be overridden by passing a string value for the constructor's version parameter instead of None or an int.

    -

    -

    experiment

    -
    @property
    -@rank_zero_experiment
    -def experiment() -> "_ExperimentWriter"
    +

    Returns a list of current tasks

    +

    +

    tell_result

    +
    def tell_result(parameters, metrics)
     
    -

    Actual ExperimentWriter object. To use ExperimentWriter features anywhere in your code, do the following.

    -

    Example::

    -

    self.logger.experiment.some_experiment_writer_function()

    -

    -

    _ExperimentWriter Objects

    -
    class _ExperimentWriter()
    +

    Tell the result of a task

    +
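A minimal sketch of the resulting ask/tell loop (the sampler construction, the budget variable, and the evaluate function are assumptions):

for _ in range(budget):
    parameters = sampler.next_parameters()    # ask: sample candidate configurations
    metrics = evaluate(parameters)            # user-defined objective evaluation (hypothetical)
    sampler.tell_result(parameters, metrics)  # tell: report results back to the sampler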

    +

    hannah.nas.search.sampler.defined_space_sampler

    +

    +

    hannah.nas.search.search

    +

    +

    hannah.nas.search.presampler.simple

    +

    +

    hannah.nas.search.weight_sharing

    +

    +

    hannah.nas.plotter

    +

    +

    hannah.nas.test.test_operators

    +

    +

    hannah.nas.test.test_lazy_torch

    +

    +

    hannah.nas.test.test_scoping

    +

    +

    hannah.nas.test.test_repeat

    +

    +

    hannah.nas.test.test_parametrize

    +

    +

    hannah.nas.test.test_functional_executor

    +

    +

    hannah.nas.test.test_op_to_torch_conversion

    +

    +

    hannah.nas.test.test_description_ultratrail

    +

    +

    hannah.nas.test.test_conv2d

    +

    +

    hannah.nas.test.test_random_walk_constrainer

    +

    +

    hannah.nas.test.test_conditions

    +

    +

    hannah.nas.test.test_nas_graph_dataset_for_predictor

    +

    +

    hannah.nas.test.test_mobilenet

    +

    +

    hannah.nas.test.network

    +

    +

    hannah.nas.test.test_parameters

    +

    +

    hannah.nas.test.test_parameter_scopes

    +

    +

    hannah.nas.test.test_onnx_export

    +

    +

    hannah.nas.test.test_dfg_removal

    +

    +

    hannah.nas.test.test_z3

    +

    +

    hannah.nas.test.test_functional_ops

    +

    +

    conv_relu

    +
    def conv_relu(input, out_channels, kernel_size, stride)
     
    -

    Experiment writer for CSVLogger.

    -

    Arguments:

    -
      -
    • log_dir - Directory for the experiment logs
    • -
    -

    -

    log_metrics

    -
    def log_metrics(metrics_dict: Dict[str, float],
    -                step: Optional[int] = None) -> None
    +

    Example for a functional block containing conv and relu
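A hedged composition sketch (the input tensor and all argument values are illustrative):

out = conv_relu(input, out_channels=16, kernel_size=3, stride=1)
out = conv_relu(out, out_channels=32, kernel_size=3, stride=2)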

    +

    +

    hannah.nas.test.test_arithmetic

    +

    +

    test_unimplemeted

    +
    @pytest.mark.parametrize(
    +    "x,y",
    +    [
    +        (IntScalarParameter(0, 0), 2),
    +        (IntScalarParameter(0, 0), IntScalarParameter(0, 0)),
    +        (DefaultInt(0), 2),
    +    ],
    +)
    +def test_unimplemeted(x, y)
     
    -

    Record metrics.

    -

    -

    save

    -
    def save() -> None
    +

    Test that unimplemented methods raise unimplemented errors

    +

    +

    hannah.nas.test.test_add

    +

    +

    hannah.nas.test.test_functional_training

    +

    +

    hannah.nas.test.test_dataflow

    +

    +

    hannah.nas.test.test_darts_space

    +

    +

    hannah.nas.test.test_constraint_model

    +

    +

    hannah.nas.test.test_adjacency

    +

    +

    hannah.nas.test.test_graph_transformer

    +

    +

    hannah.nas.graph_conversion

    +

    +

    hannah.nas.config

    +

    +

    Scalar Objects

    +
    @dataclass
    +class Scalar()
     
    -

    Save recorded metrics into files.

    -
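A short sketch tying the writer API above together (the metric name is hypothetical):

writer = logger.experiment                            # the logger's _ExperimentWriter
writer.log_metrics({"val_accuracy": 0.91}, step=100)  # record metrics for a step
writer.save()                                         # flush recorded metrics to disk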

    -

    hannah.utils

    -

    -

    hannah.utils.tuple

    -

    -

    hannah.utils.dvclive

    -

    -

    hannah.utils.imports

    -

    -

    lazy_import

    -
    def lazy_import(module_name, callback=None)
    +

Representation of all the options to define a scalar.

    +

    +

    hannah.nas.constraints.constraint_model

    +

    +

    hannah.nas.constraints.dfg_constraint_model

    +

    +

    ConstraintModel Objects

    +
    class ConstraintModel()
     
    -

Returns a proxy module object that will lazily import the given module the first time it is used.

-

Example usage::

-

# Lazy version of `import tensorflow as tf`
tf = lazy_import("tensorflow")

# Other commands ...

# Now the module is loaded
tf.__version__

    -

    Arguments:

    -
      -
    • module_name - the fully-qualified module name to import
    • -
    • callback None - a callback function to call before importing the - module
    • -
    -

    Returns:

    -

    a proxy module object that will be lazily imported when first used

    -

    -

    LazyModule Objects

    -
    class LazyModule(types.ModuleType)
    +

    +

    process_optype

    +
    def process_optype(op: OpType)
     
    -

    Proxy module that lazily imports the underlying module the first time it is actually used.

    -

    Arguments:

    -
      -
    • module_name - the fully-qualified module name to import
    • -
    • callback None - a callback function to call before importing the - module
    • -
    -

    -

    hannah.utils.utils

    -

    -

    log_execution_env_state

    -
    def log_execution_env_state() -> None
    +

Extracts the constraints based on the type of op. New variables are added to self.vars and the constraints are added to the solver.

    +

    Parameters

    +

    op : OpType

    +

    +

    process_tensor

    +
    def process_tensor(tensor: Tensor)
     
    -

    Log information about the execution environment.

    -

    -

    git_version

    -
    def git_version(short=True)
    +

Goes through all axes and extracts the constraints for the respective axis sizes

    +

    Parameters

    +

    tensor : Tensor

    +

    +

    hannah.nas.constraints.random_walk

    +

    +

    hannah.nas.performance_prediction.protocol

    +

    +

    Predictor Objects

    +
    @runtime_checkable
    +class Predictor(Protocol)
     
    -

    Return the current git sha

    -

    Arguments:

    -
      -
    • short bool - If True, return the short (7 character) version of the SHA
    • -
    -

    Returns:

    -
      -
    • str - The current git SHA
    • -
    -

    -

    extract_from_download_cache

    -
    def extract_from_download_cache(filename,
    -                                url,
    -                                cached_files,
    -                                target_cache,
    -                                target_folder,
    -                                target_test_folder="",
    -                                clear_download=False,
    -                                no_exist_check=False) -> None
    +

    +

    predict

    +
    def predict(model: ClassifierModule,
    +            input: Optional[InputShape] = None) -> Mapping[str, float]
     
    -

Extracts a given file from the cache, downloading it first from the given URL if necessary

    +

Predicts performance metrics of a model.

    +

Performance metrics are returned as a dictionary with the metric name as key and the metric value as a floating point number.

    Arguments:

      -
    • filename str - name of the file to download or extract
    • -
• url str - possible url to download the file
• cached_files list(str) - cached files in download cache
    • -
    • target_cache str - path to the folder to cache file if download necessary
    • -
    • target_folder str - path where to extract file
    • -
    • target_test_folder str, optional - folder to check if data are already there
    • -
    • clear_download bool - clear download after usage
    • -
    • no_exist_check bool - disables the check if folder exists
    • +
    • model ClassifierModule - The model to predict the performance of.
    • +
• input InputShape, optional - Input shape of the input. Defaults to None.
    -
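A usage sketch of the protocol (the predictor instance and the metric keys are assumptions):

metrics = predictor.predict(module)  # module: a trained ClassifierModule
for name, value in metrics.items():  # e.g. {"latency": ..., "power": ...} (hypothetical keys)
    print(f"{name}: {value:.4g}")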

    -

    fullname

    -
    def fullname(o) -> Any
    +

    +

    FitablePredictor Objects

    +
    class FitablePredictor(Predictor)
     
    -

    Get the full classname of an object including surrounding packages/modules/namespaces

    -

    -

    set_deterministic

    -
    @contextmanager
    -def set_deterministic(mode, warn_only=False)
    +

    +

    load

    +
    def load(result_folder: str)
     
    -

    A contextmanager to set deterministic algorithms

    -
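A minimal sketch of the documented context manager:

with set_deterministic(True, warn_only=True):
    ...  # code in this block runs with deterministic algorithms enabled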

    -

    hannah.train

    -

    -

    hannah.trainer

    -

    -

    hannah.trainer.cross_validation

    -

    -

    hannah.normalizer

    -

    -

    FixedPointNormalizer Objects

    -
    class FixedPointNormalizer(nn.Module)
    +

    Load predefined model from a folder.

    +

    Arguments:

    +
      +
    • result_folder str - Path to the folder containing the model or training data to recreate the model.
    • +
    +

    +

    update

    +
    def update(new_data, input=None)
     
    -

    Simple feature normalizer for fixed point models

    -

    -

    AdaptiveFixedPointNormalizer Objects

    -
    class AdaptiveFixedPointNormalizer(nn.Module)
    +

    Update the model with new data.

    +

    +
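A hedged sketch of the load/update workflow (the concrete predictor class, the folder path, and new_data are assumptions):

predictor.load("results/latency_predictor")  # recreate a fitted predictor from a result folder
predictor.update(new_data)                   # refine it with newly measured samples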

    hannah.nas.performance_prediction.mlonmcu.predictor

    +

    +

    hannah.nas.performance_prediction.mlonmcu

    +

    +

    hannah.nas.performance_prediction.features.graph_conversion

    +

    +

    hannah.nas.performance_prediction.features.dataset

    +

    +

    hannah.nas.performance_prediction

    +

    +

    hannah.nas.performance_prediction.gcn.predictor

    +

    +

    Predictor Objects

    +
    class Predictor()
     
    -

    Simple feature normalizer for fixed point models

    -

    -

    hannah.nn.qat

    -

Implementations of torch.nn.intrinsic qat with an optional quantized bias parameter.

    -

Qconfigs can support an optional bias quantization function which should be returned by qconfig.bias(); otherwise biases will be quantized with qconfig.activation()

    -
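A hedged sketch of a qconfig carrying the optional bias quantizer described above (the namedtuple layout and the observer choices are assumptions, not hannah's defaults):

from collections import namedtuple
import torch.quantization as tq

# extend the usual (activation, weight) qconfig pair with a bias entry
QConfigWithBias = namedtuple("QConfigWithBias", ["activation", "weight", "bias"])

qconfig = QConfigWithBias(
    activation=tq.FakeQuantize.with_args(observer=tq.MovingAverageMinMaxObserver),
    weight=tq.FakeQuantize.with_args(observer=tq.MinMaxObserver),
    bias=tq.FakeQuantize.with_args(observer=tq.MinMaxObserver),
)
# qconfig.bias() yields the bias fake-quant module; without a bias entry,
# biases fall back to qconfig.activation() as described above.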

    -

    _ConvBnNd Objects

    -
    class _ConvBnNd(nn.modules.conv._ConvNd, _ConvForwardMixin)
    +

    +

    __init__

    +
    def __init__(fea_name="features") -> None
     
    -

    +

Parent constructor shared by the different predictor classes.

    +

    Parameters

    +

fea_name : str, optional
    internal name for features in the graph, as in graph.ndata[fea_name], by default 'features'

    +

    train

    -
    def train(mode: bool = True) -> Any
    +
    def train(dataloader,
    +          learning_rate=1e-3,
    +          num_epochs=200,
    +          validation_dataloader=None,
    +          verbose=1)
     
    -

Batchnorm's training behavior relies on the self.training flag. Prevent changing it if BN is frozen. This makes sure that calling model.train() on a model with a frozen BN will behave properly.

    -

    -

    from_float

    -
    @classmethod
    -def from_float(cls, mod)
    +

    Train GCN model

    +

    Parameters

    +

dataloader : GraphDataLoader
    training data
learning_rate : float, optional
    by default 1e-3
num_epochs : int, optional
    by default 200
validation_dataloader : GraphDataLoader, optional
    if given, use this data to print validation loss, by default None
verbose : int
    if validation_dataloader is given, print validation MSE every epoch, by default 1

    +

    +

    predict

    +
    def predict(graph)
     
    -

Create a qat module from a float module or qparams_dict.

Args: mod - a float module, either produced by torch.quantization utilities or directly from user

    -

    -

    ConvBn1d Objects

    -
    class ConvBn1d(_ConvBnNd)
    +

    predict cost of graph

    +

    Parameters

    +

    graph : dgl.Graph

    +

    Returns

    +

torch.Tensor
    predicted cost of given graph. Retrieve float value with .item()

    +

    +

    GCNPredictor Objects

    +
    class GCNPredictor(Predictor)
     
    -

A ConvBn1d module is a module fused from Conv1d and BatchNorm1d, attached with FakeQuantize modules for weight, used in quantization aware training. We combined the interface of :class:torch.nn.Conv1d and :class:torch.nn.BatchNorm1d. Similar to :class:torch.nn.Conv1d, with FakeQuantize modules initialized to default.

    -

    Attributes:

    -

• freeze_bn
• weight_fake_quant - fake quant module for weight

    -

    -

    ConvBnReLU1d Objects

    -
    class ConvBnReLU1d(ConvBn1d)
    +

    +

    __init__

    +
    def __init__(input_feature_size,
    +             hidden_units=[128],
    +             readout="mean",
    +             fea_name="features") -> None
     
    -

A ConvBnReLU1d module is a module fused from Conv1d, BatchNorm1d and ReLU, attached with FakeQuantize modules for weight, used in quantization aware training. We combined the interface of :class:torch.nn.Conv1d and :class:torch.nn.BatchNorm1d and :class:torch.nn.ReLU. Similar to torch.nn.Conv1d, with FakeQuantize modules initialized to default.

    -

    Attributes:

    -
      -
    • weight_fake_quant - fake quant module for weight
    • -
    -

    -

    ConvBn2d Objects

    -
    class ConvBn2d(_ConvBnNd)
    +

    G(raph)CN based network latency/cost predictor. End-to-end from graph to score.

    +

    Parameters

    +

input_feature_size : int
    length of feature vector of a graph node (graph G with n nodes, each with features of length m, i.e. feature matrix F = n x m)
hidden_units : int, list, optional
    size of hidden layer (layers if list), by default 128
readout : str, optional
    readout function that is used to aggregate node features, by default 'mean'
fea_name : str, optional
    internal name for features in the graph, as in graph.ndata[fea_name], by default 'features'

    +

    +

    train_and_fit

    +
    def train_and_fit(dataloader,
    +                  learning_rate=1e-3,
    +                  num_epochs=200,
    +                  validation_dataloader=None,
    +                  verbose=0)
     
    -

A ConvBn2d module is a module fused from Conv2d and BatchNorm2d, attached with FakeQuantize modules for weight, used in quantization aware training. We combined the interface of :class:torch.nn.Conv2d and :class:torch.nn.BatchNorm2d. Similar to :class:torch.nn.Conv2d, with FakeQuantize modules initialized to default.

    -

    Attributes:

    -

• freeze_bn
• weight_fake_quant - fake quant module for weight

    -

    -

    ConvBnReLU2d Objects

    -
    class ConvBnReLU2d(ConvBn2d)
    +

    Train GCN model

    +

    Parameters

    +

dataloader : GraphDataLoader
    training data
learning_rate : float, optional
    by default 1e-3
num_epochs : int, optional
    by default 200
validation_dataloader : GraphDataLoader, optional
    if given, use this data to print validation loss, by default None
verbose : int
    if validation_dataloader is given, print validation MSE every epoch, by default 1

    +

    +

    predict

    +
    def predict(graph)
     
    -

A ConvBnReLU2d module is a module fused from Conv2d, BatchNorm2d and ReLU, attached with FakeQuantize modules for weight, used in quantization aware training. We combined the interface of :class:torch.nn.Conv2d and :class:torch.nn.BatchNorm2d and :class:torch.nn.ReLU. Similar to torch.nn.Conv2d, with FakeQuantize modules initialized to default.

    -

    Attributes:

    -
      -
    • weight_fake_quant - fake quant module for weight
    • -
    -

    -

    ConvReLU2d Objects

    -
    class ConvReLU2d(nn.Conv2d, _ConvForwardMixin)
    +

    predict cost of graph

    +

    Parameters

    +

    graph : dgl.Graph

    +

    Returns

    +

torch.Tensor
    predicted cost of given graph. Retrieve float value with .item()

    +
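A hedged end-to-end sketch (the dataset object, feature_size, and graph are assumptions; prepare_dataloader is documented further below):

train_loader, test_loader = prepare_dataloader(dataset, batch_size=50, train_test_split=0.8)
predictor = GCNPredictor(input_feature_size=feature_size)  # feature_size: length of a node feature vector
predictor.train_and_fit(train_loader, num_epochs=200, validation_dataloader=test_loader, verbose=1)
cost = predictor.predict(graph).item()  # graph: a dgl.Graph to score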

    +

    GaussianProcessPredictor Objects

    +
    class GaussianProcessPredictor(Predictor)
    +
    + +

    +

    __init__

    +
    def __init__(input_feature_size,
    +             hidden_units=128,
    +             embedding_size=10,
    +             readout="mean",
    +             fea_name="features",
    +             kernel="default",
    +             alpha=1e-10) -> None
    +
    + +

Predictor that generates a graph embedding that is used as input for a Gaussian process predictor.

    +

    Parameters

    +

input_feature_size : int
    length of feature vector of a graph node (graph G with n nodes, each with features of length m, i.e. feature matrix F = n x m)
hidden_units : int, list, optional
    size of hidden layer (layers if list), by default 128
embedding_size : int, optional
    size of output embedding
readout : str, optional
    readout function that is used to aggregate node features, by default 'mean'
fea_name : str, optional
    internal name for features in the graph, as in graph.ndata[fea_name], by default 'features'
kernel : str, sklearn.gaussian_process.kernels.Kernel, optional
    the Gaussian process kernel to use; input should be either "default" or a sklearn Kernel() object, by default RBF() + DotProduct() + WhiteKernel()

    +

    +

    train_and_fit

    +
    def train_and_fit(dataloader,
    +                  learning_rate=1e-3,
    +                  num_epochs=200,
    +                  validation_dataloader=None,
    +                  verbose=1)
     
    -

A ConvReLU2d module is a fused module of Conv2d and ReLU, attached with FakeQuantize modules for weight for quantization aware training. We combined the interface of :class:torch.nn.Conv2d and :class:torch.nn.BatchNorm2d.

    -

    Attributes:

    -
      -
    • weight_fake_quant - fake quant module for weight
    • -
    -

    -

    ConvReLU1d Objects

    -
    class ConvReLU1d(nn.Conv1d, _ConvForwardMixin)
    +

    Train GCN model, generate embeddings for training data and fit the predictor with embeddings.

    +

    Parameters

    +

dataloader : GraphDataLoader
    training data
learning_rate : float, optional
    by default 1e-3
num_epochs : int, optional
    by default 200
validation_dataloader : GraphDataLoader, optional
    if given, use this data to print validation loss, by default None
verbose : int
    if validation_dataloader is given, print validation MSE every epoch, by default 1

    +

    Returns

    +

float
    score of predictor on TRAINING data, see sklearn doc of chosen predictor for more info

    +

    +

    predict

    +
    def predict(X, return_std=True)
     
    -

A ConvReLU1d module is a fused module of Conv1d and ReLU, attached with FakeQuantize modules for quantization aware training

    -

    -

    Conv1d Objects

    -
    class Conv1d(nn.Conv1d, _ConvForwardMixin)
    +

    Predict cost/latency of graphs.

    +

    Parameters

    +

X : dgl.DGLGraph, list[DGLGraph], dgl.dataloading.GraphDataLoader
    Input graph(s)
return_std : bool, optional
    if true, return standard dev., else just mean prediction, by default True

    +

    Returns

    +

array (, array)
    prediction(s), (if return_std: standard deviation(s))

    +
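A short sketch of the mean/std interface (the fitted predictor and the input loader are assumptions):

mean, std = predictor.predict(test_loader, return_std=True)  # per-graph mean and standard deviation
mean = predictor.predict(test_loader, return_std=False)      # mean predictions only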

    +

    XGBPredictor Objects

    +
    class XGBPredictor(Predictor)
     
    -

A Conv1d module is a Conv1d module, attached with FakeQuantize modules for weight for quantization aware training.

    -

    Attributes:

    -
      -
    • weight_fake_quant - fake quant module for weight
    • -
    • bias_fake_quant - fake quant module for bias
    • -
    • activation_post_process - fake_quant_module for activations
    • -
    -

    -

    Conv2d Objects

    -
    class Conv2d(nn.Conv2d, _ConvForwardMixin)
    +

    +

    __init__

    +
    def __init__(input_feature_size,
    +             hidden_units=128,
    +             embedding_size=10,
    +             readout="mean",
    +             fea_name="features",
    +             xgb_param="default") -> None
     
    -

A Conv2d module is a Conv2d module, attached with FakeQuantize modules for weight for quantization aware training.

    -

    Attributes:

    -
      -
    • weight_fake_quant - fake quant module for weight
    • -
    • bias_fake_quant - fake quant module for bias
    • -
    • activation_post_process - fake_quant_module for activations
    • -
    -

    -

    Linear Objects

    -
    class Linear(nn.Linear)
    +

Predictor that generates a graph embedding that is used as input for an xgb-based predictor.

    +

    Parameters

    +

input_feature_size : int
    length of feature vector of a graph node (graph G with n nodes, each with features of length m, i.e. feature matrix F = n x m)
hidden_units : int, list, optional
    size of hidden layer (layers if list), by default 128
embedding_size : int, optional
    size of output embedding
readout : str, optional
    readout function that is used to aggregate node features, by default 'mean'
fea_name : str, optional
    internal name for features in the graph, as in graph.ndata[fea_name], by default 'features'
xgb_param : str, dict, optional
    the xgb parameters to use, see https://xgboost.readthedocs.io/en/latest/parameter.html

    +

    +

    train_and_fit

    +
    def train_and_fit(dataloader,
    +                  learning_rate=1e-3,
    +                  num_epochs=200,
    +                  num_round=8000,
    +                  validation_dataloader=None,
    +                  verbose=1)
     
    -

A linear module attached with FakeQuantize modules for weight, used for quantization aware training.

    -

We adopt the same interface as torch.nn.Linear, please see https://pytorch.org/docs/stable/nn.html#torch.nn.Linear for documentation.

    -

Similar to torch.nn.Linear, with FakeQuantize modules initialized to default.

    -

    Attributes:

    -
      -
    • weight - fake quant module for weight
    • -
    -

    -

    from_float

    -
    @classmethod
    -def from_float(cls, mod)
    +

    Train GCN model, generate embeddings for training data and fit the predictor with embeddings.

    +

    Parameters

    +

dataloader : GraphDataLoader
    training data
learning_rate : float, optional
    by default 1e-3
num_epochs : int, optional
    training epochs for the GCN embedding network, by default 200
num_round : int, optional
    training rounds for the xgb booster, by default 8000
validation_dataloader : GraphDataLoader, optional
    if given, use this data to print validation loss, by default None
verbose : int
    if validation_dataloader is given, print validation MSE every epoch, by default 1

    +

    +

    predict

    +
    def predict(X)
     
    -

    Create a qat module from a float module or qparams_dict

    -

Args: mod - a float module, either produced by torch.quantization utilities or directly from user

    -

    -

    LinearReLU Objects

    -
    class LinearReLU(nn.Linear)
    +

    Predict cost/latency of graphs.

    +

    Parameters

    +

X : dgl.DGLGraph, list[DGLGraph], dgl.dataloading.GraphDataLoader
    Input graph(s)

Returns

    +
    +

array (, array)
    prediction(s), (if return_std: standard deviation(s))

    +

    +

    prepare_dataloader

    +
    def prepare_dataloader(dataset,
    +                       batch_size=50,
    +                       train_test_split=1,
    +                       subset=0,
    +                       seed=0,
    +                       validation=False)
     
    -

A linear module attached with FakeQuantize modules and ReLU for weight, used for quantization aware training.

    -

We adopt the same interface as torch.nn.Linear, please see https://pytorch.org/docs/stable/nn.html#torch.nn.Linear for documentation.

    -

Similar to torch.nn.Linear, with FakeQuantize modules initialized to default.

    -

    Attributes:

    -
      -
    • weight - fake quant module for weight
    • -
    -

    -

    from_float

    -
    @classmethod
    -def from_float(cls, mod)
    +

    helper function to construct dataloaders from NASGraphDataset

    +

    Parameters

    +

    dataset : NASGraphDataset

    +

batch_size : int, optional
    by default 50
train_test_split : float, optional
    number between 0 and 1, the proportion of the dataset to be used for training, by default 1
subset : int, optional
    choose only this many samples from the dataset; set 0 to disable, i.e. use the whole dataset, by default 0
seed : int, optional
    set seed for reproducibility
validation : bool, optional
    also output a validation set, e.g. for hyperparameter tuning

    +

    Returns

    +

tuple(GraphDataLoader, (GraphDataLoader), GraphDataLoader)
    training dataloader to be used in CostPredictor.train() and test/validation dataloader if train_test_split > 0, else len(test_dataloader) == 0

    +
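A usage sketch (the dataset instance is an assumption):

train_loader, test_loader = prepare_dataloader(dataset, batch_size=32, train_test_split=0.8, seed=0)
train_loader, val_loader, test_loader = prepare_dataloader(dataset, train_test_split=0.8, validation=True)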

    +

    hannah.nas.performance_prediction.gcn.model

    +

    +

    hannah.nas.performance_prediction.examples.gaussian_process_predictor_example

    +

    +

    hannah.nas.performance_prediction.examples.gcn_predictor_example

    +

    +

    hannah.nas.performance_prediction.examples.ri_capsule_performance_predictor

    +

    +

    hannah.nas.performance_prediction.examples.xgb_predictor_example

    +

    +

    hannah.nas.performance_prediction.examples.gcn_model_example

    +

    +

    hannah.nas.performance_prediction.simple

    +

    +

    MACPredictor Objects

    +
    class MACPredictor()
     
    -

    Create a qat module from a float module or qparams_dict

    -

Args: mod - a float module, either produced by torch.quantization utilities or directly from user

    -

    -

    Identity Objects

    -
    class Identity(nn.Identity)
    +

    A predictor class that instantiates the model and calculates abstract metrics

    +

    +

    GCNPredictor Objects

    +
    class GCNPredictor()
     
    -

An identity module attached with FakeQuantize modules for weight, used for quantization aware training.

    -

We adopt the same interface as torch.nn.Identity, please see https://pytorch.org/docs/stable/nn.html#torch.nn.Identity for documentation.

    -

Similar to torch.nn.Identity, with FakeQuantize modules initialized to default.

    -

    -

    from_float

    -
    @classmethod
    -def from_float(cls, mod)
    +

A predictor class that instantiates the model and uses the backend's predict function to predict performance metrics

    +

    +

    hannah.nas.__main__

    +

    +

    hannah.nas.export

    +

    +

    hannah.nas.export.onnx

    +

    +

    eval

    +
    def eval(exp_tree: Any) -> Any
     
    -

    Create a qat module from a float module or qparams_dict

    -

Args: mod - a float module, either produced by torch.quantization utilities or directly from user

    -

    -

    hannah.nn.quantized

    +

    Recursively evaluate expressions organized as a pytree
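A hedged sketch (the parameter object is an assumption; constants pass through unchanged):

from hannah.nas.export.onnx import eval as eval_tree

tree = {"stride": 2, "out_channels": out_channels_param}  # a pytree mixing constants and expressions
concrete = eval_tree(tree)                                # expressions replaced by their current values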