hannah.conf.optimizer
SGDConf Objects

@dataclass
class SGDConf()

lr
_RequiredParameter

MADGRADConf Objects

@dataclass
class MADGRADConf()

lr
_RequiredParameter

hannah.conf

hannah.conf.scheduler

OneCycleLRConf Objects

@dataclass
class OneCycleLRConf()

Config for one-cycle LR; total steps are configured from the module.

hannah.conf.nas

hannah.logo

hannah.test_linear_classifier

hannah.tools.objectdetection_eval

eval_train
def eval_train(config, module, test=True)

Arguments:

- config:
- module:
- test: (Default value = True)

eval_steps
def eval_steps(config, module, hparams, checkpoint)

Arguments:

- config:
- module:
- hparams:
- checkpoint:

eval_checkpoint
def eval_checkpoint(config: DictConfig, checkpoint)

Arguments:

- config (DictConfig):
- checkpoint:

eval
def eval(config: DictConfig)

Arguments:

- config (DictConfig):

main
@hydra.main(config_name="objectdetection_eval", config_path="../conf", version_base="1.2")
def main(config: DictConfig)

Arguments:

- config (DictConfig):

hannah.tools.characterize

main
@hydra.main(config_name="characterize", config_path="../conf", version_base="1.2")
def main(config: DictConfig)

Arguments:

- config (DictConfig):

hannah.tools.train

hannah.tools

hannah.tools.eval

eval_checkpoint
def eval_checkpoint(config: DictConfig, checkpoint) -> None

Arguments:

- config (DictConfig):
- checkpoint:

eval
def eval(config: DictConfig) -> Optional[bool]

Arguments:

- config (DictConfig):

main
@hydra.main(config_name="eval", config_path="conf", version_base="1.2")
def main(config: DictConfig)

Arguments:

- config (DictConfig):

hannah.utils.utils

log_execution_env_state
def log_execution_env_state() -> None

Log information about the execution environment.

git_version
def git_version(short=True)

Return the current git SHA.

Arguments:

- short (bool): If True, return the short (7 character) version of the SHA

Returns:

- str: The current git SHA
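
For reference, a helper with this signature is typically a thin wrapper around git rev-parse; a minimal sketch (an illustration, not necessarily the implementation used here):

    import subprocess

    def git_version_sketch(short: bool = True) -> str:
        # "--short" asks git for the abbreviated (usually 7 character) SHA
        cmd = ["git", "rev-parse", "HEAD"]
        if short:
            cmd.insert(2, "--short")
        return subprocess.check_output(cmd, text=True).strip()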

extract_from_download_cache
def extract_from_download_cache(filename,
                                url,
                                cached_files,
                                target_cache,
                                target_folder,
                                target_test_folder="",
                                clear_download=False,
                                no_exist_check=False) -> None

Extracts the given file from the download cache, downloading it first from the url if necessary.

Arguments:

- filename (str): name of the file to download or extract
- url (str): possible url to download the file
- cached_files (list[str]): cached files in the download cache
- target_cache (str): path to the folder in which to cache the file if a download is necessary
- target_folder (str): path where the file is extracted to
- target_test_folder (str, optional): folder to check whether the data is already there
- clear_download (bool): clear the download after usage
- no_exist_check (bool): disables the check whether the folder exists
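
A typical call, with the file name, url and paths chosen purely for illustration:

    import os

    download_cache = os.path.expanduser("~/.cache/hannah_downloads")  # illustrative path
    data_root = "datasets"  # illustrative path
    os.makedirs(download_cache, exist_ok=True)

    extract_from_download_cache(
        filename="dataset_v1.tar.gz",                 # hypothetical file
        url="https://example.org/dataset_v1.tar.gz",  # hypothetical url
        cached_files=os.listdir(download_cache),
        target_cache=download_cache,
        target_folder=os.path.join(data_root, "dataset_v1"),
    )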

fullname
def fullname(o) -> Any

Get the full class name of an object, including surrounding packages/modules/namespaces.
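
A function with this behavior can be sketched in a few lines (an assumption about the mechanics, not a copy of the implementation):

    def fullname_sketch(o) -> str:
        # e.g. an instance of omegaconf's DictConfig yields
        # "omegaconf.dictconfig.DictConfig"
        klass = o if isinstance(o, type) else type(o)
        return f"{klass.__module__}.{klass.__qualname__}"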

set_deterministic
@contextmanager
def set_deterministic(mode, warn_only=False)

A context manager to enable deterministic algorithms for the duration of the block.
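
Assuming the context manager toggles torch.use_deterministic_algorithms for the duration of the block, usage looks like:

    import torch

    with set_deterministic(True, warn_only=True):
        # ops without a deterministic implementation now warn instead of raising
        y = torch.randn(4, 4).cumsum(0)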

hannah.utils

hannah.utils.imports

lazy_import
def lazy_import(module_name, callback=None)

Returns a proxy module object that will lazily import the given module the first time it is used.

Example usage:

    # Lazy version of `import tensorflow as tf`
    tf = lazy_import("tensorflow")
    # ... other commands ...
    # Now the module is loaded
    tf.__version__

Arguments:

- module_name: the fully-qualified module name to import
- callback (None): a callback function to call before importing the module

Returns:

a proxy module object that will be lazily imported when first used

LazyModule Objects
class LazyModule(types.ModuleType)

Proxy module that lazily imports the underlying module the first time it is actually used.

Arguments:

- module_name: the fully-qualified module name to import
- callback (None): a callback function to call before importing the module

hannah.utils.tuple

hannah.utils.logger

JSONLogger Objects
class JSONLogger(Logger)

name
@property
def name() -> str

Gets the name of the experiment.

Returns:

The name of the experiment.

version
@property
def version() -> Union[int, str]

Gets the version of the experiment.

Returns:

The version of the experiment if it is specified, else the next version.

root_dir
@property
def root_dir() -> str

Gets the save directory where the versioned JSON experiments are saved.

log_dir
@property
def log_dir() -> str

The log directory for this run.
By default, it is named 'version_${self.version}', but it can be overridden by passing a string value for the constructor's version parameter instead of None or an int.

experiment
@property
@rank_zero_experiment
def experiment() -> "_ExperimentWriter"

Actual ExperimentWriter object. To use ExperimentWriter features anywhere in your code, do the following:

    self.logger.experiment.some_experiment_writer_function()

_ExperimentWriter Objects
class _ExperimentWriter()

Experiment writer for CSVLogger.

Arguments:

- log_dir: Directory for the experiment logs

log_metrics
def log_metrics(metrics_dict: Dict[str, float], step: Optional[int] = None) -> None

Record metrics.

save
def save() -> None

Save recorded metrics into files.

hannah.utils.dvclive

hannah.modules.metrics

Error Objects
class Error()

Computes Error = 1 - Accuracy:

    \text{Error} = 1 - \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)

where y is a tensor of target values and \hat{y} is a tensor of predictions.
This module is a simple wrapper that selects the task-specific version of this metric, which is done by setting the task argument to either 'binary', 'multiclass' or 'multilabel'. See the documentation of BinaryError, MulticlassError and MultilabelError for the specific details of each argument's influence, and for examples.
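
If the wrapper mirrors the torchmetrics task-based interface (an assumption based on the description above), usage would look like:

    import torch

    error = Error(task="multiclass", num_classes=3)
    preds = torch.tensor([0, 2, 1, 2])
    target = torch.tensor([0, 1, 1, 2])
    print(error(preds, target))  # 3 of 4 correct -> error = 0.25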

plot_confusion_matrix
def plot_confusion_matrix(cf,
                          group_names=None,
                          categories="auto",
                          count=True,
                          percent=True,
                          cbar=True,
                          xyticks=True,
                          xyplotlabels=True,
                          sum_stats=True,
                          figsize=None,
                          cmap="Blues",
                          title=None)

This function makes a pretty plot of an sklearn confusion matrix cf using a Seaborn heatmap visualization.

Arguments:

- cf: confusion matrix to be passed in
- group_names: List of strings that represent the labels, row by row, to be shown in each square.
- categories: List of strings containing the categories to be displayed on the x and y axes. Default is 'auto'.
- count: If True, show the raw number in the confusion matrix. Default is True.
- percent: If True, show the proportions for each category. Default is True.
- cbar: If True, show the color bar. The cbar values are based on the values in the confusion matrix. Default is True.
- xyticks: If True, show x and y ticks. Default is True.
- xyplotlabels: If True, show 'True Label' and 'Predicted Label' on the figure. Default is True.
- sum_stats: If True, display summary statistics below the figure. Default is True.
- figsize: Tuple representing the figure size. Default is the matplotlib rcParams value.
- cmap: Colormap of the values displayed from matplotlib.pyplot.cm. Default is 'Blues'. See http://matplotlib.org/examples/color/colormaps_reference.html
- title: Title for the heatmap. Default is None.
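
A minimal usage sketch, pairing it with sklearn's confusion_matrix (class names are illustrative):

    from sklearn.metrics import confusion_matrix

    y_true = [0, 0, 1, 1, 2, 2]
    y_pred = [0, 1, 1, 1, 2, 0]
    cf = confusion_matrix(y_true, y_pred)
    plot_confusion_matrix(cf,
                          categories=["yes", "no", "silence"],  # illustrative class names
                          title="Confusion matrix")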

hannah.modules.classifier

hannah.modules

hannah.modules.augmentation.bordersearch

hannah.modules.augmentation

hannah.modules.augmentation.transforms.kornia_transforms

hannah.modules.augmentation.transforms

hannah.modules.augmentation.transforms.registry

hannah.modules.augmentation.batch_augmentation

BatchAugmentationPipeline Objects
class BatchAugmentationPipeline(nn.Module)

__init__
def __init__(transforms={})

Augmentation pipeline, especially for self-supervised learning.

Arguments:

- replica (int): number of replicated different random augmentations
- transforms (dict): configuration of transforms

forward
@torch.no_grad()
def forward(x) -> torch.Tensor

Perform augmentations.

Arguments:

- x (torch.Tensor): a torch.Tensor to run through the augmentation pipeline

Returns:

Tuple[torch.Tensor, torch.Tensor]: batch augmented with replica different random augmentations
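
A usage sketch, assuming each entry of the transforms dict is an nn.Module applied to the batch (the transform names and modules below are illustrative only):

    import torch
    import torch.nn as nn

    pipeline = BatchAugmentationPipeline(
        transforms={
            "noise": nn.Dropout(p=0.1),  # stand-in for a real augmentation
            "identity": nn.Identity(),
        }
    )
    batch = torch.randn(8, 1, 16000)  # hypothetical audio batch
    augmented = pipeline(batch)       # forward runs under torch.no_grad()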

hannah.modules.augmentation.augmentation

hannah.modules.config_utils

dump_config
def dump_config(output_dir, config)

Dumps the configuration to json format.
Creates the file config.json in output_dir.

Parameters

output_dir : str
    Output directory
config : dict
    Configuration to dump

save_model
def save_model(output_dir, model)

Creates a serialization of the model for later inference or evaluation.
Creates the following files:

- model.pt: serialized version of the network parameters in pytorch format
- model.json: serialized version of the network parameters in json format
- model.onnx: full model including parameters in onnx format

Parameters

output_dir : str
    Directory to put serialized models
model : LightningModule
    Model to serialize
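
Taken together, a post-training export step might look like this; output_dir is illustrative, and config and module are assumed to come from a training run (not shown):

    output_dir = "trained_models/example_run"  # illustrative path
    dump_config(output_dir, config)   # writes config.json
    save_model(output_dir, module)    # writes model.pt, model.json, model.onnx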

hannah.modules.base

ClassifierModule Objects
class ClassifierModule(LightningModule, ABC)

total_training_steps
def total_training_steps() -> int

Total training steps inferred from datamodule and devices.

hannah.modules.angle_classifier

hannah.modules.object_detection

hannah.modules.vision.anomaly_detection

AnomalyDetectionModule Objects
class AnomalyDetectionModule(VisionBaseModule)

on_test_end
def on_test_end()

    wd_dir = os.getcwd()
    score, largest_train_error = self.compute_anomaly_score()
    train_errors = self.normalized_train_errors
    plt.hist(train_errors.detach().cpu().numpy(), bins=100)
    plt.axvline(score, linestyle="dashed")
    plt.title("Normalized train reconstruction errors")
    plt.savefig(wd_dir + "/normalized_train_errors.png")
    test = (
        torch.tensor(self.test_losses, device=self.device)
        / torch.max(torch.stack(self.train_losses), dim=0).values
    )
    plt.hist(test.detach().cpu().numpy(), bins=100)
    plt.title("Normalized test reconstruction errors")
    plt.savefig(wd_dir + "/normalized_test_errors.png")
    print("Anomaly score", score)
    print(
        "Largest train error",
        torch.max(torch.stack(self.train_losses), dim=0).values,
    )

hannah.modules.vision.anomaly_score

    class AnomalyScore(CatMetric):
        def __init__(self, percentile, nan_strategy="warn", **kwargs):
            super().__init__(nan_strategy=nan_strategy, **kwargs)
            self.percentile = percentile

        def compute(self):
            anomaly_score = None
            train_losses = super().compute()
            if train_losses:
                normalized_train_errors = torch.stack(train_losses) / (
                    torch.max(torch.stack(train_losses), dim=0).values
                )
                anomaly_score = np.percentile(
                    normalized_train_errors.cpu().numpy(), self.percentile
                )
            return anomaly_score
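
Since torchmetrics' CatMetric collects values via update() and reduces them in compute(), usage presumably looks like this (the percentile and losses are illustrative):

    import torch

    score_metric = AnomalyScore(percentile=95)
    for loss in (0.10, 0.20, 0.15, 0.90):
        score_metric.update(torch.tensor(loss))
    threshold = score_metric.compute()  # 95th percentile of normalized errors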

hannah.modules.vision

hannah.modules.vision.loss

hannah.modules.vision.base

hannah.modules.vision.image_classifier

hannah.visualization

hannah.train

hannah.trainer.cross_validation

hannah.trainer

hannah.nn.quantized

hannah.nn.qat
Implementations of torch.nn.intrinsics qat with an optional quantize bias parameter.
Qconfigs can support an optional bias quantization function, which should be returned by qconfig.bias(); otherwise biases will be quantized with qconfig.activation().
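
A sketch of what such an extended qconfig could look like; the namedtuple layout below is an assumption for illustration, not the library's QConfig type:

    from collections import namedtuple

    import torch.quantization as tq

    # qconfig.bias() is consulted for biases when present,
    # per the contract described above
    QConfigWithBias = namedtuple("QConfigWithBias", ["activation", "weight", "bias"])

    qconfig = QConfigWithBias(
        activation=tq.default_fake_quant,
        weight=tq.default_weight_fake_quant,
        bias=tq.default_fake_quant,
    )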

_ConvBnNd Objects
class _ConvBnNd(nn.modules.conv._ConvNd, _ConvForwardMixin)

train
def train(mode: bool = True) -> Any

Batchnorm's training behavior depends on the self.training flag. Prevent changing it if BN is frozen; this makes sure that calling model.train() on a model with a frozen BN will behave properly.

from_float
@classmethod
def from_float(cls, mod)

Create a qat module from a float module or qparams_dict.

Arguments:

- mod: a float module, either produced by torch.quantization utilities or directly from the user

ConvBn1d Objects
class ConvBn1d(_ConvBnNd)

A ConvBn1d module is a module fused from Conv1d and BatchNorm1d, attached with FakeQuantize modules for weight, used in quantization aware training.
We combined the interface of torch.nn.Conv1d and torch.nn.BatchNorm1d.
Similar to torch.nn.Conv1d, with FakeQuantize modules initialized to default.

Attributes:

- freeze_bn:
- weight_fake_quant: fake quant module for weight

ConvBnReLU1d Objects
class ConvBnReLU1d(ConvBn1d)

A ConvBnReLU1d module is a module fused from Conv1d, BatchNorm1d and ReLU, attached with FakeQuantize modules for weight, used in quantization aware training.
We combined the interface of torch.nn.Conv1d and torch.nn.BatchNorm1d and torch.nn.ReLU.
Similar to torch.nn.Conv1d, with FakeQuantize modules initialized to default.

Attributes:

- weight_fake_quant: fake quant module for weight

ConvBn2d Objects
class ConvBn2d(_ConvBnNd)

A ConvBn2d module is a module fused from Conv2d and BatchNorm2d, attached with FakeQuantize modules for weight, used in quantization aware training.
We combined the interface of torch.nn.Conv2d and torch.nn.BatchNorm2d.
Similar to torch.nn.Conv2d, with FakeQuantize modules initialized to default.

Attributes:

- freeze_bn:
- weight_fake_quant: fake quant module for weight

ConvBnReLU2d Objects
class ConvBnReLU2d(ConvBn2d)

A ConvBnReLU2d module is a module fused from Conv2d, BatchNorm2d and ReLU, attached with FakeQuantize modules for weight, used in quantization aware training.
We combined the interface of torch.nn.Conv2d and torch.nn.BatchNorm2d and torch.nn.ReLU.
Similar to torch.nn.Conv2d, with FakeQuantize modules initialized to default.

Attributes:

- weight_fake_quant: fake quant module for weight

ConvReLU2d Objects
class ConvReLU2d(nn.Conv2d, _ConvForwardMixin)

A ConvReLU2d module is a fused module of Conv2d and ReLU, attached with FakeQuantize modules for weight, for quantization aware training.
We combined the interface of torch.nn.Conv2d and torch.nn.BatchNorm2d.

Attributes:

- weight_fake_quant: fake quant module for weight

ConvReLU1d Objects
class ConvReLU1d(nn.Conv1d, _ConvForwardMixin)

A ConvReLU1d module is a fused module of Conv1d and ReLU, attached with FakeQuantize modules for quantization aware training.

Conv1d Objects
class Conv1d(nn.Conv1d, _ConvForwardMixin)

A Conv1d module attached with FakeQuantize modules for weight, for quantization aware training.

Attributes:

- weight_fake_quant: fake quant module for weight
- bias_fake_quant: fake quant module for bias
- activation_post_process: fake quant module for activations

Conv2d Objects
class Conv2d(nn.Conv2d, _ConvForwardMixin)

A Conv2d module attached with FakeQuantize modules for weight, for quantization aware training.

Attributes:

- weight_fake_quant: fake quant module for weight
- bias_fake_quant: fake quant module for bias
- activation_post_process: fake quant module for activations

Linear Objects
class Linear(nn.Linear)

A linear module attached with FakeQuantize modules for weight, used for quantization aware training.
We adopt the same interface as torch.nn.Linear; please see https://pytorch.org/docs/stable/nn.html#torch.nn.Linear for documentation.
Similar to torch.nn.Linear, with FakeQuantize modules initialized to default.

Attributes:

- weight: fake quant module for weight

from_float
@classmethod
def from_float(cls, mod)

Create a qat module from a float module or qparams_dict.

Arguments:

- mod: a float module, either produced by torch.quantization utilities or directly from the user

LinearReLU Objects
class LinearReLU(nn.Linear)

A linear module attached with FakeQuantize modules and ReLU for weight, used for quantization aware training.
We adopt the same interface as torch.nn.Linear; please see https://pytorch.org/docs/stable/nn.html#torch.nn.Linear for documentation.
Similar to torch.nn.Linear, with FakeQuantize modules initialized to default.

Attributes:

- weight: fake quant module for weight

from_float
@classmethod
def from_float(cls, mod)

Create a qat module from a float module or qparams_dict.

Arguments:

- mod: a float module, either produced by torch.quantization utilities or directly from the user

Identity Objects
class Identity(nn.Identity)

An identity module attached with FakeQuantize modules for weight, used for quantization aware training.
We adopt the same interface as torch.nn.Identity; please see https://pytorch.org/docs/stable/nn.html#torch.nn.Identity for documentation.
Similar to torch.nn.Identity, with FakeQuantize modules initialized to default.

from_float
@classmethod
def from_float(cls, mod)

Create a qat module from a float module or qparams_dict.

Arguments:

- mod: a float module, either produced by torch.quantization utilities or directly from the user

hannah.ssl.hard_labeling

HardLabeling Objects
class HardLabeling()

training_step
def training_step(unlabeled_data: torch.Tensor,
                  trainer: pl.Trainer,
                  pl_module: pl.LightningModule,
                  batch_idx: int = -1) -> torch.Tensor

Calculate pseudo label loss from unlabeled data.

get_dropout_layers
def get_dropout_layers()

Returns all model layers of class dropout or dropblock.

compute_loss
def compute_loss(inputs, logits, targets, loss_fn=None)

Helper function to compute the loss, possibly with consistency regularization by augmentations (FixMatch).

negative_cre_loss
def negative_cre_loss(logits, targets)

Cross entropy loss for negative learning, which requires a multi-class and multi-label loss function.
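
One common formulation of such a negative-learning loss (a sketch of the general technique, not necessarily this implementation) pushes probability mass away from complementary labels:

    import torch

    def negative_cre_loss_sketch(logits: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
        # targets: multi-hot mask of complementary labels
        # ("this sample is NOT class k")
        probs = torch.softmax(logits, dim=-1)
        return -(targets * torch.log(1.0 - probs + 1e-7)).sum(dim=-1).mean()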

hannah.ssl

hannah.models.convnet

hannah.models.convnet.models

padding_expression
def padding_expression(kernel_size, stride, dilation=1)

Symbolically calculate the padding such that, for a given kernel_size, stride and dilation, the output dimension is kept the same (stride=1) or halved (stride=2).
Note: if the input dimension is 1 and stride = 2, the calculated padding will result in an output that also has dimension 1.

Parameters

kernel_size : Union[int, Expression]
stride : Union[int, Expression]
dilation : Union[int, Expression], optional
    by default 1

Returns

Expression

hannah.models.functional_net_test.expressions

padding_expression
def padding_expression(kernel_size, stride, dilation=1)

Symbolically calculate the padding such that, for a given kernel_size, stride and dilation, the output dimension is kept the same (stride=1) or halved (stride=2).
Note: if the input dimension is 1 and stride = 2, the calculated padding will result in an output that also has dimension 1.

Parameters

kernel_size : Union[int, Expression]
stride : Union[int, Expression]
dilation : Union[int, Expression], optional
    by default 1

Returns

Expression
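
With plain integers, the rule reduces to the familiar "same" padding formula; a quick sanity check (an integer specialization of the symbolic version above):

    def padding_int(kernel_size: int, stride: int, dilation: int = 1) -> int:
        # keeps the output dimension for stride 1, halves it for stride 2
        return (dilation * (kernel_size - 1)) // 2

    assert padding_int(3, 1) == 1     # 3-tap kernel, stride 1 -> pad 1
    assert padding_int(5, 2) == 2     # 5-tap kernel, stride 2 -> pad 2
    assert padding_int(3, 1, 2) == 2  # dilation widens the effective kernel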

hannah.models.functional_net_test.models

hannah.models.ai8x

hannah.models.ai8x.models
A search space based on the cifar 10 NASNet search space for ai85x devices from: htt

hannah.models.mobilenet.operators

hannah.models.mobilenet.expressions

padding_expression
def padding_expression(kernel_size, stride, dilation=1)

Symbolically calculate the padding such that, for a given kernel_size, stride and dilation, the output dimension is kept the same (stride=1) or halved (stride=2).
Note: if the input dimension is 1 and stride = 2, the calculated padding will result in an output that also has dimension 1.

Parameters

kernel_size : Union[int, Expression]
stride : Union[int, Expression]
dilation : Union[int, Expression], optional
    by default 1

Returns

Expression

hannah.models.mobilenet.models

hannah.models.ekut

hannah.models.ekut.models

conv_bn
def conv_bn(inp, oup, stride)

Arguments:

- inp:
- oup:
- stride:

conv_1x1_bn
def conv_1x1_bn(inp, oup)

Arguments:

- inp:
- oup:

InvertedResidual Objects
class InvertedResidual(nn.Module)

forward
def forward(x)

Arguments:

- x:

RawSpeechModel Objects
class RawSpeechModel(nn.Module)

Speech recognition on raw data using Wolfgang Fuhl's networks.

forward
def forward(x)

Arguments:

- x:

RawSpeechModelInvertedResidual Objects
class RawSpeechModelInvertedResidual(nn.Module)

forward
def forward(x)

Arguments:

- x:

hannah.models.utils

next_power_of2
def next_power_of2(x)

Arguments:

- x:
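
Presumably the usual bit-length trick; a minimal sketch of such a helper:

    def next_power_of2_sketch(x: int) -> int:
        # smallest power of two >= x, e.g. 5 -> 8, 8 -> 8
        return 1 if x <= 1 else 1 << (x - 1).bit_length()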

hannah.models

hannah.models.factory.quantized
Import from new location for backwards compatibility.

hannah.models.factory

hannah.models.factory.network

ConvNet Objects
class ConvNet(nn.Module)

forward
def forward(x)

Arguments:

- x:

hannah.models.factory.reduction

ReductionBlockAdd Objects
class ReductionBlockAdd(nn.Module)

Reduction block that sums over its inputs.

forward
def forward(x: Tensor) -> Tensor

Arguments:

- x (Tensor):

ReductionBlockConcat Objects
class ReductionBlockConcat(nn.Module)

Reduction block that concatenates its inputs.

forward
def forward(x: Tensor) -> Tensor

Arguments:

- x (Tensor):

hannah.models.factory.qat
Import from new location for backwards compatibility.

hannah.models.factory.qconfig
Import from new location for backwards compatibility.

hannah.models.factory.act

DummyActivation Objects
class DummyActivation(nn.Identity)

Dummy class that is instantiated to mark a missing activation.
This can be used to mark requantization of activations for convolutional layers without activation functions.

hannah.models.factory.factory
A neural network model factory.
It allows us to construct quantized and unquantized versions of the same network and to explore implementation alternatives using a common neural network construction interface.

NormConfig Objects
@dataclass
class NormConfig()

BNConfig Objects
@dataclass
class BNConfig(NormConfig)

ActConfig Objects
@dataclass
class ActConfig()

ELUConfig Objects
@dataclass
class ELUConfig(ActConfig)

HardtanhConfig Objects
@dataclass
class HardtanhConfig(ActConfig)

MinorBlockConfig Objects
@dataclass
class MinorBlockConfig()

target
target operation

parallel
execute block in parallel with preceding block

out_channels
number of output channels

kernel_size
kernel size of this operation (if applicable)

stride
stride to use for this operation

padding
use padding for this operation (padding will always try to keep input dimensions / stride)

dilation
dilation factor to use for this operation

groups
number of groups for this operation

norm
normalization to use (true uses the network's default configs)

act
activation to use (true uses the network's default configs)

upsampling
upsampling factor for mbconv layers

bias
use bias for this operation

out_quant
use output quantization for this operation
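
An illustrative instantiation of the dataclass; the field values below are assumptions chosen to show the shape of a configuration, not a tested setup:

    block = MinorBlockConfig(
        target="conv1d",   # hypothetical target operation
        parallel=False,
        out_channels=64,
        kernel_size=9,
        stride=2,
        padding=True,
        dilation=1,
        groups=1,
        norm=True,         # use the network's default normalization
        act=True,          # use the network's default activation
        bias=False,
    )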

MajorBlockConfig Objects
@dataclass
class MajorBlockConfig()