Skip to content

Commit

Permalink
Add hardsigmoid & hardswish
Browse files Browse the repository at this point in the history
  • Loading branch information
elad-c committed Nov 19, 2024
1 parent 3d86447 commit e1c1459
Show file tree
Hide file tree
Showing 4 changed files with 16 additions and 4 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,8 @@
# String keys naming the operator sets ("opsets") of this target-platform model.
# Each key is passed to tp.OperatorsSet(...) when the TP model is generated, and
# is later used by the framework-specific TPC files (Keras / PyTorch) to attach
# concrete layer types to the corresponding set via tp.OperationsSetToLayers.
OPSET_SIGMOID = "Sigmoid"
OPSET_TANH = "Tanh"
OPSET_GELU = "Gelu"
OPSET_HARDSIGMOID = "HardSigmoid"
# NOTE(review): HardSwish is mapped to layers only in the PyTorch TPC in this
# change; the Keras TPC maps HardSigmoid alone — confirm that is intentional.
OPSET_HARDSWISH = "HardSwish"


def get_tp_model() -> TargetPlatformModel:
Expand Down Expand Up @@ -278,12 +280,16 @@ def generate_tp_model(default_config: OpQuantizationConfig,
sigmoid = tp.OperatorsSet(OPSET_SIGMOID, default_config_options_16bit)
tanh = tp.OperatorsSet(OPSET_TANH, default_config_options_16bit)
gelu = tp.OperatorsSet(OPSET_GELU, default_config_options_16bit)
hardsigmoid = tp.OperatorsSet(OPSET_HARDSIGMOID, default_config_options_16bit)
hardswish = tp.OperatorsSet(OPSET_HARDSWISH, default_config_options_16bit)

# Combine multiple operators into a single operator to avoid quantization between
# them. To do this we define fusing patterns using the OperatorsSets that were created.
# To group multiple sets with regard to fusing, an OperatorSetConcat can be created
activations_after_conv_to_fuse = tp.OperatorSetConcat(any_relu, swish, prelu, sigmoid, tanh, gelu)
activations_after_fc_to_fuse = tp.OperatorSetConcat(any_relu, swish, sigmoid, tanh, gelu)
activations_after_conv_to_fuse = tp.OperatorSetConcat(any_relu, swish, prelu, sigmoid,
tanh, gelu, hardswish, hardsigmoid)
activations_after_fc_to_fuse = tp.OperatorSetConcat(any_relu, swish, sigmoid, tanh, gelu,
hardswish, hardsigmoid)
any_binary = tp.OperatorSetConcat(add, sub, mul, div)

# ------------------- #
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@
from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v4.tp_model import OPSET_NO_QUANTIZATION, \
OPSET_QUANTIZATION_PRESERVING, OPSET_DIMENSION_MANIPULATION_OPS_WITH_WEIGHTS, OPSET_DIMENSION_MANIPULATION_OPS, \
OPSET_MERGE_OPS, OPSET_CONV, OPSET_FULLY_CONNECTED, OPSET_ANY_RELU, OPSET_ADD, OPSET_SUB, OPSET_MUL, OPSET_DIV, \
OPSET_PRELU, OPSET_SWISH, OPSET_SIGMOID, OPSET_TANH, OPSET_GELU, OPSET_BATCH_NORM, OPSET_MIN_MAX
OPSET_PRELU, OPSET_SWISH, OPSET_SIGMOID, OPSET_TANH, OPSET_GELU, OPSET_BATCH_NORM, OPSET_MIN_MAX, OPSET_HARDSIGMOID

tp = mct.target_platform

Expand Down Expand Up @@ -135,5 +135,7 @@ def generate_keras_tpc(name: str, tp_model: tp.TargetPlatformModel):
tp.OperationsSetToLayers(OPSET_SIGMOID, [tf.nn.sigmoid, tp.LayerFilterParams(Activation, activation="sigmoid")])
tp.OperationsSetToLayers(OPSET_TANH, [tf.nn.tanh, tp.LayerFilterParams(Activation, activation="tanh")])
tp.OperationsSetToLayers(OPSET_GELU, [tf.nn.gelu, tp.LayerFilterParams(Activation, activation="gelu")])
tp.OperationsSetToLayers(OPSET_HARDSIGMOID, [tf.keras.activations.hard_sigmoid,
tp.LayerFilterParams(Activation, activation="hard_sigmoid")])

return keras_tpc
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,8 @@
from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v4.tp_model import OPSET_NO_QUANTIZATION, \
OPSET_QUANTIZATION_PRESERVING, OPSET_DIMENSION_MANIPULATION_OPS_WITH_WEIGHTS, OPSET_DIMENSION_MANIPULATION_OPS, \
OPSET_MERGE_OPS, OPSET_CONV, OPSET_FULLY_CONNECTED, OPSET_ANY_RELU, OPSET_ADD, OPSET_SUB, OPSET_MUL, OPSET_DIV, \
OPSET_PRELU, OPSET_SWISH, OPSET_SIGMOID, OPSET_TANH, OPSET_GELU, OPSET_BATCH_NORM, OPSET_MIN_MAX
OPSET_PRELU, OPSET_SWISH, OPSET_SIGMOID, OPSET_TANH, OPSET_GELU, OPSET_BATCH_NORM, OPSET_MIN_MAX, OPSET_HARDSIGMOID, \
OPSET_HARDSWISH

tp = mct.target_platform

Expand Down Expand Up @@ -118,5 +119,7 @@ def generate_pytorch_tpc(name: str, tp_model: tp.TargetPlatformModel):
tp.OperationsSetToLayers(OPSET_SIGMOID, [Sigmoid, sigmoid, F.sigmoid])
tp.OperationsSetToLayers(OPSET_TANH, [Tanh, tanh, F.tanh])
tp.OperationsSetToLayers(OPSET_GELU, [GELU, gelu])
tp.OperationsSetToLayers(OPSET_HARDSIGMOID, [torch.nn.Hardsigmoid, torch.nn.functional.hardsigmoid])
tp.OperationsSetToLayers(OPSET_HARDSWISH, [torch.nn.Hardswish, torch.nn.functional.hardswish])

return pytorch_tpc
1 change: 1 addition & 0 deletions tests/keras_tests/layer_tests/test_layers_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ def test_activation(self):
BaseKerasLayerTest(self,
[Activation('linear'),
Activation('hard_sigmoid'),
tf.keras.activations.hard_sigmoid,
Activation('exponential')]).run_test()

def test_softplus(self):
Expand Down

0 comments on commit e1c1459

Please sign in to comment.