From d3a34fbc5e3c1ada92216d530bd0a03fcd5d2ba1 Mon Sep 17 00:00:00 2001
From: Reuven <44209964+reuvenperetz@users.noreply.github.com>
Date: Thu, 5 Dec 2024 14:59:00 +0200
Subject: [PATCH] Update quantization_facade.py

Enable SLA and gradual activation quantization by default in Keras.

---
 model_compression_toolkit/gptq/keras/quantization_facade.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/model_compression_toolkit/gptq/keras/quantization_facade.py b/model_compression_toolkit/gptq/keras/quantization_facade.py
index 7a44ff78f..9c723ae8a 100644
--- a/model_compression_toolkit/gptq/keras/quantization_facade.py
+++ b/model_compression_toolkit/gptq/keras/quantization_facade.py
@@ -66,8 +66,8 @@ def get_keras_gptq_config(n_epochs: int,
                            use_hessian_based_weights: bool = True,
                            regularization_factor: float = None,
                            hessian_batch_size: int = ACT_HESSIAN_DEFAULT_BATCH_SIZE,
-                           use_hessian_sample_attention: bool = False,
-                           gradual_activation_quantization: Union[bool, GradualActivationQuantizationConfig] = False) -> GradientPTQConfig:
+                           use_hessian_sample_attention: bool = True,
+                           gradual_activation_quantization: Union[bool, GradualActivationQuantizationConfig] = True) -> GradientPTQConfig:
     """
     Create a GradientPTQConfig instance for Keras models.
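
Note (not part of the patch): a minimal sketch of how the new defaults surface to callers of get_keras_gptq_config. It assumes the package is importable as mct with the config factory exposed under mct.gptq, and the n_epochs value is arbitrary; only the two parameters changed by this patch are taken from the diff itself.

    # Sketch only: illustrates the effect of the new defaults in this patch.
    # Assumes `model_compression_toolkit` is importable as `mct` and that
    # `get_keras_gptq_config` is exposed under `mct.gptq` (not shown in this diff).
    import model_compression_toolkit as mct

    # After this patch, Hessian sample attention (SLA) and gradual activation
    # quantization are enabled by default:
    gptq_config = mct.gptq.get_keras_gptq_config(n_epochs=50)
    # Equivalent to passing:
    #   use_hessian_sample_attention=True,
    #   gradual_activation_quantization=True

    # To keep the pre-patch behavior, disable both explicitly:
    legacy_config = mct.gptq.get_keras_gptq_config(
        n_epochs=50,
        use_hessian_sample_attention=False,
        gradual_activation_quantization=False)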