diff --git a/model_compression_toolkit/gptq/keras/quantization_facade.py b/model_compression_toolkit/gptq/keras/quantization_facade.py
index 7a44ff78f..9c723ae8a 100644
--- a/model_compression_toolkit/gptq/keras/quantization_facade.py
+++ b/model_compression_toolkit/gptq/keras/quantization_facade.py
@@ -66,8 +66,8 @@ def get_keras_gptq_config(n_epochs: int,
                           use_hessian_based_weights: bool = True,
                           regularization_factor: float = None,
                           hessian_batch_size: int = ACT_HESSIAN_DEFAULT_BATCH_SIZE,
-                          use_hessian_sample_attention: bool = False,
-                          gradual_activation_quantization: Union[bool, GradualActivationQuantizationConfig] = False) -> GradientPTQConfig:
+                          use_hessian_sample_attention: bool = True,
+                          gradual_activation_quantization: Union[bool, GradualActivationQuantizationConfig] = True) -> GradientPTQConfig:
     """
     Create a GradientPTQConfig instance for Keras models.
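
The snippet below is a minimal usage sketch, not part of this diff: it illustrates that after this change `use_hessian_sample_attention` and `gradual_activation_quantization` are enabled by default, and how a caller can opt back into the previous behavior. The `n_epochs` value is a placeholder, and the `mct.gptq` entry point is assumed from MCT's documented GPTQ workflow.

```python
# Sketch only: assumes model_compression_toolkit is installed and exposes
# get_keras_gptq_config under mct.gptq, as in its documented GPTQ workflow.
import model_compression_toolkit as mct

# With this change, sample attention and gradual activation quantization
# are enabled by default:
gptq_config = mct.gptq.get_keras_gptq_config(n_epochs=50)

# Callers that relied on the previous defaults can disable them explicitly:
legacy_config = mct.gptq.get_keras_gptq_config(
    n_epochs=50,
    use_hessian_sample_attention=False,
    gradual_activation_quantization=False,
)
```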