Update quantization_facade.py
Enable SLA and gradual activation quantization by default in Keras.
reuvenperetz authored Dec 5, 2024
1 parent 6c6e074 commit d3a34fb
Showing 1 changed file with 2 additions and 2 deletions.
model_compression_toolkit/gptq/keras/quantization_facade.py (2 additions, 2 deletions)

@@ -66,8 +66,8 @@ def get_keras_gptq_config(n_epochs: int,
                           use_hessian_based_weights: bool = True,
                           regularization_factor: float = None,
                           hessian_batch_size: int = ACT_HESSIAN_DEFAULT_BATCH_SIZE,
-                          use_hessian_sample_attention: bool = False,
-                          gradual_activation_quantization: Union[bool, GradualActivationQuantizationConfig] = False) -> GradientPTQConfig:
+                          use_hessian_sample_attention: bool = True,
+                          gradual_activation_quantization: Union[bool, GradualActivationQuantizationConfig] = True) -> GradientPTQConfig:
     """
     Create a GradientPTQConfig instance for Keras models.
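
For reference, a minimal sketch of what the new defaults mean for callers, assuming the facade is exposed as mct.gptq.get_keras_gptq_config (the usual MCT entry point); only n_epochs is set here, everything else keeps its default:

import model_compression_toolkit as mct

# After this commit, a plain call picks up the new defaults:
# hessian sample-attention (SLA) and gradual activation quantization are enabled.
gptq_config = mct.gptq.get_keras_gptq_config(n_epochs=50)

# Callers who want the previous behavior can opt out explicitly
# by passing the old default values from before this change.
gptq_config_legacy = mct.gptq.get_keras_gptq_config(
    n_epochs=50,
    use_hessian_sample_attention=False,
    gradual_activation_quantization=False,
)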
