
Commit fd01178
change ci setting
Signed-off-by: changwangss <[email protected]>
changwangss committed Jul 19, 2024
1 parent 9993e6d commit fd01178
Showing 2 changed files with 6 additions and 4 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/unit-test-optimize.yml
@@ -45,7 +45,7 @@ jobs:
           test_name: "PR-test"
         - test_branch: "main"
           test_name: "baseline"
-      fail-fast: true
+      fail-fast: false
     name: optimize-unit-test-${{ matrix.test_name }}
     steps:
       - name: Docker Clean Up
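
For context, fail-fast controls whether GitHub Actions cancels the remaining matrix jobs as soon as one fails; switching it to false lets the "PR-test" and "baseline" legs finish independently, so a failing PR run no longer hides the baseline result. A minimal sketch of such a strategy block (the PR-leg branch value and the surrounding keys are assumptions, not the exact workflow file):

jobs:
  optimize-unit-test:
    strategy:
      fail-fast: false          # a failure in one leg no longer cancels the other
      matrix:
        include:
          - test_branch: ${{ github.ref }}   # assumed value for the PR leg
            test_name: "PR-test"
          - test_branch: "main"
            test_name: "baseline"
    name: optimize-unit-test-${{ matrix.test_name }}
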
8 changes: 5 additions & 3 deletions (second changed file)
@@ -67,7 +67,11 @@
     convert_to_smoothquant_model,
     replace_linear,
 )
-from ...tools.utils import is_intel_gpu_available, is_ipex_available, _neural_compressor_version
+from ...tools.utils import (
+    is_intel_gpu_available,
+    is_ipex_available,
+    _neural_compressor_version,
+)
 from accelerate import init_empty_weights
 from huggingface_hub import hf_hub_download
 from neural_compressor.torch.algorithms.weight_only.modules import WeightOnlyLinear
@@ -1832,7 +1836,6 @@ def load_low_bit(cls, pretrained_model_name_or_path, *model_args, **kwargs):
         if quantization_config.weight_dtype not in [
             "fp8_e5m2",
             "fp8_e4m3",
-            "int4_fullrange",
         ]:
             model = build_woq_model(model, quantization_config)
         else:
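
Dropping "int4_fullrange" from this list means int4_fullrange checkpoints now take the build_woq_model path like the other weight-only dtypes, instead of the else branch. A minimal sketch of the changed dispatch (the helper below is hypothetical; the real method does far more than return a label):

# Hypothetical helper illustrating the new branch; not the library's API.
def loading_path(weight_dtype: str) -> str:
    # After this commit only the fp8 dtypes skip build_woq_model.
    if weight_dtype not in ["fp8_e5m2", "fp8_e4m3"]:
        return "build_woq_model"
    return "fp8 path"

assert loading_path("int4_fullrange") == "build_woq_model"  # changed by this commit
assert loading_path("fp8_e4m3") == "fp8 path"               # unchanged
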
@@ -1949,7 +1952,6 @@ def replace_ipex_cpu_woq_linear(model, current_name=[]):
         if quantization_config.weight_dtype not in [
             "fp8_e5m2",
             "fp8_e4m3",
-            "int4_fullrange",
         ] and not quantization_config.use_ipex:
             model = replace_linear(
                 model,
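
The second hunk makes the same change inside the guard around replace_linear, which additionally skips the replacement when IPEX handles the linear layers. A sketch of the combined condition (again a hypothetical stand-in for the real call):

# Hypothetical predicate; the real code calls replace_linear(model, ...) here.
def should_replace_linear(weight_dtype: str, use_ipex: bool) -> bool:
    return weight_dtype not in ["fp8_e5m2", "fp8_e4m3"] and not use_ipex

assert should_replace_linear("int4_fullrange", use_ipex=False)      # now replaced
assert not should_replace_linear("int4_fullrange", use_ipex=True)   # IPEX path wins
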
