Raise error when `modules_to_save` is specified and multiple adapters are being unloaded (huggingface#1137)

* handle `modules_to_save` when unloading

* address comments

* Apply suggestions from code review

Co-authored-by: Benjamin Bossan <[email protected]>

* quality

---------

Co-authored-by: Benjamin Bossan <[email protected]>
pacman100 and BenjaminBossan authored Dec 6, 2023
1 parent c22a8e5 commit 2336780
Showing 4 changed files with 15 additions and 1 deletion.
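For context, a minimal sketch of the situation this commit now rejects, written against the public PEFT API; the base model, adapter names, and `modules_to_save` target are illustrative assumptions, and `merge_and_unload` is assumed to forward `adapter_names` to `_unload_and_optionally_merge` as the diffs below suggest.

    from transformers import AutoModelForSequenceClassification
    from peft import LoraConfig, get_peft_model

    base = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
    config = LoraConfig(task_type="SEQ_CLS", modules_to_save=["classifier"])

    model = get_peft_model(base, config)  # first adapter, named "default"
    model.add_adapter("other", config)    # second adapter with the same modules_to_save

    # Unloading a single adapter still works; unloading both at once now fails,
    # since only one adapter's copy of the saved modules could be kept:
    model.merge_and_unload(adapter_names=["default", "other"])
    # ValueError: Cannot unload multiple adapters that specify `modules_to_save`.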
1 change: 1 addition & 0 deletions src/peft/tuners/ia3/model.py
@@ -313,6 +313,7 @@ def _unload_and_optionally_merge(
         if getattr(self.model, "is_loaded_in_4bit", False):
             raise ValueError("Cannot merge ia3 layers when the model is loaded in 4-bit mode")
 
+        self._unloading_checks(adapter_names)
         key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
         for key in key_list:
             try:
1 change: 1 addition & 0 deletions src/peft/tuners/lora/model.py
@@ -426,6 +426,7 @@ def _unload_and_optionally_merge(
         if getattr(self.model, "quantization_method", None) == "gptq":
             raise ValueError("Cannot merge LORA layers when the model is gptq quantized")
 
+        self._unloading_checks(adapter_names)
         key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
         desc = "Unloading " + ("and merging " if merge else "") + "model"
         for key in tqdm(key_list, disable=not progressbar, desc=desc):
3 changes: 3 additions & 0 deletions src/peft/tuners/lycoris_utils.py
@@ -37,6 +37,7 @@ class LycorisConfig(PeftConfig):
     r"""
     A base config for LyCORIS like adapters
     """
+
     rank_pattern: Optional[dict] = field(
         default_factory=dict,
         metadata={

@@ -61,6 +62,7 @@ class LycorisLayer(BaseTunerLayer):
     r"""
     A base layer for LyCORIS like adapters
     """
+
     # adapter_layer_names needs to be defined on the child class
     other_param_names = ("r", "alpha", "scaling", "rank_dropout", "module_dropout")
 

@@ -319,6 +321,7 @@ def _unload_and_optionally_merge(
         if getattr(self.model, "quantization_method", None) == "gptq":
             raise ValueError("Cannot merge LOHA layers when the model is gptq quantized")
 
+        self._unloading_checks(adapter_names)
         key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
         desc = "Unloading " + ("and merging " if merge else "") + "model"
         for key in tqdm(key_list, disable=not progressbar, desc=desc):
11 changes: 10 additions & 1 deletion src/peft/tuners/tuners_utils.py
@@ -18,7 +18,7 @@
 import re
 import warnings
 from abc import ABC, abstractmethod
-from typing import Any, Optional, Union
+from typing import Any, List, Optional, Union
 
 import torch
 from torch import nn

@@ -299,6 +299,14 @@ def unmerge_adapter(self):
             if isinstance(module, BaseTunerLayer):
                 module.unmerge()
 
+    def _unloading_checks(self, adapter_names: Optional[List[str]]):
+        adapters_to_consider = adapter_names or self.active_adapters
+        is_modules_to_save_available = any(
+            self.peft_config[adapter].modules_to_save for adapter in adapters_to_consider
+        )
+        if is_modules_to_save_available and len(adapters_to_consider) > 1:
+            raise ValueError("Cannot unload multiple adapters that specify `modules_to_save`.")
+
 
 class BaseTunerLayer(ABC):
     r"""

@@ -310,6 +318,7 @@ class BaseTunerLayer(ABC):
         active_adapters (Union[List[`str`], `str`], *optional*):
             The name of the active adapter.
     """
+
     active_adapter = None
 
     # All names of layers that may contain adapter (trainable) weights
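The check is defined once on the shared tuner base class, so the ia3, lora, and LyCORIS tuners above all inherit it. Its decision rule, restated here as a self-contained function for illustration (the names below are hypothetical, not part of the diff):

    from typing import Dict, List, Optional

    def unloading_allowed(
        modules_to_save: Dict[str, Optional[List[str]]],  # per-adapter config value
        adapter_names: Optional[List[str]],               # explicit selection, or None
        active_adapters: List[str],                       # fallback when None is given
    ) -> bool:
        adapters = adapter_names or active_adapters
        uses_modules_to_save = any(modules_to_save.get(name) for name in adapters)
        return not (uses_modules_to_save and len(adapters) > 1)

    assert unloading_allowed({"default": ["classifier"]}, None, ["default"])          # one adapter: ok
    assert unloading_allowed({"a": None, "b": None}, ["a", "b"], ["a"])               # no modules_to_save: ok
    assert not unloading_allowed({"a": ["classifier"], "b": None}, ["a", "b"], ["a"]) # ambiguous: blocked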
