# File: peft-main/src/peft/__init__.py
__version__ = '0.12.1.dev0'
from .auto import AutoPeftModel, AutoPeftModelForCausalLM, AutoPeftModelForSequenceClassification, AutoPeftModelForSeq2SeqLM, AutoPeftModelForTokenClassification, AutoPeftModelForQuestionAnswering, AutoPeftModelForFeatureExtraction
from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING, get_peft_config, get_peft_model, inject_adapter_in_model
from .mixed_model import PeftMixedModel
from .peft_model import PeftModel, PeftModelForCausalLM, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, PeftModelForQuestionAnswering, PeftModelForFeatureExtraction, get_layer_status, get_model_status
from .tuners import AdaptionPromptConfig, AdaptionPromptModel, LoraConfig, LoraRuntimeConfig, LoftQConfig, LoraModel, LoHaConfig, LoHaModel, LoKrConfig, LoKrModel, IA3Config, IA3Model, AdaLoraConfig, AdaLoraModel, BOFTConfig, BOFTModel, PrefixEncoder, PrefixTuningConfig, PromptEmbedding, PromptEncoder, PromptEncoderConfig, PromptEncoderReparameterizationType, PromptTuningConfig, PromptTuningInit, MultitaskPromptTuningConfig, MultitaskPromptTuningInit, OFTConfig, OFTModel, PolyConfig, PolyModel, LNTuningConfig, LNTuningModel, VBLoRAConfig, VBLoRAModel, VeraConfig, VeraModel, FourierFTConfig, FourierFTModel, XLoraConfig, XLoraModel, HRAConfig, HRAModel, VBLoRAConfig
from .utils import TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, PeftType, TaskType, bloom_model_postprocess_past_key_value, get_peft_model_state_dict, prepare_model_for_kbit_training, replace_lora_weights_loftq, set_peft_model_state_dict, shift_tokens_right, load_peft_weights, cast_mixed_precision_params
from .config import PeftConfig, PromptLearningConfig
# File: peft-main/src/peft/auto.py
from __future__ import annotations
import importlib
import os
from typing import Optional
from transformers import AutoModel, AutoModelForCausalLM, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoTokenizer
from .config import PeftConfig
from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING
from .peft_model import PeftModel, PeftModelForCausalLM, PeftModelForFeatureExtraction, PeftModelForQuestionAnswering, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification
from .utils.constants import TOKENIZER_CONFIG_NAME
from .utils.other import check_file_exists_on_hf_hub
class _BaseAutoPeftModel:
_target_class = None
_target_peft_class = None
def __init__(self, *args, **kwargs):
raise EnvironmentError(f'{self.__class__.__name__} is designed to be instantiated using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or `{self.__class__.__name__}.from_config(config)` methods.')
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, adapter_name: str='default', is_trainable: bool=False, config: Optional[PeftConfig]=None, revision: Optional[str]=None, **kwargs):
peft_config = PeftConfig.from_pretrained(pretrained_model_name_or_path, revision=revision, **kwargs)
base_model_path = peft_config.base_model_name_or_path
base_model_revision = peft_config.revision
task_type = getattr(peft_config, 'task_type', None)
if cls._target_class is not None:
target_class = cls._target_class
elif cls._target_class is None and task_type is not None:
raise ValueError("Cannot use `AutoPeftModel` with a task type, please use a specific class for your task type. (e.g. `AutoPeftModelForCausalLM` for `task_type='CAUSAL_LM'`)")
if task_type is not None:
expected_target_class = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[task_type]
if cls._target_peft_class.__name__ != expected_target_class.__name__:
raise ValueError(f'Expected target PEFT class: {expected_target_class.__name__}, but you have asked for: {cls._target_peft_class.__name__} make sure that you are loading the correct model for your task type.')
elif task_type is None and getattr(peft_config, 'auto_mapping', None) is not None:
auto_mapping = getattr(peft_config, 'auto_mapping', None)
base_model_class = auto_mapping['base_model_class']
parent_library_name = auto_mapping['parent_library']
parent_library = importlib.import_module(parent_library_name)
target_class = getattr(parent_library, base_model_class)
else:
raise ValueError('Cannot infer the auto class from the config, please make sure that you are loading the correct model for your task type.')
base_model = target_class.from_pretrained(base_model_path, revision=base_model_revision, **kwargs)
tokenizer_exists = False
if os.path.exists(os.path.join(pretrained_model_name_or_path, TOKENIZER_CONFIG_NAME)):
tokenizer_exists = True
else:
token = kwargs.get('token', None)
if token is None:
token = kwargs.get('use_auth_token', None)
tokenizer_exists = check_file_exists_on_hf_hub(repo_id=pretrained_model_name_or_path, filename=TOKENIZER_CONFIG_NAME, revision=revision, repo_type=kwargs.get('repo_type', None), token=token)
if tokenizer_exists:
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, trust_remote_code=kwargs.get('trust_remote_code', False))
base_model.resize_token_embeddings(len(tokenizer))
return cls._target_peft_class.from_pretrained(base_model, pretrained_model_name_or_path, adapter_name=adapter_name, is_trainable=is_trainable, config=config, **kwargs)
class AutoPeftModel(_BaseAutoPeftModel):
_target_class = None
_target_peft_class = PeftModel
class AutoPeftModelForCausalLM(_BaseAutoPeftModel):
_target_class = AutoModelForCausalLM
_target_peft_class = PeftModelForCausalLM
class AutoPeftModelForSeq2SeqLM(_BaseAutoPeftModel):
_target_class = AutoModelForSeq2SeqLM
_target_peft_class = PeftModelForSeq2SeqLM
class AutoPeftModelForSequenceClassification(_BaseAutoPeftModel):
_target_class = AutoModelForSequenceClassification
_target_peft_class = PeftModelForSequenceClassification
class AutoPeftModelForTokenClassification(_BaseAutoPeftModel):
_target_class = AutoModelForTokenClassification
_target_peft_class = PeftModelForTokenClassification
class AutoPeftModelForQuestionAnswering(_BaseAutoPeftModel):
_target_class = AutoModelForQuestionAnswering
_target_peft_class = PeftModelForQuestionAnswering
class AutoPeftModelForFeatureExtraction(_BaseAutoPeftModel):
_target_class = AutoModel
_target_peft_class = PeftModelForFeatureExtraction
# File: peft-main/src/peft/config.py
import inspect
import json
import os
import warnings
from dataclasses import asdict, dataclass, field
from typing import Dict, Optional, Union
from huggingface_hub import hf_hub_download
from transformers.utils import PushToHubMixin
from .utils import CONFIG_NAME, PeftType, TaskType
@dataclass
class PeftConfigMixin(PushToHubMixin):
peft_type: Optional[PeftType] = field(default=None, metadata={'help': 'The type of PEFT model.'})
auto_mapping: Optional[dict] = field(default=None, metadata={'help': 'An auto mapping dict to help retrieve the base model class if needed.'})
def to_dict(self) -> Dict:
return asdict(self)
def save_pretrained(self, save_directory: str, **kwargs) -> None:
if os.path.isfile(save_directory):
raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file')
os.makedirs(save_directory, exist_ok=True)
auto_mapping_dict = kwargs.pop('auto_mapping_dict', None)
output_dict = self.to_dict()
for (key, value) in output_dict.items():
if isinstance(value, set):
output_dict[key] = list(value)
output_path = os.path.join(save_directory, CONFIG_NAME)
if auto_mapping_dict is not None:
output_dict['auto_mapping'] = auto_mapping_dict
with open(output_path, 'w') as writer:
writer.write(json.dumps(output_dict, indent=2, sort_keys=True))
@classmethod
def from_peft_type(cls, **kwargs):
from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING
if 'peft_type' in kwargs:
peft_type = kwargs['peft_type']
config_cls = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type]
else:
config_cls = cls
return config_cls(**kwargs)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: str, subfolder: Optional[str]=None, **kwargs):
path = os.path.join(pretrained_model_name_or_path, subfolder) if subfolder is not None else pretrained_model_name_or_path
(hf_hub_download_kwargs, class_kwargs, _) = cls._split_kwargs(kwargs)
if os.path.isfile(os.path.join(path, CONFIG_NAME)):
config_file = os.path.join(path, CONFIG_NAME)
else:
try:
config_file = hf_hub_download(pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder, **hf_hub_download_kwargs)
except Exception as exc:
raise ValueError(f"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'") from exc
loaded_attributes = cls.from_json_file(config_file)
kwargs = {**class_kwargs, **loaded_attributes}
return cls.from_peft_type(**kwargs)
@classmethod
def from_json_file(cls, path_json_file: str, **kwargs):
with open(path_json_file) as file:
json_object = json.load(file)
if 'runtime_config' in json_object:
warnings.warn('The configuration file contains a `runtime_config` key. This is ignored. Runtime configurations are only valid at runtime.')
del json_object['runtime_config']
return json_object
@classmethod
def _split_kwargs(cls, kwargs):
hf_hub_download_kwargs = {}
class_kwargs = {}
other_kwargs = {}
for (key, value) in kwargs.items():
if key in inspect.signature(hf_hub_download).parameters:
hf_hub_download_kwargs[key] = value
elif key in list(cls.__annotations__):
class_kwargs[key] = value
else:
other_kwargs[key] = value
return (hf_hub_download_kwargs, class_kwargs, other_kwargs)
@classmethod
def _get_peft_type(cls, model_id: str, **hf_hub_download_kwargs):
subfolder = hf_hub_download_kwargs.get('subfolder', None)
path = os.path.join(model_id, subfolder) if subfolder is not None else model_id
if os.path.isfile(os.path.join(path, CONFIG_NAME)):
config_file = os.path.join(path, CONFIG_NAME)
else:
try:
config_file = hf_hub_download(model_id, CONFIG_NAME, **hf_hub_download_kwargs)
except Exception:
raise ValueError(f"Can't find '{CONFIG_NAME}' at '{model_id}'")
loaded_attributes = cls.from_json_file(config_file)
return loaded_attributes['peft_type']
@property
def is_prompt_learning(self) -> bool:
return False
@property
def is_adaption_prompt(self) -> bool:
return False
@dataclass
class PeftConfig(PeftConfigMixin):
base_model_name_or_path: Optional[str] = field(default=None, metadata={'help': 'The name of the base model to use.'})
revision: Optional[str] = field(default=None, metadata={'help': 'The specific base model version to use.'})
peft_type: Optional[Union[str, PeftType]] = field(default=None, metadata={'help': 'Peft type'})
task_type: Optional[Union[str, TaskType]] = field(default=None, metadata={'help': 'Task type'})
inference_mode: bool = field(default=False, metadata={'help': 'Whether to use inference mode'})
@dataclass
class PromptLearningConfig(PeftConfig):
num_virtual_tokens: int = field(default=None, metadata={'help': 'Number of virtual tokens'})
token_dim: int = field(default=None, metadata={'help': 'The hidden embedding dimension of the base transformer model'})
num_transformer_submodules: Optional[int] = field(default=None, metadata={'help': 'Number of transformer submodules'})
num_attention_heads: Optional[int] = field(default=None, metadata={'help': 'Number of attention heads'})
num_layers: Optional[int] = field(default=None, metadata={'help': 'Number of transformer layers'})
@property
def is_prompt_learning(self) -> bool:
return True
# File: peft-main/src/peft/helpers.py
import inspect
from contextlib import contextmanager
from copy import deepcopy
from functools import update_wrapper
from types import MethodType
from .peft_model import PeftConfig, PeftModel
from .tuners.lora.layer import LoraLayer
def update_forward_signature(model: PeftModel) -> None:
current_signature = inspect.signature(model.forward)
if len(current_signature.parameters) == 2 and 'args' in current_signature.parameters and ('kwargs' in current_signature.parameters):
forward = deepcopy(model.forward.__func__)
update_wrapper(forward, type(model.get_base_model()).forward, assigned=('__doc__', '__name__', '__annotations__'))
model.forward = MethodType(forward, model)
def update_generate_signature(model: PeftModel) -> None:
if not hasattr(model, 'generate'):
return
current_signature = inspect.signature(model.generate)
if len(current_signature.parameters) == 2 and 'args' in current_signature.parameters and ('kwargs' in current_signature.parameters) or (len(current_signature.parameters) == 1 and 'kwargs' in current_signature.parameters):
generate = deepcopy(model.generate.__func__)
update_wrapper(generate, type(model.get_base_model()).generate, assigned=('__doc__', '__name__', '__annotations__'))
model.generate = MethodType(generate, model)
def update_signature(model: PeftModel, method: str='all') -> None:
if method == 'forward':
update_forward_signature(model)
elif method == 'generate':
update_generate_signature(model)
elif method == 'all':
update_forward_signature(model)
update_generate_signature(model)
else:
raise ValueError(f"method {method} is not supported please choose one of ['forward', 'generate', 'all']")
def check_if_peft_model(model_name_or_path: str) -> bool:
is_peft_model = True
try:
PeftConfig.from_pretrained(model_name_or_path)
except Exception:
is_peft_model = False
return is_peft_model
@contextmanager
def rescale_adapter_scale(model, multiplier):
if not isinstance(multiplier, (float, int)):
raise TypeError(f'Argument multiplier should be of type float, got {type(multiplier)}')
original_scaling = {}
for module in model.modules():
if isinstance(module, LoraLayer):
original_scaling[module] = module.scaling.copy()
module.scaling = {k: v * multiplier for (k, v) in module.scaling.items()}
if not original_scaling:
raise ValueError('scaling is only supported for models with `LoraLayer`s')
try:
yield
finally:
for (module, scaling) in original_scaling.items():
module.scaling = scaling
# File: peft-main/src/peft/import_utils.py
import importlib
import importlib.metadata as importlib_metadata
from functools import lru_cache
import packaging.version
@lru_cache
def is_bnb_available() -> bool:
return importlib.util.find_spec('bitsandbytes') is not None
@lru_cache
def is_bnb_4bit_available() -> bool:
if not is_bnb_available():
return False
import bitsandbytes as bnb
return hasattr(bnb.nn, 'Linear4bit')
@lru_cache
def is_auto_gptq_available():
if importlib.util.find_spec('auto_gptq') is not None:
AUTOGPTQ_MINIMUM_VERSION = packaging.version.parse('0.5.0')
version_autogptq = packaging.version.parse(importlib_metadata.version('auto_gptq'))
if AUTOGPTQ_MINIMUM_VERSION <= version_autogptq:
return True
else:
raise ImportError(f'Found an incompatible version of auto-gptq. Found version {version_autogptq}, but only versions above {AUTOGPTQ_MINIMUM_VERSION} are supported')
@lru_cache
def is_optimum_available() -> bool:
return importlib.util.find_spec('optimum') is not None
@lru_cache
def is_torch_tpu_available(check_device=True):
if importlib.util.find_spec('torch_xla') is not None:
if check_device:
try:
import torch_xla.core.xla_model as xm
_ = xm.xla_device()
return True
except RuntimeError:
return False
return True
return False
@lru_cache
def is_aqlm_available():
return importlib.util.find_spec('aqlm') is not None
@lru_cache
def is_auto_awq_available():
return importlib.util.find_spec('awq') is not None
@lru_cache
def is_eetq_available():
return importlib.util.find_spec('eetq') is not None
@lru_cache
def is_hqq_available():
return importlib.util.find_spec('hqq') is not None
# File: peft-main/src/peft/mapping.py
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Any, Optional
import torch
from peft.tuners.xlora.model import XLoraModel
from .config import PeftConfig
from .mixed_model import PeftMixedModel
from .peft_model import PeftModel, PeftModelForCausalLM, PeftModelForFeatureExtraction, PeftModelForQuestionAnswering, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification
from .tuners import AdaLoraConfig, AdaLoraModel, AdaptionPromptConfig, BOFTConfig, BOFTModel, FourierFTConfig, FourierFTModel, HRAConfig, HRAModel, IA3Config, IA3Model, LNTuningConfig, LNTuningModel, LoHaConfig, LoHaModel, LoKrConfig, LoKrModel, LoraConfig, LoraModel, MultitaskPromptTuningConfig, OFTConfig, OFTModel, PolyConfig, PolyModel, PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig, VBLoRAConfig, VBLoRAModel, VeraConfig, VeraModel, XLoraConfig
from .tuners.tuners_utils import BaseTuner
from .utils import _prepare_prompt_learning_config
if TYPE_CHECKING:
from transformers import PreTrainedModel
MODEL_TYPE_TO_PEFT_MODEL_MAPPING: dict[str, type[PeftModel]] = {'SEQ_CLS': PeftModelForSequenceClassification, 'SEQ_2_SEQ_LM': PeftModelForSeq2SeqLM, 'CAUSAL_LM': PeftModelForCausalLM, 'TOKEN_CLS': PeftModelForTokenClassification, 'QUESTION_ANS': PeftModelForQuestionAnswering, 'FEATURE_EXTRACTION': PeftModelForFeatureExtraction}
PEFT_TYPE_TO_CONFIG_MAPPING: dict[str, type[PeftConfig]] = {'ADAPTION_PROMPT': AdaptionPromptConfig, 'PROMPT_TUNING': PromptTuningConfig, 'PREFIX_TUNING': PrefixTuningConfig, 'P_TUNING': PromptEncoderConfig, 'LORA': LoraConfig, 'LOHA': LoHaConfig, 'LORAPLUS': LoraConfig, 'LOKR': LoKrConfig, 'ADALORA': AdaLoraConfig, 'BOFT': BOFTConfig, 'IA3': IA3Config, 'MULTITASK_PROMPT_TUNING': MultitaskPromptTuningConfig, 'OFT': OFTConfig, 'POLY': PolyConfig, 'LN_TUNING': LNTuningConfig, 'VERA': VeraConfig, 'FOURIERFT': FourierFTConfig, 'XLORA': XLoraConfig, 'HRA': HRAConfig, 'VBLORA': VBLoRAConfig}
PEFT_TYPE_TO_TUNER_MAPPING: dict[str, type[BaseTuner]] = {'LORA': LoraModel, 'LOHA': LoHaModel, 'LOKR': LoKrModel, 'ADALORA': AdaLoraModel, 'BOFT': BOFTModel, 'IA3': IA3Model, 'OFT': OFTModel, 'POLY': PolyModel, 'LN_TUNING': LNTuningModel, 'VERA': VeraModel, 'FOURIERFT': FourierFTModel, 'XLORA': XLoraModel, 'HRA': HRAModel, 'VBLORA': VBLoRAModel}
def get_peft_config(config_dict: dict[str, Any]) -> PeftConfig:
return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict['peft_type']](**config_dict)
def get_peft_model(model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str='default', mixed: bool=False, autocast_adapter_dtype: bool=True, revision: Optional[str]=None) -> PeftModel | PeftMixedModel:
model_config = BaseTuner.get_model_config(model)
old_name = peft_config.base_model_name_or_path
new_name = model.__dict__.get('name_or_path', None)
peft_config.base_model_name_or_path = new_name
if old_name is not None and old_name != new_name:
warnings.warn(f"The PEFT config's `base_model_name_or_path` was renamed from '{old_name}' to '{new_name}'. Please ensure that the correct base model is loaded when loading this checkpoint.")
if revision is not None:
if peft_config.revision is not None and peft_config.revision != revision:
warnings.warn(f'peft config has already set base model revision to {peft_config.revision}, overwriting with revision {revision}')
peft_config.revision = revision
if mixed:
return PeftMixedModel(model, peft_config, adapter_name=adapter_name)
if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and (not peft_config.is_prompt_learning):
return PeftModel(model, peft_config, adapter_name=adapter_name, autocast_adapter_dtype=autocast_adapter_dtype)
if peft_config.is_prompt_learning:
peft_config = _prepare_prompt_learning_config(peft_config, model_config)
return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](model, peft_config, adapter_name=adapter_name, autocast_adapter_dtype=autocast_adapter_dtype)
def inject_adapter_in_model(peft_config: PeftConfig, model: torch.nn.Module, adapter_name: str='default') -> torch.nn.Module:
if peft_config.is_prompt_learning or peft_config.is_adaption_prompt:
raise ValueError('`create_and_replace` does not support prompt learning and adaption prompt yet.')
if peft_config.peft_type not in PEFT_TYPE_TO_TUNER_MAPPING.keys():
raise ValueError(f'`inject_adapter_in_model` does not support {peft_config.peft_type} yet. Please use `get_peft_model`.')
tuner_cls = PEFT_TYPE_TO_TUNER_MAPPING[peft_config.peft_type]
peft_model = tuner_cls(model, peft_config, adapter_name=adapter_name)
return peft_model.model
# File: peft-main/src/peft/mixed_model.py
from __future__ import annotations
import os
from contextlib import contextmanager
from typing import Any, Optional, Union
import torch
from accelerate.hooks import remove_hook_from_submodules
from torch import nn
from transformers.utils import PushToHubMixin
from peft.utils.constants import DUMMY_MODEL_CONFIG
from .config import PeftConfig
from .peft_model import PeftModel
from .tuners import AdaLoraModel, IA3Model, LoHaModel, LoKrModel, LoraModel, MixedModel, OFTModel
from .tuners.mixed import COMPATIBLE_TUNER_TYPES
from .utils import PeftType, _set_adapter, _set_trainable
PEFT_TYPE_TO_MODEL_MAPPING = {PeftType.LORA: LoraModel, PeftType.LOHA: LoHaModel, PeftType.LOKR: LoKrModel, PeftType.ADALORA: AdaLoraModel, PeftType.IA3: IA3Model, PeftType.OFT: OFTModel}
def _prepare_model_for_gradient_checkpointing(model: nn.Module) -> None:
if not getattr(model, 'is_gradient_checkpointing', True):
return model
if not (getattr(model, 'is_loaded_in_8bit', False) or getattr(model, 'is_loaded_in_4bit', False) or getattr(model, 'is_quantized', False)):
if hasattr(model, 'enable_input_require_grads'):
model.enable_input_require_grads()
elif hasattr(model, 'get_input_embeddings'):
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
def _check_config_compatible(peft_config: PeftConfig) -> None:
if peft_config.peft_type not in COMPATIBLE_TUNER_TYPES:
raise ValueError(f"The provided `peft_type` '{peft_config.peft_type.value}' is not compatible with the `PeftMixedModel`. Compatible types are: {COMPATIBLE_TUNER_TYPES}")
class PeftMixedModel(PushToHubMixin, torch.nn.Module):
def __init__(self, model: nn.Module, peft_config: PeftConfig, adapter_name: str='default') -> None:
super().__init__()
_check_config_compatible(peft_config)
_prepare_model_for_gradient_checkpointing(model)
self.modules_to_save = None
self.base_model = MixedModel(model, {adapter_name: peft_config}, adapter_name)
self.set_modules_to_save(peft_config, adapter_name)
self.config = getattr(model, 'config', DUMMY_MODEL_CONFIG)
if hasattr(self.base_model, 'config') and hasattr(self.base_model.config, 'pretraining_tp'):
self.base_model.config.pretraining_tp = 1
@property
def peft_config(self) -> dict[str, PeftConfig]:
return self.base_model.peft_config
@property
def active_adapter(self) -> str:
return self.base_model.active_adapter
@property
def active_adapters(self) -> list[str]:
return self.base_model.active_adapters
def get_nb_trainable_parameters(self):
trainable_params = 0
all_param = 0
for (_, param) in self.named_parameters():
num_params = param.numel()
if num_params == 0 and hasattr(param, 'ds_numel'):
num_params = param.ds_numel
if param.__class__.__name__ == 'Params4bit':
num_params = num_params * 2
all_param += num_params
if param.requires_grad:
trainable_params += num_params
return (trainable_params, all_param)
def print_trainable_parameters(self):
(trainable_params, all_param) = self.get_nb_trainable_parameters()
print(f'trainable params: {trainable_params:,d} || all params: {all_param:,d} || trainable%: {100 * trainable_params / all_param:.4f}')
def __getattr__(self, name: str):
try:
return super().__getattr__(name)
except AttributeError:
if name == 'base_model':
raise
return getattr(self.base_model, name)
def forward(self, *args: Any, **kwargs: Any):
return self.base_model(*args, **kwargs)
def generate(self, *args: Any, **kwargs: Any):
return self.base_model.generate(*args, **kwargs)
@contextmanager
def disable_adapter(self):
try:
self.base_model.disable_adapter_layers()
yield
finally:
self.base_model.enable_adapter_layers()
def add_adapter(self, adapter_name: str, peft_config: PeftConfig):
_check_config_compatible(peft_config)
try:
self.peft_config[adapter_name] = peft_config
self.base_model.inject_adapter(self, adapter_name)
except Exception:
if adapter_name in self.peft_config:
del self.peft_config[adapter_name]
raise
self.set_modules_to_save(peft_config, adapter_name)
def set_modules_to_save(self, peft_config: PeftConfig, adapter_name: str) -> None:
if (modules_to_save := getattr(peft_config, 'modules_to_save', None)) is None:
return
if self.modules_to_save is None:
self.modules_to_save = set(modules_to_save)
else:
self.modules_to_save.update(modules_to_save)
_set_trainable(self, adapter_name)
def set_adapter(self, adapter_name: Union[str, list[str]]) -> None:
if isinstance(adapter_name, str):
adapter_name = [adapter_name]
mismatched = set(adapter_name) - set(self.peft_config.keys())
if mismatched:
raise ValueError(f'Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}')
self.base_model.set_adapter(adapter_name)
_set_adapter(self, adapter_name)
def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None:
if isinstance(adapter_name, str):
adapter_name = [adapter_name]
mismatched = set(adapter_name) - set(self.peft_config.keys())
if mismatched:
raise ValueError(f'Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}')
self.base_model.delete_adapter(adapter_name)
def merge_and_unload(self, *args: Any, **kwargs: Any):
return self.base_model.merge_and_unload(*args, **kwargs)
def unload(self, *args: Any, **kwargs: Any):
return self.base_model.unload(*args, **kwargs)
def get_layer_status(self):
raise TypeError(f'get_layer_status is not supported for {self.__class__.__name__}.')
def get_model_status(self):
raise TypeError(f'get_model_status is not supported for {self.__class__.__name__}.')
@classmethod
def _split_kwargs(cls, kwargs: dict[str, Any]):
return PeftModel._split_kwargs(kwargs)
def load_adapter(self, model_id: str, adapter_name: str, *args: Any, **kwargs: Any):
output = PeftModel.load_adapter(self, model_id, adapter_name, *args, **kwargs)
self.set_adapter(self.active_adapters)
return output
def create_or_update_model_card(self, output_dir: str):
raise NotImplementedError(f'Model card creation is not supported for {self.__class__.__name__} (yet).')
def save_pretrained(self, save_directory: str, safe_serialization: bool=False, selected_adapters: Optional[list[str]]=None, **kwargs: Any):
raise NotImplementedError(f'Saving is not supported for {self.__class__.__name__} (yet).')
@classmethod
def from_pretrained(cls, model: nn.Module, model_id: str | os.PathLike, adapter_name: str='default', is_trainable: bool=False, config: Optional[PeftConfig]=None, **kwargs: Any):
from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
if config is None:
config = PEFT_TYPE_TO_CONFIG_MAPPING[PeftConfig._get_peft_type(model_id, subfolder=kwargs.get('subfolder', None), revision=kwargs.get('revision', None), cache_dir=kwargs.get('cache_dir', None), use_auth_token=kwargs.get('use_auth_token', None))].from_pretrained(model_id, **kwargs)
elif isinstance(config, PeftConfig):
config.inference_mode = not is_trainable
else:
raise ValueError(f'The input config must be a PeftConfig, got {config.__class__}')
if config.peft_type not in PEFT_TYPE_TO_MODEL_MAPPING:
raise ValueError(f'Adapter of type {config.peft_type} is not supported for mixed models.')
if getattr(model, 'hf_device_map', None) is not None and len(set(model.hf_device_map.values()).intersection({'cpu', 'disk'})) > 0:
remove_hook_from_submodules(model)
if config.is_prompt_learning and is_trainable:
raise ValueError('Cannot set a prompt learning adapter to trainable when loading pretrained adapter.')
else:
config.inference_mode = not is_trainable
model = cls(model, config, adapter_name)
model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs)
return model
# File: peft-main/src/peft/optimizers/loraplus.py
""""""
from __future__ import annotations
from operator import attrgetter
import torch.nn as nn
from torch.optim import Optimizer
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
from transformers.trainer_pt_utils import get_parameter_names
from ..peft_model import PeftModel
from ..tuners.lora.layer import Embedding
def create_loraplus_optimizer(model: PeftModel, optimizer_cls: type[Optimizer], *, lr: float, loraplus_lr_ratio: float, **kwargs) -> Optimizer:
decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS)
decay_parameters = [name for name in decay_parameters if 'bias' not in name]
param_groups = {'groupA': {}, 'groupB': {}, 'groupB_no_decay': {}, 'embedding': {}}
for (name, param) in model.named_parameters():
if not param.requires_grad:
continue
module = attrgetter(name)(model)
if isinstance(module, Embedding):
param_groups['embedding'][name] = param
elif 'lora_B' in name or param.ndim == 1:
if name in decay_parameters:
param_groups['groupB'][name] = param
else:
param_groups['groupB_no_decay'][name] = param
else:
param_groups['groupA'][name] = param
kwargs['lr'] = lr
loraplus_weight_decay = kwargs.pop('loraplus_weight_decay', 0.0)
loraplus_lr_embedding = kwargs.pop('loraplus_lr_embedding', 1e-06)
optimizer_grouped_parameters = [{'params': list(param_groups['groupA'].values()), 'weight_decay': loraplus_weight_decay, 'lr': lr}, {'params': list(param_groups['embedding'].values()), 'weight_decay': loraplus_weight_decay, 'lr': loraplus_lr_embedding}, {'params': list(param_groups['groupB'].values()), 'weight_decay': loraplus_weight_decay, 'lr': lr * loraplus_lr_ratio}, {'params': list(param_groups['groupB_no_decay'].values()), 'weight_decay': 0.0, 'lr': lr * loraplus_lr_ratio}]
optimizer = optimizer_cls(optimizer_grouped_parameters, **kwargs)
eight_bit_names = ['Adam8bit', 'AdamW8bit', 'PagedAdam8bit', 'PagedAdamW8bit']
if optimizer_cls.__name__ in eight_bit_names:
import bitsandbytes
manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
for module in model.modules():
if isinstance(module, nn.Embedding):
manager.register_module_override(module, 'weight', {'optim_bits': 32})
return optimizer
# File: peft-main/src/peft/peft_model.py
from __future__ import annotations
import collections
import inspect
import os
import warnings
from contextlib import contextmanager
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, Literal, Optional, Union
import packaging.version
import torch
import transformers
from accelerate import dispatch_model, infer_auto_device_map
from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules
from accelerate.utils import get_balanced_memory, named_module_tensors
from huggingface_hub import HfFileSystem, ModelCard, ModelCardData, hf_hub_download
from safetensors import safe_open
from safetensors.torch import save_file as safe_save_file
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers import PreTrainedModel
from transformers.modeling_outputs import QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from transformers.utils import PushToHubMixin
from peft.utils.constants import DUMMY_MODEL_CONFIG
from . import __version__
from .config import PeftConfig
from .tuners import AdaLoraModel, AdaptionPromptModel, BOFTModel, FourierFTModel, HRAModel, IA3Model, LNTuningModel, LoHaModel, LoKrModel, LoraModel, MultitaskPromptEmbedding, OFTModel, PolyModel, PrefixEncoder, PromptEmbedding, PromptEncoder, VBLoRAModel, VeraModel, XLoraConfig, XLoraModel
from .tuners.tuners_utils import BaseTuner, BaseTunerLayer
from .utils import SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, PeftType, TaskType, _get_batch_size, _prepare_prompt_learning_config, _set_adapter, _set_trainable, get_peft_model_state_dict, id_tensor_storage, infer_device, load_peft_weights, set_peft_model_state_dict, shift_tokens_right
PEFT_TYPE_TO_MODEL_MAPPING = {PeftType.LORA: LoraModel, PeftType.LOHA: LoHaModel, PeftType.LOKR: LoKrModel, PeftType.PROMPT_TUNING: PromptEmbedding, PeftType.P_TUNING: PromptEncoder, PeftType.PREFIX_TUNING: PrefixEncoder, PeftType.ADALORA: AdaLoraModel, PeftType.BOFT: BOFTModel, PeftType.ADAPTION_PROMPT: AdaptionPromptModel, PeftType.IA3: IA3Model, PeftType.OFT: OFTModel, PeftType.POLY: PolyModel, PeftType.LN_TUNING: LNTuningModel, PeftType.VERA: VeraModel, PeftType.FOURIERFT: FourierFTModel, PeftType.XLORA: XLoraModel, PeftType.HRA: HRAModel, PeftType.VBLORA: VBLoRAModel}
class PeftModel(PushToHubMixin, torch.nn.Module):
def __init__(self, model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str='default', autocast_adapter_dtype: bool=True) -> None:
super().__init__()
self.modules_to_save = None
self.active_adapter = adapter_name
self.peft_type = peft_config.peft_type
self.special_peft_forward_args = {'adapter_names'}
self._is_prompt_learning = peft_config.is_prompt_learning
if self._is_prompt_learning:
self._peft_config = {adapter_name: peft_config}
self.base_model = model
self.add_adapter(adapter_name, peft_config)
else:
self._peft_config = None
cls = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type]
self.base_model = cls(model, {adapter_name: peft_config}, adapter_name)
self.set_additional_trainable_modules(peft_config, adapter_name)
if hasattr(self.base_model, '_cast_adapter_dtype'):
self.base_model._cast_adapter_dtype(adapter_name=adapter_name, autocast_adapter_dtype=autocast_adapter_dtype)
if getattr(model, 'is_gradient_checkpointing', True):
model = self._prepare_model_for_gradient_checkpointing(model)
if hasattr(self.base_model, 'config') and hasattr(self.base_model.config, 'pretraining_tp'):
self.base_model.config.pretraining_tp = 1
@property
def peft_config(self) -> dict[str, PeftConfig]:
if self._is_prompt_learning:
return self._peft_config
return self.base_model.peft_config
@property
def active_adapters(self) -> list[str]:
try:
adapters = self.base_model.active_adapters
if not isinstance(adapters, list):
adapters = self.active_adapter
if isinstance(adapters, str):
adapters = [adapters]
except AttributeError:
adapters = self.active_adapter
if isinstance(adapters, str):
adapters = [adapters]
return adapters
@peft_config.setter
def peft_config(self, value: dict[str, PeftConfig]):
if self._is_prompt_learning:
self._peft_config = value
else:
self.base_model.peft_config = value
def save_pretrained(self, save_directory: str, safe_serialization: bool=True, selected_adapters: Optional[list[str]]=None, save_embedding_layers: Union[str, bool]='auto', is_main_process: bool=True, convert_pissa_to_lora: Optional[str]=None, path_initial_model_for_weight_conversion: Optional[str]=None, **kwargs: Any) -> None:
if os.path.isfile(save_directory):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file')
if selected_adapters is None:
selected_adapters = list(self.peft_config.keys())
elif any((selected_adapter_name not in list(self.peft_config.keys()) for selected_adapter_name in selected_adapters)):
raise ValueError(f'You passed an invalid `selected_adapters` arguments, current supported adapter names are {list(self.peft_config.keys())} - got {selected_adapters}.')
if convert_pissa_to_lora is not None:
warnings.warn('`convert_pissa_to_lora` is deprecated and will be removed in a future version. Use `path_initial_model_for_weight_conversion` instead.')
path_initial_model_for_weight_conversion = convert_pissa_to_lora
def save_mutated_as_lora(peft_config, path_initial_model_for_weight_conversion, output_state_dict, kwargs):
if peft_config.use_rslora and (peft_config.rank_pattern or peft_config.alpha_pattern):
msg = 'Passing `path_initial_model_for_weight_conversion` to `save_pretrained` is not supported when using `rank_pattern` or `alpha_pattern` at the same time as `use_rslora=True`.'
raise ValueError(msg)
if not any((str(peft_config.init_lora_weights).lower().startswith(prefix) for prefix in ['pissa', 'olora', 'true'])):
warnings.warn('`path_initial_model_for_weight_conversion` only works for converting a PiSSA or OLoRA adapter to a LoRA adapter')
initial_adapter_name = os.path.basename(path_initial_model_for_weight_conversion)
try:
self.load_adapter(os.path.dirname(path_initial_model_for_weight_conversion), subfolder=initial_adapter_name, adapter_name=initial_adapter_name)
is_pissa = str(self.peft_config[initial_adapter_name].init_lora_weights).lower().startswith('pissa')
is_olora = str(self.peft_config[initial_adapter_name].init_lora_weights).lower() == 'olora'
if is_pissa or is_olora:
raise ValueError('The `init_lora_weights` parameter of the initial adapter should be set to `True`. Otherwise, `self.load_adapter` will subtract the decomposed values again based on the residual model.')
output_state_dict = self.base_model.subtract_mutated_init(output_state_dict, initial_adapter_name, kwargs)
finally:
self.delete_adapter(initial_adapter_name)
return output_state_dict
if is_main_process:
os.makedirs(save_directory, exist_ok=True)
self.create_or_update_model_card(save_directory)
for adapter_name in selected_adapters:
peft_config = self.peft_config[adapter_name]
output_state_dict = get_peft_model_state_dict(self, state_dict=kwargs.get('state_dict', None), adapter_name=adapter_name, save_embedding_layers=save_embedding_layers)
output_dir = os.path.join(save_directory, adapter_name) if adapter_name != 'default' else save_directory
os.makedirs(output_dir, exist_ok=True)
if is_main_process and safe_serialization:
ptrs = collections.defaultdict(list)
for (name, tensor) in output_state_dict.items():
if isinstance(tensor, torch.Tensor):
ptrs[id_tensor_storage(tensor)].append(name)
else:
ptrs[id(tensor)].append(name)
shared_ptrs = {ptr: names for (ptr, names) in ptrs.items() if len(names) > 1}
for (_, names) in shared_ptrs.items():
for shared_tensor_name in names[1:]:
output_state_dict[shared_tensor_name] = output_state_dict[shared_tensor_name].clone()
if path_initial_model_for_weight_conversion is not None:
peft_config.init_lora_weights = True
peft_config.save_pretrained(path_initial_model_for_weight_conversion)
output_state_dict = save_mutated_as_lora(peft_config, path_initial_model_for_weight_conversion, output_state_dict, kwargs)
safe_save_file(output_state_dict, os.path.join(output_dir, SAFETENSORS_WEIGHTS_NAME), metadata={'format': 'pt'})
elif is_main_process:
if path_initial_model_for_weight_conversion is not None:
peft_config.init_lora_weights = True
peft_config.save_pretrained(path_initial_model_for_weight_conversion)
output_state_dict = save_mutated_as_lora(peft_config, path_initial_model_for_weight_conversion, output_state_dict, kwargs)
torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME))
if peft_config.base_model_name_or_path is None:
peft_config.base_model_name_or_path = self.base_model.__dict__.get('name_or_path', None) if peft_config.is_prompt_learning else self.base_model.model.__dict__.get('name_or_path', None)
inference_mode = peft_config.inference_mode
peft_config.inference_mode = True
if peft_config.task_type is None:
base_model_class = self._get_base_model_class(is_prompt_tuning=peft_config.is_prompt_learning)
parent_library = base_model_class.__module__
auto_mapping_dict = {'base_model_class': base_model_class.__name__, 'parent_library': parent_library}
else:
auto_mapping_dict = None
if is_main_process:
if path_initial_model_for_weight_conversion is not None:
peft_config.init_lora_weights = True
peft_config.r *= 2
if not peft_config.use_rslora:
peft_config.lora_alpha *= 2
else:
peft_config.lora_alpha *= 2 ** 0.5
if peft_config.rank_pattern:
peft_config.rank_pattern = {key: 2 * val for (key, val) in peft_config.rank_pattern.items()}
if peft_config.alpha_pattern:
peft_config.alpha_pattern = {key: 2 * val for (key, val) in peft_config.alpha_pattern.items()}
peft_config.save_pretrained(output_dir, auto_mapping_dict=auto_mapping_dict)
peft_config.inference_mode = inference_mode
@classmethod
def from_pretrained(cls, model: torch.nn.Module, model_id: Union[str, os.PathLike], adapter_name: str='default', is_trainable: bool=False, config: Optional[PeftConfig]=None, autocast_adapter_dtype: bool=True, ephemeral_gpu_offload: bool=False, **kwargs: Any) -> PeftModel:
from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING
if config is None:
config = PEFT_TYPE_TO_CONFIG_MAPPING[PeftConfig._get_peft_type(model_id, subfolder=kwargs.get('subfolder', None), revision=kwargs.get('revision', None), cache_dir=kwargs.get('cache_dir', None), use_auth_token=kwargs.get('use_auth_token', None), token=kwargs.get('token', None))].from_pretrained(model_id, **kwargs)
elif isinstance(config, PeftConfig):
config.inference_mode = not is_trainable
else:
raise ValueError(f'The input config must be a PeftConfig, got {config.__class__}')
if hasattr(config, 'runtime_config'):
config.runtime_config.ephemeral_gpu_offload = ephemeral_gpu_offload
elif ephemeral_gpu_offload:
warnings.warn('Ephemeral GPU offloading is not supported for this model. Ignoring.')
if hasattr(model, 'hf_device_map'):
weight_map = dict(named_module_tensors(model, recurse=True))
disk_modules = set()
index = None
for (name, module) in model.named_modules():
if hasattr(module, '_hf_hook') and hasattr(module._hf_hook, 'original_devices'):
if hasattr(module._hf_hook.weights_map, 'dataset'):
index = module._hf_hook.weights_map.dataset.index
for key in module._hf_hook.original_devices.keys():
if module._hf_hook.original_devices[key] == torch.device('meta'):
disk_modules.add(str(name) + '.' + str(key))
if disk_modules and (not kwargs.get('use_safetensors', True)):
raise ValueError('Disk offloading currently only supported for safetensors')
if index:
offload_index = {p: {'safetensors_file': index[p]['safetensors_file'], 'weight_name': p, 'dtype': str(weight_map[p].dtype).replace('torch.', '')} for p in weight_map.keys() if p in disk_modules}
kwargs['offload_index'] = offload_index
if getattr(model, 'hf_device_map', None) is not None and len(set(model.hf_device_map.values()).intersection({'cpu', 'disk'})) > 0:
remove_hook_from_submodules(model)
if config.is_prompt_learning and is_trainable:
raise ValueError('Cannot set a prompt learning adapter to trainable when loading pretrained adapter.')
else:
config.inference_mode = not is_trainable
if isinstance(getattr(model, 'base_model', None), XLoraModel):
if not isinstance(config, XLoraConfig):
raise TypeError(f"Expected 'XLoraConfig', got '{type(config)}' instead.")
if 'adapters' in kwargs:
config.adapters = kwargs['adapters']
elif not os.path.exists(model_id):
s = HfFileSystem()
adapter_names = [file['name'][len(model_id) + 1:] for file in s.ls(model_id) if file['type'] == 'directory']
adapter_paths = {}
for adapter_name in adapter_names:
adapter_paths[adapter_name] = os.path.join(model_id, model_id)
config.adapters = adapter_paths
config._subfolders = adapter_names
elif 'adapters' not in kwargs:
raise ValueError('If model_id is a local path, then `adapters` must be passed in kwargs.')
if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys():
model = cls(model, config, adapter_name, autocast_adapter_dtype=autocast_adapter_dtype)
else:
model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name, autocast_adapter_dtype=autocast_adapter_dtype)
model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, autocast_adapter_dtype=autocast_adapter_dtype, **kwargs)
return model
def _setup_prompt_encoder(self, adapter_name: str):
config = self.peft_config[adapter_name]
if not hasattr(self, 'prompt_encoder'):
self.prompt_encoder = torch.nn.ModuleDict({})
self.prompt_tokens = {}
transformer_backbone = None
for (name, module) in self.base_model.named_children():
for param in module.parameters():
param.requires_grad = False
if isinstance(module, PreTrainedModel):
if transformer_backbone is None:
transformer_backbone = module
self.transformer_backbone_name = name
if transformer_backbone is None:
transformer_backbone = self.base_model
if config.num_transformer_submodules is None:
config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1
for (named_param, value) in list(transformer_backbone.named_parameters()):
deepspeed_distributed_tensor_shape = getattr(value, 'ds_shape', None)
if value.shape[0] == self.base_model.config.vocab_size or (deepspeed_distributed_tensor_shape is not None and deepspeed_distributed_tensor_shape[0] == self.base_model.config.vocab_size):
self.word_embeddings = transformer_backbone.get_submodule(named_param.replace('.weight', ''))
break
if config.peft_type == PeftType.PROMPT_TUNING:
prompt_encoder = PromptEmbedding(config, self.word_embeddings)
elif config.peft_type == PeftType.MULTITASK_PROMPT_TUNING:
prompt_encoder = MultitaskPromptEmbedding(config, self.word_embeddings)
elif config.peft_type == PeftType.P_TUNING:
prompt_encoder = PromptEncoder(config)
elif config.peft_type == PeftType.PREFIX_TUNING:
prompt_encoder = PrefixEncoder(config)
else:
raise ValueError('Not supported')
prompt_encoder = prompt_encoder.to(self.device)
self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder}))
self.prompt_tokens[adapter_name] = torch.arange(config.num_virtual_tokens * config.num_transformer_submodules).long()
def _prepare_model_for_gradient_checkpointing(self, model: PreTrainedModel):
if not (getattr(model, 'is_loaded_in_8bit', False) or getattr(model, 'is_loaded_in_4bit', False) or getattr(model, 'is_quantized', False)):
if hasattr(model, 'enable_input_require_grads'):
model.enable_input_require_grads()
elif hasattr(model, 'get_input_embeddings'):
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
return model
def get_prompt_embedding_to_save(self, adapter_name: str) -> torch.Tensor:
prompt_encoder = self.prompt_encoder[adapter_name]
prompt_tokens = self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(prompt_encoder.embedding.weight.device)
if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING:
prompt_tokens = prompt_tokens[:, :self.peft_config[adapter_name].num_virtual_tokens]
if self.peft_config[adapter_name].peft_type == PeftType.MULTITASK_PROMPT_TUNING:
prompt_embeddings = super(MultitaskPromptEmbedding, prompt_encoder).forward(prompt_tokens)
else:
prompt_embeddings = prompt_encoder(prompt_tokens)
return prompt_embeddings[0].detach().cpu()
def get_prompt(self, batch_size: int, task_ids: Optional[torch.Tensor]=None) -> torch.Tensor:
peft_config = self.active_peft_config
prompt_encoder = self.prompt_encoder[self.active_adapter]
prompt_tokens = self.prompt_tokens[self.active_adapter].unsqueeze(0).expand(batch_size, -1).to(prompt_encoder.embedding.weight.device)
if peft_config.peft_type == PeftType.PREFIX_TUNING:
prompt_tokens = prompt_tokens[:, :peft_config.num_virtual_tokens]
if peft_config.inference_mode:
past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1)
else:
past_key_values = prompt_encoder(prompt_tokens)
if self.base_model_torch_dtype is not None:
past_key_values = past_key_values.to(self.base_model_torch_dtype)
past_key_values = past_key_values.view(batch_size, peft_config.num_virtual_tokens, peft_config.num_layers * 2, peft_config.num_attention_heads, peft_config.token_dim // peft_config.num_attention_heads)
if peft_config.num_transformer_submodules == 2:
past_key_values = torch.cat([past_key_values, past_key_values], dim=2)
past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(peft_config.num_transformer_submodules * 2)
if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None:
post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type]
past_key_values = post_process_fn(past_key_values)
return past_key_values
else:
if peft_config.peft_type == PeftType.MULTITASK_PROMPT_TUNING:
prompts = prompt_encoder(prompt_tokens, task_ids)
else:
if peft_config.inference_mode:
prompts = prompt_encoder.embedding.weight
else:
prompt_tokens = prompt_tokens[:1]
prompts = prompt_encoder(prompt_tokens)
prompts = prompts.repeat(batch_size, 1, 1)
return prompts
def get_nb_trainable_parameters(self) -> tuple[int, int]:
trainable_params = 0
all_param = 0
for (_, param) in self.named_parameters():
num_params = param.numel()
if num_params == 0 and hasattr(param, 'ds_numel'):
num_params = param.ds_numel
if param.__class__.__name__ == 'Params4bit':
if hasattr(param, 'element_size'):
num_bytes = param.element_size()
elif not hasattr(param, 'quant_storage'):
num_bytes = 1
else:
num_bytes = param.quant_storage.itemsize
num_params = num_params * 2 * num_bytes
all_param += num_params
if param.requires_grad:
trainable_params += num_params
return (trainable_params, all_param)
def print_trainable_parameters(self) -> None:
(trainable_params, all_param) = self.get_nb_trainable_parameters()
print(f'trainable params: {trainable_params:,d} || all params: {all_param:,d} || trainable%: {100 * trainable_params / all_param:.4f}')
def __getattr__(self, name: str):
try:
return super().__getattr__(name)
except AttributeError:
if name == 'base_model':
raise
return getattr(self.base_model, name)
@contextmanager
def _enable_peft_forward_hooks(self, *args, **kwargs):
if hasattr(self.base_model, '_enable_peft_forward_hooks'):
with self.base_model._enable_peft_forward_hooks(*args, **kwargs):
yield
return
else:
yield
return
def forward(self, *args: Any, **kwargs: Any):
with self._enable_peft_forward_hooks(*args, **kwargs):
kwargs = {k: v for (k, v) in kwargs.items() if k not in self.special_peft_forward_args}
return self.get_base_model()(*args, **kwargs)
def generate(self, *args, **kwargs):
with self._enable_peft_forward_hooks(*args, **kwargs):
kwargs = {k: v for (k, v) in kwargs.items() if k not in self.special_peft_forward_args}
return self.get_base_model().generate(*args, **kwargs)
def _get_base_model_class(self, is_prompt_tuning=False):
if not is_prompt_tuning:
return self.base_model.model.__class__
return self.base_model.__class__
@contextmanager
def disable_adapter(self):
if self.peft_config[self.active_adapter].is_prompt_learning:
try:
old_forward = self.forward
self.forward = self.base_model.forward
old_prepare_inputs_for_generation = self.prepare_inputs_for_generation
self.prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation
yield
finally:
self.forward = old_forward
self.prepare_inputs_for_generation = old_prepare_inputs_for_generation
elif self.peft_config[self.active_adapter].is_adaption_prompt:
try:
self.base_model.disable_adapter_layers()
yield
finally:
self.base_model.enable_adapter_layers()
else:
model_status = self.get_model_status()
if model_status.enabled == 'irregular':
warnings.warn('The model contains some adapter layers that are enabled and others that are disabled. This is most likely unintentional. After exiting the disable_adapter context, all adapters will be enabled')
try:
self.base_model.disable_adapter_layers()
yield
finally:
if model_status.enabled is not False:
self.base_model.enable_adapter_layers()
def get_base_model(self) -> torch.nn.Module:
return self.base_model if self.active_peft_config.is_prompt_learning or self.peft_type == PeftType.POLY else self.base_model.model
def add_adapter(self, adapter_name: str, peft_config: PeftConfig) -> None:
if peft_config.peft_type != self.peft_type:
raise ValueError(f'Cannot combine adapters with different peft types. Found {self.peft_type} and {peft_config.peft_type}.')
try:
if peft_config.is_prompt_learning:
self.peft_config[adapter_name] = peft_config
if hasattr(self.config, 'to_dict'):
dict_config = self.config.to_dict()
else:
dict_config = self.config
peft_config = _prepare_prompt_learning_config(peft_config, dict_config)
self._setup_prompt_encoder(adapter_name)
elif peft_config.is_adaption_prompt:
self.base_model.add_adapter(adapter_name, peft_config)
else:
self.peft_config[adapter_name] = peft_config
self.base_model.inject_adapter(self.base_model.model, adapter_name)
except Exception:
if adapter_name in self.peft_config:
del self.peft_config[adapter_name]
raise
self.set_additional_trainable_modules(peft_config, adapter_name)
def set_additional_trainable_modules(self, peft_config, adapter_name):
if getattr(peft_config, 'modules_to_save', None) is not None:
if self.modules_to_save is None:
self.modules_to_save = set(peft_config.modules_to_save)
else:
self.modules_to_save.update(peft_config.modules_to_save)
_set_trainable(self, adapter_name)
def get_layer_status(self) -> list[TunerLayerStatus]:
return get_layer_status(self)
def get_model_status(self) -> TunerModelStatus:
return get_model_status(self)
@classmethod
def _split_kwargs(cls, kwargs: dict[str, Any]):
_kwargs_not_in_hf_hub_download_signature = ('use_auth_token',)
hf_hub_download_kwargs = {}
other_kwargs = {}
for (key, value) in kwargs.items():
if key in inspect.signature(hf_hub_download).parameters or key in _kwargs_not_in_hf_hub_download_signature:
hf_hub_download_kwargs[key] = value
else:
other_kwargs[key] = value
return (hf_hub_download_kwargs, other_kwargs)
def _update_offload(self, offload_index: dict[str, dict[str, str]], adapters_weights: dict[str, torch.tensor]):
if not offload_index:
return offload_index
prefix = 'base_model.model.'
adapter_names = list(self.peft_config.keys())
for adapter_name in adapter_names:
keys = list(offload_index.keys())
block_id = keys[0].split('.')[0] + '.'
for key in keys:
suffix_pos = key.rfind('.')
extended_prefix = prefix + key[:suffix_pos]
module = dict(self.named_modules())[extended_prefix]
if isinstance(module, BaseTunerLayer):
new_key = prefix + key[:suffix_pos] + '.base_layer' + key[suffix_pos:]
else:
new_key = prefix + key
offload_index[key]['weight_name'] = new_key
offload_index[new_key] = offload_index[key]
del offload_index[key]
files_seen = set()
for new_key in list(offload_index.keys()):
fname = offload_index[new_key]['safetensors_file']
new_fname_list = list(fname.split(os.sep))
for (i, name) in enumerate(new_fname_list):
if '--' in name:
new_fname_list[i] += '-peft'
break
new_fname = os.path.join(*new_fname_list)
if fname in files_seen:
continue
safe_dict = {}
with safe_open(fname, framework='pt') as f:
for safe_key in f.keys():
safe_tensor = f.get_tensor(safe_key)
metadata = f.metadata()
suffix_pos = safe_key.rfind('.')
extended_prefix = prefix + block_id + safe_key[:suffix_pos]
safe_module = dict(self.named_modules())[extended_prefix]
if isinstance(safe_module, BaseTunerLayer):
final_key = extended_prefix + '.base_layer' + safe_key[suffix_pos:]
lora_dict = {key: val for (key, val) in adapters_weights.items() if extended_prefix in key}
for (lora_key, lora_val) in lora_dict.items():
divide = lora_key.rfind('.')
new_key = lora_key[:divide] + f'.{adapter_name}' + lora_key[divide:]
safe_dict[new_key] = lora_val
else:
final_key = prefix + block_id + safe_key
safe_dict[final_key] = safe_tensor
files_seen.add(new_fname)
for key in safe_dict.keys():
offload_index[key] = {'safetensors_file': new_fname, 'weight_name': key}
base_name = os.path.dirname(new_fname)
if not os.path.exists(base_name):
os.makedirs(base_name)
safe_save_file(safe_dict, new_fname, metadata=metadata)
def load_adapter(self, model_id: str, adapter_name: str, is_trainable: bool=False, torch_device: Optional[str]=None, autocast_adapter_dtype: bool=True, ephemeral_gpu_offload: bool=False, **kwargs: Any):
from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
(hf_hub_download_kwargs, kwargs) = self._split_kwargs(kwargs)
if torch_device is None:
torch_device = infer_device()
if adapter_name not in self.peft_config:
peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[PeftConfig._get_peft_type(model_id, **hf_hub_download_kwargs)].from_pretrained(model_id, ephemeral_gpu_offload=ephemeral_gpu_offload, **hf_hub_download_kwargs)
if peft_config.is_prompt_learning and is_trainable:
raise ValueError('Cannot set a prompt learning adapter to trainable when loading pretrained adapter.')
else:
peft_config.inference_mode = not is_trainable
self.add_adapter(adapter_name, peft_config)
adapters_weights = load_peft_weights(model_id, device=torch_device, **hf_hub_download_kwargs)
ignore_mismatched_sizes = kwargs.get('ignore_mismatched_sizes', False)
load_result = set_peft_model_state_dict(self, adapters_weights, adapter_name=adapter_name, ignore_mismatched_sizes=ignore_mismatched_sizes)
if getattr(self, 'hf_device_map', None) is not None and len(set(self.hf_device_map.values()).intersection({'cpu', 'disk'})) > 0 and (len(self.peft_config) == 1):
device_map = kwargs.get('device_map', 'auto')
max_memory = kwargs.get('max_memory', None)
offload_dir = kwargs.get('offload_folder', None)
offload_index = kwargs.get('offload_index', None)
dispatch_model_kwargs = {}
if 'offload_index' in inspect.signature(dispatch_model).parameters:
dispatch_model_kwargs['offload_index'] = offload_index
no_split_module_classes = self._no_split_modules
if device_map != 'sequential':
max_memory = get_balanced_memory(self, max_memory=max_memory, no_split_module_classes=no_split_module_classes, low_zero=device_map == 'balanced_low_0')
if isinstance(device_map, str):
device_map = infer_auto_device_map(self, max_memory=max_memory, no_split_module_classes=no_split_module_classes)
self._update_offload(offload_index, adapters_weights)
dispatch_model_kwargs['offload_index'] = offload_index
dispatch_model(self, device_map=device_map, offload_dir=offload_dir, **dispatch_model_kwargs)
hook = AlignDevicesHook(io_same_device=True)
if self.peft_config[adapter_name].is_prompt_learning:
remove_hook_from_submodules(self.prompt_encoder)
add_hook_to_module(self.get_base_model(), hook)
if hasattr(self.base_model, '_cast_adapter_dtype'):
self.base_model._cast_adapter_dtype(adapter_name=adapter_name, autocast_adapter_dtype=autocast_adapter_dtype)
if not is_trainable:
self.eval()
return load_result
def set_adapter(self, adapter_name: str) -> None:
if adapter_name not in self.peft_config:
raise ValueError(f'Adapter {adapter_name} not found.')
self.active_adapter = adapter_name
if not self.peft_config[adapter_name].is_prompt_learning:
self.base_model.set_adapter(adapter_name)
_set_adapter(self, adapter_name)
@property
def base_model_torch_dtype(self):
return getattr(self.base_model, 'dtype', None)
@property
def active_peft_config(self):
return self.peft_config[self.active_adapter]
def create_or_update_model_card(self, output_dir: str):
filename = os.path.join(output_dir, 'README.md')
card = ModelCard.load(filename) if os.path.exists(filename) else ModelCard.from_template(ModelCardData())
card.data['library_name'] = 'peft'
model_config = BaseTuner.get_model_config(self)
model_config = None if model_config == DUMMY_MODEL_CONFIG else model_config
if model_config is not None and '_name_or_path' in model_config:
card.data['base_model'] = model_config['_name_or_path']
lines = card.text.splitlines()
quantization_config = None
if hasattr(model_config, 'quantization_config'):
quantization_config = self.config.quantization_config.to_dict()
training_config_text = ''
quantization_prefix = 'The following `bitsandbytes` quantization config was used during training:'
if quantization_config is not None:
training_config_text += f'\n{quantization_prefix}\n'
training_config_text += '\n'.join([f'- {name}: {value}' for (name, value) in quantization_config.items()])
training_config_text += '\n'
training_procedure_heading = '## Training procedure'
if quantization_prefix not in lines and bool(training_config_text):
if training_procedure_heading in lines:
lines.insert(lines.index(training_procedure_heading) + 2, training_config_text)
else:
lines.append(f'{training_procedure_heading}\n{training_config_text}')
framework_block_heading = '### Framework versions'
if f'- PEFT {__version__}' not in lines:
if framework_block_heading in lines:
lines.insert(lines.index(framework_block_heading) + 2, f'- PEFT {__version__}')
else:
lines.append(f'{framework_block_heading}\n\n- PEFT {__version__}')
card.text = '\n'.join(lines)
card.save(filename)
class PeftModelForSequenceClassification(PeftModel):
def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str='default', **kwargs) -> None:
super().__init__(model, peft_config, adapter_name, **kwargs)
classifier_module_names = ['classifier', 'score']
if self.modules_to_save is None:
self.modules_to_save = set(classifier_module_names)
else:
self.modules_to_save.update(classifier_module_names)
if hasattr(peft_config, 'modules_to_save'):
if peft_config.modules_to_save is None:
peft_config.modules_to_save = classifier_module_names[:]
else:
peft_config.modules_to_save.extend(classifier_module_names)
for (name, _) in self.base_model.named_children():
if any((module_name in name for module_name in self.modules_to_save)):
self.cls_layer_name = name
break
_set_trainable(self, adapter_name)
def add_adapter(self, adapter_name: str, peft_config: PeftConfig) -> None:
if hasattr(peft_config, 'modules_to_save'):
classifier_module_names = ['classifier', 'score']
if peft_config.modules_to_save is None:
peft_config.modules_to_save = classifier_module_names[:]
else:
peft_config.modules_to_save.extend(classifier_module_names)
return super().add_adapter(adapter_name, peft_config)
def forward(self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
peft_config = self.active_peft_config
if not peft_config.is_prompt_learning:
with self._enable_peft_forward_hooks(**kwargs):
kwargs = {k: v for (k, v) in kwargs.items() if k not in self.special_peft_forward_args}
if peft_config.peft_type == PeftType.POLY:
kwargs['task_ids'] = task_ids
return self.base_model(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs)
batch_size = _get_batch_size(input_ids, inputs_embeds)
if attention_mask is not None:
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
if kwargs.get('position_ids', None) is not None:
warnings.warn('Position ids are not supported for parameter efficient tuning. Ignoring position ids.')
kwargs['position_ids'] = None
kwargs.update({'attention_mask': attention_mask, 'labels': labels, 'output_attentions': output_attentions, 'output_hidden_states': output_hidden_states, 'return_dict': return_dict})
if peft_config.peft_type == PeftType.PREFIX_TUNING:
return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
else:
if kwargs.get('token_type_ids', None) is not None:
kwargs['token_type_ids'] = torch.cat((torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), kwargs['token_type_ids']), dim=1).long()
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
prompts = prompts.to(inputs_embeds.dtype)
inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
def _prefix_tuning_forward(self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
batch_size = _get_batch_size(input_ids, inputs_embeds)
past_key_values = self.get_prompt(batch_size)
fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
kwargs.update({'input_ids': input_ids, 'attention_mask': attention_mask, 'inputs_embeds': inputs_embeds, 'output_attentions': output_attentions, 'output_hidden_states': output_hidden_states, 'return_dict': return_dict, 'past_key_values': past_key_values})
if 'past_key_values' in fwd_params:
return self.base_model(labels=labels, **kwargs)
else:
transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
if 'past_key_values' not in fwd_params:
raise ValueError('Model does not support past key values which are required for prefix tuning.')
outputs = transformer_backbone_name(**kwargs)
pooled_output = outputs[1] if len(outputs) > 1 else outputs[0]
if 'dropout' in [name for (name, _) in list(self.base_model.named_children())]:
pooled_output = self.base_model.dropout(pooled_output)
logits = self.base_model.get_submodule(self.cls_layer_name)(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.base_model.num_labels == 1:
self.config.problem_type = 'regression'
elif self.base_model.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.base_model.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.base_model.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class PeftModelForCausalLM(PeftModel):
def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str='default', **kwargs) -> None:
super().__init__(model, peft_config, adapter_name, **kwargs)
self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation
def forward(self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs):
peft_config = self.active_peft_config
if not peft_config.is_prompt_learning:
if self.base_model.config.model_type == 'mpt':
if inputs_embeds is not None:
raise AssertionError('forward in MPTForCausalLM does not support inputs_embeds')
return self.base_model(input_ids=input_ids, attention_mask=attention_mask, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs)
if peft_config.peft_type == PeftType.POLY:
kwargs['task_ids'] = task_ids
with self._enable_peft_forward_hooks(**kwargs):
kwargs = {k: v for (k, v) in kwargs.items() if k not in self.special_peft_forward_args}
return self.base_model(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs)
batch_size = _get_batch_size(input_ids, inputs_embeds)
if attention_mask is not None:
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
if kwargs.get('position_ids', None) is not None:
warnings.warn('Position ids are not supported for parameter efficient tuning. Ignoring position ids.')
kwargs['position_ids'] = None
if kwargs.get('token_type_ids', None) is not None:
warnings.warn('Token type ids are not supported for parameter efficient tuning. Ignoring token type ids')
kwargs['token_type_ids'] = None
kwargs.update({'attention_mask': attention_mask, 'labels': labels, 'output_attentions': output_attentions, 'output_hidden_states': output_hidden_states, 'return_dict': return_dict})
if peft_config.peft_type == PeftType.PREFIX_TUNING:
kwargs['past_key_values'] = self.get_prompt(batch_size)
return self.base_model(input_ids=input_ids, inputs_embeds=inputs_embeds, **kwargs)
else:
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if labels is not None:
prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device)
kwargs['labels'] = torch.cat((prefix_labels, labels), dim=1)
prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
prompts = prompts.to(inputs_embeds.dtype)
inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
def generate(self, *args, **kwargs):
peft_config = self.active_peft_config
self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation
if hasattr(self.base_model, 'model'):
self.base_model.model.generation_config = self.generation_config
else:
self.base_model.generation_config = self.generation_config
try:
if not peft_config.is_prompt_learning:
with self._enable_peft_forward_hooks(*args, **kwargs):
kwargs = {k: v for (k, v) in kwargs.items() if k not in self.special_peft_forward_args}
outputs = self.base_model.generate(*args, **kwargs)
else:
outputs = self.base_model.generate(**kwargs)
except:
self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
raise
else:
self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
return outputs
def prepare_inputs_for_generation(self, *args, task_ids: Optional[torch.Tensor]=None, **kwargs):
peft_config = self.active_peft_config
model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)
uses_transformers_4_38 = packaging.version.parse(transformers.__version__) >= packaging.version.parse('4.38.0')
uses_transformers_4_36 = packaging.version.parse(transformers.__version__) >= packaging.version.parse('4.36.0')
transformers_new_cache_archs = ['llama', 'mistral', 'persimmon', 'phi']
if packaging.version.parse(transformers.__version__) > packaging.version.parse('4.43.3'):
transformers_new_cache_archs.append('bloom')
uses_cache = uses_transformers_4_38 or (uses_transformers_4_36 and self.base_model.config.model_type in transformers_new_cache_archs)
if peft_config.peft_type == PeftType.POLY:
model_kwargs['task_ids'] = task_ids
if peft_config.is_prompt_learning:
if uses_cache and model_kwargs['past_key_values'] is not None:
past_key_values = model_kwargs['past_key_values']
if isinstance(past_key_values, (tuple, list)):
seq_len = past_key_values[0][0].shape[-2]
else:
seq_len = past_key_values.get_seq_length()
if seq_len >= model_kwargs['input_ids'].shape[1]:
model_kwargs['input_ids'] = model_kwargs['input_ids'][:, -1:]
if model_kwargs.get('attention_mask', None) is not None:
size = (model_kwargs['input_ids'].shape[0], peft_config.num_virtual_tokens)
prefix_attention_mask = torch.ones(size).to(model_kwargs['input_ids'].device)
model_kwargs['attention_mask'] = torch.cat((prefix_attention_mask, model_kwargs['attention_mask']), dim=1)
if model_kwargs.get('position_ids', None) is not None:
warnings.warn('Position ids are not supported for parameter efficient tuning. Ignoring position ids.')
model_kwargs['position_ids'] = None
if kwargs.get('token_type_ids', None) is not None:
warnings.warn('Token type ids are not supported for parameter efficient tuning. Ignoring token type ids')
kwargs['token_type_ids'] = None
requires_prompt_injection = model_kwargs['past_key_values'] is None or (isinstance(model_kwargs['past_key_values'], transformers.Cache) and (not model_kwargs['past_key_values']))
if requires_prompt_injection and peft_config.peft_type == PeftType.PREFIX_TUNING:
new_past_key_values = self.get_prompt(batch_size=model_kwargs['input_ids'].shape[0])
model_kwargs['past_key_values'] = new_past_key_values
elif requires_prompt_injection:
inputs_embeds = self.word_embeddings(model_kwargs['input_ids'])
prompts = self.get_prompt(batch_size=model_kwargs['input_ids'].shape[0], task_ids=task_ids)
prompts = prompts.to(inputs_embeds.dtype)
model_kwargs['inputs_embeds'] = torch.cat((prompts, inputs_embeds), dim=1)
model_kwargs['input_ids'] = None
_ = model_kwargs.pop('cache_position', None)
return model_kwargs
class PeftModelForSeq2SeqLM(PeftModel):
def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str='default', **kwargs) -> None:
super().__init__(model, peft_config, adapter_name, **kwargs)
self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation
self.base_model_prepare_encoder_decoder_kwargs_for_generation = self.base_model._prepare_encoder_decoder_kwargs_for_generation
def forward(self, input_ids=None, attention_mask=None, inputs_embeds=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs):
peft_config = self.active_peft_config
if not peft_config.is_prompt_learning:
if peft_config.peft_type == PeftType.POLY:
kwargs['task_ids'] = task_ids
with self._enable_peft_forward_hooks(**kwargs):
kwargs = {k: v for (k, v) in kwargs.items() if k not in self.special_peft_forward_args}
return self.base_model(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_inputs_embeds=decoder_inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs)
batch_size = _get_batch_size(input_ids, inputs_embeds)
if decoder_attention_mask is not None:
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(decoder_attention_mask.device)
if peft_config.peft_type not in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]:
decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1)
if kwargs.get('position_ids', None) is not None:
warnings.warn('Position ids are not supported for parameter efficient tuning. Ignoring position ids.')
kwargs['position_ids'] = None
if kwargs.get('token_type_ids', None) is not None:
warnings.warn('Token type ids are not supported for parameter efficient tuning. Ignoring token type ids')
kwargs['token_type_ids'] = None
kwargs.update({'attention_mask': attention_mask, 'decoder_attention_mask': decoder_attention_mask, 'labels': labels, 'output_attentions': output_attentions, 'output_hidden_states': output_hidden_states, 'return_dict': return_dict})
if peft_config.peft_type == PeftType.PREFIX_TUNING:
kwargs['past_key_values'] = self.get_prompt(batch_size)
return self.base_model(input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs)
elif peft_config.peft_type in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]:
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if attention_mask is not None:
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
kwargs['attention_mask'] = torch.cat((prefix_attention_mask, attention_mask), dim=1)
prompts = self.get_prompt(batch_size=batch_size)
prompts = prompts.to(inputs_embeds.dtype)
inputs_embeds = torch.cat((prompts[:, :peft_config.num_virtual_tokens], inputs_embeds), dim=1)
return self.base_model(inputs_embeds=inputs_embeds, decoder_input_ids=decoder_input_ids, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs)
else:
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if decoder_inputs_embeds is None and decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
decoder_inputs_embeds = self.word_embeddings(decoder_input_ids)
if attention_mask is not None:
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
kwargs['attention_mask'] = torch.cat((prefix_attention_mask, attention_mask), dim=1)
if labels is not None:
if peft_config.num_transformer_submodules == 1:
kwargs['labels'] = labels
elif peft_config.num_transformer_submodules == 2:
prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device)
kwargs['labels'] = torch.cat((prefix_labels, labels), dim=1)
prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
prompts = prompts.to(inputs_embeds.dtype)
inputs_embeds = torch.cat((prompts[:, :peft_config.num_virtual_tokens], inputs_embeds), dim=1)
if peft_config.num_transformer_submodules == 1:
return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
elif peft_config.num_transformer_submodules == 2:
decoder_inputs_embeds = torch.cat((prompts[:, peft_config.num_virtual_tokens:], decoder_inputs_embeds), dim=1)
return self.base_model(inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs)
def generate(self, **kwargs):
peft_config = self.active_peft_config
self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation
self.base_model._prepare_encoder_decoder_kwargs_for_generation = self._prepare_encoder_decoder_kwargs_for_generation
try:
if not peft_config.is_prompt_learning:
with self._enable_peft_forward_hooks(**kwargs):
kwargs = {k: v for (k, v) in kwargs.items() if k not in self.special_peft_forward_args}
outputs = self.base_model.generate(**kwargs)
else:
if 'input_ids' not in kwargs:
raise ValueError('input_ids must be provided for Peft model generation')
if kwargs.get('position_ids', None) is not None:
warnings.warn('Position ids are not supported for parameter efficient tuning. Ignoring position ids.')
kwargs['position_ids'] = None
if kwargs.get('token_type_ids', None) is not None:
warnings.warn('Token type ids are not supported for parameter efficient tuning. Ignoring token type ids')
kwargs['token_type_ids'] = None
if peft_config.peft_type == PeftType.PREFIX_TUNING:
outputs = self.base_model.generate(**kwargs)
elif peft_config.peft_type in [PeftType.PROMPT_TUNING, PeftType.P_TUNING, PeftType.MULTITASK_PROMPT_TUNING]:
kwargs = deepcopy(kwargs)
if 'encoder_outputs' in kwargs:
del kwargs['encoder_outputs']
warnings.warn('`encoder_outputs` should not be passed to `generate` when using prompt tuning. Ignoring it.')
input_ids = kwargs.pop('input_ids')
inputs_embeds = self.word_embeddings(input_ids)
batch_size = inputs_embeds.shape[0]
prompts = self.get_prompt(batch_size=batch_size, task_ids=kwargs.pop('task_ids', None))
prompts = prompts.to(inputs_embeds.dtype)
inputs_embeds = torch.cat((prompts[:, :peft_config.num_virtual_tokens], inputs_embeds), dim=1)
kwargs['inputs_embeds'] = inputs_embeds
if 'attention_mask' in kwargs:
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(kwargs['attention_mask'].device)
kwargs['attention_mask'] = torch.cat((prefix_attention_mask, kwargs['attention_mask']), dim=1)
return self.base_model.generate(**kwargs)
else:
raise NotImplementedError
except:
self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
self.base_model._prepare_encoder_decoder_kwargs_for_generation = self.base_model_prepare_encoder_decoder_kwargs_for_generation
raise
else:
self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
self.base_model._prepare_encoder_decoder_kwargs_for_generation = self.base_model_prepare_encoder_decoder_kwargs_for_generation
return outputs
def prepare_inputs_for_generation(self, *args, task_ids: torch.Tensor=None, **kwargs):
peft_config = self.active_peft_config
model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)
if peft_config.peft_type == PeftType.POLY:
model_kwargs['task_ids'] = task_ids
if model_kwargs['past_key_values'] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:
batch_size = model_kwargs['decoder_input_ids'].shape[0]
past_key_values = self.get_prompt(batch_size)
model_kwargs['past_key_values'] = past_key_values
return model_kwargs
class PeftModelForTokenClassification(PeftModel):
def __init__(self, model: torch.nn.Module, peft_config: PeftConfig=None, adapter_name: str='default', **kwargs) -> None:
super().__init__(model, peft_config, adapter_name, **kwargs)
classifier_module_names = ['classifier', 'score']
if self.modules_to_save is None:
self.modules_to_save = set(classifier_module_names)
else:
self.modules_to_save.update(classifier_module_names)
if hasattr(peft_config, 'modules_to_save'):
if peft_config.modules_to_save is None:
peft_config.modules_to_save = classifier_module_names[:]
else:
peft_config.modules_to_save.extend(classifier_module_names)
for (name, _) in self.base_model.named_children():
if any((module_name in name for module_name in self.modules_to_save)):
self.cls_layer_name = name
break
_set_trainable(self, adapter_name)
def add_adapter(self, adapter_name: str, peft_config: PeftConfig) -> None:
if hasattr(peft_config, 'modules_to_save'):
classifier_module_names = ['classifier', 'score']
if peft_config.modules_to_save is None:
peft_config.modules_to_save = classifier_module_names[:]
else:
peft_config.modules_to_save.extend(classifier_module_names)
return super().add_adapter(adapter_name, peft_config)
def forward(self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs):
peft_config = self.active_peft_config
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if not peft_config.is_prompt_learning:
with self._enable_peft_forward_hooks(**kwargs):
kwargs = {k: v for (k, v) in kwargs.items() if k not in self.special_peft_forward_args}
if peft_config.peft_type == PeftType.POLY:
kwargs['task_ids'] = task_ids
return self.base_model(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs)
batch_size = _get_batch_size(input_ids, inputs_embeds)
if attention_mask is not None:
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
if kwargs.get('position_ids', None) is not None:
warnings.warn('Position ids are not supported for parameter efficient tuning. Ignoring position ids.')
kwargs['position_ids'] = None
kwargs.update({'attention_mask': attention_mask, 'labels': labels, 'output_attentions': output_attentions, 'output_hidden_states': output_hidden_states, 'return_dict': return_dict})
if peft_config.peft_type == PeftType.PREFIX_TUNING:
return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
else:
if kwargs.get('token_type_ids', None) is not None:
kwargs['token_type_ids'] = torch.cat((torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), kwargs['token_type_ids']), dim=1).long()
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
prompts = prompts.to(inputs_embeds.dtype)
inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
def _prefix_tuning_forward(self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
batch_size = _get_batch_size(input_ids, inputs_embeds)
past_key_values = self.get_prompt(batch_size)
fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
kwargs.update({'input_ids': input_ids, 'attention_mask': attention_mask, 'inputs_embeds': inputs_embeds, 'output_attentions': output_attentions, 'output_hidden_states': output_hidden_states, 'return_dict': return_dict, 'past_key_values': past_key_values})
if 'past_key_values' in fwd_params:
return self.base_model(labels=labels, **kwargs)
else:
transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
if 'past_key_values' not in fwd_params:
raise ValueError('Model does not support past key values which are required for prefix tuning.')
outputs = transformer_backbone_name(**kwargs)
sequence_output = outputs[0]
if 'dropout' in [name for (name, _) in list(self.base_model.named_children())]:
sequence_output = self.base_model.dropout(sequence_output)
logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class PeftModelForQuestionAnswering(PeftModel):
def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str='default', **kwargs) -> None:
super().__init__(model, peft_config, adapter_name, **kwargs)
qa_module_names = ['qa_outputs']
if self.modules_to_save is None:
self.modules_to_save = set(qa_module_names)
else:
self.modules_to_save.update(qa_module_names)
if hasattr(peft_config, 'modules_to_save'):
if peft_config.modules_to_save is None:
peft_config.modules_to_save = qa_module_names[:]
else:
peft_config.modules_to_save.extend(qa_module_names)
for (name, _) in self.base_model.named_children():
if any((module_name in name for module_name in self.modules_to_save)):
self.cls_layer_name = name
break
_set_trainable(self, adapter_name)
def add_adapter(self, adapter_name: str, peft_config: PeftConfig) -> None:
if hasattr(peft_config, 'modules_to_save'):
qa_module_names = ['qa_outputs']
if peft_config.modules_to_save is None:
peft_config.modules_to_save = qa_module_names[:]
else:
peft_config.modules_to_save.extend(qa_module_names)
return super().add_adapter(adapter_name, peft_config)
def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs):
peft_config = self.active_peft_config
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if not peft_config.is_prompt_learning:
if peft_config.peft_type == PeftType.POLY:
kwargs['task_ids'] = task_ids
with self._enable_peft_forward_hooks(**kwargs):
kwargs = {k: v for (k, v) in kwargs.items() if k not in self.special_peft_forward_args}
return self.base_model(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, start_positions=start_positions, end_positions=end_positions, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs)
batch_size = _get_batch_size(input_ids, inputs_embeds)
if attention_mask is not None:
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
if kwargs.get('position_ids', None) is not None:
warnings.warn('Position ids are not supported for parameter efficient tuning. Ignoring position ids.')
kwargs['position_ids'] = None
kwargs.update({'attention_mask': attention_mask, 'start_positions': start_positions, 'end_positions': end_positions, 'output_attentions': output_attentions, 'output_hidden_states': output_hidden_states, 'return_dict': return_dict})
if peft_config.peft_type == PeftType.PREFIX_TUNING:
return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
else:
if kwargs.get('token_type_ids', None) is not None:
kwargs['token_type_ids'] = torch.cat((torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), kwargs['token_type_ids']), dim=1).long()
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
prompts = self.get_prompt(batch_size=batch_size)
prompts = prompts.to(inputs_embeds.dtype)
inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
def _prefix_tuning_forward(self, input_ids=None, attention_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
batch_size = _get_batch_size(input_ids, inputs_embeds)
past_key_values = self.get_prompt(batch_size)
fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
kwargs.update({'input_ids': input_ids, 'attention_mask': attention_mask, 'inputs_embeds': inputs_embeds, 'output_attentions': output_attentions, 'output_hidden_states': output_hidden_states, 'return_dict': return_dict, 'past_key_values': past_key_values})
if 'past_key_values' in fwd_params:
return self.base_model(start_positions=start_positions, end_positions=end_positions, **kwargs)
else:
transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
if 'past_key_values' not in fwd_params:
raise ValueError('Model does not support past key values which are required for prefix tuning.')
outputs = transformer_backbone_name(**kwargs)
sequence_output = outputs[0]
if 'dropout' in [name for (name, _) in list(self.base_model.named_children())]:
sequence_output = self.base_model.dropout(sequence_output)
logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output)
(start_logits, end_logits) = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return (total_loss,) + output if total_loss is not None else output
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class PeftModelForFeatureExtraction(PeftModel):
def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str='default', **kwargs):
super().__init__(model, peft_config, adapter_name, **kwargs)
def forward(self, input_ids=None, attention_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs):
peft_config = self.active_peft_config
if not peft_config.is_prompt_learning:
if peft_config.peft_type == PeftType.POLY:
kwargs['task_ids'] = task_ids
with self._enable_peft_forward_hooks(**kwargs):
kwargs = {k: v for (k, v) in kwargs.items() if k not in self.special_peft_forward_args}
return self.base_model(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs)
batch_size = _get_batch_size(input_ids, inputs_embeds)
if attention_mask is not None:
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
if kwargs.get('position_ids', None) is not None:
warnings.warn('Position ids are not supported for parameter efficient tuning. Ignoring position ids.')
kwargs['position_ids'] = None
if kwargs.get('token_type_ids', None) is not None:
warnings.warn('Token type ids are not supported for parameter efficient tuning. Ignoring token type ids')
kwargs['token_type_ids'] = None
kwargs.update({'attention_mask': attention_mask, 'output_attentions': output_attentions, 'output_hidden_states': output_hidden_states, 'return_dict': return_dict})
if peft_config.peft_type == PeftType.PREFIX_TUNING:
kwargs['past_key_values'] = self.get_prompt(batch_size)
return self.base_model(input_ids=input_ids, **kwargs)
else:
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
prompts = self.get_prompt(batch_size=batch_size)
prompts = prompts.to(inputs_embeds.dtype)
inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
@dataclass
class TunerLayerStatus:
name: str
module_type: str
enabled: bool
active_adapters: list[str]
merged_adapters: list[str]
requires_grad: dict[str, bool | Literal['irregular']]
available_adapters: list[str]
devices: dict[str, list[str]]
def get_layer_status(model: torch.nn.Module) -> list[TunerLayerStatus]:
if isinstance(model, PeftModel):
base_model = model.base_model
if not isinstance(base_model, BaseTuner):
raise TypeError('get_layer_status() got an invalid PeftModel instance; prefix tuning and adaption prompt are not supported.')
else:
base_model = model
layer_status: list[TunerLayerStatus] = []
for (name, module) in base_model.named_modules():
if not isinstance(module, BaseTunerLayer):
continue
mapping_requires_grad_list: dict[str, list[bool]] = collections.defaultdict(list)
for adapter_module_name in module.adapter_layer_names:
adapter_module = getattr(module, adapter_module_name)
if isinstance(adapter_module, torch.nn.ModuleDict):
for (key, submodule) in adapter_module.items():
for param in submodule.parameters():
mapping_requires_grad_list[key].append(param.requires_grad)
elif isinstance(adapter_module, torch.nn.ParameterDict):
for (key, param) in adapter_module.items():
mapping_requires_grad_list[key].append(param.requires_grad)
else:
pass
def check_irrgular(vals: list[bool]) -> bool | Literal['irregular']:
if all(vals):
return True
if not any(vals):
return False
return 'irregular'
requires_grad = {key: check_irrgular(vals) for (key, vals) in mapping_requires_grad_list.items()}
devices_dd = collections.defaultdict(list)
for adapter_module_name in module.adapter_layer_names + module.other_param_names:
adapter_module = getattr(module, adapter_module_name)
if isinstance(adapter_module, torch.nn.ModuleDict):
for (key, submodule) in adapter_module.items():
devices_dd[key].extend([param.device.type for param in submodule.parameters()])
elif isinstance(adapter_module, torch.nn.ParameterDict) or adapter_module.__class__.__name__ == 'BufferDict':
for (key, param) in adapter_module.items():
devices_dd[key].append(param.device.type)
devices = {key: sorted(set(val)) for (key, val) in devices_dd.items()}
status = TunerLayerStatus(name=name, module_type=repr(module).partition('(')[0], enabled=not module.disable_adapters, active_adapters=module.active_adapters, merged_adapters=module.merged_adapters, requires_grad=requires_grad, available_adapters=sorted(module._get_available_adapters()), devices=devices)
layer_status.append(status)
if not layer_status:
raise ValueError("No adapter layers found in the model, please ensure that it's a PEFT model or that you have PEFT adapters injected in the model.")
return layer_status
@dataclass
class TunerModelStatus:
base_model_type: str
adapter_model_type: str
peft_types: dict[str, str]
trainable_params: int
total_params: int
num_adapter_layers: int
enabled: bool | Literal['irregular']
active_adapters: list[str] | Literal['irregular']
merged_adapters: list[str] | Literal['irregular']
requires_grad: dict[str, bool | Literal['irregular']]
available_adapters: list[str]
devices: dict[str, list[str]]
def get_model_status(model: torch.nn.Module) -> TunerModelStatus:
if isinstance(model, PeftModel):
if not isinstance(model.base_model, BaseTuner):
raise TypeError('get_model_status() got an invalid PeftModel instance; prefix tuning and adaption prompt are not supported.')
base_model_type = model.get_base_model().__class__.__name__
(trainable_params, total_params) = model.get_nb_trainable_parameters()
base_model = model.base_model
peft_types = {key: str(config.peft_type).partition('.')[-1] for (key, config) in base_model.peft_config.items()}
adapter_model_type = base_model.__class__.__name__
elif isinstance(model, PreTrainedModel):
base_model_type = model.__class__.__name__
(trainable_params, total_params) = PeftModel.get_nb_trainable_parameters(model)
base_model = model
peft_types = {}
adapter_model_type = 'None'
else:
base_model_type = 'other'
(trainable_params, total_params) = PeftModel.get_nb_trainable_parameters(model)
base_model = model
peft_types = {}
adapter_model_type = 'None'
layer_status = get_layer_status(model)
num_adapter_layers = len(layer_status)
enabled_set: set[bool] = {status.enabled for status in layer_status}
enabled: bool | Literal['irregular']
if len(enabled_set) == 1:
enabled = enabled_set.pop()
else:
enabled = 'irregular'
available_adapters: list[str] = sorted(set().union(*(status.available_adapters for status in layer_status)))
all_active_adapters: set[tuple[str, ...]] = {tuple(status.active_adapters) for status in layer_status}
active_adapters: list[str] | Literal['irregular']
if not all_active_adapters:
active_adapters = []
elif len(all_active_adapters) == 1:
active_adapters = list(all_active_adapters.pop())
else:
active_adapters = 'irregular'
merged_all: set[str] = set()
for status in layer_status:
merged_all.update(status.merged_adapters)
merged_adapters: list[str] | Literal['irregular'] = sorted(merged_all)
for status in layer_status:
unmerged = set(status.available_adapters) - set(status.merged_adapters)
if unmerged & merged_all:
merged_adapters = 'irregular'
break
requires_grad_all: dict[str, list[bool | Literal['irregular']]] = collections.defaultdict(list)
for status in layer_status:
for (key, val) in status.requires_grad.items():
requires_grad_all[key].append(val)
def check_irrgular(vals: list[bool | Literal['irregular']]) -> bool | Literal['irregular']:
if all((val is True for val in vals)):
return True
if all((val is False for val in vals)):
return False
return 'irregular'
requires_grad = {key: check_irrgular(vals) for (key, vals) in requires_grad_all.items()}
devices_dd = collections.defaultdict(list)
for status in layer_status:
for (key, val) in status.devices.items():
devices_dd[key].extend(val)
devices = {key: sorted(set(val)) for (key, val) in devices_dd.items()}
adapter_model_status = TunerModelStatus(base_model_type=base_model_type, adapter_model_type=adapter_model_type, peft_types=peft_types, trainable_params=trainable_params, total_params=total_params, num_adapter_layers=num_adapter_layers, enabled=enabled, active_adapters=active_adapters, merged_adapters=merged_adapters, requires_grad=requires_grad, available_adapters=available_adapters, devices=devices)
return adapter_model_status
# File: peft-main/src/peft/tuners/__init__.py
from .adaption_prompt import AdaptionPromptConfig, AdaptionPromptModel
from .lora import LoraConfig, LoraModel, LoftQConfig, LoraRuntimeConfig
from .loha import LoHaConfig, LoHaModel
from .lokr import LoKrConfig, LoKrModel
from .ia3 import IA3Config, IA3Model
from .adalora import AdaLoraConfig, AdaLoraModel
from .boft import BOFTConfig, BOFTModel
from .p_tuning import PromptEncoder, PromptEncoderConfig, PromptEncoderReparameterizationType
from .prefix_tuning import PrefixEncoder, PrefixTuningConfig
from .prompt_tuning import PromptEmbedding, PromptTuningConfig, PromptTuningInit
from .multitask_prompt_tuning import MultitaskPromptEmbedding, MultitaskPromptTuningConfig, MultitaskPromptTuningInit
from .oft import OFTConfig, OFTModel
from .mixed import MixedModel
from .poly import PolyConfig, PolyModel
from .ln_tuning import LNTuningConfig, LNTuningModel
from .vera import VeraConfig, VeraModel
from .fourierft import FourierFTConfig, FourierFTModel
from .xlora import XLoraConfig, XLoraModel
from .hra import HRAConfig, HRAModel
from .vblora import VBLoRAConfig, VBLoRAModel
# File: peft-main/src/peft/tuners/_buffer_dict.py
from __future__ import annotations
import collections
from collections import OrderedDict
import torch
from torch.nn import Module
class BufferDict(Module):
def __init__(self, buffers=None, persistent: bool=False):
super().__init__()
if buffers is not None:
self.update(buffers)
self.persistent = persistent
def __getitem__(self, key):
return self._buffers[key]
def __setitem__(self, key, buffer):
self.register_buffer(key, buffer, persistent=self.persistent)
def __delitem__(self, key):
del self._buffers[key]
def __len__(self):
return len(self._buffers)
def __iter__(self):
return iter(self._buffers.keys())
def __contains__(self, key):
return key in self._buffers
def clear(self):
self._buffers.clear()
def pop(self, key):
v = self[key]
del self[key]
return v
def keys(self):
return self._buffers.keys()
def items(self):
return self._buffers.items()
def values(self):
return self._buffers.values()
def update(self, buffers):
if not isinstance(buffers, collections.abc.Iterable):
raise TypeError('BuffersDict.update should be called with an iterable of key/value pairs, but got ' + type(buffers).__name__)
if isinstance(buffers, collections.abc.Mapping):
if isinstance(buffers, (OrderedDict, BufferDict)):
for (key, buffer) in buffers.items():
self[key] = buffer
else:
for (key, buffer) in sorted(buffers.items()):
self[key] = buffer
else:
for (j, p) in enumerate(buffers):
if not isinstance(p, collections.abc.Iterable):
raise TypeError('BufferDict update sequence element #' + str(j) + ' should be Iterable; is' + type(p).__name__)
if not len(p) == 2:
raise ValueError('BufferDict update sequence element #' + str(j) + ' has length ' + str(len(p)) + '; 2 is required')
self[p[0]] = p[1]
def extra_repr(self):
child_lines = []
for (k, p) in self._buffers.items():
size_str = 'x'.join((str(size) for size in p.size()))
device_str = '' if not p.is_cuda else f' (GPU {p.get_device()})'
parastr = f'Buffer containing: [{torch.typename(p)} of size {size_str}{device_str}]'
child_lines.append(' (' + k + '): ' + parastr)
tmpstr = '\n'.join(child_lines)
return tmpstr
def __call__(self, input):
raise RuntimeError('BufferDict should not be called.')
# File: peft-main/src/peft/tuners/adalora/__init__.py
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from .config import AdaLoraConfig
from .gptq import SVDQuantLinear
from .layer import AdaLoraLayer, RankAllocator, SVDLinear
from .model import AdaLoraModel
__all__ = ['AdaLoraConfig', 'AdaLoraLayer', 'AdaLoraModel', 'SVDLinear', 'RankAllocator', 'SVDQuantLinear']
def __getattr__(name):
if name == 'SVDLinear8bitLt' and is_bnb_available():
from .bnb import SVDLinear8bitLt
return SVDLinear8bitLt
if name == 'SVDLinear4bit' and is_bnb_4bit_available():
from .bnb import SVDLinear4bit
return SVDLinear4bit
raise AttributeError(f'module {__name__} has no attribute {name}')
# File: peft-main/src/peft/tuners/adalora/bnb.py
from typing import Any
import torch
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from .layer import AdaLoraLayer
if is_bnb_available():
class SVDLinear8bitLt(torch.nn.Module, AdaLoraLayer):
def __init__(self, base_layer: torch.nn.Module, adapter_name: str, r: int=0, lora_alpha: int=1, lora_dropout: float=0.0, init_lora_weights: bool=True, **kwargs) -> None:
super().__init__()
AdaLoraLayer.__init__(self, base_layer)
self.get_base_layer().weight.requires_grad = False
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
def forward(self, x: torch.Tensor) -> torch.Tensor:
result = self.base_layer(x)
if self.disable_adapters:
return result
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
if x.dtype != torch.float32:
x = x.float()
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
lora_E = self.lora_E[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
ranknum = self.ranknum[active_adapter] + 1e-05
output = dropout(x) @ (lora_A * lora_E).T @ lora_B.T
if requires_conversion:
output = output.to(expected_dtype)
output = output * scaling / ranknum
result = result + output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'adalora.' + rep
if is_bnb_4bit_available():
class SVDLinear4bit(torch.nn.Module, AdaLoraLayer):
def __init__(self, base_layer: torch.nn.Module, adapter_name: str, r: int=0, lora_alpha: int=1, lora_dropout: float=0.0, init_lora_weights: bool=True, **kwargs) -> None:
super().__init__()
AdaLoraLayer.__init__(self, base_layer)
self.get_base_layer().weight.requires_grad = False
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
result = self.base_layer(x, *args, **kwargs)
if self.disable_adapters:
return result
result = result.clone()
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
lora_E = self.lora_E[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
ranknum = self.ranknum[active_adapter] + 1e-05
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
compute_dtype = lora_A.dtype
if x.dtype != compute_dtype:
x = x.to(compute_dtype)
output = dropout(x) @ (lora_A * lora_E).T @ lora_B.T
if requires_conversion:
output = output.to(expected_dtype)
output = output * scaling / ranknum
result += output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'adalora.' + rep
# File: peft-main/src/peft/tuners/adalora/config.py
import warnings
from dataclasses import dataclass, field
from typing import Optional
from peft.tuners.lora import LoraConfig
from peft.utils import PeftType
@dataclass
class AdaLoraConfig(LoraConfig):
target_r: int = field(default=8, metadata={'help': 'Target Lora matrix dimension.'})
init_r: int = field(default=12, metadata={'help': 'Initial Lora matrix dimension.'})
tinit: int = field(default=0, metadata={'help': 'The steps of initial warmup.'})
tfinal: int = field(default=0, metadata={'help': 'The steps of final warmup.'})
deltaT: int = field(default=1, metadata={'help': 'Step interval of rank allocation.'})
beta1: float = field(default=0.85, metadata={'help': 'Hyperparameter of EMA.'})
beta2: float = field(default=0.85, metadata={'help': 'Hyperparameter of EMA.'})
orth_reg_weight: float = field(default=0.5, metadata={'help': 'The orthogonal regularization coefficient.'})
total_step: Optional[int] = field(default=None, metadata={'help': 'The total training steps.'})
rank_pattern: Optional[dict] = field(default=None, metadata={'help': 'The saved rank pattern.'})
def __post_init__(self):
self.peft_type = PeftType.ADALORA
if self.use_dora:
raise ValueError(f'{self.peft_type} does not support DoRA.')
if self.loftq_config:
raise ValueError(f'{self.peft_type} does not support LOFTQ.')
self.target_modules = set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
if isinstance(self.target_modules, str) and self.layers_to_transform is not None:
raise ValueError('`layers_to_transform` cannot be used when `target_modules` is a str.')
if isinstance(self.target_modules, str) and self.layers_pattern is not None:
raise ValueError('`layers_pattern` cannot be used when `target_modules` is a str.')
if self.r != 8:
warnings.warn('Note that `r` is not used in AdaLora and will be ignored.If you intended to set the initial rank, use `init_r` instead.')
# File: peft-main/src/peft/tuners/adalora/gptq.py
import torch
from .layer import AdaLoraLayer
class SVDQuantLinear(torch.nn.Module, AdaLoraLayer):
def __init__(self, base_layer, adapter_name, r: int=0, lora_alpha: int=1, lora_dropout: float=0.0, init_lora_weights: bool=True, **kwargs) -> None:
super().__init__()
AdaLoraLayer.__init__(self, base_layer)
self.quant_linear_module = base_layer
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
def forward(self, x: torch.Tensor) -> torch.Tensor:
result = self.quant_linear_module(x)
if self.disable_adapters:
return result
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
lora_E = self.lora_E[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
ranknum = self.ranknum[active_adapter] + 1e-05
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
if x.dtype != torch.float32:
x = x.float()
output = dropout(x) @ (lora_A * lora_E).T @ lora_B.T * scaling / ranknum
if requires_conversion:
output = output.to(expected_dtype)
result += output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'adalora.' + rep
# File: peft-main/src/peft/tuners/adalora/layer.py
import warnings
from typing import Any, List, Optional
import packaging
import torch
import transformers
from torch import nn
from peft.tuners.lora import LoraLayer
from peft.tuners.tuners_utils import check_adapters_to_merge
from peft.utils import transpose
if packaging.version.parse(transformers.__version__) >= packaging.version.parse('4.33.0'):
from transformers.integrations import deepspeed_config
else:
from transformers.deepspeed import deepspeed_config
class AdaLoraLayer(LoraLayer):
adapter_layer_names = ('lora_A', 'lora_B', 'lora_E', 'lora_embedding_A', 'lora_embedding_B')
other_param_names = ('r', 'lora_alpha', 'scaling', 'lora_dropout', 'ranknum')
def __init__(self, base_layer: nn.Module) -> None:
super().__init__(base_layer)
self.lora_E = nn.ParameterDict({})
self.lora_A = nn.ParameterDict({})
self.lora_B = nn.ParameterDict({})
self.ranknum = nn.ParameterDict({})
def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights):
if r < 0:
raise ValueError(f'`r` should be a positive integer or 0, but the value passed is {r}')
self.r[adapter_name] = r
self.lora_alpha[adapter_name] = lora_alpha
if lora_dropout > 0.0:
lora_dropout_layer = nn.Dropout(p=lora_dropout)
else:
lora_dropout_layer = nn.Identity()
self.lora_dropout[adapter_name] = lora_dropout_layer
self.lora_A[adapter_name] = nn.Parameter(torch.randn(r, self.in_features))
self.lora_E[adapter_name] = nn.Parameter(torch.randn(r, 1))
self.lora_B[adapter_name] = nn.Parameter(torch.randn(self.out_features, r))
self.ranknum[adapter_name] = nn.Parameter(torch.randn(1), requires_grad=False)
self.ranknum[adapter_name].data.fill_(float(r))
self.ranknum[adapter_name].requires_grad = False
self.scaling[adapter_name] = lora_alpha if lora_alpha > 0 else float(r)
if init_lora_weights:
self.reset_lora_parameters(adapter_name)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def reset_lora_parameters(self, adapter_name):
if adapter_name in self.lora_A.keys():
nn.init.zeros_(self.lora_E[adapter_name])
nn.init.normal_(self.lora_A[adapter_name], mean=0.0, std=0.02)
nn.init.normal_(self.lora_B[adapter_name], mean=0.0, std=0.02)
class SVDLinear(nn.Module, AdaLoraLayer):
def __init__(self, base_layer: nn.Module, adapter_name: str, r: int=0, lora_alpha: int=1, lora_dropout: float=0.0, fan_in_fan_out: bool=False, init_lora_weights: bool=True, **kwargs) -> None:
super().__init__()
AdaLoraLayer.__init__(self, base_layer)
self.get_base_layer().weight.requires_grad = False
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
def merge(self, safe_merge: bool=False, adapter_names: Optional[List[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
base_layer = self.get_base_layer()
if active_adapter in self.lora_A.keys():
if safe_merge:
orig_weights = base_layer.weight.data.clone()
orig_weights += self.get_delta_weight(active_adapter)
if not torch.isfinite(orig_weights).all():
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
base_layer.weight.data = orig_weights
else:
base_layer.weight.data += self.get_delta_weight(active_adapter)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.lora_A.keys():
self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
def get_delta_weight(self, adapter) -> torch.Tensor:
return transpose(self.lora_B[adapter] @ (self.lora_A[adapter] * self.lora_E[adapter]), self.fan_in_fan_out) * self.scaling[adapter] / (self.ranknum[adapter] + 1e-05)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
lora_E = self.lora_E[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
ranknum = self.ranknum[active_adapter] + 1e-05
x = x.to(lora_A.dtype)
result += dropout(x) @ (lora_A * lora_E).T @ lora_B.T * scaling / ranknum
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'adalora.' + rep
class RankAllocator:
def __init__(self, model, peft_config, adapter_name):
self.peft_config = peft_config
self.adapter_name = adapter_name
self.beta1 = peft_config.beta1
self.beta2 = peft_config.beta2
assert self.beta1 > 0 and self.beta1 < 1
assert self.beta2 > 0 and self.beta2 < 1
self.reset_ipt()
self._set_budget_scheduler(model)
def set_total_step(self, total_step):
self.peft_config.total_step = total_step
def reset_ipt(self):
self.ipt = {}
self.exp_avg_ipt = {}
self.exp_avg_unc = {}
def _set_budget_scheduler(self, model):
self.init_bgt = 0
self.name_set = set()
for (n, p) in model.named_parameters():
if f'lora_A.{self.adapter_name}' in n:
self.init_bgt += p.size(0)
self.name_set.add(n.replace('lora_A', '%s'))
self.name_set = sorted(self.name_set)
self.target_bgt = self.peft_config.target_r * len(self.name_set)
def budget_schedule(self, step: int):
tinit = self.peft_config.tinit
tfinal = self.peft_config.tfinal
total_step = self.peft_config.total_step
if step <= tinit:
budget = self.init_bgt
mask_ind = False
elif step > total_step - tfinal:
budget = self.target_bgt
mask_ind = True
else:
mul_coeff = 1 - (step - tinit) / (total_step - tfinal - tinit)
budget = int((self.init_bgt - self.target_bgt) * mul_coeff ** 3 + self.target_bgt)
mask_ind = True if step % self.peft_config.deltaT == 0 else False
return (budget, mask_ind)
def update_ipt(self, model):
for (n, p) in model.named_parameters():
if 'lora_' in n and self.adapter_name in n:
if n not in self.ipt:
self.ipt[n] = torch.zeros_like(p)
self.exp_avg_ipt[n] = torch.zeros_like(p)
self.exp_avg_unc[n] = torch.zeros_like(p)
with torch.no_grad():
if deepspeed_config() is not None:
import deepspeed
grad = deepspeed.utils.safe_get_full_grad(p)
self.ipt[n] = (p * grad).abs().detach()
else:
self.ipt[n] = (p * p.grad).abs().detach()
self.exp_avg_ipt[n] = self.beta1 * self.exp_avg_ipt[n] + (1 - self.beta1) * self.ipt[n]
self.exp_avg_unc[n] = self.beta2 * self.exp_avg_unc[n] + (1 - self.beta2) * (self.ipt[n] - self.exp_avg_ipt[n]).abs()
def _element_score(self, n):
return self.exp_avg_ipt[n] * self.exp_avg_unc[n]
def _combine_ipt(self, ipt_E, ipt_AB):
ipt_AB = ipt_AB.sum(dim=1, keepdim=False)
sum_ipt = ipt_E.view(-1) + ipt_AB.view(-1)
return sum_ipt
def mask_to_budget(self, model, budget):
value_ipt = {}
vector_ipt = {}
triplet_ipt = {}
for (n, p) in model.named_parameters():
if f'lora_A.{self.adapter_name}' in n:
entry_ipt = self._element_score(n)
comb_ipt = torch.mean(entry_ipt, dim=1, keepdim=True)
name_m = n.replace('lora_A', '%s')
if name_m not in vector_ipt:
vector_ipt[name_m] = [comb_ipt]
else:
vector_ipt[name_m].append(comb_ipt)
if f'lora_B.{self.adapter_name}' in n:
entry_ipt = self._element_score(n)
comb_ipt = torch.mean(entry_ipt, dim=0, keepdim=False).view(-1, 1)
name_m = n.replace('lora_B', '%s')
if name_m not in vector_ipt:
vector_ipt[name_m] = [comb_ipt]
else:
vector_ipt[name_m].append(comb_ipt)
if f'lora_E.{self.adapter_name}' in n:
entry_ipt = self._element_score(n)
name_m = n.replace('lora_E', '%s')
value_ipt[name_m] = entry_ipt
all_score = []
for name_m in vector_ipt:
ipt_E = value_ipt[name_m]
ipt_AB = torch.cat(vector_ipt[name_m], dim=1)
sum_ipt = self._combine_ipt(ipt_E, ipt_AB)
name_E = name_m % 'lora_E'
triplet_ipt[name_E] = sum_ipt.view(-1, 1)
all_score.append(sum_ipt.view(-1))
mask_threshold = torch.kthvalue(torch.cat(all_score), k=self.init_bgt - budget)[0].item()
rank_pattern = {}
with torch.no_grad():
for (n, p) in model.named_parameters():
if f'lora_E.{self.adapter_name}' in n:
p.masked_fill_(triplet_ipt[n] <= mask_threshold, 0.0)
rank_pattern[n] = (~(triplet_ipt[n] <= mask_threshold)).view(-1).tolist()
return rank_pattern
def update_and_allocate(self, model, global_step, force_mask=False):
if global_step < self.peft_config.total_step - self.peft_config.tfinal:
self.update_ipt(model)
(budget, mask_ind) = self.budget_schedule(global_step)
if mask_ind or force_mask:
rank_pattern = self.mask_to_budget(model, budget)
else:
rank_pattern = None
return (budget, rank_pattern)
def mask_using_rank_pattern(self, model, rank_pattern):
is_adapter_name_truncated = False
if self.adapter_name not in next(iter(rank_pattern.keys())):
is_adapter_name_truncated = True
with torch.no_grad():
for (n, p) in model.named_parameters():
if f'lora_E.{self.adapter_name}' in n:
key = n if not is_adapter_name_truncated else n.replace(f'.{self.adapter_name}', '')
mask = torch.Tensor(rank_pattern[key]).unsqueeze(-1).to(p.device)
p.masked_fill_(~mask.bool(), 0.0)
# File: peft-main/src/peft/tuners/adalora/model.py
import warnings
import torch
from transformers.pytorch_utils import Conv1D
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from peft.tuners.lora import LoraConfig, LoraModel
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, _freeze_adapter, _get_submodules, get_auto_gptq_quant_linear, get_quantization_config
from peft.utils.integrations import gather_params_ctx
from .gptq import SVDQuantLinear
from .layer import AdaLoraLayer, RankAllocator, SVDLinear
class AdaLoraModel(LoraModel):
def __init__(self, model, config, adapter_name):
super().__init__(model, config, adapter_name)
traininable_mode_counter = 0
for config in self.peft_config.values():
if not config.inference_mode:
traininable_mode_counter += 1
if traininable_mode_counter > 1:
raise ValueError('AdaLoraModel supports only 1 trainable adapter. When using multiple adapters, set inference_mode to True for all adapters except the one you want to train.')
if self.peft_config[adapter_name].inference_mode:
_freeze_adapter(self.model, adapter_name)
else:
self.trainable_adapter_name = adapter_name
self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name)
def _check_new_adapter_config(self, config: LoraConfig) -> None:
super()._check_new_adapter_config(config)
traininable_mode_counter = 0
for config_ in self.peft_config.values():
if not config_.inference_mode:
traininable_mode_counter += 1
if traininable_mode_counter > 1:
raise ValueError(f'{self.__class__.__name__} supports only 1 trainable adapter. When using multiple adapters, set inference_mode to True for all adapters except the one you want to train.')
def _create_and_replace(self, lora_config, adapter_name, target, target_name, parent, current_key):
kwargs = {'r': lora_config.init_r, 'lora_alpha': lora_config.lora_alpha, 'lora_dropout': lora_config.lora_dropout, 'fan_in_fan_out': lora_config.fan_in_fan_out, 'init_lora_weights': lora_config.init_lora_weights, 'loaded_in_8bit': getattr(self.model, 'is_loaded_in_8bit', False), 'loaded_in_4bit': getattr(self.model, 'is_loaded_in_4bit', False)}
if (kwargs['loaded_in_8bit'] or kwargs['loaded_in_4bit']) and (not is_bnb_available()):
raise ImportError('To use AdaLora with 8-bit quantization, please install the `bitsandbytes` package. You can install it with `pip install bitsandbytes`.')
quantization_config = get_quantization_config(self.model, method='gptq')
if quantization_config is not None:
kwargs['gptq_quantization_config'] = quantization_config
if not isinstance(target, AdaLoraLayer):
new_module = self._create_new_module(lora_config, adapter_name, target, **kwargs)
if adapter_name not in self.active_adapters:
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
else:
target.update_layer(adapter_name, lora_config.init_r, lora_config.lora_alpha, lora_config.lora_dropout, lora_config.init_lora_weights)
@staticmethod
def _create_new_module(lora_config, adapter_name, target, **kwargs):
if is_bnb_available():
import bitsandbytes as bnb
from .bnb import SVDLinear8bitLt
if is_bnb_4bit_available():
from .bnb import SVDLinear4bit
gptq_quantization_config = kwargs.get('gptq_quantization_config', None)
AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config)
loaded_in_8bit = kwargs.pop('loaded_in_8bit', False)
loaded_in_4bit = kwargs.pop('loaded_in_4bit', False)
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
kwargs.update({'has_fp16_weights': target_base_layer.state.has_fp16_weights, 'memory_efficient_backward': target_base_layer.state.memory_efficient_backward, 'threshold': target_base_layer.state.threshold, 'index': target_base_layer.index})
new_module = SVDLinear8bitLt(target, adapter_name, **kwargs)
elif loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit):
fourbit_kwargs = kwargs.copy()
fourbit_kwargs.update({'compute_dtype': target_base_layer.compute_dtype, 'compress_statistics': target_base_layer.weight.compress_statistics, 'quant_type': target_base_layer.weight.quant_type})
new_module = SVDLinear4bit(target, adapter_name, **fourbit_kwargs)
elif AutoGPTQQuantLinear is not None and isinstance(target, AutoGPTQQuantLinear):
new_module = SVDQuantLinear(target, adapter_name, **kwargs)
else:
if isinstance(target_base_layer, torch.nn.Linear):
if kwargs['fan_in_fan_out']:
warnings.warn('fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. Setting fan_in_fan_out to False.')
kwargs['fan_in_fan_out'] = lora_config.fan_in_fan_out = False
elif isinstance(target_base_layer, Conv1D):
if not kwargs['fan_in_fan_out']:
warnings.warn('fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True.')
kwargs['fan_in_fan_out'] = lora_config.fan_in_fan_out = True
else:
raise ValueError(f'Target module {target} is not supported. Currently, only `torch.nn.Linear` and `Conv1D` are supported.')
new_module = SVDLinear(target, adapter_name, **kwargs)
return new_module
@staticmethod
def _prepare_adapter_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config['model_type'] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING:
raise ValueError('Please specify `target_modules` in `peft_config`')
peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[model_config['model_type']]
return peft_config
def __getattr__(self, name: str):
try:
return super().__getattr__(name)
except AttributeError:
if name == 'model':
raise
return getattr(self.model, name)
def forward(self, *args, **kwargs):
outputs = self.model.forward(*args, **kwargs)
if getattr(outputs, 'loss', None) is not None and isinstance(outputs.loss, torch.Tensor):
orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight
if orth_reg_weight <= 0:
raise ValueError('orth_reg_weight should be greater than 0. ')
regu_loss = 0
num_param = 0
for (n, p) in self.model.named_parameters():
if ('lora_A' in n or 'lora_B' in n) and self.trainable_adapter_name in n:
if p.shape == torch.Size([0]):
with gather_params_ctx(p, fwd_module=self):
para_cov = p @ p.T if 'lora_A' in n else p.T @ p
else:
para_cov = p @ p.T if 'lora_A' in n else p.T @ p
I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov))
I.requires_grad = False
num_param += 1
regu_loss += torch.norm(para_cov - I, p='fro')
if num_param > 0:
regu_loss = regu_loss / num_param
else:
regu_loss = 0
outputs.loss += orth_reg_weight * regu_loss
return outputs
def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name):
lora_config = self.peft_config[adapter_name]
for (name, rank_idx) in rank_pattern.items():
if isinstance(rank_idx, list):
rank = sum(rank_idx)
elif isinstance(rank_idx, torch.Tensor):
rank_idx = rank_idx.view(-1)
rank = rank_idx.sum().item()
else:
raise ValueError('Unexpected type of rank_idx')
key = '.'.join(name.split('.')[0:-2]) if adapter_name in name else '.'.join(name.split('.')[0:-1])
(_, target, _) = _get_submodules(self.model, key)
lora_E_weights = target.lora_E[adapter_name][rank_idx]
lora_A_weights = target.lora_A[adapter_name][rank_idx]
lora_B_weights = target.lora_B[adapter_name][:, rank_idx]
ranknum = target.ranknum[adapter_name]
target.update_layer(adapter_name, rank, lora_config.lora_alpha, lora_config.lora_dropout, lora_config.init_lora_weights)
with torch.no_grad():
if rank > 0:
target.lora_E[adapter_name].copy_(lora_E_weights)
target.lora_A[adapter_name].copy_(lora_A_weights)
target.lora_B[adapter_name].copy_(lora_B_weights)
target.ranknum[adapter_name].copy_(ranknum)
def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name):
for (name, rank_idx) in rank_pattern.items():
rank = sum(rank_idx)
prefix = '.'.join(name.split('.')[0:-2]) if adapter_name in name else '.'.join(name.split('.')[0:-1])
for layer in ['lora_E', 'lora_A', 'lora_B']:
key = f'base_model.model.{prefix}.{layer}.{adapter_name}'
if layer != 'lora_B':
state_dict[key] = state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key]
else:
state_dict[key] = state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]
return state_dict
def update_and_allocate(self, global_step):
lora_config = self.peft_config[self.trainable_adapter_name]
if global_step < lora_config.total_step - lora_config.tfinal:
(_, rank_pattern) = self.rankallocator.update_and_allocate(self.model, global_step)
if rank_pattern:
lora_config.rank_pattern = rank_pattern
elif global_step == lora_config.total_step - lora_config.tfinal:
(_, rank_pattern) = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)
lora_config.rank_pattern = rank_pattern
self.rankallocator.reset_ipt()
elif global_step > lora_config.total_step - lora_config.tfinal:
self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)
else:
return None
def add_weighted_adapter(self, *args, **kwargs):
raise TypeError(f'{self.__class__.__name__} does not support add_weighted_adapter method.')
# File: peft-main/src/peft/tuners/adaption_prompt/config.py
from collections import namedtuple
from dataclasses import dataclass, field
from peft.config import PeftConfig
from peft.utils import PeftType
from .utils import llama_compute_query_states
@dataclass
class AdaptionPromptConfig(PeftConfig):
target_modules: str = field(default=None, metadata={'help': 'Name of the attention submodules to insert adaption prompts into.'})
adapter_len: int = field(default=None, metadata={'help': 'Number of adapter tokens to insert'})
adapter_layers: int = field(default=None, metadata={'help': 'Number of adapter layers (from the top)'})
def __post_init__(self):
self.peft_type = PeftType.ADAPTION_PROMPT
@property
def is_adaption_prompt(self) -> bool:
return True
ModelTypeConfig = namedtuple('ModelTypeConfig', ['compute_query_states', 'target_modules', 'k_proj_layer', 'v_proj_layer', 'o_proj_layer'])
TRANSFORMERS_MODEL_CONFIG = {'llama': ModelTypeConfig(compute_query_states=llama_compute_query_states, target_modules='self_attn', k_proj_layer='k_proj', v_proj_layer='v_proj', o_proj_layer='o_proj'), 'mistral': ModelTypeConfig(compute_query_states=llama_compute_query_states, target_modules='self_attn', k_proj_layer='k_proj', v_proj_layer='v_proj', o_proj_layer='o_proj')}
def prepare_config(peft_config: AdaptionPromptConfig, model) -> AdaptionPromptConfig:
if model.config.model_type not in TRANSFORMERS_MODEL_CONFIG:
raise ValueError("Unsupported model type for adaption prompt: '{model.config.model_type}'.")
model_config = TRANSFORMERS_MODEL_CONFIG[model.config.model_type]
if peft_config.target_modules is None:
peft_config.target_modules = model_config.target_modules
return peft_config
# File: peft-main/src/peft/tuners/adaption_prompt/layer.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .config import TRANSFORMERS_MODEL_CONFIG
class AdaptedAttention(nn.Module):
def __init__(self, model_type: str, adapter_len: int, model):
assert not isinstance(model, AdaptedAttention)
super().__init__()
self.model_type = model_type
self.model = model
self.adapter_len = adapter_len
device = next(model.parameters()).device
target_dtype = model.q_proj.weight.dtype if model.q_proj.weight.dtype not in [torch.int8, torch.uint8] else torch.float32
self.adaption_prompt = nn.Parameter(torch.empty(1, adapter_len, self.model.hidden_size, device=device, dtype=target_dtype).normal_())
self.adaption_gate = nn.Parameter(torch.zeros(1, device=device, dtype=target_dtype))
def forward(self, **kwargs):
if kwargs.get('output_attention', False):
raise NotImplementedError('output_attention is not currently supported.')
(output, _, past_key_value) = self.model(**kwargs)
bsz = output.shape[0]
q_len = output.shape[1]
embed_dim = output.shape[2]
k_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].k_proj_layer
v_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].v_proj_layer
o_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].o_proj_layer
factor = self.model.k_proj.in_features // self.model.k_proj.out_features
if k_proj_layer == v_proj_layer:
(_, key, value) = getattr(self.model, k_proj_layer)(self.adaption_prompt).split(embed_dim, dim=2)
else:
key = getattr(self.model, k_proj_layer)(self.adaption_prompt)
value = getattr(self.model, v_proj_layer)(self.adaption_prompt)
adapter_k = key.view(1, self.adapter_len, self.model.num_heads // factor, self.model.head_dim).repeat(bsz, 1, 1, 1).transpose(1, 2)
adapter_v = value.view(1, self.adapter_len, self.model.num_heads // factor, self.model.head_dim).repeat(bsz, 1, 1, 1).transpose(1, 2)
adapter_k = torch.repeat_interleave(adapter_k, repeats=factor, dim=1)
adapter_v = torch.repeat_interleave(adapter_v, repeats=factor, dim=1)
compute_query_states = TRANSFORMERS_MODEL_CONFIG[self.model_type].compute_query_states
query_states = compute_query_states(model=self.model, **kwargs)
previous_dtype = query_states.dtype
scores = torch.matmul(query_states, adapter_k.transpose(2, 3).to(previous_dtype)) / math.sqrt(self.model.head_dim)
scores = self.adaption_gate * F.softmax(scores, dim=-1, dtype=torch.float32).to(previous_dtype)
adapter_output = torch.matmul(scores, adapter_v).transpose(1, 2).reshape(bsz, q_len, -1)
if o_proj_layer is not None:
adapter_output = getattr(self.model, o_proj_layer)(adapter_output)
output = output + adapter_output
output = output.to(previous_dtype)
return (output, None, past_key_value)
# File: peft-main/src/peft/tuners/adaption_prompt/model.py
from typing import Dict, List
import torch.nn as nn
from peft.utils import _freeze_adapter, _get_submodules
from .config import AdaptionPromptConfig, prepare_config
from .layer import AdaptedAttention
from .utils import is_adaption_prompt_trainable
class AdaptionPromptModel(nn.Module):
def __init__(self, model, configs: Dict, adapter_name: str):
super().__init__()
self.model = model
self.peft_config: Dict[str, AdaptionPromptConfig] = {}
self._parents: Dict[str, List[nn.Module]] = {}
self._cached_adapters: Dict[str, List] = {}
self._active_adapter = None
self._enabled = True
self.forward = self.model.forward
self.add_adapter(adapter_name, configs[adapter_name])
self._mark_only_adaption_prompts_as_trainable(self.model)
def add_adapter(self, adapter_name: str, config: AdaptionPromptConfig) -> None:
config = prepare_config(config, self.model)
if adapter_name in self.peft_config:
raise ValueError(f"Adapter with name '{adapter_name}' already exists.")
parents = []
for (name, _) in self.model.named_modules():
if name.endswith(config.target_modules):
(par, _, _) = _get_submodules(self.model, name)
parents.append(par)
if len(parents) < config.adapter_layers:
raise ValueError(f"Config specifies more adapter layers '{config.adapter_layers}' than the model has '{len(parents)}'.")
parents = parents[-config.adapter_layers:]
self._parents[adapter_name] = parents
if self._active_adapter is not None and self._enabled:
self._remove_adapted_attentions(self._active_adapter)
self._active_adapter = adapter_name
self.peft_config[adapter_name] = config
self._create_adapted_attentions(config, parents)
if not self._enabled:
self._remove_adapted_attentions(self._active_adapter)
if config.inference_mode:
_freeze_adapter(self.model, adapter_name)
def set_adapter(self, adapter_name: str) -> None:
if self._active_adapter == adapter_name:
return
if adapter_name not in self.peft_config:
raise ValueError(f"Adapter with name '{adapter_name}' does not exist.")
if self._enabled:
self._remove_adapted_attentions(self._active_adapter)
self._set_adapted_attentions(adapter_name)
self._active_adapter = adapter_name
def enable_adapter_layers(self):
self._enabled = True
self._set_adapted_attentions(self._active_adapter)
def disable_adapter_layers(self):
self._enabled = False
self._remove_adapted_attentions(self._active_adapter)
def _create_adapted_attentions(self, config: AdaptionPromptConfig, parents: List[nn.Module]) -> None:
for par in parents:
attn = AdaptedAttention(model_type=self.model.config.model_type, adapter_len=config.adapter_len, model=getattr(par, config.target_modules))
setattr(par, config.target_modules, attn)
def _set_adapted_attentions(self, adapter_name: str) -> None:
cached = self._cached_adapters[adapter_name]
del self._cached_adapters[adapter_name]
config = self.peft_config[adapter_name]
for (i, par) in enumerate(self._parents[adapter_name]):
setattr(par, config.target_modules, cached[i])
def _remove_adapted_attentions(self, adapter_name: str) -> None:
config = self.peft_config[adapter_name]
adapted_attentions = []
for par in self._parents[adapter_name]:
attn = getattr(par, config.target_modules)
adapted_attentions.append(attn)
setattr(par, config.target_modules, attn.model)
self._cached_adapters[adapter_name] = adapted_attentions
def _mark_only_adaption_prompts_as_trainable(self, model: nn.Module) -> None:
for (n, p) in model.named_parameters():
if not is_adaption_prompt_trainable(n):
p.requires_grad = False
def __getattr__(self, name: str):
try:
return super().__getattr__(name)
except AttributeError:
if name == 'model':
raise
return getattr(self.model, name)
# File: peft-main/src/peft/tuners/adaption_prompt/utils.py
import inspect
import torch
import torch.nn as nn
def llama_rotate_half(x: torch.Tensor) -> torch.Tensor:
x1 = x[..., :x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=-1)
def llama_apply_rotary_pos_emb(q, cos, sin, position_ids):
if len(cos.shape) == 4:
gather_indices = position_ids[:, None, :, None]
gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
else:
cos = cos[position_ids].unsqueeze(1)
sin = sin[position_ids].unsqueeze(1)
q_embed = q * cos + llama_rotate_half(q) * sin
return q_embed
def llama_compute_query_states(model: nn.Module, **kwargs) -> torch.Tensor:
hidden_states = kwargs.get('hidden_states')
position_ids = kwargs.get('position_ids')
past_key_value = kwargs.get('past_key_value')
(bsz, q_len, _) = hidden_states.size()
query_states = model.q_proj(hidden_states).view(bsz, q_len, model.num_heads, model.head_dim).transpose(1, 2)
factor = model.k_proj.in_features // model.k_proj.out_features
value_states = model.v_proj(hidden_states).view(bsz, q_len, model.num_heads // factor, model.head_dim).transpose(1, 2)
seq_len = q_len
if past_key_value is not None:
if isinstance(past_key_value, tuple):
seq_len += past_key_value[0].shape[-2]
else:
seq_len += past_key_value.get_seq_length(model.layer_idx)
if 'position_ids' not in inspect.signature(model.rotary_emb.forward).parameters:
(cos, sin) = model.rotary_emb(value_states, seq_len=seq_len)
return llama_apply_rotary_pos_emb(query_states, cos, sin, position_ids)
past_seen_tokens = 0
if position_ids is None:
if past_key_value is None:
new_cache_positions = torch.arange(q_len, q_len + q_len, device=value_states.device)
else:
past_seen_tokens = past_key_value.get_usable_length(q_len, model.layer_idx)
new_cache_positions = torch.arange(past_seen_tokens, past_seen_tokens + q_len, device=value_states.device)
position_ids = new_cache_positions.unsqueeze(0)
rotary_emb_kwargs = {'position_ids': position_ids}
if 'seq_len' in inspect.signature(model.rotary_emb.forward).parameters:
rotary_emb_kwargs['seq_len'] = q_len + past_seen_tokens
(cos, sin) = model.rotary_emb(value_states, **rotary_emb_kwargs)
if len(cos.shape) == 3:
cos = cos.unsqueeze(1)
sin = sin.unsqueeze(1)
return query_states * cos + llama_rotate_half(query_states) * sin
def is_adaption_prompt_trainable(params: str) -> bool:
return params.split('.')[-1].startswith('adaption_')
# File: peft-main/src/peft/tuners/boft/config.py
from dataclasses import dataclass, field
from typing import List, Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class BOFTConfig(PeftConfig):
boft_block_size: int = field(default=4, metadata={'help': 'BOFT block size across different layers.', 'note': 'You can only specify either boft_block_size or boft_block_num, but not both simultaneously, because boft_block_size x boft_block_num = layer dimension.'})
boft_block_num: int = field(default=0, metadata={'help': 'Number of BOFT blocks per injected layer.', 'note': 'You can only specify either boft_block_size or boft_block_num, but not both simultaneously, because boft_block_size x boft_block_num = layer dimension.'})
boft_n_butterfly_factor: int = field(default=1, metadata={'help': 'Number of butterfly factors.', 'note': ('for example, boft_n_butterfly_factor=2, the effective block size of OFT becomes twice as big and the number of blocks become half.', 'note: for boft_n_butterfly_factor=1, BOFT is the same as vanilla OFT.')})
target_modules: Optional[Union[List[str], str]] = field(default=None, metadata={'help': 'List of module names or regex expression of the module names to replace with BOFT.', 'example': "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "})
boft_dropout: float = field(default=0.0, metadata={'help': 'BOFT multiplicative dropout'})
fan_in_fan_out: bool = field(default=False, metadata={'help': 'Set this to True if the layer to replace stores weight like (fan_in, fan_out)'})
bias: str = field(default='none', metadata={'help': "Bias type for BOFT. Can be 'none', 'all' or 'boft_only'"})
modules_to_save: Optional[List[str]] = field(default=None, metadata={'help': 'List of modules apart from BOFT layers to be set as trainable and saved in the final checkpoint. ', 'note': ('For example, in Sequence Classification or Token Classification tasks, ', 'the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.')})
init_weights: bool = field(default=True, metadata={'help': ("Whether to initialize the weights of the BOFT layers with their default initialization. Don't change ", "this setting, except if you know exactly what you're doing.")})
layers_to_transform: Optional[Union[List[int], int]] = field(default=None, metadata={'help': 'The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index.'})
layers_pattern: Optional[str] = field(default=None, metadata={'help': 'The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern.'})
def __post_init__(self):
self.peft_type = PeftType.BOFT
self.target_modules = set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
if self.boft_block_size == 0 and self.boft_block_num == 0:
raise ValueError('You must specify either boft_block_size or boft_block_num.')
if not (self.boft_block_size != 0) ^ (self.boft_block_num != 0):
raise ValueError(f'You can only specify either boft_block_size ({self.boft_block_size}) or boft_block_num ({self.boft_block_num}), but not both simultaneously, because boft_block_size x boft_block_num != in_features.')
# File: peft-main/src/peft/tuners/boft/layer.py
from __future__ import annotations
import math
import os
import warnings
from contextlib import contextmanager
from typing import Any, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
_FBD_CUDA = None
@contextmanager
def patch_environment(**kwargs):
existing_vars = {}
for (key, value) in kwargs.items():
key = key.upper()
if key in os.environ:
existing_vars[key] = os.environ[key]
os.environ[key] = str(value)
yield
for key in kwargs:
key = key.upper()
if key in existing_vars:
os.environ[key] = existing_vars[key]
else:
os.environ.pop(key, None)
def get_fbd_cuda():
global _FBD_CUDA
if _FBD_CUDA is not None:
return _FBD_CUDA
from torch.utils.cpp_extension import load
curr_dir = os.path.dirname(__file__)
try:
with patch_environment(CC='gcc', CXX='gcc'):
fbd_cuda = load(name='fbd_cuda', sources=[f'{curr_dir}/fbd/fbd_cuda.cpp', f'{curr_dir}/fbd/fbd_cuda_kernel.cu'], verbose=True)
except Exception as e:
warnings.warn(f'Failed to load the CUDA extension: {e}, check if ninja is available.')
warnings.warn('Setting boft_n_butterfly_factor to 1 to speed up the finetuning process.')
fbd_cuda = None
_FBD_CUDA = fbd_cuda
return _FBD_CUDA
class FastBlockDiag(Function):
@staticmethod
def forward(ctx, input):
output = get_fbd_cuda().forward(input)[0]
ctx.save_for_backward(input)
return output
@staticmethod
def backward(ctx, grad_output):
(input,) = ctx.saved_tensors
grad_input = get_fbd_cuda().backward(grad_output, input)[0]
return grad_input
class MultiplicativeDropoutLayer(nn.Module):
def __init__(self, p=0.0):
super().__init__()
self.p = p
def forward(self, x):
if self.training:
if x.shape[-1] != x.shape[-2]:
raise ValueError('The last two dimensions of input should be the same!')
(N, D, H, _) = x.shape
n_random = torch.randint(0, N, (1,)).item()
num_to_replace = int(self.p * D)
num_zeros = D - num_to_replace
mask = torch.cat([torch.ones(num_to_replace, device=x.device), torch.zeros(num_zeros, device=x.device)])
mask = mask[torch.randperm(D)].view(1, D, 1, 1)
full_mask = torch.zeros(N, D, 1, 1, device=x.device)
full_mask[n_random] = mask
eye_matrix = torch.eye(H, device=x.device).repeat(N, D, 1, 1)
x = (1 - full_mask) * x + full_mask * eye_matrix
return x
class BOFTLayer(BaseTunerLayer):
adapter_layer_names = ('boft_R', 'boft_s')
other_param_names = ('boft_block_size', 'boft_block_num', 'boft_dropout')
def __init__(self, base_layer: nn.Module, **kwargs) -> None:
self.base_layer = base_layer
self.boft_block_size = {}
self.boft_block_num = {}
self.boft_dropout = nn.ModuleDict({})
self.boft_R = nn.ParameterDict({})
self.boft_s = nn.ParameterDict({})
self._disable_adapters = False
self.merged_adapters = []
self.kwargs = kwargs
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
(in_features, out_features) = (base_layer.in_features, base_layer.out_features)
elif isinstance(base_layer, nn.Conv2d):
(in_features, out_features) = (base_layer.in_channels, base_layer.out_channels)
else:
raise ValueError(f'Unsupported layer type {type(base_layer)}')
self.in_features = in_features
self.out_features = out_features
def set_scale(self, adapter, scale):
if adapter not in self.scaling:
return
warnings.warn('Scaling operation for BOFT not supported! Automatically set scale to 1.')
def scale_layer(self, scale: float) -> None:
if scale == 1:
return
for active_adapter in self.active_adapters:
if active_adapter not in self.boft_R.keys():
continue
warnings.warn('Scaling operation for BOFT not supported! Automatically set scale to 1.')
def unscale_layer(self, scale=None) -> None:
for active_adapter in self.active_adapters:
if active_adapter not in self.boft_R.keys():
continue
warnings.warn('Unscaling operation for BOFT not supported! Keeping scale to 1.')
def update_layer(self, adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights):
boft_n_butterfly_factor = boft_n_butterfly_factor - 1
if boft_n_butterfly_factor < 0:
raise ValueError(f'You can only specify boft_n_butterfly_factor {boft_n_butterfly_factor + 1} to be a positive integer number.')
if boft_dropout > 0.0:
boft_dropout_layer = MultiplicativeDropoutLayer(p=boft_dropout)
else:
boft_dropout_layer = nn.Identity()
self.boft_dropout.update(nn.ModuleDict({adapter_name: boft_dropout_layer}))
if boft_block_size == 0 and boft_block_num != 0:
if self.in_features % boft_block_num != 0:
raise ValueError(f'in_features ({self.in_features}) must be divisible by boft_block_num ({boft_block_num})!')
if boft_n_butterfly_factor != 0:
if boft_n_butterfly_factor > int(math.log2(boft_block_num)):
raise ValueError(f'Invalid combination of boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_num ({boft_block_num})!')
if boft_block_num % 2 ** boft_n_butterfly_factor != 0:
raise ValueError(f'boft_block_num ({boft_block_num}) must be a multiple of 2 raised to the power of boft_n_butterfly_factor ({boft_n_butterfly_factor + 1})!')
boft_block_size = int(self.in_features // boft_block_num)
elif boft_block_size != 0 and boft_block_num == 0:
if self.in_features % boft_block_size != 0:
raise ValueError(f'in_features ({self.in_features}) must be divisible by boft_block_size ({boft_block_size})!')
if boft_n_butterfly_factor != 0:
if self.in_features < boft_block_size * 2 ** boft_n_butterfly_factor:
raise ValueError(f'Invalid combination of in_features ({self.in_features}), boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_size ({boft_block_size})!')
if self.in_features % (boft_block_size * 2 ** boft_n_butterfly_factor) != 0:
raise ValueError(f'Invalid combination of in_features ({self.in_features}), boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_size ({boft_block_size})!')
boft_block_num = int(self.in_features // boft_block_size)
else:
raise ValueError(f'You can only specify either boft_block_size ({boft_block_size}) or boft_block_num ({boft_block_num}), but not both simultaneously or setting bothto be 0, because boft_block_size x boft_block_num != in_features.')
if boft_n_butterfly_factor != 0:
if boft_block_num % 2 != 0:
raise ValueError(f'boft_block_num ({boft_block_num}) must be an even number!')
if boft_block_size % 2 != 0:
raise ValueError(f'boft_block_size ({boft_block_size}) must be an even number!')
P = torch.empty((boft_n_butterfly_factor + 1, self.in_features, self.in_features))
for i in range(boft_n_butterfly_factor + 1):
perm = self.block_butterfly_perm(self.in_features, int(boft_block_num / 2 ** i), int(boft_block_size / 2), boft_n_butterfly_factor)
perm_mat = self.perm2mat(perm)
P[i] = perm_mat
self.register_buffer('boft_P', P, persistent=False)
self.boft_R[adapter_name] = nn.Parameter(torch.zeros(boft_n_butterfly_factor + 1, boft_block_num, boft_block_size, boft_block_size))
self.boft_s[adapter_name] = nn.Parameter(torch.ones(int(self.out_features), 1))
self.reset_boft_parameters(adapter_name, init_weights)
self.boft_block_size[adapter_name] = boft_block_size
self.boft_block_num[adapter_name] = boft_block_num
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def reset_boft_parameters(self, adapter_name, init_weights):
if init_weights is False:
nn.init.normal_(self.boft_R[adapter_name], mean=0.0, std=0.1)
nn.init.normal_(self.boft_s[adapter_name], mean=1.0, std=0.1)
return
if adapter_name in self.boft_R.keys():
if init_weights is True:
nn.init.zeros_(self.boft_R[adapter_name])
nn.init.ones_(self.boft_s[adapter_name])
else:
raise ValueError(f'Unknown initialization init_weights={init_weights!r}')
def perm2mat(self, indices):
n = len(indices)
perm_mat = torch.zeros((n, n))
for (i, idx) in enumerate(indices):
perm_mat[i, idx] = 1
return perm_mat
def block_butterfly_perm(self, n, b, r=3, n_butterfly_factor=1):
if n_butterfly_factor == 0:
return torch.arange(n)
if b * r * 2 > n:
raise ValueError('Invalid number of blocks!')
block_size = int(n // b)
indices = torch.arange(n)
def sort_block(b, r):
step = b / r
initial_order = torch.arange(b)
sorted_order = torch.empty(b, dtype=torch.long)
evens = torch.arange(0, step, 2)
odds = torch.arange(1, step, 2)
sorted_seq = torch.cat((evens, odds), dim=0)
for (i, pos) in enumerate(sorted_seq):
sorted_order[int(i * r):int(i * r + r)] = initial_order[int(pos * r):int(pos * r + r)]
return sorted_order
sorted_order = sort_block(block_size, r)
for i in range(0, n, block_size):
block_end = i + block_size
tmp_indices = indices[i:block_end]
indices[i:block_end] = tmp_indices[sorted_order]
return indices
def cayley_batch(self, data):
(b, r, c) = data.shape
skew_mat = 0.5 * (data - data.transpose(1, 2))
id_mat = torch.eye(r, device=data.device).unsqueeze(0).expand(b, r, c)
Q = torch.linalg.solve(id_mat + skew_mat, id_mat - skew_mat, left=False)
return Q
class Linear(nn.Module, BOFTLayer):
def __init__(self, base_layer, adapter_name: str, boft_block_size: int=8, boft_block_num: int=0, boft_n_butterfly_factor: int=0, boft_dropout: float=0.1, fan_in_fan_out: bool=False, init_weights: Union[bool, str]=True, is_target_conv_1d_layer: bool=False, **kwargs) -> None:
super().__init__()
BOFTLayer.__init__(self, base_layer, **kwargs)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
if not get_fbd_cuda():
self.fbd_cuda_available = False
boft_n_butterfly_factor = 1
else:
self.fbd_cuda_available = True
self.update_layer(adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights)
self.is_target_conv_1d_layer = is_target_conv_1d_layer
def merge(self, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter in self.boft_R.keys():
base_layer = self.get_base_layer()
if safe_merge:
orig_weight = base_layer.weight.data.clone()
(butterfly_oft_mat, boft_s) = self.get_delta_weight(active_adapter)
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = torch.mm(butterfly_oft_mat, orig_weight)
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = orig_weight * boft_s
if not torch.isfinite(orig_weight).all():
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
self.base_layer.weight.data = orig_weight.contiguous()
else:
(butterfly_oft_mat, boft_s) = self.get_delta_weight(active_adapter)
orig_weight = base_layer.weight.data.clone()
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = torch.mm(butterfly_oft_mat, orig_weight)
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = orig_weight * boft_s
self.base_layer.weight.data = orig_weight.contiguous()
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.boft_R.keys():
(butterfly_oft_mat, boft_s) = self.get_delta_weight(active_adapter)
orig_weight = self.get_base_layer().weight.data.clone()
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = torch.mm(butterfly_oft_mat.t(), orig_weight)
orig_weight = torch.transpose(orig_weight, 0, 1)
self.get_base_layer().weight.data = orig_weight * (1 / boft_s)
def get_delta_weight(self, adapter) -> tuple[torch.Tensor, torch.Tensor]:
boft_R = self.boft_R[adapter]
boft_s = self.boft_s[adapter]
(N, D, H, _) = boft_R.shape
boft_R = boft_R.view(N * D, H, H)
orth_rotate_butterfly = self.cayley_batch(boft_R)
orth_rotate_butterfly = orth_rotate_butterfly.view(N, D, H, H)
if self.fbd_cuda_available:
block_diagonal_butterfly = FastBlockDiag.apply(orth_rotate_butterfly)
else:
orth_rotate_butterfly = orth_rotate_butterfly.squeeze(0)
block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)
boft_P = self.boft_P.to(block_diagonal_butterfly.device)
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
butterfly_oft_mat = butterfly_oft_mat_batch[0]
for i in range(1, butterfly_oft_mat_batch.shape[0]):
butterfly_oft_mat = butterfly_oft_mat_batch[i] @ butterfly_oft_mat
return (butterfly_oft_mat, boft_s)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
boft_rotation = torch.eye(self.in_features, device=x.device, dtype=previous_dtype)
boft_scale = torch.ones((int(self.out_features), 1), device=x.device, dtype=previous_dtype)
for active_adapter in self.active_adapters:
if active_adapter not in self.boft_R.keys():
continue
boft_R = self.boft_R[active_adapter]
boft_s = self.boft_s[active_adapter]
dropout = self.boft_dropout[active_adapter]
(N, D, H, _) = boft_R.shape
boft_R = boft_R.view(N * D, H, H)
orth_rotate_butterfly = self.cayley_batch(boft_R)
orth_rotate_butterfly = orth_rotate_butterfly.view(N, D, H, H)
orth_rotate_butterfly = dropout(orth_rotate_butterfly)
if self.fbd_cuda_available:
block_diagonal_butterfly = FastBlockDiag.apply(orth_rotate_butterfly)
else:
orth_rotate_butterfly = orth_rotate_butterfly.squeeze(0)
block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)
boft_P = self.boft_P.to(x)
block_diagonal_butterfly = block_diagonal_butterfly.to(x)
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
butterfly_oft_mat = butterfly_oft_mat_batch[0]
for i in range(1, butterfly_oft_mat_batch.shape[0]):
butterfly_oft_mat = butterfly_oft_mat_batch[i] @ butterfly_oft_mat
boft_rotation = butterfly_oft_mat @ boft_rotation
boft_scale = boft_s * boft_scale
x = x.to(self.get_base_layer().weight.data.dtype)
orig_weight = self.get_base_layer().weight.data
orig_weight = torch.transpose(orig_weight, 0, 1)
boft_rotation = boft_rotation.to(previous_dtype)
orig_weight = orig_weight.to(previous_dtype)
rotated_weight = torch.mm(boft_rotation, orig_weight)
rotated_weight = torch.transpose(rotated_weight, 0, 1)
scaled_rotated_weight = rotated_weight * boft_scale
scaled_rotated_weight = scaled_rotated_weight.to(previous_dtype)
if self.base_layer.bias is not None:
self.base_layer.bias = self.base_layer.bias.to(previous_dtype)
result = F.linear(input=x, weight=scaled_rotated_weight, bias=self.base_layer.bias)
result = result.to(previous_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'boft.' + rep
class Conv2d(nn.Module, BOFTLayer):
def __init__(self, base_layer: nn.Module, adapter_name: str, boft_block_size: int=8, boft_block_num: int=0, boft_n_butterfly_factor: int=0, boft_dropout: float=0.1, init_weights: Union[bool, str]=True, **kwargs) -> None:
super().__init__()
BOFTLayer.__init__(self, base_layer)
self._active_adapter = adapter_name
if not get_fbd_cuda():
self.fbd_cuda_available = False
boft_n_butterfly_factor = 1
else:
self.fbd_cuda_available = True
self.update_layer(adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights)
def update_layer(self, adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights):
boft_n_butterfly_factor = boft_n_butterfly_factor - 1
if boft_n_butterfly_factor < 0:
raise ValueError(f'You can only specify boft_n_butterfly_factor {boft_n_butterfly_factor + 1} to be a positive integer number.')
if boft_dropout > 0.0:
boft_dropout_layer = MultiplicativeDropoutLayer(p=boft_dropout)
else:
boft_dropout_layer = nn.Identity()
self.boft_dropout.update(nn.ModuleDict({adapter_name: boft_dropout_layer}))
base_layer = self.get_base_layer()
conv_filter_dim = self.in_features * base_layer.kernel_size[0] * base_layer.kernel_size[0]
if not (boft_block_size != 0) ^ (boft_block_num != 0):
raise ValueError(f'You can only specify either boft_block_size ({boft_block_size}) or boft_block_num ({boft_block_num}), but not both simultaneously, because boft_block_size x boft_block_num != in_features.')
if boft_block_size == 0 and boft_block_num != 0:
if conv_filter_dim % boft_block_num != 0:
raise ValueError(f'Convolutional kernel dimension ({conv_filter_dim}) must be divisible by boft_block_num ({boft_block_num})!')
if boft_n_butterfly_factor != 0:
if boft_n_butterfly_factor > int(math.log2(boft_block_num)):
raise ValueError(f'Invalid combination of boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_num ({boft_block_num})!')
if boft_block_num % 2 ** boft_n_butterfly_factor != 0:
raise ValueError(f'boft_block_num ({boft_block_num}) must be a multiple of 2 raised to the power of boft_n_butterfly_factor ({boft_n_butterfly_factor + 1})!')
boft_block_size = int(conv_filter_dim // boft_block_num)
elif boft_block_size != 0 and boft_block_num == 0:
if conv_filter_dim % boft_block_size != 0:
raise ValueError(f'Convolutional kernel dimension ({conv_filter_dim}) must be divisible by boft_block_size ({boft_block_size})!')
if boft_n_butterfly_factor != 0:
if conv_filter_dim < boft_block_size * 2 ** boft_n_butterfly_factor:
raise ValueError(f'Invalid combination of convolutional kernel dimension ({conv_filter_dim}), boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_size ({boft_block_size})!')
if conv_filter_dim % (boft_block_size * 2 ** boft_n_butterfly_factor) != 0:
raise ValueError(f'Invalid combination of convolutional kernel dimension ({conv_filter_dim}), boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_size ({boft_block_size})!')
boft_block_num = int(conv_filter_dim // boft_block_size)
else:
raise ValueError('Unknown error!')
if boft_n_butterfly_factor != 0:
if boft_block_num % 2 != 0:
raise ValueError(f'boft_block_num ({boft_block_num}) must be an even number!')
if boft_block_size % 2 != 0:
raise ValueError(f'boft_block_size ({boft_block_size}) must be an even number!')
P = torch.empty((boft_n_butterfly_factor + 1, conv_filter_dim, conv_filter_dim))
for i in range(boft_n_butterfly_factor + 1):
perm = self.block_butterfly_perm(conv_filter_dim, int(boft_block_num / 2 ** i), int(boft_block_size / 2), boft_n_butterfly_factor)
perm_mat = self.perm2mat(perm)
P[i] = perm_mat
self.register_buffer('boft_P', P, persistent=False)
self.boft_R[adapter_name] = nn.Parameter(torch.zeros(boft_n_butterfly_factor + 1, boft_block_num, boft_block_size, boft_block_size))
self.boft_s[adapter_name] = nn.Parameter(torch.ones(1, int(self.out_features)))
self.reset_boft_parameters(adapter_name, init_weights)
self.boft_block_size[adapter_name] = boft_block_size
self.boft_block_num[adapter_name] = boft_block_num
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def merge(self, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter in self.boft_R.keys():
base_layer = self.get_base_layer()
if safe_merge:
orig_weight = base_layer.weight.data.clone()
(butterfly_oft_mat, boft_s) = self.get_delta_weight(active_adapter)
orig_weight = orig_weight.view(self.in_features * base_layer.kernel_size[0] * base_layer.kernel_size[0], self.out_features)
orig_weight = torch.mm(butterfly_oft_mat, orig_weight)
orig_weight = orig_weight * boft_s
orig_weight = orig_weight.view(self.out_features, self.in_features, base_layer.kernel_size[0], base_layer.kernel_size[0])
self.base_layer.weight.data = orig_weight.contiguous()
else:
(butterfly_oft_mat, boft_s) = self.get_delta_weight(active_adapter)
orig_weight = base_layer.weight.data.clone()
orig_weight = orig_weight.view(self.in_features * base_layer.kernel_size[0] * base_layer.kernel_size[0], self.out_features)
orig_weight = torch.mm(butterfly_oft_mat, orig_weight)
orig_weight = orig_weight * boft_s
orig_weight = orig_weight.view(self.out_features, self.in_features, base_layer.kernel_size[0], base_layer.kernel_size[0])
self.base_layer.weight.data = orig_weight.contiguous()
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.boft_R.keys():
(butterfly_oft_mat, boft_s) = self.get_delta_weight(active_adapter)
orig_weight = self.get_base_layer().weight.data.clone()
orig_weight = orig_weight.view(self.in_features * self.get_base_layer().kernel_size[0] * self.get_base_layer().kernel_size[0], self.out_features)
orig_weight = torch.mm(butterfly_oft_mat.t(), orig_weight)
orig_weight = orig_weight * (1 / boft_s)
orig_weight = orig_weight.view(self.out_features, self.in_features, self.get_base_layer().kernel_size[0], self.get_base_layer().kernel_size[0])
self.get_base_layer().weight.data = orig_weight
def get_delta_weight(self, adapter) -> tuple[torch.Tensor, torch.Tensor]:
boft_R = self.boft_R[adapter]
boft_s = self.boft_s[adapter]
(N, D, H, _) = boft_R.shape
boft_R = boft_R.view(N * D, H, H)
orth_rotate_butterfly = self.cayley_batch(boft_R)
orth_rotate_butterfly = orth_rotate_butterfly.view(N, D, H, H)
if self.fbd_cuda_available:
block_diagonal_butterfly = FastBlockDiag.apply(orth_rotate_butterfly)
else:
orth_rotate_butterfly = orth_rotate_butterfly.squeeze(0)
block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)
boft_P = self.boft_P.to(block_diagonal_butterfly.device)
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
butterfly_oft_mat = butterfly_oft_mat_batch[0]
for i in range(1, butterfly_oft_mat_batch.shape[0]):
butterfly_oft_mat = butterfly_oft_mat_batch[i] @ butterfly_oft_mat
return (butterfly_oft_mat, boft_s)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
boft_rotation = torch.eye(self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0], device=x.device, dtype=x.dtype)
boft_scale = torch.ones((1, int(self.out_features)), device=x.device, dtype=x.dtype)
for active_adapter in self.active_adapters:
if active_adapter not in self.boft_R.keys():
continue
boft_R = self.boft_R[active_adapter]
boft_s = self.boft_s[active_adapter]
dropout = self.boft_dropout[active_adapter]
(N, D, H, _) = boft_R.shape
boft_R = boft_R.view(N * D, H, H)
orth_rotate_butterfly = self.cayley_batch(boft_R)
orth_rotate_butterfly = orth_rotate_butterfly.view(N, D, H, H)
orth_rotate_butterfly = dropout(orth_rotate_butterfly)
if self.fbd_cuda_available:
block_diagonal_butterfly = FastBlockDiag.apply(orth_rotate_butterfly)
else:
orth_rotate_butterfly = orth_rotate_butterfly.squeeze(0)
block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)
boft_P = self.boft_P.to(x)
block_diagonal_butterfly = block_diagonal_butterfly.to(x)
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
butterfly_oft_mat = butterfly_oft_mat_batch[0]
for i in range(1, butterfly_oft_mat_batch.shape[0]):
butterfly_oft_mat = butterfly_oft_mat_batch[i] @ butterfly_oft_mat
boft_rotation = butterfly_oft_mat @ boft_rotation
boft_scale = boft_s * boft_scale
x = x.to(self.base_layer.weight.data.dtype)
orig_weight = self.base_layer.weight.data
orig_weight = orig_weight.view(self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0], self.out_features)
rotated_weight = torch.mm(boft_rotation, orig_weight)
scaled_rotated_weight = rotated_weight * boft_scale
scaled_rotated_weight = scaled_rotated_weight.view(self.out_features, self.in_features, self.base_layer.kernel_size[0], self.base_layer.kernel_size[0])
result = F.conv2d(input=x, weight=scaled_rotated_weight, bias=self.base_layer.bias, padding=self.base_layer.padding[0], stride=self.base_layer.stride[0])
result = result.to(previous_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'boft.' + rep
# File: peft-main/src/peft/tuners/boft/model.py
import warnings
from dataclasses import asdict
from enum import Enum
from typing import List, Optional
import torch
from torch import nn
from tqdm import tqdm
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists, onload_layer
from peft.utils import TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, _get_submodules
from .config import BOFTConfig
from .layer import BOFTLayer, Conv2d, Linear
class BOFTModel(BaseTuner):
prefix: str = 'boft_'
def __init__(self, model, config, adapter_name) -> None:
super().__init__(model, config, adapter_name)
def _check_new_adapter_config(self, config: BOFTConfig) -> None:
if len(self.peft_config) > 1 and config.bias != 'none':
raise ValueError(f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.")
@staticmethod
def _check_target_module_exists(boft_config, key):
return check_target_module_exists(boft_config, key)
def _create_and_replace(self, boft_config, adapter_name, target, target_name, parent, current_key, **optional_kwargs):
if current_key is None:
raise ValueError("Current Key shouldn't be `None`")
bias = hasattr(target, 'bias') and target.bias is not None
kwargs = {'boft_block_size': boft_config.boft_block_size, 'boft_block_num': boft_config.boft_block_num, 'boft_n_butterfly_factor': boft_config.boft_n_butterfly_factor, 'boft_dropout': boft_config.boft_dropout, 'fan_in_fan_out': boft_config.fan_in_fan_out, 'init_weights': boft_config.init_weights}
kwargs['bias'] = bias
if not isinstance(target, BOFTLayer):
new_module = self._create_new_module(boft_config, adapter_name, target, **kwargs)
if adapter_name not in self.active_adapters:
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
else:
target.update_layer(adapter_name, boft_block_size=boft_config.boft_block_size, boft_block_num=boft_config.boft_block_num, boft_n_butterfly_factor=boft_config.boft_n_butterfly_factor, boft_dropout=boft_config.boft_dropout, init_weights=boft_config.init_weights)
def _replace_module(self, parent, child_name, new_module, child):
setattr(parent, child_name, new_module)
if hasattr(child, 'base_layer'):
child = child.base_layer
if not hasattr(new_module, 'base_layer'):
new_module.weight = child.weight
if hasattr(child, 'bias'):
new_module.bias = child.bias
if getattr(child, 'state', None) is not None:
if hasattr(new_module, 'base_layer'):
new_module.base_layer.state = child.state
else:
new_module.state = child.state
new_module.to(child.weight.device)
for (name, module) in new_module.named_modules():
if self.prefix in name:
module.to(child.weight.device)
def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
for (n, p) in model.named_parameters():
if self.prefix not in n:
p.requires_grad = False
for active_adapter in self.active_adapters:
bias = self.peft_config[active_adapter].bias
if bias == 'none':
continue
if bias == 'all':
for (n, p) in model.named_parameters():
if 'bias' in n:
p.requires_grad = True
elif bias == 'boft_only':
for (name, m) in model.named_modules():
if isinstance(m, BOFTLayer) and hasattr(m, 'bias') and (m.bias is not None):
m.bias.requires_grad = True
else:
raise NotImplementedError(f'Requested bias: {bias}, is not implemented.')
@staticmethod
def _create_new_module(boft_config, adapter_name, target, **kwargs):
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Linear):
if kwargs['fan_in_fan_out']:
warnings.warn('fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. Setting fan_in_fan_out to False.')
kwargs['fan_in_fan_out'] = boft_config.fan_in_fan_out = False
new_module = Linear(target, adapter_name, **kwargs)
elif isinstance(target_base_layer, torch.nn.Conv2d):
new_module = Conv2d(target, adapter_name, **kwargs)
else:
raise ValueError(f'Target module {target} is not supported. Currently, only `torch.nn.Linear` and `torch.nn.Conv2d` are supported.')
return new_module
def __getattr__(self, name: str):
try:
return super().__getattr__(name)
except AttributeError:
if name == 'model':
raise
return getattr(self.model, name)
def get_peft_config_as_dict(self, inference: bool=False):
config_dict = {}
for (key, value) in self.peft_config.items():
config = {k: v.value if isinstance(v, Enum) else v for (k, v) in asdict(value).items()}
if inference:
config['inference_mode'] = True
config_dict[key] = config
return config
def _set_adapter_layers(self, enabled=True):
for module in self.model.modules():
if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
module.enable_adapters(enabled)
def enable_adapter_layers(self):
self._set_adapter_layers(enabled=True)
def disable_adapter_layers(self):
for active_adapter in self.active_adapters:
val = self.peft_config[active_adapter].bias
if val != 'none':
msg = f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same output as the the base model would without adaption."
warnings.warn(msg)
self._set_adapter_layers(enabled=False)
def set_adapter(self, adapter_name):
for module in self.model.modules():
if isinstance(module, BOFTLayer):
if module.merged:
warnings.warn('Adapter cannot be set when the model is merged. Unmerging the model first.')
module.unmerge()
module.set_adapter(adapter_name)
self.active_adapter = adapter_name
@staticmethod
def _prepare_adapter_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config['model_type'] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:
raise ValueError('Please specify `target_modules` in `peft_config`')
peft_config.target_modules = set(TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config['model_type']])
return peft_config
def _unload_and_optionally_merge(self, merge=True, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[List[str]]=None):
if merge:
self._check_merge_allowed()
key_list = [key for (key, _) in self.model.named_modules() if self.prefix not in key]
desc = 'Unloading ' + ('and merging ' if merge else '') + 'model'
for key in tqdm(key_list, disable=not progressbar, desc=desc):
try:
(parent, target, target_name) = _get_submodules(self.model, key)
except AttributeError:
continue
with onload_layer(target):
if hasattr(target, 'base_layer'):
if merge:
target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
self._replace_module(parent, target_name, target.get_base_layer(), target)
elif isinstance(target, ModulesToSaveWrapper):
new_module = target.modules_to_save[target.active_adapter]
if hasattr(new_module, 'base_layer'):
if merge:
new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
new_module = new_module.get_base_layer()
setattr(parent, target_name, new_module)
return self.model
def delete_adapter(self, adapter_name: str) -> None:
if adapter_name not in list(self.peft_config.keys()):
raise ValueError(f'Adapter {adapter_name} does not exist')
del self.peft_config[adapter_name]
key_list = [key for (key, _) in self.model.named_modules() if self.prefix not in key]
new_adapter = None
for key in key_list:
(_, target, _) = _get_submodules(self.model, key)
if isinstance(target, BOFTLayer):
target.delete_adapter(adapter_name)
if new_adapter is None:
new_adapter = target.active_adapters[:]
self.active_adapter = new_adapter or []
def merge_and_unload(self, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[List[str]]=None) -> torch.nn.Module:
return self._unload_and_optionally_merge(progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names)
def unload(self) -> torch.nn.Module:
return self._unload_and_optionally_merge(merge=False)
# File: peft-main/src/peft/tuners/fourierft/config.py
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class FourierFTConfig(PeftConfig):
n_frequency: int = field(default=1000, metadata={'help': "Num of learnable frequencies for the Discrete Fourier Transform. 'n_frequency' is an integer that isgreater than 0 and less than or equal to d^2 (assuming the weight W has dimensions of d by d).Additionally, it is the number of trainable parameters required to update each delta W weight.'n_frequency' will affect the performance and efficiency for PEFT. Specifically, it has little impact ontraining speed, but higher values of it (typically) result in larger GPU memory costs and better accuracy.With the same `target_modules`, the number of parameters of LoRA is (2*d*r/n_frequency) times that of FourierFT.The following examples of settings regarding 'n_frequency' can be used as reference for users. For NLUtasks with the RoBERTa-large model, adopting 'n_frequency': 1000 can almost achieve similar results as'r': 8 in LoRA. At this time, the number of parameters of LoRA is about 16 times that of FourierFT.For image classification tasks with Vit-large models, adopting 'n_frequency': 3000 can almost achievesimilar results as 'r': 16 in LoRA, where the number of parameters of LoRA is about 11 times that of FourierFT."})
scaling: float = field(default=150.0, metadata={'help': "The scaling value for the delta W matrix. This is an important hyperparameter used for scaling, similar to the'lora_alpha' parameter in the LoRA method. 'scaling' can be determined during the hyperparameter search process.However, if users want to skip this process, one can refer to the settings in the following scenarios.This parameter can be set to 100.0 or 150.0 for both RoBERTa-base and RoBERTa-large models across all NLU (GLUE) tasks.This parameter can be set to 300.0 for both LLaMA family models for all instruction tuning.This parameter can be set to 300.0 for both ViT-base and ViT-large models across all image classification tasks."})
random_loc_seed: Optional[int] = field(default=777, metadata={'help': 'Seed for the random location of the frequencies.'})
fan_in_fan_out: bool = field(default=False, metadata={'help': 'Set this to True if the layer to replace stores weight like (fan_in, fan_out)'})
target_modules: Optional[Union[list[str], str]] = field(default=None, metadata={'help': "List of module names or regex expression of the module names to replace with FourierFT.For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'. Only linear layers are supported."})
bias: str = field(default='none', metadata={'help': "Bias type for FourierFT. Can be 'none', 'all' or 'fourier_only'."})
modules_to_save: Optional[list[str]] = field(default=None, metadata={'help': 'List of modules apart from FourierFT layers to be set as trainable and saved in the final checkpoint. For example, in Sequence Classification or Token Classification tasks, the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.'})
layers_to_transform: Optional[Union[list[int], int]] = field(default=None, metadata={'help': 'The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index.'})
layers_pattern: Optional[str] = field(default=None, metadata={'help': 'The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern.'})
n_frequency_pattern: Optional[dict] = field(default_factory=dict, metadata={'help': 'The mapping from layer names or regexp expression to n_frequency which are different from the default specified.For example, `{model.decoder.layers.0.encoder_attn.k_proj: 500`}.'})
init_weights: bool = field(default=False, metadata={'help': 'The initialization of the Fourier weights. Set this to False if the spectrum should be initialized to a standard normal distribution.Set this to True if the spectrum should be initialized to zeros.'})
def __post_init__(self):
self.peft_type = PeftType.FOURIERFT
self.target_modules = set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
if isinstance(self.target_modules, str) and self.layers_to_transform is not None:
raise ValueError('`layers_to_transform` cannot be used when `target_modules` is a str.')
if isinstance(self.target_modules, str) and self.layers_pattern is not None:
raise ValueError('`layers_pattern` cannot be used when `target_modules` is a str.')
# File: peft-main/src/peft/tuners/fourierft/layer.py
import warnings
from typing import Any, List, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
class FourierFTLayer(BaseTunerLayer):
adapter_layer_names = ('fourierft_spectrum',)
other_param_names = ('fourierft_n_frequency', 'fourierft_scaling', 'fourierft_random_loc_seed')
def __init__(self, base_layer: nn.Module, **kwargs) -> None:
self.base_layer = base_layer
self.fourierft_n_frequency = {}
self.fourierft_scaling = {}
self.fourierft_spectrum = nn.ParameterDict({})
self.indices = {}
self.fourierft_random_loc_seed = {}
self._disable_adapters = False
self.merged_adapters = []
self.kwargs = kwargs
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
(self.in_features, self.out_features) = (base_layer.in_features, base_layer.out_features)
elif isinstance(base_layer, Conv1D):
(self.in_features, self.out_features) = base_layer.weight.ds_shape if hasattr(base_layer.weight, 'ds_shape') else base_layer.weight.shape
else:
raise ValueError(f'Unsupported layer type {type(base_layer)}')
def update_layer(self, adapter_name, n_frequency, scaling, init_weights, random_loc_seed):
if n_frequency <= 0:
raise ValueError(f'`n_frequency` should be a positive integer value but the value passed is {n_frequency}')
if n_frequency > self.in_features * self.out_features:
raise ValueError(f'`n_frequency` should be less than or equal to the product of the input and output dimensions but the value passed is {n_frequency} and the product is {self.in_features * self.out_features}')
self.fourierft_n_frequency[adapter_name] = n_frequency
self.fourierft_random_loc_seed[adapter_name] = random_loc_seed
self.indices[adapter_name] = torch.randperm(self.out_features * self.in_features, generator=torch.Generator().manual_seed(self.fourierft_random_loc_seed[adapter_name]))[:n_frequency]
self.indices[adapter_name] = torch.stack([self.indices[adapter_name] // self.in_features, self.indices[adapter_name] % self.in_features], dim=0)
self.fourierft_scaling[adapter_name] = scaling
self.fourierft_spectrum[adapter_name] = nn.Parameter(torch.randn(n_frequency), requires_grad=True)
if init_weights:
self.reset_fourier_parameters(adapter_name)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
@torch.no_grad()
def reset_fourier_parameters(self, adapter_name):
if adapter_name in self.fourierft_spectrum.keys():
nn.init.zeros_(self.fourierft_spectrum[adapter_name])
def get_delta_weight(self, adapter) -> torch.Tensor:
spectrum = self.fourierft_spectrum[adapter]
indices = self.indices[adapter].to(spectrum.device)
dense_spectrum = torch.zeros(self.out_features, self.in_features, device=spectrum.device, dtype=spectrum.dtype)
dense_spectrum[indices[0, :], indices[1, :]] = spectrum
delta_weight = torch.fft.ifft2(dense_spectrum).real * self.fourierft_scaling[adapter]
return delta_weight
class FourierFTLinear(nn.Module, FourierFTLayer):
def __init__(self, base_layer, adapter_name: str, n_frequency: int=1000, scaling: float=150.0, fan_in_fan_out: bool=False, init_weights: Union[bool, str]=False, random_loc_seed: int=777, **kwargs) -> None:
super().__init__()
FourierFTLayer.__init__(self, base_layer, **kwargs)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(adapter_name, n_frequency, scaling, init_weights, random_loc_seed)
def merge(self, safe_merge: bool=False, adapter_names: Optional[List[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter in self.fourierft_spectrum.keys():
base_layer = self.get_base_layer()
if safe_merge:
orig_weights = base_layer.weight.data.clone()
orig_weights += self.get_delta_weight(active_adapter)
if not torch.isfinite(orig_weights).all():
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
base_layer.weight.data = orig_weights
else:
base_layer.weight.data += self.get_delta_weight(active_adapter)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.fourierft_spectrum.keys():
self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
def get_delta_weight(self, adapter) -> torch.Tensor:
return super().get_delta_weight(adapter)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.fourierft_spectrum.keys():
continue
delta_w = self.get_delta_weight(active_adapter)
x = x.to(delta_w.dtype)
result = result + F.linear(x, delta_w)
result = result.to(previous_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'fourierft.' + rep
# File: peft-main/src/peft/tuners/fourierft/model.py
from __future__ import annotations
import re
import warnings
from dataclasses import asdict
from enum import Enum
from itertools import chain
from typing import Optional
import torch
from tqdm import tqdm
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
from peft.utils import TRANSFORMERS_MODELS_TO_FOURIERFT_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, _get_submodules
from .config import FourierFTConfig
from .layer import FourierFTLayer, FourierFTLinear
class FourierFTModel(BaseTuner):
prefix: str = 'fourierft_'
def __init__(self, model, config, adapter_name) -> None:
super().__init__(model, config, adapter_name)
def _check_new_adapter_config(self, config: FourierFTConfig) -> None:
if len(self.peft_config) > 1 and config.bias != 'none':
raise ValueError(f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.")
@staticmethod
def _check_target_module_exists(fourierft_config, key):
return check_target_module_exists(fourierft_config, key)
def _create_and_replace(self, fourierft_config, adapter_name, target, target_name, parent, current_key, **optional_kwargs):
if current_key is None:
raise ValueError("Current Key shouldn't be `None`")
pattern_keys = list(chain(fourierft_config.n_frequency_pattern.keys()))
target_name_key = next(filter(lambda key: re.match(f'.*\\.{key}$', current_key), pattern_keys), current_key)
n_frequency = fourierft_config.n_frequency_pattern.get(target_name_key, fourierft_config.n_frequency)
scaling = fourierft_config.scaling
random_loc_seed = fourierft_config.random_loc_seed
bias = hasattr(target, 'bias') and target.bias is not None
kwargs = {'n_frequency': n_frequency, 'scaling': scaling, 'fan_in_fan_out': fourierft_config.fan_in_fan_out, 'init_weights': fourierft_config.init_weights, 'random_loc_seed': fourierft_config.random_loc_seed}
kwargs['bias'] = bias
if isinstance(target, FourierFTLayer):
target.update_layer(adapter_name, n_frequency, scaling, fourierft_config.init_weights, random_loc_seed)
else:
new_module = self._create_new_module(fourierft_config, adapter_name, target, **kwargs)
if adapter_name != self.active_adapter:
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
def _replace_module(self, parent, child_name, new_module, child):
setattr(parent, child_name, new_module)
if hasattr(child, 'base_layer'):
child = child.base_layer
if not hasattr(new_module, 'base_layer'):
new_module.weight = child.weight
if hasattr(child, 'bias'):
new_module.bias = child.bias
if getattr(child, 'state', None) is not None:
if hasattr(new_module, 'base_layer'):
new_module.base_layer.state = child.state
else:
new_module.state = child.state
new_module.to(child.weight.device)
for (name, module) in new_module.named_modules():
if 'fourierft_' in name:
module.to(child.weight.device)
def _mark_only_adapters_as_trainable(self, model: torch.nn.Module) -> None:
for (n, p) in model.named_parameters():
if self.prefix not in n:
p.requires_grad = False
for active_adapter in self.active_adapters:
bias = self.peft_config[active_adapter].bias
if bias == 'none':
continue
if bias == 'all':
for (n, p) in model.named_parameters():
if 'bias' in n:
p.requires_grad = True
elif bias == 'fourier_only':
for m in model.modules():
if isinstance(m, FourierFTLayer) and hasattr(m, 'bias') and (m.bias is not None):
m.bias.requires_grad = True
else:
raise NotImplementedError(f'Requested bias: {bias}, is not implemented.')
@staticmethod
def _create_new_module(fourierft_config, adapter_name, target, **kwargs):
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Linear):
if kwargs['fan_in_fan_out']:
warnings.warn('fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. Setting fan_in_fan_out to False.')
kwargs['fan_in_fan_out'] = fourierft_config.fan_in_fan_out = False
elif isinstance(target_base_layer, Conv1D):
kwargs['is_target_conv_1d_layer'] = True
if not kwargs['fan_in_fan_out']:
warnings.warn('fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True.')
kwargs['fan_in_fan_out'] = fourierft_config.fan_in_fan_out = True
else:
raise ValueError(f'Target module {target} is not supported. Currently, only the following modules are supported: `torch.nn.Linear`.')
new_module = FourierFTLinear(target, adapter_name, **kwargs)
return new_module
def __getattr__(self, name: str):
try:
return super().__getattr__(name)
except AttributeError:
if name == 'model':
raise
return getattr(self.model, name)
def get_peft_config_as_dict(self, inference: bool=False):
config_dict = {}
for (key, value) in self.peft_config.items():
config = {k: v.value if isinstance(v, Enum) else v for (k, v) in asdict(value).items()}
if inference:
config['inference_mode'] = True
config_dict[key] = config
return config
def _set_adapter_layers(self, enabled: bool=True) -> None:
for module in self.model.modules():
if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
module.enable_adapters(enabled)
def enable_adapter_layers(self) -> None:
self._set_adapter_layers(enabled=True)
def disable_adapter_layers(self) -> None:
for active_adapter in self.active_adapters:
val = self.peft_config[active_adapter].bias
if val != 'none':
msg = f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same output as the the base model would without adaption."
warnings.warn(msg)
self._set_adapter_layers(enabled=False)
def set_adapter(self, adapter_name: str | list[str]) -> None:
for module in self.model.modules():
if isinstance(module, FourierFTLayer):
if module.merged:
warnings.warn('Adapter cannot be set when the model is merged. Unmerging the model first.')
module.unmerge()
module.set_adapter(adapter_name)
self.active_adapter = adapter_name
@staticmethod
def _prepare_adapter_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config['model_type'] not in TRANSFORMERS_MODELS_TO_FOURIERFT_TARGET_MODULES_MAPPING:
raise ValueError('Please specify `target_modules` in `peft_config`')
peft_config.target_modules = set(TRANSFORMERS_MODELS_TO_FOURIERFT_TARGET_MODULES_MAPPING[model_config['model_type']])
return peft_config
def _unload_and_optionally_merge(self, merge=True, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[list[str]]=None):
key_list = [key for (key, _) in self.model.named_modules() if self.prefix not in key]
desc = 'Unloading ' + ('and merging ' if merge else '') + 'model'
for key in tqdm(key_list, disable=not progressbar, desc=desc):
try:
(parent, target, target_name) = _get_submodules(self.model, key)
except AttributeError:
continue
if hasattr(target, 'base_layer'):
if merge:
target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
self._replace_module(parent, target_name, target.get_base_layer(), target)
elif isinstance(target, ModulesToSaveWrapper):
setattr(parent, target_name, target.modules_to_save[target.active_adapter])
return self.model
def delete_adapter(self, adapter_name: str):
if adapter_name not in list(self.peft_config.keys()):
raise ValueError(f'Adapter {adapter_name} does not exist')
del self.peft_config[adapter_name]
key_list = [key for (key, _) in self.model.named_modules() if 'fourierft' not in key]
new_adapter = None
for key in key_list:
(_, target, _) = _get_submodules(self.model, key)
if isinstance(target, FourierFTLayer):
target.delete_adapter(adapter_name)
if new_adapter is None:
new_adapter = target.active_adapter[:]
self.active_adapter = new_adapter or []
def merge_and_unload(self, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> torch.nn.Module:
return self._unload_and_optionally_merge(progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names)
def unload(self) -> torch.nn.Module:
return self._unload_and_optionally_merge(merge=False)
# File: peft-main/src/peft/tuners/hra/config.py
from dataclasses import dataclass, field
from typing import List, Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class HRAConfig(PeftConfig):
r: int = field(default=8, metadata={'help': 'The rank of HRA across different layers.', 'note': "It is best to set 'r' to an even number; otherwise, the default initialization method will not work."})
apply_GS: bool = field(default=False, metadata={'help': 'Whether to apply Gram-Schmidt orthogonalization or not.'})
target_modules: Optional[Union[List[str], str]] = field(default=None, metadata={'help': 'List of module names or regex expression of the module names to replace with HRA.', 'example': "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "})
init_weights: bool = field(default=True, metadata={'help': "Whether to initialize the weights of the HRA layers with their default initialization. Don't change this setting, except if you know exactly what you're doing."})
layers_to_transform: Optional[Union[List[int], int]] = field(default=None, metadata={'help': 'The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index.'})
layers_pattern: Optional[str] = field(default=None, metadata={'help': 'The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern.'})
bias: str = field(default='none', metadata={'help': "Bias type for HRA. Can be 'none', 'all' or 'hra_only'"})
modules_to_save: Optional[List[str]] = field(default=None, metadata={'help': 'List of modules apart from HRA layers to be set as trainable and saved in the final checkpoint. For example, in Sequence Classification or Token Classification tasks, the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.'})
def __post_init__(self):
self.peft_type = PeftType.HRA
self.target_modules = set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
if isinstance(self.target_modules, str) and self.layers_to_transform is not None:
raise ValueError('`layers_to_transform` cannot be used when `target_modules` is a str.')
if isinstance(self.target_modules, str) and self.layers_pattern is not None:
raise ValueError('`layers_pattern` cannot be used when `target_modules` is a str.')
# File: peft-main/src/peft/tuners/hra/layer.py
import math
import warnings
from typing import Any, List, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
class HRALayer(BaseTunerLayer):
adapter_layer_names = ('hra_u',)
other_param_names = ('hra_r', 'hra_apply_GS')
def __init__(self, base_layer: nn.Module, **kwargs) -> None:
self.base_layer = base_layer
self.hra_r = {}
self.hra_apply_GS = {}
self.hra_u = nn.ParameterDict({})
self._disable_adapters = False
self.merged_adapters = []
self.kwargs = kwargs
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
(self.in_features, self.out_features) = (base_layer.in_features, base_layer.out_features)
elif isinstance(base_layer, nn.Conv2d):
(self.in_features, self.out_features) = (base_layer.in_channels, base_layer.out_channels)
else:
raise ValueError(f'Unsupported layer type {type(base_layer)}')
def update_layer(self, adapter_name: str, r: int, apply_GS: bool, init_weights: bool, **kwargs) -> None:
if r <= 0:
raise ValueError(f'`r` should be a positive integer value but the value passed is {r}')
self.hra_r[adapter_name] = r
self.hra_apply_GS[adapter_name] = apply_GS
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
self.hra_u[adapter_name] = nn.Parameter(torch.empty(self.in_features, r), requires_grad=True)
elif isinstance(base_layer, nn.Conv2d):
self.hra_u[adapter_name] = nn.Parameter(torch.empty(self.in_features * base_layer.kernel_size[0] * base_layer.kernel_size[0], r), requires_grad=True)
else:
raise TypeError(f'HRA is not implemented for base layers of type {type(base_layer).__name__}')
if init_weights:
self.reset_hra_parameters(adapter_name)
else:
self.reset_hra_parameters_random(adapter_name)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def reset_hra_parameters(self, adapter_name: str):
if self.hra_r[adapter_name] % 2 != 0:
warnings.warn('The symmetric initialization can NOT be performed when r is odd!')
nn.init.kaiming_uniform_(self.hra_u[adapter_name], a=math.sqrt(5))
else:
shape = self.hra_u[adapter_name].shape
half_u = torch.zeros(shape[0], shape[1] // 2)
nn.init.kaiming_uniform_(half_u, a=math.sqrt(5))
self.hra_u[adapter_name] = nn.Parameter(torch.repeat_interleave(half_u, 2, dim=1))
def reset_hra_parameters_random(self, adapter_name: str):
nn.init.kaiming_uniform_(self.hra_u[adapter_name], a=math.sqrt(5))
def scale_layer(self, scale: float) -> None:
if scale == 1:
return
for active_adapter in self.active_adapters:
if active_adapter not in self.hra_u.keys():
continue
warnings.warn('Scaling operation for HRA not supported! Automatically set scale to 1.')
def unscale_layer(self, scale=None) -> None:
for active_adapter in self.active_adapters:
if active_adapter not in self.hra_u.keys():
continue
warnings.warn('Unscaling operation for HRA not supported! Keeping scale at 1.')
class HRALinear(nn.Module, HRALayer):
def __init__(self, base_layer, adapter_name: str, r: int=0, apply_GS: bool=False, init_weights: Union[bool, str]=True, **kwargs) -> None:
super().__init__()
HRALayer.__init__(self, base_layer, **kwargs)
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, apply_GS, init_weights, **kwargs)
def merge(self, safe_merge: bool=False, adapter_names: Optional[List[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter in self.hra_u.keys():
base_layer = self.get_base_layer()
if safe_merge:
orig_weight = base_layer.weight.data.clone()
delta_weight = self.get_delta_weight(active_adapter)
orig_weight = torch.mm(orig_weight, delta_weight)
if not torch.isfinite(orig_weight).all():
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
self.base_layer.weight.data = orig_weight
else:
delta_weight = self.get_delta_weight(active_adapter)
self.base_layer.weight.data = torch.mm(self.base_layer.weight.data, delta_weight)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.hra_u.keys():
orig_weight = self.get_base_layer().weight.data.clone()
delta_weight = self.get_delta_weight(active_adapter, reverse=True)
self.get_base_layer().weight.data = torch.mm(orig_weight, delta_weight)
def get_delta_weight(self, adapter_name: str, reverse: bool=False) -> torch.Tensor:
rank = self.hra_r[adapter_name]
apply_GS = self.hra_apply_GS[adapter_name]
opt_u = self.hra_u[adapter_name]
shape = opt_u.shape
if apply_GS:
weight = [(opt_u[:, 0] / opt_u[:, 0].norm()).view(-1, 1)]
for i in range(1, rank):
ui = opt_u[:, i].view(-1, 1)
for j in range(i):
ui = ui - weight[j].t() @ ui * weight[j]
weight.append((ui / ui.norm()).view(-1, 1))
weight = torch.cat(weight, dim=1)
weight = torch.eye(shape[0], device=opt_u.device, dtype=opt_u.dtype) - 2 * weight @ weight.t()
else:
opt_u = opt_u / opt_u.norm(dim=0)
weight = torch.eye(shape[0], device=opt_u.device, dtype=opt_u.dtype)
if reverse:
indices = range(rank - 1, -1, -1)
else:
indices = range(rank)
for i in indices:
ui = opt_u[:, i].view(-1, 1)
weight = weight @ (torch.eye(shape[0], device=opt_u.device, dtype=opt_u.dtype) - 2 * ui @ ui.t())
return weight
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
new_weight = torch.eye(self.in_features, device=x.device)
for active_adapter in self.active_adapters:
if active_adapter not in self.hra_u.keys():
continue
delta_weight = self.get_delta_weight(active_adapter)
new_weight = torch.mm(new_weight, delta_weight)
x = x.to(self.get_base_layer().weight.data.dtype)
orig_weight = self.get_base_layer().weight.data
new_weight = torch.mm(orig_weight, new_weight)
result = F.linear(input=x, weight=new_weight, bias=self.base_layer.bias)
result = result.to(previous_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'hra.' + rep
class HRAConv2d(nn.Module, HRALayer):
def __init__(self, base_layer, adapter_name: str, r: int=0, apply_GS: bool=False, init_weights: Union[bool, str]=True, **kwargs):
super().__init__()
HRALayer.__init__(self, base_layer)
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, apply_GS, init_weights, **kwargs)
def merge(self, safe_merge: bool=False, adapter_names: Optional[List[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter in self.hra_u.keys():
base_layer = self.get_base_layer()
if safe_merge:
orig_weight = base_layer.weight.data.clone()
orig_weight = orig_weight.view(self.out_features, self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0])
delta_weight = self.get_delta_weight(active_adapter)
orig_weight = torch.mm(orig_weight, delta_weight)
orig_weight = orig_weight.view(self.out_features, self.in_features, self.base_layer.kernel_size[0], self.base_layer.kernel_size[0])
if not torch.isfinite(orig_weight).all():
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
self.base_layer.weight.data = orig_weight
else:
orig_weight = base_layer.weight.data
orig_weight = orig_weight.view(self.out_features, self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0])
delta_weight = self.get_delta_weight(active_adapter)
orig_weight = torch.mm(orig_weight, delta_weight)
orig_weight = orig_weight.view(self.out_features, self.in_features, self.base_layer.kernel_size[0], self.base_layer.kernel_size[0])
self.base_layer.weight.data = orig_weight
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.hra_u.keys():
orig_weight = self.get_base_layer().weight.data.clone()
orig_weight = orig_weight.view(self.out_features, self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0])
delta_weight = self.get_delta_weight(active_adapter, reverse=True)
orig_weight = torch.mm(orig_weight, delta_weight)
orig_weight = orig_weight.view(self.out_features, self.in_features, self.base_layer.kernel_size[0], self.base_layer.kernel_size[0])
self.get_base_layer().weight.data = orig_weight
def get_delta_weight(self, adapter_name: str, reverse: bool=False) -> torch.Tensor:
rank = self.hra_r[adapter_name]
apply_GS = self.hra_apply_GS[adapter_name]
opt_u = self.hra_u[adapter_name]
shape = opt_u.shape
if apply_GS:
weight = [(opt_u[:, 0] / opt_u[:, 0].norm()).view(-1, 1)]
for i in range(1, rank):
ui = opt_u[:, i].view(-1, 1)
for j in range(i):
ui = ui - weight[j].t() @ ui * weight[j]
weight.append((ui / ui.norm()).view(-1, 1))
weight = torch.cat(weight, dim=1)
weight = torch.eye(shape[0], device=opt_u.device, dtype=opt_u.dtype) - 2 * weight @ weight.t()
else:
opt_u = opt_u / opt_u.norm(dim=0)
weight = torch.eye(shape[0], device=opt_u.device, dtype=opt_u.dtype)
if reverse:
indices = range(rank - 1, -1, -1)
else:
indices = range(rank)
for i in indices:
ui = opt_u[:, i].view(-1, 1)
weight = weight @ (torch.eye(shape[0], device=opt_u.device, dtype=opt_u.dtype) - 2 * ui @ ui.t())
return weight
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
new_weight = torch.eye(self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0], device=x.device)
for active_adapter in self.active_adapters:
if active_adapter not in self.hra_u.keys():
continue
delta_weight = self.get_delta_weight(active_adapter)
new_weight = torch.mm(new_weight, delta_weight)
x = x.to(self.base_layer.weight.data.dtype)
orig_weight = self.base_layer.weight.data
orig_weight = orig_weight.view(self.out_features, self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0])
new_weight = torch.mm(orig_weight, new_weight)
new_weight = new_weight.view(self.out_features, self.in_features, self.base_layer.kernel_size[0], self.base_layer.kernel_size[0])
result = F.conv2d(input=x, weight=new_weight, bias=self.base_layer.bias, padding=self.base_layer.padding[0], stride=self.base_layer.stride[0])
result = result.to(previous_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'hra.' + rep
# File: peft-main/src/peft/tuners/hra/model.py
import warnings
from dataclasses import asdict
from enum import Enum
from typing import List, Optional
import torch
from torch import nn
from tqdm import tqdm
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
from peft.utils import TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, _get_submodules
from .config import HRAConfig
from .layer import HRAConv2d, HRALayer, HRALinear
class HRAModel(BaseTuner):
prefix: str = 'hra_'
def _check_new_adapter_config(self, config: HRAConfig) -> None:
if len(self.peft_config) > 1 and config.bias != 'none':
raise ValueError(f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.")
@staticmethod
def _check_target_module_exists(hra_config, key):
return check_target_module_exists(hra_config, key)
def _create_and_replace(self, hra_config, adapter_name, target, target_name, parent, current_key, **optional_kwargs):
if current_key is None:
raise ValueError("Current Key shouldn't be `None`")
bias = hasattr(target, 'bias') and target.bias is not None
kwargs = {'r': hra_config.r, 'apply_GS': hra_config.apply_GS, 'init_weights': hra_config.init_weights}
kwargs['bias'] = bias
if not isinstance(target, HRALayer):
new_module = self._create_new_module(hra_config, adapter_name, target, **kwargs)
if adapter_name not in self.active_adapters:
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
else:
target.update_layer(adapter_name, r=hra_config.r, apply_GS=hra_config.apply_GS, init_weights=hra_config.init_weights)
def _replace_module(self, parent, child_name, new_module, child):
setattr(parent, child_name, new_module)
if hasattr(child, 'base_layer'):
child = child.base_layer
if not hasattr(new_module, 'base_layer'):
new_module.weight = child.weight
if hasattr(child, 'bias'):
new_module.bias = child.bias
if getattr(child, 'state', None) is not None:
if hasattr(new_module, 'base_layer'):
new_module.base_layer.state = child.state
else:
new_module.state = child.state
new_module.to(child.weight.device)
for (name, module) in new_module.named_modules():
if self.prefix in name:
module.to(child.weight.device)
def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
for (n, p) in model.named_parameters():
if self.prefix not in n:
p.requires_grad = False
for active_adapter in self.active_adapters:
bias = self.peft_config[active_adapter].bias
if bias == 'none':
continue
if bias == 'all':
for (n, p) in model.named_parameters():
if 'bias' in n:
p.requires_grad = True
elif bias == 'hra_only':
for (name, m) in model.named_modules():
if isinstance(m, HRALayer) and hasattr(m, 'bias') and (m.bias is not None):
m.bias.requires_grad = True
else:
raise NotImplementedError(f'Requested bias: {bias}, is not implemented.')
@staticmethod
def _create_new_module(hra_config, adapter_name, target, **kwargs):
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Linear):
new_module = HRALinear(target, adapter_name, **kwargs)
elif isinstance(target_base_layer, torch.nn.Conv2d):
new_module = HRAConv2d(target, adapter_name, **kwargs)
else:
raise ValueError(f'Target module {target} is not supported. Currently, only `torch.nn.Linear` and `torch.nn.Conv2d` are supported.')
return new_module
def __getattr__(self, name: str):
try:
return super().__getattr__(name)
except AttributeError:
if name == 'base_model':
raise
return getattr(self.model, name)
def get_peft_config_as_dict(self, inference: bool=False):
config_dict = {}
for (key, value) in self.peft_config.items():
config = {k: v.value if isinstance(v, Enum) else v for (k, v) in asdict(value).items()}
if inference:
config['inference_mode'] = True
config_dict[key] = config
return config
def _set_adapter_layers(self, enabled=True):
for module in self.model.modules():
if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
module.enable_adapters(enabled)
def enable_adapter_layers(self):
self._set_adapter_layers(enabled=True)
def disable_adapter_layers(self):
for active_adapter in self.active_adapters:
val = self.peft_config[active_adapter].bias
if val != 'none':
msg = f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same output as the the base model would without adaption."
warnings.warn(msg)
self._set_adapter_layers(enabled=False)
def set_adapter(self, adapter_name):
for module in self.model.modules():
if isinstance(module, HRALayer):
if module.merged:
warnings.warn('Adapter cannot be set when the model is merged. Unmerging the model first.')
module.unmerge()
module.set_adapter(adapter_name)
self.active_adapter = adapter_name
@staticmethod
def _prepare_adapter_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config['model_type'] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:
raise ValueError('Please specify `target_modules` in `peft_config`')
peft_config.target_modules = set(TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config['model_type']])
return peft_config
def _unload_and_optionally_merge(self, merge=True, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[List[str]]=None):
self._unloading_checks(adapter_names)
key_list = [key for (key, _) in self.model.named_modules() if self.prefix not in key]
desc = 'Unloading ' + ('and merging ' if merge else '') + 'model'
for key in tqdm(key_list, disable=not progressbar, desc=desc):
try:
(parent, target, target_name) = _get_submodules(self.model, key)
except AttributeError:
continue
if hasattr(target, 'base_layer'):
if merge:
target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
self._replace_module(parent, target_name, target.get_base_layer(), target)
elif isinstance(target, ModulesToSaveWrapper):
setattr(parent, target_name, target.modules_to_save[target.active_adapter])
return self.model
def delete_adapter(self, adapter_name: str) -> None:
if adapter_name not in list(self.peft_config.keys()):
raise ValueError(f'Adapter {adapter_name} does not exist')
del self.peft_config[adapter_name]
key_list = [key for (key, _) in self.model.named_modules() if self.prefix not in key]
new_adapter = None
for key in key_list:
(_, target, _) = _get_submodules(self.model, key)
if isinstance(target, HRALayer):
target.delete_adapter(adapter_name)
if new_adapter is None:
new_adapter = target.active_adapters[:]
self.active_adapter = new_adapter or []
def merge_and_unload(self, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[List[str]]=None) -> torch.nn.Module:
return self._unload_and_optionally_merge(progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names)
def unload(self) -> torch.nn.Module:
return self._unload_and_optionally_merge(merge=False)
# File: peft-main/src/peft/tuners/ia3/__init__.py
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from .config import IA3Config
from .layer import Conv2d, IA3Layer, Linear
from .model import IA3Model
__all__ = ['Conv2d', 'IA3Config', 'IA3Layer', 'IA3Model', 'Linear']
def __getattr__(name):
if name == 'Linear8bitLt' and is_bnb_available():
from .bnb import Linear8bitLt
return Linear8bitLt
if name == 'Linear4bit' and is_bnb_4bit_available():
from .bnb import Linear4bit
return Linear4bit
raise AttributeError(f'module {__name__} has no attribute {name}')
# File: peft-main/src/peft/tuners/ia3/bnb.py
from typing import Any
import torch
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from .layer import IA3Layer
if is_bnb_available():
class Linear8bitLt(torch.nn.Module, IA3Layer):
def __init__(self, base_layer: torch.nn.Module, adapter_name: str, is_feedforward: bool, init_ia3_weights: bool=True, **kwargs) -> None:
super().__init__()
IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)
self.get_base_layer().weight.requires_grad = False
self._active_adapter = adapter_name
self.update_layer(adapter_name, init_ia3_weights)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
if self.disable_adapters:
return self.base_layer(x)
ia3_scaling = 1
for active_adapter in self.active_adapters:
if active_adapter not in self.ia3_l.keys():
continue
ia3_scaling *= self.ia3_l[active_adapter].flatten()
requires_conversion = not torch.is_autocast_enabled() and x.dtype != torch.float32
if requires_conversion:
x = x.float()
if self.is_feedforward:
result = self.base_layer(x * ia3_scaling)
expected_dtype = result.dtype
else:
result = self.base_layer(x)
expected_dtype = result.dtype
result = result * ia3_scaling
if requires_conversion:
result = result.to(expected_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'ia3.' + rep
if is_bnb_4bit_available():
class Linear4bit(torch.nn.Module, IA3Layer):
def __init__(self, base_layer: torch.nn.Module, adapter_name: str, is_feedforward: bool, init_ia3_weights: bool=True, **kwargs) -> None:
super().__init__()
IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)
self.get_base_layer().weight.requires_grad = False
self._active_adapter = adapter_name
self.update_layer(adapter_name, init_ia3_weights)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
if self.disable_adapters:
return self.base_layer(x)
ia3_scaling = 1
for active_adapter in self.active_adapters:
if active_adapter not in self.ia3_l.keys():
continue
ia3_scaling *= self.ia3_l[active_adapter].flatten()
requires_conversion = not torch.is_autocast_enabled() and x.dtype != torch.float32
if requires_conversion:
x = x.float()
if self.is_feedforward:
result = self.base_layer(x * ia3_scaling)
expected_dtype = result.dtype
else:
result = self.base_layer(x)
expected_dtype = result.dtype
result = result * ia3_scaling
result = result.clone()
if requires_conversion:
result = result.to(expected_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'ia3.' + rep
# File: peft-main/src/peft/tuners/ia3/config.py
from dataclasses import dataclass, field
from typing import List, Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class IA3Config(PeftConfig):
target_modules: Optional[Union[List[str], str]] = field(default=None, metadata={'help': "List of module names or regex expression of the module names to replace with (IA)³.For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'.This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer.If not specified, modules will be chosen according to the model architecture, If the architecture is not known, an error will be raised -- in this case, you should specify the target modules manually."})
feedforward_modules: Optional[Union[List[str], str]] = field(default=None, metadata={'help': "List of module names or a regex expression of module names which are feedforwardFor example, ['output.dense']"})
fan_in_fan_out: bool = field(default=False, metadata={'help': 'Set this to True if the layer to replace stores weight like (fan_in, fan_out)'})
modules_to_save: Optional[List[str]] = field(default=None, metadata={'help': 'List of modules apart from (IA)^3 layers to be set as trainable and saved in the final checkpoint. For example, in Sequence Classification or Token Classification tasks, the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.'})
init_ia3_weights: bool = field(default=True, metadata={'help': 'Whether to initialize the vectors in the (IA)^3 layers.'})
def __post_init__(self):
self.peft_type = PeftType.IA3
self.target_modules = set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
self.feedforward_modules = set(self.feedforward_modules) if isinstance(self.feedforward_modules, list) else self.feedforward_modules
if isinstance(self.feedforward_modules, set) and isinstance(self.target_modules, set):
if not self.feedforward_modules.issubset(self.target_modules):
raise ValueError('`feedforward_modules` should be a subset of `target_modules`')
# File: peft-main/src/peft/tuners/ia3/layer.py
import warnings
from typing import Any, List, Optional
import torch
import torch.nn as nn
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from peft.utils import transpose
class IA3Layer(BaseTunerLayer):
adapter_layer_names = ('ia3_l',)
def __init__(self, base_layer: nn.Module, is_feedforward: bool, **kwargs) -> None:
self.base_layer = base_layer
self.ia3_l = nn.ParameterDict({})
self._disable_adapters = False
self.merged_adapters = []
self.is_feedforward = is_feedforward
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
(in_features, out_features) = (base_layer.in_features, base_layer.out_features)
elif isinstance(base_layer, nn.Conv2d):
(in_features, out_features) = (base_layer.in_channels, base_layer.out_channels)
elif isinstance(base_layer, nn.Embedding):
(in_features, out_features) = (base_layer.num_embeddings, base_layer.embedding_dim)
elif isinstance(base_layer, Conv1D):
(in_features, out_features) = base_layer.weight.ds_shape if hasattr(base_layer.weight, 'ds_shape') else base_layer.weight.shape
else:
raise ValueError(f'Unsupported layer type {type(base_layer)}')
self.in_features = in_features
self.out_features = out_features
def update_layer(self, adapter_name, init_ia3_weights):
if self.is_feedforward:
weight = torch.randn((1, self.in_features))
else:
weight = torch.randn((self.out_features, 1))
self.ia3_l[adapter_name] = nn.Parameter(weight)
if init_ia3_weights:
self.reset_ia3_parameters(adapter_name)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def reset_ia3_parameters(self, adapter_name):
if adapter_name in self.ia3_l.keys():
nn.init.constant_(self.ia3_l[adapter_name], 1.0)
class Linear(nn.Module, IA3Layer):
def __init__(self, base_layer: nn.Module, adapter_name: str, fan_in_fan_out: bool=False, is_feedforward: bool=False, is_target_conv_1d_layer: bool=False, init_ia3_weights: bool=True, **kwargs) -> None:
super().__init__()
IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)
self.fan_in_fan_out = fan_in_fan_out
self.is_target_conv_1d_layer = is_target_conv_1d_layer
self._active_adapter = adapter_name
self.update_layer(adapter_name, init_ia3_weights)
def merge(self, safe_merge: bool=False, adapter_names: Optional[List[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter in self.ia3_l.keys():
base_layer = self.get_base_layer()
ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out)
orig_dtype = base_layer.weight.data.dtype
if safe_merge:
orig_weights = base_layer.weight.data
orig_weights = torch.mul(orig_weights, ia3_l)
if not torch.isfinite(orig_weights).all():
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
base_layer.weight.data = orig_weights.to(orig_dtype)
else:
base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_l).to(orig_dtype)
if not self.is_feedforward and base_layer.bias is not None:
scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
orig_dtype = base_layer.bias.data.dtype
base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data).to(orig_dtype)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
warnings.warn('Unmerge result can be inaccurate for (IA)^3.')
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.ia3_l.keys():
base_layer = self.get_base_layer()
ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out) + 1e-08
orig_dtype = base_layer.weight.data.dtype
base_layer.weight.data = torch.div(base_layer.weight.data, ia3_l).to(orig_dtype)
if not self.is_feedforward and base_layer.bias is not None:
scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
orig_dtype = base_layer.bias.data.dtype
base_layer.bias.data = torch.div(base_layer.bias.data, scaling.data + 1e-08).to(orig_dtype)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
dtype = previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
ia3_scaling = 1
for active_adapter in self.active_adapters:
if active_adapter not in self.ia3_l.keys():
continue
dtype = self.ia3_l[active_adapter].dtype
ia3_scaling *= self.ia3_l[active_adapter].flatten()
if self.is_feedforward:
x = x.to(dtype)
interm = (x * ia3_scaling).to(previous_dtype)
result = self.base_layer(interm, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
result_dtype = result.dtype
result = (result * ia3_scaling).to(result_dtype)
return result
class Conv2d(nn.Module, IA3Layer):
def __init__(self, base_layer: nn.Module, adapter_name: str, fan_in_fan_out: bool=False, is_feedforward: bool=False, init_ia3_weights: bool=True, **kwargs) -> None:
super().__init__()
IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(adapter_name, init_ia3_weights)
def update_layer(self, adapter_name, init_ia3_weights):
if self.is_feedforward:
weight = torch.randn((1, self.in_features, 1, 1))
else:
weight = torch.randn((1, self.out_features, 1, 1))
self.ia3_l[adapter_name] = nn.Parameter(weight)
if init_ia3_weights:
self.reset_ia3_parameters(adapter_name)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def merge(self, safe_merge: bool=False, adapter_names: Optional[List[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter in self.ia3_l.keys():
base_layer = self.get_base_layer()
ia3_scaling = self.ia3_l[active_adapter].data
if not self.is_feedforward:
ia3_scaling = ia3_scaling.permute(1, 0, 2, 3)
if safe_merge:
output_weight = torch.mul(base_layer.weight.data, ia3_scaling).clone()
if not torch.isfinite(output_weight).all():
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
base_layer.weight.data = output_weight
else:
base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_scaling)
if not self.is_feedforward and base_layer.bias is not None:
scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
warnings.warn('Unmerge result can be inaccurate for (IA)^3.')
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.ia3_l.keys():
base_layer = self.get_base_layer()
ia3_scaling = self.ia3_l[active_adapter].data
if not self.is_feedforward:
ia3_scaling = ia3_scaling.permute(1, 0, 2, 3)
base_layer.weight.data = torch.div(base_layer.weight.data, ia3_scaling + 1e-08)
if not self.is_feedforward and base_layer.bias is not None:
scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape)
base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
dtype = previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
ia3_scaling = 1
for active_adapter in self.active_adapters:
if active_adapter not in self.ia3_l.keys():
continue
dtype = self.ia3_l[active_adapter].dtype
ia3_scaling *= self.ia3_l[active_adapter]
if self.is_feedforward:
x = x.to(dtype)
interm = (x * ia3_scaling).to(self.get_base_layer().weight.dtype)
result = self.base_layer(interm, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
result = result.to(dtype) * ia3_scaling
result = result.to(previous_dtype)
return result
# File: peft-main/src/peft/tuners/ia3/model.py
from __future__ import annotations
import re
import warnings
from dataclasses import asdict, replace
from enum import Enum
from typing import Optional
import torch
from torch import nn
from transformers.pytorch_utils import Conv1D
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
from peft.utils import TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, _freeze_adapter, _get_submodules
from .layer import Conv2d, IA3Layer, Linear
class IA3Model(BaseTuner):
prefix: str = 'ia3_'
def __init__(self, model, config, adapter_name):
super().__init__(model, config, adapter_name)
@staticmethod
def _create_new_module(ia3_config, adapter_name, target, **kwargs):
if is_bnb_available():
import bitsandbytes as bnb
from .bnb import Linear8bitLt
if is_bnb_4bit_available():
from .bnb import Linear4bit
loaded_in_8bit = kwargs.pop('loaded_in_8bit', False)
loaded_in_4bit = kwargs.pop('loaded_in_4bit', False)
is_feedforward = kwargs.pop('is_feedforward', False)
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
eightbit_kwargs = kwargs.copy()
eightbit_kwargs.update({'has_fp16_weights': target_base_layer.state.has_fp16_weights, 'memory_efficient_backward': target_base_layer.state.memory_efficient_backward, 'threshold': target_base_layer.state.threshold, 'index': target_base_layer.index})
new_module = Linear8bitLt(target, adapter_name, is_feedforward=is_feedforward, **eightbit_kwargs)
elif loaded_in_4bit and isinstance(target_base_layer, bnb.nn.Linear4bit):
fourbit_kwargs = kwargs.copy()
fourbit_kwargs.update({'compute_dtype': target_base_layer.compute_dtype, 'compress_statistics': target_base_layer.weight.compress_statistics, 'quant_type': target_base_layer.weight.quant_type})
new_module = Linear4bit(target, adapter_name, is_feedforward=is_feedforward, **fourbit_kwargs)
elif isinstance(target, torch.nn.Conv2d):
new_module = Conv2d(target, adapter_name, is_feedforward=is_feedforward, **kwargs)
elif isinstance(target_base_layer, torch.nn.Linear):
if kwargs['fan_in_fan_out']:
warnings.warn('fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. Setting fan_in_fan_out to False.')
kwargs['fan_in_fan_out'] = ia3_config.fan_in_fan_out = False
new_module = Linear(target, adapter_name, is_feedforward=is_feedforward, **kwargs)
elif isinstance(target_base_layer, Conv1D):
if not kwargs['fan_in_fan_out']:
warnings.warn('fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True.')
kwargs['fan_in_fan_out'] = ia3_config.fan_in_fan_out = True
new_module = Linear(target, adapter_name, is_feedforward=is_feedforward, is_target_conv_1d_layer=True, **kwargs)
else:
raise ValueError(f'Target module {target} is not supported. Currently, only `torch.nn.Linear`, `torch.nn.Conv2d`, and `Conv1D` are supported.')
return new_module
@staticmethod
def _check_target_module_exists(ia3_config, key):
return check_target_module_exists(ia3_config, key)
def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
for (n, p) in model.named_parameters():
if self.prefix not in n:
p.requires_grad = False
def _create_and_replace(self, ia3_config, adapter_name, target, target_name, parent, current_key):
is_feedforward = self._check_target_module_feedforward(ia3_config, current_key)
kwargs = {'fan_in_fan_out': ia3_config.fan_in_fan_out, 'init_ia3_weights': ia3_config.init_ia3_weights, 'is_feedforward': is_feedforward, 'loaded_in_8bit': getattr(self.model, 'is_loaded_in_8bit', False), 'loaded_in_4bit': getattr(self.model, 'is_loaded_in_4bit', False)}
if isinstance(target, IA3Layer):
target.update_layer(adapter_name, ia3_config.init_ia3_weights)
else:
new_module = self._create_new_module(ia3_config, adapter_name, target, **kwargs)
if adapter_name not in self.active_adapters:
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
@staticmethod
def _check_target_module_feedforward(ia3_config, key) -> bool:
if isinstance(ia3_config.feedforward_modules, str):
is_feedforward = bool(re.fullmatch(ia3_config.feedforward_modules, key))
else:
is_feedforward = any((key.endswith(target_key) for target_key in ia3_config.feedforward_modules))
return is_feedforward
def _replace_module(self, parent, child_name, new_module, child):
setattr(parent, child_name, new_module)
if hasattr(child, 'base_layer'):
child = child.base_layer
if not hasattr(new_module, 'base_layer'):
new_module.weight = child.weight
if hasattr(child, 'bias'):
new_module.bias = child.bias
if getattr(child, 'state', None) is not None:
if hasattr(new_module, 'base_layer'):
new_module.base_layer.state = child.state
else:
new_module.state = child.state
new_module.to(child.weight.device)
for (name, module) in new_module.named_modules():
if self.prefix in name:
module.to(child.weight.device)
def __getattr__(self, name: str):
try:
return super().__getattr__(name)
except AttributeError:
if name == 'model':
raise
return getattr(self.model, name)
def get_peft_config_as_dict(self, inference: bool=False):
config_dict = {}
for (key, value) in self.peft_config.items():
config = {k: v.value if isinstance(v, Enum) else v for (k, v) in asdict(value).items()}
if inference:
config['inference_mode'] = True
config_dict[key] = config
return config
def _set_adapter_layers(self, enabled=True):
for module in self.model.modules():
if isinstance(module, (IA3Layer, ModulesToSaveWrapper)):
module.enable_adapters(enabled)
def enable_adapter_layers(self) -> None:
self._set_adapter_layers(enabled=True)
def disable_adapter_layers(self) -> None:
self._set_adapter_layers(enabled=False)
def set_adapter(self, adapter_name: str | list[str]) -> None:
for module in self.model.modules():
if isinstance(module, IA3Layer):
if module.merged:
warnings.warn('Adapter cannot be set when the model is merged. Unmerging the model first.')
module.unmerge()
module.set_adapter(adapter_name)
self.active_adapter = adapter_name
@staticmethod
def _prepare_adapter_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config['model_type'] not in TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING:
raise ValueError('Please specify `target_modules` in `peft_config`')
peft_config.target_modules = set(TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING[model_config['model_type']])
if peft_config.feedforward_modules is None:
if model_config['model_type'] not in TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING:
raise ValueError('Please specify `feedforward_modules` in `peft_config`')
peft_config.feedforward_modules = set(TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING[model_config['model_type']])
return peft_config
def _unload_and_optionally_merge(self, merge: bool=True, safe_merge: bool=False, adapter_names: Optional[list[str]]=None):
if getattr(self.model, 'is_loaded_in_8bit', False):
raise ValueError('Cannot merge ia3 layers when the model is loaded in 8-bit mode')
if getattr(self.model, 'is_loaded_in_4bit', False):
raise ValueError('Cannot merge ia3 layers when the model is loaded in 4-bit mode')
self._unloading_checks(adapter_names)
key_list = [key for (key, _) in self.model.named_modules() if self.prefix not in key]
for key in key_list:
try:
(parent, target, target_name) = _get_submodules(self.model, key)
except AttributeError:
continue
if hasattr(target, 'base_layer'):
if merge:
target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
self._replace_module(parent, target_name, target.get_base_layer(), target)
elif isinstance(target, ModulesToSaveWrapper):
new_module = target.modules_to_save[target.active_adapter]
if hasattr(new_module, 'base_layer'):
if merge:
new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
new_module = new_module.get_base_layer()
setattr(parent, target_name, new_module)
return self.model
def merge_and_unload(self, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> torch.nn.Module:
return self._unload_and_optionally_merge(safe_merge=safe_merge, adapter_names=adapter_names)
def unload(self) -> torch.nn.Module:
return self._unload_and_optionally_merge(merge=False)
def delete_adapter(self, adapter_name: str) -> None:
if adapter_name not in self.peft_config:
raise ValueError(f'Adapter {adapter_name} does not exist')
del self.peft_config[adapter_name]
key_list = [key for (key, _) in self.model.named_modules() if self.prefix not in key]
new_adapter = None
for key in key_list:
(_, target, _) = _get_submodules(self.model, key)
if isinstance(target, IA3Layer):
target.delete_adapter(adapter_name)
if new_adapter is None:
new_adapter = target.active_adapters[:]
self.active_adapter = new_adapter or []
def _check_add_weighted_adapter(self, adapters: list[str]) -> tuple[str, str]:
for adapter in adapters:
if adapter not in self.peft_config:
raise ValueError(f'Adapter {adapter} does not exist')
modules_to_save_wrappers = [module for module in self.modules() if isinstance(module, ModulesToSaveWrapper)]
if any((sum((adapter in wrapper.modules_to_save for adapter in adapters)) > 1 for wrapper in modules_to_save_wrappers)):
raise ValueError('Cannot add weighted adapters targeting the same module with modules_to_save.')
target_module_types = {type(self.peft_config[adapter].target_modules) for adapter in adapters}
feedforward_module_types = {type(self.peft_config[adapter].feedforward_modules) for adapter in adapters}
if len(target_module_types) > 1 or len(feedforward_module_types) > 1:
raise ValueError('All adapter configs should have the same type for target and feedforward modules.')
if str in target_module_types:
new_target_modules = '|'.join((f'({self.peft_config[adapter].target_modules})' for adapter in adapters))
else:
new_target_modules = set.union(*(self.peft_config[adapter].target_modules for adapter in adapters))
if str in feedforward_module_types:
new_feedforward_modules = '|'.join((f'({self.peft_config[adapter].feedforward_modules})' for adapter in adapters))
else:
new_feedforward_modules = set.union(*(self.peft_config[adapter].feedforward_modules for adapter in adapters))
return (new_target_modules, new_feedforward_modules)
def add_weighted_adapter(self, adapters: list[str], weights: list[float], adapter_name: str) -> None:
if adapter_name in list(self.peft_config.keys()):
return
(new_target_modules, new_feedforward_modules) = self._check_add_weighted_adapter(adapters=adapters)
self.peft_config[adapter_name] = replace(self.peft_config[adapters[0]], target_modules=new_target_modules, feedforward_modules=new_feedforward_modules)
self.inject_adapter(self.model, adapter_name)
_freeze_adapter(self.model, adapter_name)
key_list = [key for (key, _) in self.model.named_modules() if self.prefix not in key]
for key in key_list:
(_, target, _) = _get_submodules(self.model, key)
if isinstance(target, IA3Layer):
if adapter_name in target.ia3_l:
target_ia3_l = target.ia3_l[adapter_name]
else:
continue
target_ia3_l.data = target_ia3_l.data.zero_()
for (adapter, weight) in zip(adapters, weights):
if adapter in target.ia3_l:
current_adapter_ia3_l = target.ia3_l[adapter]
else:
continue
target_ia3_l.data += current_adapter_ia3_l.data * weight
# File: peft-main/src/peft/tuners/ln_tuning/config.py
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class LNTuningConfig(PeftConfig):
target_modules: Optional[Union[list[str], str]] = field(default=None, metadata={'help': "List of module names or regex expression of the module names to replace with LNTuning.For example, '.*decoder.*' or '.*encoder.*'. If not specified, modules will be chosen according to the model architecture, If the architecture is not known, an error will be raised -- in this case, you shoud specify the target modules manually."})
modules_to_save: Optional[Union[list[str], str]] = field(default=None, metadata={'help': 'List of modules to be set as trainable and saved in the final checkpoint. For example, in Sequence Classification or Token Classification tasks, the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.'})
def __post_init__(self):
self.peft_type = PeftType.LN_TUNING
# File: peft-main/src/peft/tuners/ln_tuning/layer.py
import warnings
from copy import deepcopy
from typing import List, Optional
import torch
import torch.nn as nn
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
class LNTuningLayer(nn.Module, BaseTunerLayer):
adapter_layer_names = ('ln_tuning_layers',)
def __init__(self, base_layer: nn.Module, adapter_name: str):
super().__init__()
self.base_layer = base_layer
self.ln_tuning_layers = nn.ModuleDict({})
self.update_layer(self.base_layer, adapter_name)
self._active_adapter = adapter_name
self.merged_adapters = []
def update_layer(self, layer: nn.Module, adapter_name: str):
self.ln_tuning_layers[adapter_name] = deepcopy(layer)
def enable_adapters(self, enabled: bool) -> None:
if enabled:
self.set_adapter(self.active_adapters)
self._disable_adapters = False
else:
if self.merged:
self.unmerge()
for layer_name in self.adapter_layer_names:
layer = getattr(self, layer_name)
layer.requires_grad_(False)
self._disable_adapters = True
def merge(self, adapter_names: Optional[List[str]]=None):
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
if len(adapter_names) > 1:
raise ValueError(f'Trying to merge {len(adapter_names)} adapters, but LN tuning does not allow merging more than one adapter at a time')
merged_adapters = set(self.merged_adapters)
if merged_adapters:
warnings.warn(f'Already merged with {merged_adapters}. Unmerging first.')
self.unmerge()
(self.base_layer, self.ln_tuning_layers[adapter_names[0]]) = (self.ln_tuning_layers[adapter_names[0]], self.base_layer)
self.merged_adapters.append(adapter_names[0])
def unmerge(self):
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
merged_name = self.merged_adapters.pop()
(self.base_layer, self.ln_tuning_layers[merged_name]) = (self.ln_tuning_layers[merged_name], self.base_layer)
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
if len(self.active_adapters) != 1:
raise ValueError(f'Trying to run forward with {len(self.active_adapters)} active adapters, but LN tuning does not allow inference with more than one adapter at a time')
active_adapter = self.active_adapters[0]
result = self.ln_tuning_layers[active_adapter](x, *args, **kwargs)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'ln_tuning.' + rep
# File: peft-main/src/peft/tuners/ln_tuning/model.py
from __future__ import annotations
import warnings
from typing import Optional
from torch import nn
from torch.nn.modules import Module
from tqdm import tqdm
from peft.config import PeftConfig
from peft.tuners.tuners_utils import BaseTuner, _get_submodules, check_target_module_exists
from peft.utils import TRANSFORMERS_MODELS_TO_LNTUNING_TARGET_MODULES_MAPPING, ModulesToSaveWrapper
from .layer import LNTuningLayer
class LNTuningModel(BaseTuner):
prefix: str = 'ln_tuning_'
def __init__(self, model, config, adapter_name) -> None:
super().__init__(model, config, adapter_name)
def __getattr__(self, name: str):
try:
return super().__getattr__(name)
except AttributeError:
if name == 'model':
raise
return getattr(self.model, name)
@staticmethod
def _prepare_adapter_config(peft_config: PeftConfig, model_config: dict) -> PeftConfig:
if peft_config.target_modules is None:
if model_config['model_type'] not in TRANSFORMERS_MODELS_TO_LNTUNING_TARGET_MODULES_MAPPING:
raise ValueError('Please specify `target_modules` in `peft_config`')
peft_config.target_modules = set(TRANSFORMERS_MODELS_TO_LNTUNING_TARGET_MODULES_MAPPING[model_config['model_type']])
return peft_config
def _create_and_replace(self, peft_config: PeftConfig, adapter_name: str, target: Module, target_name: str, parent: Module, current_key: str) -> None:
new_module = self._create_new_module(peft_config, target, adapter_name)
if adapter_name != self.active_adapter:
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
def _create_new_module(self, peft_config: PeftConfig, target: Module, adapter_name: str) -> Module:
if not isinstance(target, LNTuningLayer):
new_module = LNTuningLayer(target, adapter_name)
else:
new_module = target
new_module.update_layer(target.base_layer, adapter_name)
return new_module
def _replace_module(self, parent: Module, child_name: str, new_module: Module, child: Module) -> None:
setattr(parent, child_name, new_module)
if hasattr(child, 'base_layer'):
child = child.base_layer
if getattr(child, 'state', None) is not None:
if hasattr(new_module, 'base_layer'):
new_module.base_layer.state = child.state
else:
new_module.state = child.state
new_module.to(child.weight.device)
for (name, module) in new_module.named_modules():
weight = child.qweight if hasattr(child, 'qweight') else child.weight
module.to(weight.device)
def _mark_only_adapters_as_trainable(self, model: Module):
for (n, p) in model.named_parameters():
if self.prefix not in n:
p.requires_grad = False
else:
p.requires_grad = True
def _check_target_module_exists(self, peft_config: PeftConfig, key: str) -> bool:
return check_target_module_exists(peft_config, key)
def _set_adapter_layers(self, enabled: bool) -> None:
for module in self.model.modules():
if isinstance(module, (LNTuningLayer, ModulesToSaveWrapper)):
module.enable_adapters(enabled)
def enable_adapter_layers(self) -> None:
self._set_adapter_layers(enabled=True)
def disable_adapter_layers(self) -> None:
self._set_adapter_layers(enabled=False)
def set_adapter(self, adapter_name: str) -> None:
for module in self.model.modules():
if isinstance(module, LNTuningLayer):
if module.merged:
warnings.warn('Adapter cannot be set when the model is merged. Unmerging the model first.')
module.unmerge()
module.set_adapter(adapter_name)
self.active_adapter = adapter_name
def _unload_and_optionally_merge(self, merge=True, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[list[str]]=None):
self._unloading_checks(adapter_names)
key_list = [key for (key, _) in self.model.named_modules() if self.prefix not in key]
desc = 'Unloading adapters ' + ('and merging ' if merge else '') + 'model'
for key in tqdm(key_list, disable=not progressbar, desc=desc):
try:
(parent, target, target_name) = _get_submodules(self.model, key)
except AttributeError:
continue
if hasattr(target, 'base_layer'):
if merge:
target.merge(adapter_names)
self._replace_module(parent, target_name, target.get_base_layer(), target)
return self.model
def unload(self):
return self._unload_and_optionally_merge(merge=False)
def merge_and_unload(self, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> nn.Module:
return self._unload_and_optionally_merge(merge=True)
# File: peft-main/src/peft/tuners/loha/config.py
from dataclasses import dataclass, field
from typing import List, Optional, Union
from peft.tuners.lycoris_utils import LycorisConfig
from peft.utils import PeftType
@dataclass
class LoHaConfig(LycorisConfig):
r: int = field(default=8, metadata={'help': 'LoHa rank'})
alpha: int = field(default=8, metadata={'help': 'LoHa alpha'})
rank_dropout: float = field(default=0.0, metadata={'help': 'The dropout probability for rank dimension during training'})
module_dropout: float = field(default=0.0, metadata={'help': 'The dropout probability for disabling LoHa modules during training'})
use_effective_conv2d: bool = field(default=False, metadata={'help': 'Use parameter effective decomposition for Conv2d 3x3 with ksize > 1 ("Proposition 3" from FedPara paper)'})
target_modules: Optional[Union[List[str], str]] = field(default=None, metadata={'help': "List of module names or regex expression of the module names to replace with LoHa.For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."})
init_weights: bool = field(default=True, metadata={'help': "Whether to initialize the weights of the LoHa layers with their default initialization. Don't change this setting, except if you know exactly what you're doing."})
layers_to_transform: Optional[Union[List[int], int]] = field(default=None, metadata={'help': 'The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index.'})
layers_pattern: Optional[str] = field(default=None, metadata={'help': 'The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern.'})
modules_to_save: Optional[List[str]] = field(default=None, metadata={'help': 'List of modules apart from LoHA layers to be set as trainable and saved in the final checkpoint. For example, in Sequence Classification or Token Classification tasks, the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.'})
def __post_init__(self):
self.peft_type = PeftType.LOHA
self.target_modules = set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
# File: peft-main/src/peft/tuners/loha/layer.py
import math
from typing import Any, Set, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from peft.tuners.lycoris_utils import LycorisLayer
class LoHaLayer(nn.Module, LycorisLayer):
adapter_layer_names = ('hada_w1_a', 'hada_w1_b', 'hada_w2_a', 'hada_w2_b', 'hada_t1', 'hada_t2')
def __init__(self, base_layer: nn.Module):
super().__init__()
LycorisLayer.__init__(self, base_layer)
self.hada_w1_a = nn.ParameterDict({})
self.hada_w1_b = nn.ParameterDict({})
self.hada_w2_a = nn.ParameterDict({})
self.hada_w2_b = nn.ParameterDict({})
self.hada_t1 = nn.ParameterDict({})
self.hada_t2 = nn.ParameterDict({})
@property
def _available_adapters(self) -> Set[str]:
return {*self.hada_w1_a, *self.hada_w1_b, *self.hada_w2_a, *self.hada_w2_b, *self.hada_t1, *self.hada_t2}
def create_adapter_parameters(self, adapter_name: str, r: int, shape: Tuple[int, ...]):
if len(shape) == 4:
self.hada_t1[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3]))
self.hada_w1_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0]))
self.hada_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1]))
self.hada_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3]))
self.hada_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0]))
self.hada_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1]))
else:
self.hada_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0], r))
self.hada_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1]))
self.hada_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0], r))
self.hada_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1]))
def reset_adapter_parameters(self, adapter_name: str):
if adapter_name in self.hada_w1_a.keys():
nn.init.kaiming_uniform_(self.hada_w1_a[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.hada_w1_b[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.hada_w2_a[adapter_name], a=math.sqrt(5))
nn.init.zeros_(self.hada_w2_b[adapter_name])
if adapter_name in self.hada_t1.keys():
nn.init.kaiming_uniform_(self.hada_t1[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.hada_t2[adapter_name], a=math.sqrt(5))
def reset_adapter_parameters_random(self, adapter_name: str):
if adapter_name in self.hada_w1_a.keys():
nn.init.kaiming_uniform_(self.hada_w1_a[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.hada_w1_b[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.hada_w2_a[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.hada_w2_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.hada_t1.keys():
nn.init.kaiming_uniform_(self.hada_t1[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.hada_t2[adapter_name], a=math.sqrt(5))
def update_layer(self, adapter_name: str, r: int, alpha: float, rank_dropout: float, module_dropout: float, init_weights: bool, use_effective_conv2d: bool=False, **kwargs) -> None:
if r <= 0:
raise ValueError(f'`r` should be a positive integer value but the value passed is {r}')
self.r[adapter_name] = r
self.alpha[adapter_name] = alpha
self.scaling[adapter_name] = alpha / r
self.rank_dropout[adapter_name] = rank_dropout
self.module_dropout[adapter_name] = module_dropout
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
shape = tuple(base_layer.weight.shape)
elif isinstance(base_layer, nn.Conv2d):
use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1)
if use_effective_conv2d:
shape = (base_layer.out_channels, base_layer.in_channels, *base_layer.kernel_size)
else:
shape = (base_layer.out_channels, base_layer.in_channels * base_layer.kernel_size[0] * base_layer.kernel_size[1])
else:
raise TypeError(f'LoHa is not implemented for base layers of type {type(base_layer).__name__}')
self.create_adapter_parameters(adapter_name, r, shape)
if init_weights:
self.reset_adapter_parameters(adapter_name)
else:
self.reset_adapter_parameters_random(adapter_name)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
if adapter_name in self.hada_t1.keys():
weight = make_weight_cp(self.hada_t1[adapter_name], self.hada_w1_a[adapter_name], self.hada_w1_b[adapter_name], self.hada_t2[adapter_name], self.hada_w2_a[adapter_name], self.hada_w2_b[adapter_name], scale=torch.tensor(self.scaling[adapter_name]))
else:
weight = make_weight(self.hada_w1_a[adapter_name], self.hada_w1_b[adapter_name], self.hada_w2_a[adapter_name], self.hada_w2_b[adapter_name], scale=torch.tensor(self.scaling[adapter_name]))
base_layer = self.get_base_layer()
weight = weight.reshape(base_layer.weight.shape)
rank_dropout = self.rank_dropout[adapter_name]
if self.training and rank_dropout:
drop = (torch.rand(weight.size(0)) > rank_dropout).to(weight.dtype)
drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device)
drop /= drop.mean()
weight *= drop
return weight
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self._available_adapters:
continue
module_dropout = self.module_dropout[active_adapter]
if not self.training or (self.training and torch.rand(1) > module_dropout):
result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs)
result = result.to(previous_dtype)
return result
class Linear(LoHaLayer):
def __init__(self, base_layer: nn.Module, adapter_name: str='default', r: int=0, alpha: float=0.0, rank_dropout: float=0.0, module_dropout: float=0.0, init_weights: bool=True, **kwargs):
super().__init__(base_layer)
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs)
def _get_delta_activations(self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
delta_weight = self.get_delta_weight(adapter_name)
return F.linear(input, delta_weight)
def __repr__(self) -> str:
rep = super().__repr__()
return 'loha.' + rep
class Conv2d(LoHaLayer):
def __init__(self, base_layer: nn.Module, adapter_name: str='default', r: int=0, alpha: float=0.0, rank_dropout: float=0.0, module_dropout: float=0.0, use_effective_conv2d: bool=False, init_weights: bool=True, **kwargs):
super().__init__(base_layer)
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs)
def _get_delta_activations(self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
delta_weight = self.get_delta_weight(adapter_name)
base_layer = self.get_base_layer()
return F.conv2d(input, delta_weight, stride=base_layer.stride, padding=base_layer.padding, dilation=base_layer.dilation, groups=base_layer.groups)
def __repr__(self) -> str:
rep = super().__repr__()
return 'loha.' + rep
class HadaWeight(torch.autograd.Function):
@staticmethod
def forward(ctx, w1a, w1b, w2a, w2b, scale=torch.tensor(1)):
ctx.save_for_backward(w1a, w1b, w2a, w2b, scale)
diff_weight = w1a @ w1b * (w2a @ w2b) * scale
return diff_weight
@staticmethod
def backward(ctx, grad_out):
(w1a, w1b, w2a, w2b, scale) = ctx.saved_tensors
grad_out = grad_out * scale
temp = grad_out * (w2a @ w2b)
grad_w1a = temp @ w1b.T
grad_w1b = w1a.T @ temp
temp = grad_out * (w1a @ w1b)
grad_w2a = temp @ w2b.T
grad_w2b = w2a.T @ temp
del temp
return (grad_w1a, grad_w1b, grad_w2a, grad_w2b, None)
class HadaWeightCP(torch.autograd.Function):
@staticmethod
def forward(ctx, t1, w1a, w1b, t2, w2a, w2b, scale=torch.tensor(1)):
ctx.save_for_backward(t1, w1a, w1b, t2, w2a, w2b, scale)
rebuild1 = torch.einsum('i j k l, j r, i p -> p r k l', t1, w1b, w1a)
rebuild2 = torch.einsum('i j k l, j r, i p -> p r k l', t2, w2b, w2a)
return rebuild1 * rebuild2 * scale
@staticmethod
def backward(ctx, grad_out):
(t1, w1a, w1b, t2, w2a, w2b, scale) = ctx.saved_tensors
grad_out = grad_out * scale
temp = torch.einsum('i j k l, j r -> i r k l', t2, w2b)
rebuild = torch.einsum('i j k l, i r -> r j k l', temp, w2a)
grad_w = rebuild * grad_out
del rebuild
grad_w1a = torch.einsum('r j k l, i j k l -> r i', temp, grad_w)
grad_temp = torch.einsum('i j k l, i r -> r j k l', grad_w, w1a.T)
del grad_w, temp
grad_w1b = torch.einsum('i r k l, i j k l -> r j', t1, grad_temp)
grad_t1 = torch.einsum('i j k l, j r -> i r k l', grad_temp, w1b.T)
del grad_temp
temp = torch.einsum('i j k l, j r -> i r k l', t1, w1b)
rebuild = torch.einsum('i j k l, i r -> r j k l', temp, w1a)
grad_w = rebuild * grad_out
del rebuild
grad_w2a = torch.einsum('r j k l, i j k l -> r i', temp, grad_w)
grad_temp = torch.einsum('i j k l, i r -> r j k l', grad_w, w2a.T)
del grad_w, temp
grad_w2b = torch.einsum('i r k l, i j k l -> r j', t2, grad_temp)
grad_t2 = torch.einsum('i j k l, j r -> i r k l', grad_temp, w2b.T)
del grad_temp
return (grad_t1, grad_w1a, grad_w1b, grad_t2, grad_w2a, grad_w2b, None)
def make_weight(w1a, w1b, w2a, w2b, scale):
return HadaWeight.apply(w1a, w1b, w2a, w2b, scale)
def make_weight_cp(t1, w1a, w1b, t2, w2a, w2b, scale):
return HadaWeightCP.apply(t1, w1a, w1b, t2, w2a, w2b, scale)
# File: peft-main/src/peft/tuners/loha/model.py
import re
from itertools import chain
from typing import Dict, Type, Union
import torch
from torch import nn
from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner
from .layer import Conv2d, Linear, LoHaLayer
class LoHaModel(LycorisTuner):
prefix: str = 'hada_'
layers_mapping: Dict[Type[torch.nn.Module], Type[LoHaLayer]] = {torch.nn.Conv2d: Conv2d, torch.nn.Linear: Linear}
def _create_and_replace(self, config: LycorisConfig, adapter_name: str, target: Union[LoHaLayer, nn.Module], target_name: str, parent: nn.Module, current_key: str) -> None:
pattern_keys = list(chain(config.rank_pattern.keys(), config.alpha_pattern.keys()))
target_name_key = next(filter(lambda key: re.match(f'(.*\\.)?{key}$', current_key), pattern_keys), target_name)
kwargs = config.to_dict()
kwargs['r'] = config.rank_pattern.get(target_name_key, config.r)
kwargs['alpha'] = config.alpha_pattern.get(target_name_key, config.alpha)
if isinstance(target, LoHaLayer):
target.update_layer(adapter_name, **kwargs)
else:
new_module = self._create_new_module(config, adapter_name, target, **kwargs)
self._replace_module(parent, target_name, new_module, target)
# File: peft-main/src/peft/tuners/lokr/config.py
from dataclasses import dataclass, field
from typing import List, Optional, Union
from peft.tuners.lycoris_utils import LycorisConfig
from peft.utils import PeftType
@dataclass
class LoKrConfig(LycorisConfig):
r: int = field(default=8, metadata={'help': 'LoKr rank'})
alpha: int = field(default=8, metadata={'help': 'LoKr alpha'})
rank_dropout: float = field(default=0.0, metadata={'help': 'The dropout probability for rank dimension during training'})
module_dropout: float = field(default=0.0, metadata={'help': 'The dropout probability for disabling LoKr modules during training'})
use_effective_conv2d: bool = field(default=False, metadata={'help': 'Use parameter effective decomposition for Conv2d 3x3 with ksize > 1 ("Proposition 3" from FedPara paper)'})
decompose_both: bool = field(default=False, metadata={'help': 'Perform rank decomposition of left kronecker product matrix.'})
decompose_factor: int = field(default=-1, metadata={'help': 'Kronecker product decomposition factor.'})
target_modules: Optional[Union[List[str], str]] = field(default=None, metadata={'help': "List of module names or regex expression of the module names to replace with LoKr.For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."})
init_weights: bool = field(default=True, metadata={'help': "Whether to initialize the weights of the LoKr layers with their default initialization. Don't change this setting, except if you know exactly what you're doing."})
layers_to_transform: Optional[Union[List[int], int]] = field(default=None, metadata={'help': 'The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index.'})
layers_pattern: Optional[str] = field(default=None, metadata={'help': 'The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern.'})
modules_to_save: Optional[List[str]] = field(default=None, metadata={'help': 'List of modules apart from LoKr layers to be set as trainable and saved in the final checkpoint. For example, in Sequence Classification or Token Classification tasks, the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.'})
def __post_init__(self):
self.peft_type = PeftType.LOKR
# File: peft-main/src/peft/tuners/lokr/layer.py
import math
from typing import Any, Optional, Set, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from peft.tuners.lycoris_utils import LycorisLayer
class LoKrLayer(nn.Module, LycorisLayer):
adapter_layer_names = ('lokr_w1', 'lokr_w1_a', 'lokr_w1_b', 'lokr_w2', 'lokr_w2_a', 'lokr_w2_b', 'lokr_t2')
def __init__(self, base_layer: nn.Module) -> None:
super().__init__()
LycorisLayer.__init__(self, base_layer)
self.lokr_w1 = nn.ParameterDict({})
self.lokr_w1_a = nn.ParameterDict({})
self.lokr_w1_b = nn.ParameterDict({})
self.lokr_w2 = nn.ParameterDict({})
self.lokr_w2_a = nn.ParameterDict({})
self.lokr_w2_b = nn.ParameterDict({})
self.lokr_t2 = nn.ParameterDict({})
@property
def _available_adapters(self) -> Set[str]:
return {*self.lokr_w1, *self.lokr_w1_a, *self.lokr_w1_b, *self.lokr_w2, *self.lokr_w2_a, *self.lokr_w2_b, *self.lokr_t2}
def create_adapter_parameters(self, adapter_name: str, r: int, shape, use_w1: bool, use_w2: bool, use_effective_conv2d: bool):
if use_w1:
self.lokr_w1[adapter_name] = nn.Parameter(torch.empty(shape[0][0], shape[1][0]))
else:
self.lokr_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0][0], r))
self.lokr_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][0]))
if len(shape) == 4:
if use_w2:
self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1], *shape[2:]))
elif use_effective_conv2d:
self.lokr_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3]))
self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0][1]))
self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1]))
else:
self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r))
self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1] * shape[2] * shape[3]))
elif use_w2:
self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1]))
else:
self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r))
self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1]))
def reset_adapter_parameters(self, adapter_name: str):
if adapter_name in self.lokr_w1:
nn.init.zeros_(self.lokr_w1[adapter_name])
else:
nn.init.zeros_(self.lokr_w1_a[adapter_name])
nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_w2:
nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5))
else:
nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_t2:
nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5))
def reset_adapter_parameters_random(self, adapter_name: str):
if adapter_name in self.lokr_w1:
nn.init.kaiming_uniform_(self.lokr_w1[adapter_name], a=math.sqrt(5))
else:
nn.init.kaiming_uniform_(self.lokr_w1_a[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_w2:
nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5))
else:
nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_t2:
nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5))
def update_layer(self, adapter_name: str, r: int, alpha: float, rank_dropout: float, module_dropout: float, init_weights: bool, use_effective_conv2d: bool, decompose_both: bool, decompose_factor: int, **kwargs) -> None:
if r <= 0:
raise ValueError(f'`r` should be a positive integer value but the value passed is {r}')
self.r[adapter_name] = r
self.alpha[adapter_name] = alpha
self.scaling[adapter_name] = alpha / r
self.rank_dropout[adapter_name] = rank_dropout
self.module_dropout[adapter_name] = module_dropout
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
(in_dim, out_dim) = (base_layer.in_features, base_layer.out_features)
(in_m, in_n) = factorization(in_dim, decompose_factor)
(out_l, out_k) = factorization(out_dim, decompose_factor)
shape = ((out_l, out_k), (in_m, in_n))
use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2)
use_w2 = not r < max(shape[0][1], shape[1][1]) / 2
use_effective_conv2d = False
elif isinstance(base_layer, nn.Conv2d):
(in_dim, out_dim) = (base_layer.in_channels, base_layer.out_channels)
k_size = base_layer.kernel_size
(in_m, in_n) = factorization(in_dim, decompose_factor)
(out_l, out_k) = factorization(out_dim, decompose_factor)
shape = ((out_l, out_k), (in_m, in_n), *k_size)
use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2)
use_w2 = r >= max(shape[0][1], shape[1][1]) / 2
use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1)
else:
raise TypeError(f'LoKr is not implemented for base layers of type {type(base_layer).__name__}')
self.create_adapter_parameters(adapter_name, r, shape, use_w1, use_w2, use_effective_conv2d)
if init_weights:
self.reset_adapter_parameters(adapter_name)
else:
self.reset_adapter_parameters_random(adapter_name)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
if adapter_name in self.lokr_w1:
w1 = self.lokr_w1[adapter_name]
else:
w1 = self.lokr_w1_a[adapter_name] @ self.lokr_w1_b[adapter_name]
if adapter_name in self.lokr_w2:
w2 = self.lokr_w2[adapter_name]
elif adapter_name in self.lokr_t2:
w2 = make_weight_cp(self.lokr_t2[adapter_name], self.lokr_w2_a[adapter_name], self.lokr_w2_b[adapter_name])
else:
w2 = self.lokr_w2_a[adapter_name] @ self.lokr_w2_b[adapter_name]
weight = make_kron(w1, w2)
weight = weight.reshape(self.get_base_layer().weight.shape)
rank_dropout = self.rank_dropout[adapter_name]
if self.training and rank_dropout:
drop = (torch.rand(weight.size(0)) > rank_dropout).float()
drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device)
drop /= drop.mean()
weight *= drop
return weight
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self._available_adapters:
continue
module_dropout = self.module_dropout[active_adapter]
if not self.training or (self.training and torch.rand(1) > module_dropout):
result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs)
result = result.to(previous_dtype)
return result
class Linear(LoKrLayer):
def __init__(self, base_layer: nn.Module, device: Optional[Union[str, torch.device]]=None, dtype: Optional[torch.dtype]=None, adapter_name: str='default', r: int=0, alpha: float=0.0, rank_dropout: float=0.0, module_dropout: float=0.0, init_weights: bool=True, **kwargs):
super().__init__(base_layer)
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs)
def _get_delta_activations(self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
delta_weight = self.get_delta_weight(adapter_name)
return F.linear(input, delta_weight)
def __repr__(self) -> str:
rep = super().__repr__()
return 'lokr.' + rep
class Conv2d(LoKrLayer):
def __init__(self, base_layer: nn.Module, device: Optional[Union[str, torch.device]]=None, dtype: Optional[torch.dtype]=None, adapter_name: str='default', r: int=0, alpha: float=0.0, rank_dropout: float=0.0, module_dropout: float=0.0, use_effective_conv2d: bool=False, init_weights: bool=True, **kwargs):
super().__init__(base_layer)
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs)
def _get_delta_activations(self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
delta_weight = self.get_delta_weight(adapter_name)
base_layer = self.get_base_layer()
return F.conv2d(input, delta_weight, stride=base_layer.stride, padding=base_layer.padding, dilation=base_layer.dilation, groups=base_layer.groups)
def __repr__(self) -> str:
rep = super().__repr__()
return 'lokr.' + rep
def factorization(dimension: int, factor: int=-1) -> Tuple[int, int]:
if factor > 0 and dimension % factor == 0:
m = factor
n = dimension // factor
return (m, n)
if factor == -1:
factor = dimension
(m, n) = (1, dimension)
length = m + n
while m < n:
new_m = m + 1
while dimension % new_m != 0:
new_m += 1
new_n = dimension // new_m
if new_m + new_n > length or new_m > factor:
break
else:
(m, n) = (new_m, new_n)
if m > n:
(n, m) = (m, n)
return (m, n)
def make_weight_cp(t, wa, wb):
rebuild2 = torch.einsum('i j k l, i p, j r -> p r k l', t, wa, wb)
return rebuild2
def make_kron(w1, w2, scale=1.0):
if len(w2.shape) == 4:
w1 = w1.unsqueeze(2).unsqueeze(2)
w2 = w2.contiguous()
rebuild = torch.kron(w1, w2)
return rebuild * scale
# File: peft-main/src/peft/tuners/lokr/model.py
import re
from itertools import chain
from typing import Dict, Type, Union
import torch
from torch import nn
from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner
from .layer import Conv2d, Linear, LoKrLayer
class LoKrModel(LycorisTuner):
prefix: str = 'lokr_'
layers_mapping: Dict[Type[torch.nn.Module], Type[LoKrLayer]] = {torch.nn.Conv2d: Conv2d, torch.nn.Linear: Linear}
def _create_and_replace(self, config: LycorisConfig, adapter_name: str, target: Union[LoKrLayer, nn.Module], target_name: str, parent: nn.Module, current_key: str) -> None:
pattern_keys = list(chain(config.rank_pattern.keys(), config.alpha_pattern.keys()))
target_name_key = next(filter(lambda key: re.match(f'(.*\\.)?{key}$', current_key), pattern_keys), target_name)
kwargs = config.to_dict()
kwargs['r'] = config.rank_pattern.get(target_name_key, config.r)
kwargs['alpha'] = config.alpha_pattern.get(target_name_key, config.alpha)
if isinstance(target, LoKrLayer):
target.update_layer(adapter_name, **kwargs)
else:
new_module = self._create_new_module(config, adapter_name, target, **kwargs)
self._replace_module(parent, target_name, new_module, target)
# File: peft-main/src/peft/tuners/lora/__init__.py
from peft.import_utils import is_bnb_4bit_available, is_bnb_available, is_eetq_available
from .config import LoftQConfig, LoraConfig, LoraRuntimeConfig
from .gptq import QuantLinear
from .layer import Conv2d, Embedding, Linear, LoraLayer
from .model import LoraModel
__all__ = ['LoraConfig', 'LoraRuntimeConfig', 'LoftQConfig', 'Conv2d', 'Embedding', 'LoraLayer', 'Linear', 'LoraModel', 'QuantLinear']
def __getattr__(name):
if name == 'Linear8bitLt' and is_bnb_available():
from .bnb import Linear8bitLt
return Linear8bitLt
if name == 'Linear4bit' and is_bnb_4bit_available():
from .bnb import Linear4bit
return Linear4bit
if name == 'EetqLoraLinear' and is_eetq_available():
from .eetq import EetqLoraLinear
return EetqLoraLinear
raise AttributeError(f'module {__name__} has no attribute {name}')
# File: peft-main/src/peft/tuners/lora/aqlm.py
from typing import Any, Optional
import torch
from peft.import_utils import is_aqlm_available
from peft.tuners.lora.layer import LoraLayer
from peft.tuners.tuners_utils import BaseTunerLayer
if is_aqlm_available():
from aqlm import QuantizedLinear
class AqlmLoraLinear(torch.nn.Module, LoraLayer):
def __init__(self, base_layer, adapter_name: str, r: int=0, lora_alpha: int=1, lora_dropout: float=0.0, init_lora_weights: bool=True, use_rslora: bool=False, **kwargs):
super().__init__()
LoraLayer.__init__(self, base_layer)
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora)
def forward(self, x: torch.Tensor):
result = self.base_layer(x)
if self.disable_adapters:
return result
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
x = x.to(lora_A.weight.dtype)
output = lora_B(lora_A(dropout(x)))
if requires_conversion:
output = output.to(expected_dtype)
output = output * scaling
result += output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'lora.' + rep
def dispatch_aqlm(target: torch.nn.Module, adapter_name: str, **kwargs: Any) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if is_aqlm_available() and isinstance(target_base_layer, QuantizedLinear):
new_module = AqlmLoraLinear(target, adapter_name, **kwargs)
target.qweight = target_base_layer.codes
return new_module
# File: peft-main/src/peft/tuners/lora/awq.py
import importlib.metadata as importlib_metadata
from typing import Any, Optional
import packaging.version
import torch
from peft.import_utils import is_auto_awq_available
from peft.tuners.lora.layer import LoraLayer
from peft.tuners.tuners_utils import BaseTunerLayer
if is_auto_awq_available():
from awq.modules.linear import WQLinear_GEMM
class AwqLoraLinear(torch.nn.Module, LoraLayer):
def __init__(self, base_layer, adapter_name, r: int=0, lora_alpha: int=1, lora_dropout: float=0.0, init_lora_weights: bool=True, use_rslora: bool=False, **kwargs):
super().__init__()
LoraLayer.__init__(self, base_layer)
self.quant_linear_module = base_layer
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora)
def forward(self, x: torch.Tensor):
result = self.quant_linear_module(x)
if self.disable_adapters:
return result
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
x = x.to(lora_A.weight.dtype)
output = lora_B(lora_A(dropout(x)))
if requires_conversion:
output = output.to(expected_dtype)
output = output * scaling
result = result + output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'lora.' + rep
def dispatch_awq(target: torch.nn.Module, adapter_name: str, **kwargs: Any) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if is_auto_awq_available() and isinstance(target_base_layer, WQLinear_GEMM):
AUTOAWQ_MINIMUM_VERSION = packaging.version.parse('0.2.0')
version_autoawq = packaging.version.parse(importlib_metadata.version('autoawq'))
if AUTOAWQ_MINIMUM_VERSION > version_autoawq:
raise ImportError(f'Found an incompatible version of auto-awq. Found version {version_autoawq}, but only versions above {AUTOAWQ_MINIMUM_VERSION} are supported for PEFT.')
new_module = AwqLoraLinear(target, adapter_name, **kwargs)
target.qweight = target_base_layer.qweight
return new_module
# File: peft-main/src/peft/tuners/lora/bnb.py
from __future__ import annotations
import warnings
from typing import Any, Optional
import bitsandbytes as bnb
import torch
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from peft.utils.integrations import dequantize_bnb_weight
from peft.utils.other import transpose
from .layer import LoraLayer
if is_bnb_available():
class Linear8bitLt(torch.nn.Module, LoraLayer):
def __init__(self, base_layer: torch.nn.Module, adapter_name: str, r: int=0, lora_alpha: int=1, lora_dropout: float=0.0, init_lora_weights: bool=True, use_rslora: bool=False, use_dora: bool=False, **kwargs) -> None:
super().__init__()
LoraLayer.__init__(self, base_layer)
self.fan_in_fan_out = False
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora)
def merge(self, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter not in self.lora_A.keys():
continue
warnings.warn('Merge lora module to 8-bit linear may get different generations due to rounding errors.')
lora_data = self.get_delta_weight(active_adapter)
weight = self.get_base_layer().weight
state = self.get_base_layer().state
if state.SCB is None:
state.SCB = weight.SCB
output = dequantize_bnb_weight(weight, state=state)
if not self.use_dora[active_adapter]:
w_data = output.to(lora_data.dtype).to(lora_data.device) + lora_data
else:
weight_norm = self.lora_magnitude_vector[active_adapter].get_weight_norm(output, lora_data, scaling=1).detach()
self._cache_store(f'{active_adapter}-weight_norm', weight_norm)
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
w_data = dora_factor.view(-1, 1) * (output + lora_data)
if safe_merge and (not torch.isfinite(w_data).all()):
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
self.get_base_layer().weight = bnb.nn.Int8Params(w_data.to('cpu'), requires_grad=False, has_fp16_weights=weight.has_fp16_weights).to(weight.device)
state.reset_grads()
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter not in self.lora_A.keys():
continue
warnings.warn('Unmerge lora module to 8-bit linear may get different generations due to rounding errors.')
lora_data = self.get_delta_weight(active_adapter)
weight = self.get_base_layer().weight
state = self.get_base_layer().state
if state.SCB is None:
state.SCB = weight.SCB
output = dequantize_bnb_weight(weight, state=state)
if not self.use_dora[active_adapter]:
w_data = output.to(lora_data.dtype).to(lora_data.device) - lora_data
else:
weight_norm = self._cache_pop(f'{active_adapter}-weight_norm')
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
w_data = output.data / dora_factor.view(-1, 1) - lora_data
self.get_base_layer().weight = bnb.nn.Int8Params(w_data.to('cpu'), requires_grad=False, has_fp16_weights=weight.has_fp16_weights).to(weight.device)
state.reset_grads()
def get_delta_weight(self, adapter):
return transpose(self.lora_B[adapter].weight @ self.lora_A[adapter].weight, False) * self.scaling[adapter]
def _mixed_batch_forward(self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any) -> torch.Tensor:
result = self.base_layer(x, *args, **kwargs)
unique_adapters = set(adapter_names)
sub_batch_indices_list = []
for adapter in unique_adapters:
sub_batch_indices_list.append([index for (index, item) in enumerate(adapter_names) if item == adapter])
for (i, active_adapter) in enumerate(unique_adapters):
if active_adapter == '__base__':
continue
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
compute_dtype = lora_A.weight.dtype
if x.dtype != compute_dtype:
x = x.to(compute_dtype)
sub_batch = x[sub_batch_indices_list[i]]
output = lora_B(lora_A(dropout(sub_batch))) * scaling
if requires_conversion:
output = output.to(expected_dtype)
result[sub_batch_indices_list[i]] += output
return result
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
self._check_forward_args(x, *args, **kwargs)
adapter_names = kwargs.pop('adapter_names', None)
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif adapter_names is not None:
result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
compute_dtype = lora_A.weight.dtype
if x.dtype != compute_dtype:
x = x.to(compute_dtype)
if not self.use_dora[active_adapter]:
output = lora_B(lora_A(dropout(x))) * scaling
else:
x = dropout(x)
output = self.lora_magnitude_vector[active_adapter](x, lora_A=lora_A, lora_B=lora_B, scaling=scaling, base_layer=self.get_base_layer())
if requires_conversion:
output = output.to(expected_dtype)
result = result + output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'lora.' + rep
def dispatch_bnb_8bit(target: torch.nn.Module, adapter_name: str, **kwargs):
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
loaded_in_8bit = kwargs.get('loaded_in_8bit', False)
if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
eightbit_kwargs = kwargs.copy()
eightbit_kwargs.update({'has_fp16_weights': target.state.has_fp16_weights, 'memory_efficient_backward': target.state.memory_efficient_backward, 'threshold': target.state.threshold, 'index': target.index})
new_module = Linear8bitLt(target, adapter_name, **eightbit_kwargs)
return new_module
if is_bnb_4bit_available():
class Linear4bit(torch.nn.Module, LoraLayer):
def __init__(self, base_layer: torch.nn.Module, adapter_name: str, r: int=0, lora_alpha: int=1, lora_dropout: float=0.0, init_lora_weights: bool=True, use_rslora: bool=False, use_dora: bool=False, **kwargs) -> None:
super().__init__()
LoraLayer.__init__(self, base_layer)
self.fan_in_fan_out = False
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora)
def merge(self, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter not in self.lora_A.keys():
continue
warnings.warn('Merge lora module to 4-bit linear may get different generations due to rounding errors.')
weight = self.get_base_layer().weight
kwargs = weight.__dict__
lora_data = self.get_delta_weight(active_adapter)
output = dequantize_bnb_weight(weight, state=weight.quant_state)
if not self.use_dora[active_adapter]:
w_data = output + lora_data
else:
weight_norm = self.lora_magnitude_vector[active_adapter].get_weight_norm(output, lora_data, scaling=1).detach()
self._cache_store(f'{active_adapter}-weight_norm', weight_norm)
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
w_data = dora_factor.view(-1, 1) * (output + lora_data)
if safe_merge and (not torch.isfinite(w_data).all()):
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
if 'bnb_quantized' in kwargs:
kwargs['bnb_quantized'] = False
kwargs['requires_grad'] = False
kwargs.pop('data', None)
self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to('cpu'), **kwargs).to(weight.device)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter not in self.lora_A.keys():
continue
warnings.warn('Unmerge lora module to 4-bit linear may get different generations due to rounding errors.')
lora_data = self.get_delta_weight(active_adapter)
weight = self.get_base_layer().weight
kwargs = weight.__dict__
output = dequantize_bnb_weight(weight, state=weight.quant_state)
if not self.use_dora[active_adapter]:
w_data = output - lora_data
else:
weight_norm = self._cache_pop(f'{active_adapter}-weight_norm')
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
w_data = output.data / dora_factor.view(-1, 1) - lora_data
if 'bnb_quantized' in kwargs:
kwargs['bnb_quantized'] = False
kwargs['requires_grad'] = False
kwargs.pop('data', None)
self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to('cpu'), **kwargs).to(weight.device)
def get_delta_weight(self, adapter):
return transpose(self.lora_B[adapter].weight @ self.lora_A[adapter].weight, False) * self.scaling[adapter]
def _mixed_batch_forward(self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any) -> torch.Tensor:
result = self.base_layer(x, *args, **kwargs)
unique_adapters = set(adapter_names)
sub_batch_indices_list = []
for adapter in unique_adapters:
sub_batch_indices_list.append([index for (index, item) in enumerate(adapter_names) if item == adapter])
for (i, active_adapter) in enumerate(unique_adapters):
if active_adapter == '__base__':
continue
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
x = x.to(lora_A.weight.dtype)
sub_batch = x[sub_batch_indices_list[i]]
output = lora_B(lora_A(dropout(sub_batch))) * scaling
if requires_conversion:
output = output.to(expected_dtype)
result[sub_batch_indices_list[i]] += output
return result
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
self._check_forward_args(x, *args, **kwargs)
adapter_names = kwargs.pop('adapter_names', None)
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif adapter_names is not None:
result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
result = result.clone()
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
x = x.to(lora_A.weight.dtype)
if not self.use_dora[active_adapter]:
output = lora_B(lora_A(dropout(x))) * scaling
else:
x = dropout(x)
output = self.lora_magnitude_vector[active_adapter](x, lora_A=lora_A, lora_B=lora_B, scaling=scaling, base_layer=self.get_base_layer())
if requires_conversion:
output = output.to(expected_dtype)
result = result + output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'lora.' + rep
def dispatch_bnb_4bit(target: torch.nn.Module, adapter_name: str, **kwargs):
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
loaded_in_4bit = kwargs.get('loaded_in_4bit', False)
if loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit):
fourbit_kwargs = kwargs.copy()
fourbit_kwargs.update({'compute_dtype': target_base_layer.compute_dtype, 'compress_statistics': target_base_layer.weight.compress_statistics, 'quant_type': target_base_layer.weight.quant_type})
new_module = Linear4bit(target, adapter_name, **fourbit_kwargs)
return new_module
# File: peft-main/src/peft/tuners/lora/config.py
from __future__ import annotations
import warnings
from dataclasses import dataclass, field
from typing import Literal, Optional, Union
from torch import nn
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class LoraRuntimeConfig:
ephemeral_gpu_offload: bool = field(default=False, metadata={'help': 'Whether to use ephemeral GPU offloading for models partially kept in CPU memory. Ephemeral GPU offloading result in the data involved in intense operations being momentarily copied over to the GPU, and the results copied back to CPU. There is a momentary VRAM overhead, but operations are generally orders of magnitude faster compared to performing them on the CPU. This is useful when parts of the model and/or components (such as adapters) are kept in CPU memory until they are needed. Rather than perform expensive operations on small data, the data is transferred to the GPU on-demand, the operation(s) performed, and the results moved back to CPU memory. Currently only affects DoRA initialization.'})
@dataclass
class LoftQConfig:
loftq_bits: int = field(default=4, metadata={'help': 'Quantization bits for LoftQ'})
loftq_iter: int = field(default=1, metadata={'help': 'Alternating iterations for LoftQ'})
@dataclass
class LoraConfig(PeftConfig):
r: int = field(default=8, metadata={'help': 'Lora attention dimension'})
target_modules: Optional[Union[list[str], str]] = field(default=None, metadata={'help': "List of module names or regex expression of the module names to replace with LoRA.For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'.This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer.If not specified, modules will be chosen according to the model architecture, If the architecture is not known, an error will be raised -- in this case, you should specify the target modules manually."})
lora_alpha: int = field(default=8, metadata={'help': 'Lora alpha'})
lora_dropout: float = field(default=0.0, metadata={'help': 'Lora dropout'})
fan_in_fan_out: bool = field(default=False, metadata={'help': 'Set this to True if the layer to replace stores weight like (fan_in, fan_out)'})
bias: Literal['none', 'all', 'lora_only'] = field(default='none', metadata={'help': "Bias type for Lora. Can be 'none', 'all' or 'lora_only'"})
use_rslora: bool = field(default=False, metadata={'help': "When set to True, uses Rank-Stabilized LoRA which sets the adapter scaling factor to `lora_alpha/math.sqrt(r)`, since it was proven to work better. Otherwise, it will use the original default value of `lora_alpha/r`."})
modules_to_save: Optional[list[str]] = field(default=None, metadata={'help': 'List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. For example, in Sequence Classification or Token Classification tasks, the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.'})
init_lora_weights: bool | Literal['gaussian', 'olora', 'pissa', 'pissa_niter_[number of iters]', 'loftq'] = field(default=True, metadata={'help': "How to initialize the weights of the LoRA layers. Passing `'True'` (default) results in the default initialization from the reference implementation from Microsoft. Passing `'gaussian'` results in Gaussian initialization scaled by the LoRA rank for linear and layers. Setting the initialization to `'False'` leads to completely random initialization and *is discouraged.*Passing `'olora'` results in OLoRA initialization.Passing `'pissa'` results in PiSSA initialization.Passing `'pissa_niter_[number of iters]'` initiates Fast-SVD-based PiSSA initialization, where [number of iters] indicates the number of subspace iterations to perform fsvd, and must be a nonnegative integer.Pass `'loftq'` to use LoftQ initialization"})
layers_to_transform: Optional[Union[list[int], int]] = field(default=None, metadata={'help': 'The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index. This only works when target_modules is a list of str.'})
layers_pattern: Optional[Union[list[str], str]] = field(default=None, metadata={'help': 'The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern.This only works when target_modules is a list of str.'})
rank_pattern: Optional[dict] = field(default_factory=dict, metadata={'help': 'The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. For example, `{model.decoder.layers.0.encoder_attn.k_proj: 8`}'})
alpha_pattern: Optional[dict] = field(default_factory=dict, metadata={'help': 'The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `lora_alpha`. For example, `{model.decoder.layers.0.encoder_attn.k_proj: 32`}'})
megatron_config: Optional[dict] = field(default=None, metadata={'help': "The TransformerConfig from Megatron. It is used to create LoRA's parallel linear layer.You can get it like this, `core_transformer_config_from_args(get_args())`, these two functions being from Megatron.You need to specify this parameter when you want to apply LoRA to the ColumnParallelLinear and RowParallelLinear layers of megatron.It should be noted that we may not be able to use the `save_pretrained` and `from_pretrained` functions, because TransformerConfig may not necessarily be serialized.But when using megatron, we can use `get_peft_model_state_dict` function and megatron's framework, they can also save and load models and configurations."})
megatron_core: Optional[str] = field(default='megatron.core', metadata={'help': "The core module from Megatron, it is used to create LoRA's parallel linear layer. It only needs to be passed in when you need to use your own modified megatron core module. Otherwise, it will use the default value `megatron.core`. "})
loftq_config: Union[LoftQConfig, dict] = field(default_factory=dict, metadata={'help': "The configuration of LoftQ. If this is passed, then LoftQ will be used to quantize the backbone weights and initialize Lora layers. Also set `init_lora_weights='loftq'` in this case."})
use_dora: bool = field(default=False, metadata={'help': "Enable 'Weight-Decomposed Low-Rank Adaptation' (DoRA). This technique decomposes the updates of the weights into two parts, magnitude and direction. Direction is handled by normal LoRA, whereas the magnitude is handled by a separate learnable parameter. This can improve the performance of LoRA, especially at low ranks. Right now, DoRA only supports linear and Conv2D layers. DoRA introduces a biggeroverhead than pure LoRA, so it is recommended to merge weights for inference."})
layer_replication: Optional[list[tuple[int, int]]] = field(default=None, metadata={'help': 'This enables using LoRA to effectively expand a transformer model to a larger size by repeating some layers. The transformation handles models (currently Llama, Bert or Falcon compatible architectures) with a module list in the model which it modifies to expand the number of modules. Base weights are shared so the memory usage is close to the original model. The intended use is these base weights remain fixed during finetuning but each layer has a separate LoRA adapter so the layers can be specialed via the adapter layers fit during fine tuning.The format is a list of [start, end) pairs which specify the layer ranges to stack. For example:\n Original model has 5 layers labelled by their position in the model: `[0, 1, 2, 3, 4]`\n layer_replication: `[[0, 4], [2, 5]]`\n Final model will have this arrangement of original layers: `[0, 1, 2, 3, 2, 3, 4]`\nThis format is based on what is used for pass-through merges in mergekit. It makes it simple to select sequential ranges of a model and stack them while reusing layers at either end of each sequence.'})
runtime_config: LoraRuntimeConfig = field(default_factory=LoraRuntimeConfig, metadata={'help': 'Runtime configurations'})
def to_dict(self):
rv = super().to_dict()
rv.pop('runtime_config')
return rv
def __post_init__(self):
self.peft_type = PeftType.LORA
self.target_modules = set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
if isinstance(self.target_modules, str) and self.layers_to_transform is not None:
raise ValueError('`layers_to_transform` cannot be used when `target_modules` is a str.')
if isinstance(self.target_modules, str) and self.layers_pattern is not None:
raise ValueError('`layers_pattern` cannot be used when `target_modules` is a str.')
if self.use_dora and self.megatron_config:
raise ValueError('DoRA does not support megatron_core, please set `use_dora=False`.')
if self.init_lora_weights == 'loftq':
import importlib
if not importlib.util.find_spec('scipy'):
raise ImportError("The required package 'scipy' is not installed. Please install it to continue.")
if self.loftq_config is None:
raise ValueError("`loftq_config` must be specified when `init_lora_weights` is 'loftq'.")
if self.use_rslora and (self.rank_pattern or self.alpha_pattern) and (isinstance(self.init_lora_weights, str) and self.init_lora_weights.startswith('pissa') or self.init_lora_weights == 'olora'):
msg = "Using Rank-Stabilized LoRA with rank_pattern/alpha_pattern and post-training conversion of modified base weights (PiSSA, OLoRA) means that you won't be able to pass `path_initial_model_for_weight_conversion` to `save_pretrained` to restore the initial values of the base weights; if you intend to do this, please ensure not to use rslora or rank_pattern/alpha_pattern."
warnings.warn(msg)
if self.loftq_config and (not isinstance(self.loftq_config, dict)):
self.loftq_config = vars(self.loftq_config)
self._custom_modules: Optional[dict[type[nn.Mmodule], type[nn.Module]]] = None
def _register_custom_module(self, mapping: dict[type[nn.Mmodule], type[nn.Module]]) -> None:
if self._custom_modules is None:
self._custom_modules = {}
self._custom_modules.update(mapping)
# File: peft-main/src/peft/tuners/lora/dora.py
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch import nn
from peft.utils.integrations import dequantize_module_weight, gather_params_ctx
from peft.utils.other import transpose
class DoraLinearLayer(nn.Module):
def __init__(self, fan_in_fan_out):
super().__init__()
self.fan_in_fan_out = fan_in_fan_out
def get_weight_norm(self, weight, lora_weight, scaling) -> torch.Tensor:
weight = transpose(weight, self.fan_in_fan_out)
weight = weight + scaling * lora_weight
weight_norm = torch.linalg.norm(weight, dim=1).to(weight.dtype)
return weight_norm
def update_layer(self, *, base_layer, lora_A, lora_B, scaling, place_on_cpu=False) -> None:
dtype_is_fp16 = lora_A.dtype == torch.float16
if dtype_is_fp16:
lora_A = lora_A.float()
lora_B = lora_B.float()
with gather_params_ctx(base_layer.parameters()):
if base_layer.__class__.__name__ == 'Linear4bit':
base_layer = deepcopy(base_layer)
weight = dequantize_module_weight(base_layer)
if weight.data.ndim == 4:
lora_weight = torch.mm(lora_B.flatten(start_dim=1), lora_A.flatten(start_dim=1))
lora_weight = lora_weight.reshape(weight.shape)
else:
lora_weight = lora_B @ lora_A
if dtype_is_fp16:
lora_weight = lora_weight.half()
weight_norm = self.get_weight_norm(weight.to(lora_A.device), lora_weight, scaling)
if place_on_cpu:
weight_norm = weight_norm.to('cpu')
self.weight = nn.Parameter(weight_norm, requires_grad=True)
def forward(self, x, *, lora_A, lora_B, scaling, base_layer):
lora_result = lora_B(lora_A(x))
x_eye = torch.eye(lora_A.weight.shape[1], device=lora_A.weight.device, dtype=x.dtype)
lora_weight = lora_B(lora_A(x_eye)).T
magnitude = self.weight
weight = dequantize_module_weight(base_layer)
weight = weight.to(x.dtype)
weight_norm = self.get_weight_norm(weight, lora_weight.detach(), scaling)
weight_norm = weight_norm.detach()
mag_norm_scale = (magnitude / weight_norm).view(1, -1)
result_dora = (mag_norm_scale - 1) * F.linear(x, transpose(weight, self.fan_in_fan_out)) + mag_norm_scale * lora_result * scaling
return result_dora
def __repr__(self) -> str:
rep = super().__repr__()
return 'lora.dora.' + rep
class DoraEmbeddingLayer(DoraLinearLayer):
def forward(self, x, *, lora_A, lora_B, scaling, base_layer, embed_fn):
lora_weight = (lora_A @ lora_B).T
magnitude = self.weight
weight = base_layer.weight
weight_norm = self.get_weight_norm(weight, lora_weight.detach(), scaling)
weight_norm = weight_norm.detach()
mag_norm_scale = magnitude / weight_norm
result_dora = mag_norm_scale * (embed_fn(x, lora_A) @ lora_B) * scaling
return (mag_norm_scale, result_dora)
def __repr__(self) -> str:
rep = super().__repr__()
return 'lora.dora.' + rep
class DoraConv2dLayer(DoraLinearLayer):
def get_weight_norm(self, weight, lora_weight, scaling) -> torch.Tensor:
weight = weight + scaling * lora_weight
weight_norm = weight.norm(p=2, dim=(1, 2, 3), keepdim=True).transpose(1, 0)
return weight_norm
def forward(self, x, *, lora_A, lora_B, scaling, base_layer):
weight = base_layer.weight
lora_weight = torch.mm(lora_B.weight.flatten(start_dim=1), lora_A.weight.flatten(start_dim=1))
lora_weight = lora_weight.reshape(weight.shape)
magnitude = self.weight
weight_norm = self.get_weight_norm(weight, lora_weight.detach(), scaling)
weight_norm = weight_norm.detach()
mag_norm_scale = magnitude / weight_norm
result_dora = (mag_norm_scale - 1) * F.conv2d(x, weight, bias=None, stride=base_layer.stride, padding=base_layer.padding, dilation=base_layer.dilation, groups=base_layer.groups) + mag_norm_scale * lora_B(lora_A(x)) * scaling
return result_dora
def __repr__(self) -> str:
rep = super().__repr__()
return 'lora.dora.' + rep
# File: peft-main/src/peft/tuners/lora/eetq.py
from typing import Any, List, Optional
import torch
from peft.import_utils import is_eetq_available
from peft.tuners.lora.layer import LoraLayer
from peft.tuners.tuners_utils import BaseTunerLayer
if is_eetq_available():
from eetq import EetqLinear
class EetqLoraLinear(torch.nn.Module, LoraLayer):
def __init__(self, base_layer, adapter_name, r: int=0, lora_alpha: int=1, lora_dropout: float=0.0, init_lora_weights: bool=True, use_rslora: bool=False, **kwargs):
super().__init__()
LoraLayer.__init__(self, base_layer)
self.quant_linear_module = base_layer
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora)
def forward(self, x: torch.Tensor):
result = self.quant_linear_module(x)
if self.disable_adapters:
return result
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
x = x.to(lora_A.weight.dtype)
output = lora_B(lora_A(dropout(x)))
if requires_conversion:
output = output.to(expected_dtype)
output = output * scaling
result = result + output
return result
def merge(self, safe_merge: bool=False, adapter_names: Optional[List[str]]=None) -> None:
raise AttributeError('Merging LoRA layers is not supported for Eetq layers.')
def unmerge(self) -> None:
raise AttributeError('Unmerging LoRA layers is not supported for Eetq layers.')
def __repr__(self) -> str:
rep = super().__repr__()
return 'lora.' + rep
def dispatch_eetq(target: torch.nn.Module, adapter_name: str, **kwargs: Any) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if is_eetq_available() and isinstance(target_base_layer, EetqLinear):
new_module = EetqLoraLinear(target, adapter_name, **kwargs)
target.weight = target_base_layer.weight
if hasattr(target, 'bias'):
target.bias = target_base_layer.bias
return new_module
# File: peft-main/src/peft/tuners/lora/gptq.py
from typing import Any, Optional
import torch
from peft.tuners.lora.layer import LoraLayer
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import get_auto_gptq_quant_linear
class QuantLinear(torch.nn.Module, LoraLayer):
def __init__(self, base_layer, adapter_name: str, r: int=0, lora_alpha: int=1, lora_dropout: float=0.0, init_lora_weights: bool=True, use_rslora: bool=False, use_dora: bool=False, **kwargs):
super().__init__()
LoraLayer.__init__(self, base_layer)
if use_dora:
raise ValueError(f'{self.__class__.__name__} does not support DoRA yet, please set it to False')
self.quant_linear_module = base_layer
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora)
def forward(self, x: torch.Tensor):
result = self.quant_linear_module(x)
if self.disable_adapters:
return result
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
x = x.to(lora_A.weight.dtype)
output = lora_B(lora_A(dropout(x)))
if requires_conversion:
output = output.to(expected_dtype)
output = output * scaling
result += output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'lora.' + rep
def dispatch_gptq(target: torch.nn.Module, adapter_name: str, **kwargs: Any) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
gptq_quantization_config = kwargs.get('gptq_quantization_config', None)
AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config)
if AutoGPTQQuantLinear is not None and isinstance(target_base_layer, AutoGPTQQuantLinear):
new_module = QuantLinear(target, adapter_name, **kwargs)
target.qweight = target_base_layer.qweight
return new_module
# File: peft-main/src/peft/tuners/lora/hqq.py
from __future__ import annotations
import copy
import warnings
from typing import Any, Optional
import torch
from peft.import_utils import is_hqq_available
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from peft.utils.other import transpose
from .layer import LoraLayer
if is_hqq_available():
from hqq.core.quantize import HQQLinear
class HqqLoraLinear(torch.nn.Module, LoraLayer):
def __init__(self, base_layer: torch.nn.Module, adapter_name: str, r: int=0, lora_alpha: int=1, lora_dropout: float=0.0, init_lora_weights: bool=True, use_rslora: bool=False, use_dora: bool=False, **kwargs) -> None:
super().__init__()
LoraLayer.__init__(self, base_layer)
self.fan_in_fan_out = False
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora)
def merge(self, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter not in self.lora_A.keys():
continue
layer = self.get_base_layer()
quant_config = {**copy.deepcopy(layer.quant_config), 'offload_meta': layer.offload_meta}
lora_data = self.get_delta_weight(active_adapter)
output = layer.dequantize()
if not self.use_dora[active_adapter]:
w_data = output + lora_data
else:
weight_norm = self._get_weight_norm(output, lora_data, scaling=1).detach()
self._cache_store(f'{active_adapter}-weight_norm', weight_norm)
dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
w_data = dora_factor.view(-1, 1) * (output + lora_data)
if safe_merge and (not torch.isfinite(w_data).all()):
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
new_hqq_layer = HQQLinear(None, quant_config, compute_dtype=layer.compute_dtype, device=layer.device)
quant_config.pop('offload_meta', None)
new_hqq_layer.quantize(w_data, **quant_config)
self.base_layer = new_hqq_layer
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter not in self.lora_A.keys():
continue
lora_data = self.get_delta_weight(active_adapter)
layer = self.get_base_layer()
quant_config = {**copy.deepcopy(layer.quant_config), 'offload_meta': layer.offload_meta}
output = layer.dequantize()
if not self.use_dora[active_adapter]:
w_data = output - lora_data
else:
weight_norm = self._cache_pop(f'{active_adapter}-weight_norm')
dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm
w_data = output.data / dora_factor.view(-1, 1) - lora_data
new_hqq_layer = HQQLinear(None, quant_config, compute_dtype=layer.compute_dtype, device=layer.device)
quant_config.pop('offload_meta', None)
new_hqq_layer.quantize(w_data, **quant_config)
self.base_layer = new_hqq_layer
def get_delta_weight(self, adapter):
return transpose(self.lora_B[adapter].weight @ self.lora_A[adapter].weight, False) * self.scaling[adapter]
def _mixed_batch_forward(self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any) -> torch.Tensor:
result = self.base_layer(x, *args, **kwargs)
unique_adapters = set(adapter_names)
sub_batch_indices_list = []
for adapter in unique_adapters:
sub_batch_indices_list.append([index for (index, item) in enumerate(adapter_names) if item == adapter])
for (i, active_adapter) in enumerate(unique_adapters):
if active_adapter == '__base__':
continue
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
compute_dtype = lora_A.weight.dtype
if x.dtype != compute_dtype:
x = x.to(compute_dtype)
sub_batch = x[sub_batch_indices_list[i]]
output = lora_B(lora_A(dropout(sub_batch))) * scaling
if requires_conversion:
output = output.to(expected_dtype)
result[sub_batch_indices_list[i]] += output
return result
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
self._check_forward_args(x, *args, **kwargs)
adapter_names = kwargs.pop('adapter_names', None)
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif adapter_names is not None:
result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
compute_dtype = lora_A.weight.dtype
if x.dtype != compute_dtype:
x = x.to(compute_dtype)
if not self.use_dora[active_adapter]:
output = lora_B(lora_A(dropout(x))) * scaling
else:
output = self._apply_dora(x, lora_A, lora_B, scaling, active_adapter)
if requires_conversion:
output = output.to(expected_dtype)
result = result + output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'lora.' + rep
def dispatch_hqq(target: torch.nn.Module, adapter_name: str, **kwargs):
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if is_hqq_available() and isinstance(target_base_layer, HQQLinear):
new_module = HqqLoraLinear(target_base_layer, adapter_name, **kwargs)
return new_module
# File: peft-main/src/peft/tuners/lora/layer.py
from __future__ import annotations
import math
import warnings
from typing import Any, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from accelerate.utils.imports import is_xpu_available
from torch import svd_lowrank
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from peft.utils.integrations import dequantize_module_weight, gather_params_ctx, get_bnb_param_type
from peft.utils.other import transpose
from .config import LoraConfig
from .dora import DoraConv2dLayer, DoraEmbeddingLayer, DoraLinearLayer
class LoraLayer(BaseTunerLayer):
adapter_layer_names = ('lora_A', 'lora_B', 'lora_embedding_A', 'lora_embedding_B')
other_param_names = ('r', 'lora_alpha', 'scaling', 'lora_dropout')
def __init__(self, base_layer: nn.Module, ephemeral_gpu_offload: bool=False, **kwargs) -> None:
self.base_layer = base_layer
self.r = {}
self.lora_alpha = {}
self.scaling = {}
self.lora_dropout = nn.ModuleDict({})
self.lora_A = nn.ModuleDict({})
self.lora_B = nn.ModuleDict({})
self.lora_embedding_A = nn.ParameterDict({})
self.lora_embedding_B = nn.ParameterDict({})
self._disable_adapters = False
self.merged_adapters = []
self.use_dora: dict[str, bool] = {}
self.lora_magnitude_vector = torch.nn.ModuleDict()
self._caches: dict[str, Any] = {}
self.ephemeral_gpu_offload: bool = ephemeral_gpu_offload
self.kwargs = kwargs
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
(in_features, out_features) = (base_layer.in_features, base_layer.out_features)
elif isinstance(base_layer, nn.Conv2d):
(in_features, out_features) = (base_layer.in_channels, base_layer.out_channels)
elif isinstance(base_layer, nn.Embedding):
(in_features, out_features) = (base_layer.num_embeddings, base_layer.embedding_dim)
elif isinstance(base_layer, Conv1D):
(in_features, out_features) = base_layer.weight.ds_shape if hasattr(base_layer.weight, 'ds_shape') else base_layer.weight.shape
elif hasattr(base_layer, 'infeatures') and hasattr(base_layer, 'outfeatures'):
(in_features, out_features) = (base_layer.infeatures, base_layer.outfeatures)
elif hasattr(base_layer, 'input_size') and hasattr(base_layer, 'output_size'):
(in_features, out_features) = (base_layer.input_size, base_layer.output_size)
elif hasattr(base_layer, 'codebooks') and base_layer.__class__.__name__ == 'QuantizedLinear':
(in_features, out_features) = (base_layer.in_features, base_layer.out_features)
elif hasattr(base_layer, 'w_bit') and base_layer.__class__.__name__ == 'WQLinear_GEMM':
(in_features, out_features) = (base_layer.in_features, base_layer.out_features)
elif base_layer.__class__.__name__ == 'EetqLinear':
(in_features, out_features) = (base_layer.in_features, base_layer.out_features)
elif hasattr(base_layer, 'W_q') and base_layer.__class__.__name__ == 'HQQLinear':
(in_features, out_features) = (base_layer.in_features, base_layer.out_features)
else:
if hasattr(base_layer, 'in_features') and hasattr(base_layer, 'out_features'):
(in_features, out_features) = (base_layer.in_features, base_layer.out_features)
else:
(in_features, out_features) = (None, None)
warnings.warn(f"Unsupported layer type '{type(base_layer)}' encountered, proceed at your own risk.", UserWarning)
self.in_features = in_features
self.out_features = out_features
def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora: bool=False):
if r <= 0:
raise ValueError(f'`r` should be a positive integer value but the value passed is {r}')
self.r[adapter_name] = r
self.lora_alpha[adapter_name] = lora_alpha
if lora_dropout > 0.0:
lora_dropout_layer = nn.Dropout(p=lora_dropout)
else:
lora_dropout_layer = nn.Identity()
self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
self.lora_A[adapter_name] = nn.Linear(self.in_features, r, bias=False)
self.lora_B[adapter_name] = nn.Linear(r, self.out_features, bias=False)
if use_rslora:
self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
else:
self.scaling[adapter_name] = lora_alpha / r
if isinstance(init_lora_weights, str) and init_lora_weights.startswith('pissa'):
with gather_params_ctx(self.get_base_layer().weight):
self.pissa_init(adapter_name, init_lora_weights)
elif isinstance(init_lora_weights, str) and init_lora_weights.lower() == 'olora':
with gather_params_ctx(self.get_base_layer().weight):
self.olora_init(adapter_name)
elif init_lora_weights == 'loftq':
with gather_params_ctx(self.get_base_layer().weight):
self.loftq_init(adapter_name)
elif init_lora_weights:
self.reset_lora_parameters(adapter_name, init_lora_weights)
self._move_adapter_to_device_of_base_layer(adapter_name)
if use_dora:
self.dora_init(adapter_name)
self.use_dora[adapter_name] = True
else:
self.use_dora[adapter_name] = False
self.set_adapter(self.active_adapters)
def reset_lora_parameters(self, adapter_name, init_lora_weights):
if init_lora_weights is False:
return
if adapter_name in self.lora_A.keys():
if init_lora_weights is True:
nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5))
elif init_lora_weights.lower() == 'gaussian':
nn.init.normal_(self.lora_A[adapter_name].weight, std=1 / self.r[adapter_name])
else:
raise ValueError(f'Unknown initialization init_lora_weights={init_lora_weights!r}')
nn.init.zeros_(self.lora_B[adapter_name].weight)
if adapter_name in self.lora_embedding_A.keys():
nn.init.zeros_(self.lora_embedding_A[adapter_name])
nn.init.normal_(self.lora_embedding_B[adapter_name])
def olora_init(self, adapter_name):
base_layer = self.get_base_layer()
orig_weight = base_layer.weight
bnb_param_type = get_bnb_param_type(orig_weight)
dtype = orig_weight.dtype
if bnb_param_type:
weight_tensor = dequantize_module_weight(base_layer)
elif dtype in [torch.float32, torch.float16, torch.bfloat16]:
weight_tensor = orig_weight
else:
raise TypeError(f'Unsupported data type for the base layer. Got {dtype}.')
scale_factor = self.scaling[adapter_name]
r = self.r[adapter_name]
weight_tensor = weight_tensor.to(torch.float32)
(Q, R) = torch.linalg.qr(weight_tensor.data)
(Qr, Rr) = (Q[:, :r], R[:r])
self.lora_A[adapter_name].weight.data = Rr.contiguous()
self.lora_B[adapter_name].weight.data = Qr.contiguous()
weight_tensor.data -= scale_factor * self.lora_B[adapter_name].weight @ self.lora_A[adapter_name].weight
if bnb_param_type == '4bit':
weight_tensor = orig_weight.__class__(weight_tensor, quant_type=orig_weight.quant_type, quant_storage=orig_weight.quant_storage, compress_statistics=orig_weight.compress_statistics, module=orig_weight.module).to(orig_weight.device)
base_layer.weight = weight_tensor
elif bnb_param_type == '8bit':
weight_tensor = orig_weight.__class__(weight_tensor, requires_grad=orig_weight.requires_grad, has_fp16_weights=orig_weight.has_fp16_weights).to(orig_weight.device)
base_layer.weight = weight_tensor
else:
weight_tensor = weight_tensor.to(dtype)
base_layer.weight.data = weight_tensor
def pissa_init(self, adapter_name, init_lora_weights):
weight = self.get_base_layer().weight
dtype = weight.dtype
if dtype not in [torch.float32, torch.float16, torch.bfloat16]:
raise TypeError('Please initialize PiSSA under float32, float16, or bfloat16. Subsequently, re-quantize the residual model to help minimize quantization errors.')
weight = weight.to(torch.float32)
if init_lora_weights == 'pissa':
(V, S, Uh) = torch.linalg.svd(weight.data, full_matrices=False)
Vr = V[:, :self.r[adapter_name]]
Sr = S[:self.r[adapter_name]]
Sr /= self.scaling[adapter_name]
Uhr = Uh[:self.r[adapter_name]]
elif len(init_lora_weights.split('_niter_')) == 2:
(Vr, Sr, Ur) = svd_lowrank(weight.data, self.r[adapter_name], niter=int(init_lora_weights.split('_niter_')[-1]))
Sr /= self.scaling[adapter_name]
Uhr = Ur.t()
else:
raise ValueError(f"init_lora_weights should be 'pissa' or 'pissa_niter_[number of iters]', got {init_lora_weights} instead.")
lora_A = torch.diag(torch.sqrt(Sr)) @ Uhr
lora_B = Vr @ torch.diag(torch.sqrt(Sr))
self.lora_A[adapter_name].weight.data = lora_A
self.lora_B[adapter_name].weight.data = lora_B
weight = weight.data - self.scaling[adapter_name] * lora_B @ lora_A
weight = weight.to(dtype)
self.get_base_layer().weight.data = weight
def loftq_init(self, adapter_name):
from peft.utils.loftq_utils import loftq_init
weight = self.get_base_layer().weight
kwargs = {'num_bits': self.kwargs.get('loftq_bits', 4), 'reduced_rank': self.r[adapter_name], 'num_iter': self.kwargs.get('loftq_iter', 1)}
(qweight, lora_A, lora_B) = loftq_init(weight, **kwargs)
if adapter_name in self.lora_A.keys():
self.lora_A[adapter_name].weight.data = lora_A
self.lora_B[adapter_name].weight.data = lora_B
if adapter_name in self.lora_embedding_A.keys():
self.lora_embedding_A[adapter_name].weight.data = lora_A
self.lora_embedding_B[adapter_name].weight.data = lora_B
self.get_base_layer().weight.data = qweight
def dora_init(self, adapter_name: str) -> None:
if not self.lora_magnitude_vector:
self.adapter_layer_names = self.adapter_layer_names[:] + ('lora_magnitude_vector',)
dora_layer = DoraLinearLayer(fan_in_fan_out=getattr(self, 'fan_in_fan_out', False))
lora_A = self.lora_A[adapter_name].weight
lora_B = self.lora_B[adapter_name].weight
place_on_cpu = self.ephemeral_gpu_offload and (lora_A.device.type == 'cpu' or lora_B.device.type == 'cpu')
if self.ephemeral_gpu_offload:
if lora_A.device.type in ['cuda', 'xpu']:
lora_B = lora_B.to(lora_A.device)
else:
if lora_B.device.type not in ['cuda', 'xpu']:
if is_xpu_available():
lora_B = lora_B.to('xpu')
else:
lora_B = lora_B.to('cuda')
lora_A = lora_A.to(lora_B.device)
scaling = self.scaling[adapter_name]
dora_layer.update_layer(base_layer=self.get_base_layer(), lora_A=lora_A, lora_B=lora_B, scaling=scaling, place_on_cpu=place_on_cpu)
self.lora_magnitude_vector[adapter_name] = dora_layer
def _cache_store(self, key: str, value: Any) -> None:
self._caches[key] = value
def _cache_pop(self, key: str) -> Any:
value = self._caches.pop(key)
return value
def set_scale(self, adapter, scale):
if adapter not in self.scaling:
return
self.scaling[adapter] = scale * self.lora_alpha[adapter] / self.r[adapter]
def scale_layer(self, scale: float) -> None:
if scale == 1:
return
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
self.scaling[active_adapter] *= scale
def unscale_layer(self, scale=None) -> None:
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
if scale is None:
self.scaling[active_adapter] = self.lora_alpha[active_adapter] / self.r[active_adapter]
else:
self.scaling[active_adapter] /= scale
def _check_forward_args(self, x, *args, **kwargs):
adapter_names = kwargs.get('adapter_names', None)
if adapter_names is None:
return
if len(x) != len(adapter_names):
msg = f'Length of `adapter_names` should be the same as the number of inputs, but got {len(adapter_names)} and {len(x)} respectively.'
raise ValueError(msg)
if self.merged:
msg = 'Cannot pass `adapter_names` when there are merged adapters, please call `unmerge_adapter` first.'
raise ValueError(msg)
unique_adapters = set(self.active_adapters)
for adapter_name in unique_adapters:
if self.use_dora.get(adapter_name, False):
msg = 'Cannot pass `adapter_names` when DoRA is enabled.'
raise ValueError(msg)
def _mixed_batch_forward(self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any) -> torch.Tensor:
result = self.base_layer(x, *args, **kwargs)
torch_result_dtype = result.dtype
unique_adapters = set(adapter_names)
sub_batch_indices_list = []
for adapter in unique_adapters:
sub_batch_indices_list.append([index for (index, item) in enumerate(adapter_names) if item == adapter])
for (i, active_adapter) in enumerate(unique_adapters):
if active_adapter == '__base__':
continue
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
sub_batch = x[sub_batch_indices_list[i]].to(lora_A.weight.dtype)
lora_output = lora_B(lora_A(dropout(sub_batch))) * scaling
result[sub_batch_indices_list[i]] += lora_output.to(torch_result_dtype)
return result
class Linear(nn.Module, LoraLayer):
def __init__(self, base_layer, adapter_name: str, r: int=0, lora_alpha: int=1, lora_dropout: float=0.0, fan_in_fan_out: bool=False, is_target_conv_1d_layer: bool=False, init_lora_weights: Union[bool, str]=True, use_rslora: bool=False, use_dora: bool=False, **kwargs) -> None:
super().__init__()
LoraLayer.__init__(self, base_layer, **kwargs)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora)
self.is_target_conv_1d_layer = is_target_conv_1d_layer
def merge(self, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter in self.lora_A.keys():
base_layer = self.get_base_layer()
if safe_merge:
orig_weights = base_layer.weight.data.clone()
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
orig_weights += delta_weight
else:
weight_norm = self.lora_magnitude_vector[active_adapter].get_weight_norm(orig_weights, transpose(delta_weight, self.fan_in_fan_out), scaling=1).detach()
self._cache_store(f'{active_adapter}-weight_norm', weight_norm)
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
dora_factor = transpose(dora_factor.view(-1, 1), self.fan_in_fan_out)
orig_weights = dora_factor * (orig_weights + delta_weight)
if not torch.isfinite(orig_weights).all():
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
base_layer.weight.data = orig_weights
else:
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
base_layer.weight.data += delta_weight
else:
weight_norm = self.lora_magnitude_vector[active_adapter].get_weight_norm(base_layer.weight, transpose(delta_weight, self.fan_in_fan_out), scaling=1).detach()
self._cache_store(f'{active_adapter}-weight_norm', weight_norm)
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
dora_factor = transpose(dora_factor.view(-1, 1), self.fan_in_fan_out)
new_weight = dora_factor * (base_layer.weight.data + delta_weight)
base_layer.weight.data = new_weight
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.lora_A.keys():
weight = self.get_base_layer().weight
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
weight.data -= delta_weight
else:
weight_norm = self._cache_pop(f'{active_adapter}-weight_norm')
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
weight_orig = weight.data / dora_factor.view(-1, 1) - delta_weight
weight.data = weight_orig
def get_delta_weight(self, adapter) -> torch.Tensor:
device = self.lora_B[adapter].weight.device
dtype = self.lora_B[adapter].weight.dtype
cast_to_fp32 = device.type == 'cpu' and (dtype == torch.float16 or dtype == torch.bfloat16)
weight_A = self.lora_A[adapter].weight
weight_B = self.lora_B[adapter].weight
if cast_to_fp32:
weight_A = weight_A.float()
weight_B = weight_B.float()
output_tensor = transpose(weight_B @ weight_A, self.fan_in_fan_out) * self.scaling[adapter]
if cast_to_fp32:
output_tensor = output_tensor.to(dtype=dtype)
self.lora_A[adapter].weight.data = weight_A.to(dtype)
self.lora_B[adapter].weight.data = weight_B.to(dtype)
return output_tensor
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
self._check_forward_args(x, *args, **kwargs)
adapter_names = kwargs.pop('adapter_names', None)
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif adapter_names is not None:
result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
torch_result_dtype = result.dtype
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
x = x.to(lora_A.weight.dtype)
if not self.use_dora[active_adapter]:
result = result + lora_B(lora_A(dropout(x))) * scaling
else:
x = dropout(x)
result = result + self.lora_magnitude_vector[active_adapter](x, lora_A=lora_A, lora_B=lora_B, scaling=scaling, base_layer=self.get_base_layer())
result = result.to(torch_result_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'lora.' + rep
class Embedding(nn.Module, LoraLayer):
def __init__(self, base_layer: nn.Module, adapter_name: str, r: int=0, lora_alpha: int=1, lora_dropout: float=0.0, init_lora_weights: Union[bool, str]=True, use_rslora: bool=False, use_dora: bool=False, **kwargs) -> None:
super().__init__()
LoraLayer.__init__(self, base_layer)
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora)
def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora):
if r <= 0:
raise ValueError(f'`r` should be a positive integer value but the value passed is {r}')
self.r[adapter_name] = r
self.lora_alpha[adapter_name] = lora_alpha
if lora_dropout > 0.0:
lora_dropout_layer = nn.Dropout(p=lora_dropout)
else:
lora_dropout_layer = nn.Identity()
self.lora_dropout[adapter_name] = lora_dropout_layer
weight_A = torch.randn((r, self.in_features))
weight_B = torch.randn((self.out_features, r))
self.lora_embedding_A[adapter_name] = nn.Parameter(weight_A)
self.lora_embedding_B[adapter_name] = nn.Parameter(weight_B)
if use_rslora:
self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
else:
self.scaling[adapter_name] = lora_alpha / r
if init_lora_weights == 'loftq':
self.loftq_init(adapter_name)
elif init_lora_weights:
self.reset_lora_parameters(adapter_name, init_lora_weights)
self._move_adapter_to_device_of_base_layer(adapter_name)
if use_dora:
self.dora_init(adapter_name)
self.use_dora[adapter_name] = True
else:
self.use_dora[adapter_name] = False
self.set_adapter(self.active_adapters)
def dora_init(self, adapter_name: str) -> None:
if self.lora_magnitude_vector is None:
self.adapter_layer_names = self.adapter_layer_names[:] + ('lora_magnitude_vector',)
dora_layer = DoraEmbeddingLayer(fan_in_fan_out=True)
lora_embedding_A = self.lora_embedding_A[adapter_name]
lora_embedding_B = self.lora_embedding_B[adapter_name]
scaling = self.scaling[adapter_name]
dora_layer.update_layer(base_layer=self.get_base_layer(), lora_A=lora_embedding_A, lora_B=lora_embedding_B, scaling=scaling)
self.lora_magnitude_vector[adapter_name] = dora_layer
def merge(self, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter in self.lora_embedding_A.keys():
base_layer = self.get_base_layer()
if safe_merge:
orig_weights = base_layer.weight.data.clone()
orig_weights += self.get_delta_weight(active_adapter)
if not torch.isfinite(orig_weights).all():
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
base_layer.weight.data = orig_weights
else:
base_layer.weight.data += self.get_delta_weight(active_adapter)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.lora_embedding_A.keys():
self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
def get_delta_weight(self, adapter) -> torch.Tensor:
device = self.lora_embedding_B[adapter].device
dtype = self.lora_embedding_A[adapter].dtype
cast_to_fp32 = device.type == 'cpu' and (dtype == torch.float16 or dtype == torch.bfloat16)
weight_A = self.lora_embedding_A[adapter]
weight_B = self.lora_embedding_B[adapter]
if cast_to_fp32:
weight_A = weight_A.float()
weight_B = weight_B.float()
output_tensor = transpose(weight_B @ weight_A, True) * self.scaling[adapter]
if cast_to_fp32:
output_tensor = output_tensor.to(dtype=dtype)
self.lora_embedding_A[adapter] = weight_A.to(dtype)
self.lora_embedding_B[adapter] = weight_B.to(dtype)
return output_tensor
def _mixed_batch_forward(self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any) -> torch.Tensor:
result = self.base_layer(x, *args, **kwargs)
unique_adapters = set(adapter_names)
sub_batch_indices_list = []
for adapter in unique_adapters:
sub_batch_indices_list.append([index for (index, item) in enumerate(adapter_names) if item == adapter])
for (i, active_adapter) in enumerate(unique_adapters):
if active_adapter == '__base__':
continue
if active_adapter not in self.lora_embedding_A.keys():
continue
embedding_A = self.lora_embedding_A[active_adapter].T
embedding_B = self.lora_embedding_B[active_adapter].T
scaling = self.scaling[active_adapter]
sub_batch = x[sub_batch_indices_list[i]]
after_A = self._embed(sub_batch, embedding_A)
result[sub_batch_indices_list[i]] += after_A @ embedding_B * scaling
return result
def _embed(self, input: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
base_layer = self.get_base_layer()
return F.embedding(input, weight, padding_idx=base_layer.padding_idx, max_norm=base_layer.max_norm, norm_type=base_layer.norm_type, scale_grad_by_freq=base_layer.scale_grad_by_freq, sparse=base_layer.sparse)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
self._check_forward_args(x, *args, **kwargs)
adapter_names = kwargs.pop('adapter_names', None)
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif adapter_names is not None:
result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
torch_result_dtype = result.dtype
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_embedding_A:
continue
embedding_A = self.lora_embedding_A[active_adapter].T
embedding_B = self.lora_embedding_B[active_adapter].T
scaling = self.scaling[active_adapter]
if not self.use_dora[active_adapter]:
after_A = self._embed(x, embedding_A)
result = result + after_A @ embedding_B * scaling
else:
(mag_norm_scale, dora_result) = self.lora_magnitude_vector[active_adapter](x, lora_A=embedding_A, lora_B=embedding_B, scaling=scaling, base_layer=self.get_base_layer(), embed_fn=self._embed)
result = mag_norm_scale * result + dora_result
result = result.to(torch_result_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'lora.' + rep
class Conv2d(nn.Module, LoraLayer):
def __init__(self, base_layer: nn.Module, adapter_name: str, r: int=0, lora_alpha: int=1, lora_dropout: float=0.0, init_lora_weights: Union[bool, str]=True, use_rslora: bool=False, use_dora: bool=False, **kwargs) -> None:
super().__init__()
LoraLayer.__init__(self, base_layer)
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora)
def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora):
if r <= 0:
raise ValueError(f'`r` should be a positive integer value but the value passed is {r}')
self.r[adapter_name] = r
self.lora_alpha[adapter_name] = lora_alpha
if lora_dropout > 0.0:
lora_dropout_layer = nn.Dropout(p=lora_dropout)
else:
lora_dropout_layer = nn.Identity()
self.lora_dropout[adapter_name] = lora_dropout_layer
base_layer = self.get_base_layer()
kernel_size = base_layer.kernel_size
stride = base_layer.stride
padding = base_layer.padding
self.lora_A[adapter_name] = nn.Conv2d(self.in_features, r, kernel_size, stride, padding, bias=False)
self.lora_B[adapter_name] = nn.Conv2d(r, self.out_features, (1, 1), (1, 1), bias=False)
if use_rslora:
self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
else:
self.scaling[adapter_name] = lora_alpha / r
if init_lora_weights == 'loftq':
self.loftq_init(adapter_name)
elif init_lora_weights:
self.reset_lora_parameters(adapter_name, init_lora_weights)
self._move_adapter_to_device_of_base_layer(adapter_name)
if use_dora:
self.dora_init(adapter_name)
self.use_dora[adapter_name] = True
else:
self.use_dora[adapter_name] = False
self.set_adapter(self.active_adapters)
def dora_init(self, adapter_name: str) -> None:
if self.lora_magnitude_vector is None:
self.adapter_layer_names = self.adapter_layer_names[:] + ('lora_magnitude_vector',)
dora_layer = DoraConv2dLayer(fan_in_fan_out=False)
lora_A = self.lora_A[adapter_name].weight
lora_B = self.lora_B[adapter_name].weight
scaling = self.scaling[adapter_name]
dora_layer.update_layer(base_layer=self.get_base_layer(), lora_A=lora_A, lora_B=lora_B, scaling=scaling)
self.lora_magnitude_vector[adapter_name] = dora_layer
def merge(self, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter in self.lora_A.keys():
base_layer = self.get_base_layer()
if safe_merge:
orig_weights = base_layer.weight.data.clone()
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
orig_weights += delta_weight
else:
weight_norm = self.lora_magnitude_vector[active_adapter].get_weight_norm(orig_weights, delta_weight, scaling=1).detach()
self._cache_store(f'{active_adapter}-weight_norm', weight_norm)
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
orig_weights = dora_factor.view(-1, 1, 1, 1) * (orig_weights + delta_weight)
if not torch.isfinite(orig_weights).all():
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
base_layer.weight.data = orig_weights
else:
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
base_layer.weight.data += delta_weight
else:
weight_norm = self.lora_magnitude_vector[active_adapter].get_weight_norm(base_layer.weight, delta_weight, scaling=1).detach()
self._cache_store(f'{active_adapter}-weight_norm', weight_norm)
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
new_weight = dora_factor.view(-1, 1, 1, 1) * (base_layer.weight.data + delta_weight)
base_layer.weight.data = new_weight
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.lora_A.keys():
weight = self.get_base_layer().weight
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
weight.data -= delta_weight
else:
weight_norm = self._cache_pop(f'{active_adapter}-weight_norm')
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
weight_orig = weight.data / dora_factor.view(-1, 1, 1, 1) - delta_weight
weight.data = weight_orig
def get_delta_weight(self, adapter) -> torch.Tensor:
device = self.lora_B[adapter].weight.device
dtype = self.lora_A[adapter].weight.dtype
cast_to_fp32 = device.type == 'cpu' and (dtype == torch.float16 or dtype == torch.bfloat16)
weight_A = self.lora_A[adapter].weight
weight_B = self.lora_B[adapter].weight
if cast_to_fp32:
weight_A = weight_A.float()
weight_B = weight_B.float()
if self.get_base_layer().weight.size()[2:4] == (1, 1):
output_tensor = (weight_B.squeeze(3).squeeze(2) @ weight_A.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze(3) * self.scaling[adapter]
else:
output_tensor = F.conv2d(weight_A.permute(1, 0, 2, 3), weight_B).permute(1, 0, 2, 3) * self.scaling[adapter]
if cast_to_fp32:
output_tensor = output_tensor.to(dtype=dtype)
self.lora_A[adapter].weight.data = weight_A.to(dtype)
self.lora_B[adapter].weight.data = weight_B.to(dtype)
return output_tensor
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
self._check_forward_args(x, *args, **kwargs)
adapter_names = kwargs.pop('adapter_names', None)
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif adapter_names is not None:
result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
torch_result_dtype = result.dtype
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
x = x.to(lora_A.weight.dtype)
if not self.use_dora[active_adapter]:
result = result + lora_B(lora_A(dropout(x))) * scaling
else:
x = dropout(x)
result = result + self.lora_magnitude_vector[active_adapter](x, lora_A=lora_A, lora_B=lora_B, scaling=scaling, base_layer=self.get_base_layer())
result = result.to(torch_result_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'lora.' + rep
def dispatch_default(target: torch.nn.Module, adapter_name: str, lora_config: LoraConfig, **kwargs) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Embedding):
embedding_kwargs = kwargs.copy()
embedding_kwargs.pop('fan_in_fan_out', None)
embedding_kwargs.update(lora_config.loftq_config)
new_module = Embedding(target, adapter_name, **embedding_kwargs)
elif isinstance(target_base_layer, torch.nn.Conv2d):
kwargs.update(lora_config.loftq_config)
new_module = Conv2d(target, adapter_name, **kwargs)
elif isinstance(target_base_layer, torch.nn.Linear):
if kwargs['fan_in_fan_out']:
warnings.warn('fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. Setting fan_in_fan_out to False.')
kwargs['fan_in_fan_out'] = lora_config.fan_in_fan_out = False
kwargs.update(lora_config.loftq_config)
new_module = Linear(target, adapter_name, **kwargs)
elif isinstance(target_base_layer, Conv1D):
if not kwargs['fan_in_fan_out']:
warnings.warn('fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True.')
kwargs['fan_in_fan_out'] = lora_config.fan_in_fan_out = True
kwargs.update(lora_config.loftq_config)
new_module = Linear(target, adapter_name, is_target_conv_1d_layer=True, **kwargs)
return new_module
# File: peft-main/src/peft/tuners/lora/model.py
from __future__ import annotations
import math
import operator
import re
import warnings
from contextlib import contextmanager
from dataclasses import asdict, replace
from enum import Enum
from functools import partial, reduce
from itertools import chain
from typing import Literal, Optional
import torch
from torch import nn
from tqdm import tqdm
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists, onload_layer, replicate_layers
from peft.utils import TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, _freeze_adapter, _get_submodules, get_peft_model_state_dict, get_quantization_config
from peft.utils.merge_utils import dare_linear, dare_ties, magnitude_prune, task_arithmetic, ties
from .aqlm import dispatch_aqlm
from .awq import dispatch_awq
from .config import LoraConfig
from .eetq import dispatch_eetq
from .gptq import dispatch_gptq
from .hqq import dispatch_hqq
from .layer import Conv2d, LoraLayer, dispatch_default
from .tp_layer import dispatch_megatron
def _adapter_names_pre_forward_hook(target, args, kwargs, adapter_names):
kwargs['adapter_names'] = adapter_names
return (args, kwargs)
class LoraModel(BaseTuner):
prefix: str = 'lora_'
def __init__(self, model, config, adapter_name) -> None:
super().__init__(model, config, adapter_name)
def _check_new_adapter_config(self, config: LoraConfig) -> None:
if len(self.peft_config) > 1 and config.bias != 'none':
raise ValueError(f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.")
@staticmethod
def _check_target_module_exists(lora_config, key):
return check_target_module_exists(lora_config, key)
def _prepare_model(self, peft_config: LoraConfig, model: nn.Module):
if peft_config.layer_replication:
replicate_layers(model, peft_config.layer_replication)
def _create_and_replace(self, lora_config, adapter_name, target, target_name, parent, current_key):
if current_key is None:
raise ValueError("Current Key shouldn't be `None`")
pattern_keys = list(chain(lora_config.rank_pattern.keys(), lora_config.alpha_pattern.keys()))
target_name_key = next(filter(lambda key: re.match(f'.*\\.{key}$', current_key), pattern_keys), current_key)
r = lora_config.rank_pattern.get(target_name_key, lora_config.r)
alpha = lora_config.alpha_pattern.get(target_name_key, lora_config.lora_alpha)
kwargs = {'r': r, 'lora_alpha': alpha, 'lora_dropout': lora_config.lora_dropout, 'fan_in_fan_out': lora_config.fan_in_fan_out, 'init_lora_weights': lora_config.init_lora_weights, 'use_rslora': lora_config.use_rslora, 'use_dora': lora_config.use_dora, 'ephemeral_gpu_offload': lora_config.runtime_config.ephemeral_gpu_offload, 'loaded_in_8bit': getattr(self.model, 'is_loaded_in_8bit', False), 'loaded_in_4bit': getattr(self.model, 'is_loaded_in_4bit', False)}
quant_methods = ['gptq', 'aqlm', 'awq']
for quant_method in quant_methods:
quantization_config = get_quantization_config(self.model, method=quant_method)
if quantization_config is not None:
kwargs[f'{quant_method}_quantization_config'] = quantization_config
from peft.tuners.adalora import AdaLoraLayer
if isinstance(target, LoraLayer) and (not isinstance(target, AdaLoraLayer)):
target.update_layer(adapter_name, r, lora_alpha=alpha, lora_dropout=lora_config.lora_dropout, init_lora_weights=lora_config.init_lora_weights, use_rslora=lora_config.use_rslora, use_dora=lora_config.use_dora)
else:
new_module = self._create_new_module(lora_config, adapter_name, target, **kwargs)
if adapter_name not in self.active_adapters:
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
def _replace_module(self, parent, child_name, new_module, child):
setattr(parent, child_name, new_module)
if hasattr(child, 'base_layer'):
child = child.base_layer
if not hasattr(new_module, 'base_layer'):
if hasattr(new_module, 'W_q'):
new_module.W_q = child.W_q
else:
new_module.weight = child.weight
if hasattr(child, 'bias'):
new_module.bias = child.bias
if getattr(child, 'state', None) is not None:
if hasattr(new_module, 'base_layer'):
new_module.base_layer.state = child.state
else:
new_module.state = child.state
new_module.to(child.weight.device)
for (name, module) in new_module.named_modules():
if self.prefix in name or 'ranknum' in name:
weight = child.qweight if hasattr(child, 'qweight') else child.W_q if hasattr(child, 'W_q') else child.weight if hasattr(child, 'weight') else next(child.parameters())
module.to(weight.device)
def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
for (n, p) in model.named_parameters():
if self.prefix not in n:
p.requires_grad = False
for active_adapter in self.active_adapters:
bias = self.peft_config[active_adapter].bias
if bias == 'none':
continue
if bias == 'all':
for (n, p) in model.named_parameters():
if 'bias' in n:
p.requires_grad = True
elif bias == 'lora_only':
for m in model.modules():
if isinstance(m, LoraLayer) and hasattr(m, 'bias') and (m.bias is not None):
m.bias.requires_grad = True
else:
raise NotImplementedError(f'Requested bias: {bias}, is not implemented.')
@staticmethod
def _create_new_module(lora_config, adapter_name, target, **kwargs):
dispatchers = []
if lora_config._custom_modules:
def dynamic_dispatch_func(target, adapter_name, lora_config, **kwargs):
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
for (key, custom_cls) in lora_config._custom_modules.items():
if isinstance(target_base_layer, key):
new_module = custom_cls(target, adapter_name, **kwargs)
break
return new_module
dispatchers.append(dynamic_dispatch_func)
if is_bnb_available():
from .bnb import dispatch_bnb_8bit
dispatchers.append(dispatch_bnb_8bit)
if is_bnb_4bit_available():
from .bnb import dispatch_bnb_4bit
dispatchers.append(dispatch_bnb_4bit)
dispatchers.extend([dispatch_eetq, dispatch_aqlm, dispatch_awq, dispatch_gptq, dispatch_hqq, dispatch_megatron, dispatch_default])
new_module = None
for dispatcher in dispatchers:
new_module = dispatcher(target, adapter_name, lora_config=lora_config, **kwargs)
if new_module is not None:
break
if new_module is None:
raise ValueError(f'Target module {target} is not supported. Currently, only the following modules are supported: `torch.nn.Linear`, `torch.nn.Embedding`, `torch.nn.Conv2d`, `transformers.pytorch_utils.Conv1D`.')
return new_module
def __getattr__(self, name: str):
try:
return super().__getattr__(name)
except AttributeError:
if name == 'model':
raise
return getattr(self.model, name)
def get_peft_config_as_dict(self, inference: bool=False):
config_dict = {}
for (key, value) in self.peft_config.items():
config = {k: v.value if isinstance(v, Enum) else v for (k, v) in asdict(value).items()}
if inference:
config['inference_mode'] = True
config_dict[key] = config
return config
def _set_adapter_layers(self, enabled: bool=True) -> None:
for module in self.model.modules():
if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
module.enable_adapters(enabled)
def enable_adapter_layers(self) -> None:
self._set_adapter_layers(enabled=True)
def disable_adapter_layers(self) -> None:
for active_adapter in self.active_adapters:
val = self.peft_config[active_adapter].bias
if val != 'none':
msg = f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same output as the the base model would without adaption."
warnings.warn(msg)
self._set_adapter_layers(enabled=False)
def set_adapter(self, adapter_name: str | list[str]) -> None:
for module in self.model.modules():
if isinstance(module, LoraLayer):
if module.merged:
warnings.warn('Adapter cannot be set when the model is merged. Unmerging the model first.')
module.unmerge()
module.set_adapter(adapter_name)
self.active_adapter = adapter_name
@contextmanager
def _enable_peft_forward_hooks(self, *args, **kwargs):
adapter_names = kwargs.pop('adapter_names', None)
if adapter_names is None:
yield
return
if self.training:
raise ValueError('Cannot pass `adapter_names` when the model is in training mode.')
hook_handles = []
for module in self.modules():
if isinstance(module, LoraLayer):
pre_forward = partial(_adapter_names_pre_forward_hook, adapter_names=adapter_names)
handle = module.register_forward_pre_hook(pre_forward, with_kwargs=True)
hook_handles.append(handle)
yield
for handle in hook_handles:
handle.remove()
def _check_merge_allowed(self):
super()._check_merge_allowed()
if getattr(self.model, 'quantization_method', None) == 'gptq':
raise ValueError('Cannot merge LORA layers when the model is gptq quantized')
if self.peft_config.get('layer_replication'):
raise ValueError('Cannot merge LORA layers when base model layers are replicated')
@staticmethod
def _prepare_adapter_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config['model_type'] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:
raise ValueError('Please specify `target_modules` in `peft_config`')
peft_config.target_modules = set(TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config['model_type']])
return peft_config
def _unload_and_optionally_merge(self, merge=True, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[list[str]]=None):
if merge:
self._check_merge_allowed()
key_list = [key for (key, _) in self.model.named_modules() if self.prefix not in key]
desc = 'Unloading ' + ('and merging ' if merge else '') + 'model'
for key in tqdm(key_list, disable=not progressbar, desc=desc):
try:
(parent, target, target_name) = _get_submodules(self.model, key)
except AttributeError:
continue
with onload_layer(target):
if hasattr(target, 'base_layer'):
if merge:
target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
self._replace_module(parent, target_name, target.get_base_layer(), target)
elif isinstance(target, ModulesToSaveWrapper):
new_module = target.modules_to_save[target.active_adapter]
if hasattr(new_module, 'base_layer'):
if merge:
new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
new_module = new_module.get_base_layer()
setattr(parent, target_name, new_module)
return self.model
def _check_add_weighted_adapter(self, adapters: list[str], combination_type: str, svd_rank: int | None) -> tuple[str, int, str]:
for adapter in adapters:
if adapter not in list(self.peft_config.keys()):
raise ValueError(f'Adapter {adapter} does not exist')
modules_to_save_wrappers = [module for module in self.modules() if isinstance(module, ModulesToSaveWrapper)]
problematic_wrappers = [wrapper for wrapper in modules_to_save_wrappers if sum((adapter in wrapper.modules_to_save for adapter in adapters)) > 1]
if problematic_wrappers:
raise ValueError(f'Cannot add weighted adapters if they target the same module with modules_to_save, but found {len(problematic_wrappers)} such instance(s).')
combination_type = 'linear' if len(adapters) == 1 else combination_type
adapters_ranks = [self.peft_config[adapter].r for adapter in adapters]
if combination_type in ('linear', 'ties', 'dare_ties', 'dare_linear', 'magnitude_prune'):
if len(set(adapters_ranks)) != 1:
raise ValueError('All adapters must have the same r value when using combination_type linear, ties, dare_ties or dare_linear.')
new_rank = adapters_ranks[0]
elif combination_type == 'cat':
new_rank = sum(adapters_ranks)
elif combination_type.endswith('svd'):
new_rank = svd_rank or max(adapters_ranks)
else:
raise ValueError(f'Invalid combination_type: {combination_type}')
target_module_types = [type(self.peft_config[adapter].target_modules) for adapter in adapters]
if not target_module_types:
raise ValueError(f'Found no adapter matching the names in {adapters}')
if len(set(target_module_types)) > 1:
raise ValueError('all adapter configs should follow the same target modules type. Combining adapters with `target_modules` type being a mix of list/set and string is not supported.')
if target_module_types[0] is str:
new_target_modules = '|'.join((f'({self.peft_config[adapter].target_modules})' for adapter in adapters))
elif target_module_types[0] is set:
new_target_modules = reduce(operator.or_, (self.peft_config[adapter].target_modules for adapter in adapters))
else:
raise TypeError(f'Invalid type {target_module_types[0]} found in target_modules')
return (combination_type, new_rank, new_target_modules)
def add_weighted_adapter(self, adapters: list[str], weights: list[float], adapter_name: str, combination_type: str='svd', svd_rank: int | None=None, svd_clamp: int | None=None, svd_full_matrices: bool=True, svd_driver: str | None=None, density: float | None=None, majority_sign_method: Literal['total', 'frequency']='total') -> None:
if adapter_name in list(self.peft_config.keys()):
return
(combination_type, new_rank, new_target_modules) = self._check_add_weighted_adapter(adapters=adapters, combination_type=combination_type, svd_rank=svd_rank)
self.peft_config[adapter_name] = replace(self.peft_config[adapters[0]], r=new_rank, lora_alpha=new_rank, target_modules=new_target_modules)
self.inject_adapter(self.model, adapter_name)
_freeze_adapter(self.model, adapter_name)
key_list = [key for (key, _) in self.model.named_modules() if self.prefix not in key]
for key in key_list:
(_, target, _) = _get_submodules(self.model, key)
if isinstance(target, LoraLayer):
if adapter_name in target.lora_A:
target_lora_A = target.lora_A[adapter_name].weight
target_lora_B = target.lora_B[adapter_name].weight
elif adapter_name in target.lora_embedding_A:
target_lora_A = target.lora_embedding_A[adapter_name]
target_lora_B = target.lora_embedding_B[adapter_name]
else:
continue
target_lora_A.data = target_lora_A.data * 0.0
target_lora_B.data = target_lora_B.data * 0.0
if combination_type == 'cat':
(loras_A, loras_B) = ([], [])
for (adapter, weight) in zip(adapters, weights):
if adapter in target.lora_A:
current_adapter_lora_A = target.lora_A[adapter].weight
current_adapter_lora_B = target.lora_B[adapter].weight
elif adapter in target.lora_embedding_A:
current_adapter_lora_A = target.lora_embedding_A[adapter]
current_adapter_lora_B = target.lora_embedding_B[adapter]
else:
continue
loras_A.append(current_adapter_lora_A.data * weight * target.scaling[adapter])
loras_B.append(current_adapter_lora_B.data)
if len(loras_A) == 0:
raise ValueError('No matching LoRAs found. Please raise an issue on GitHub.')
loras_A = torch.cat(loras_A, dim=0)
loras_B = torch.cat(loras_B, dim=1)
target_lora_A.data[:loras_A.shape[0], :] = loras_A
target_lora_B.data[:, :loras_B.shape[1]] = loras_B
elif combination_type in ['svd', 'ties_svd', 'dare_linear_svd', 'dare_ties_svd', 'magnitude_prune_svd']:
(target_lora_A.data, target_lora_B.data) = self._svd_generalized_task_arithmetic_weighted_adapter(combination_type, adapters, weights, new_rank, target, target_lora_A, target_lora_B, density, majority_sign_method, svd_clamp, full_matrices=svd_full_matrices, driver=svd_driver)
elif combination_type in ['linear', 'ties', 'dare_linear', 'dare_ties', 'magnitude_prune']:
(target_lora_A.data, target_lora_B.data) = self._generalized_task_arithmetic_weighted_adapter(combination_type, adapters, weights, target, density, majority_sign_method)
def _svd_generalized_task_arithmetic_weighted_adapter(self, combination_type, adapters, weights, new_rank, target, target_lora_A, target_lora_B, density, majority_sign_method, clamp=None, full_matrices=True, driver=None):
valid_adapters = []
valid_weights = []
is_embedding = any((adapter in target.lora_embedding_A for adapter in adapters))
for (adapter, weight) in zip(adapters, weights):
if adapter in target.lora_A or adapter in target.lora_embedding_A:
valid_adapters.append(adapter)
valid_weights.append(weight * target.scaling[adapter])
if len(valid_adapters) == 0:
raise ValueError('No matching LoRAs found. Please raise an issue on Github.')
delta_weight = [target.get_delta_weight(adapter) for adapter in valid_adapters]
valid_weights = torch.tensor(valid_weights).to(delta_weight[0].device)
if combination_type == 'svd':
delta_weight = task_arithmetic(delta_weight, valid_weights)
elif combination_type == 'ties_svd':
delta_weight = ties(delta_weight, valid_weights, density, majority_sign_method)
elif combination_type == 'dare_linear_svd':
delta_weight = dare_linear(delta_weight, valid_weights, density)
elif combination_type == 'dare_ties_svd':
delta_weight = dare_ties(delta_weight, valid_weights, density, majority_sign_method)
elif combination_type == 'magnitude_prune_svd':
delta_weight = magnitude_prune(delta_weight, valid_weights, density)
else:
raise ValueError(f'Invalid value passed to combination type: {combination_type}')
conv2d = isinstance(target, Conv2d)
if conv2d:
conv2d_1x1 = target.weight.size()[2:4] == (1, 1)
if not conv2d_1x1:
delta_weight = delta_weight.flatten(start_dim=1)
else:
delta_weight = delta_weight.squeeze()
if hasattr(target, 'fan_in_fan_out') and target.fan_in_fan_out or is_embedding:
delta_weight = delta_weight.T
(U, S, Vh) = torch.linalg.svd(delta_weight, full_matrices=full_matrices, driver=driver)
U = U[:, :new_rank]
S = S[:new_rank]
U = U @ torch.diag(S)
Vh = Vh[:new_rank, :]
if clamp is not None:
dist = torch.cat([U.flatten(), Vh.flatten()])
hi_val = torch.quantile(dist, clamp)
low_val = -hi_val
U = U.clamp(low_val, hi_val)
Vh = Vh.clamp(low_val, hi_val)
if conv2d:
U = U.reshape(target_lora_B.data.shape)
Vh = Vh.reshape(target_lora_A.data.shape)
return (Vh, U)
def _generalized_task_arithmetic_weighted_adapter(self, combination_type, adapters, weights, target, density, majority_sign_method):
valid_weights = []
lora_A_deltas = []
lora_B_deltas = []
for (adapter, weight) in zip(adapters, weights):
if adapter in target.lora_A:
current_adapter_lora_A = target.lora_A[adapter].weight
current_adapter_lora_B = target.lora_B[adapter].weight
elif adapter in target.lora_embedding_A:
current_adapter_lora_A = target.lora_embedding_A[adapter]
current_adapter_lora_B = target.lora_embedding_B[adapter]
else:
continue
valid_weights.append(math.sqrt(weight * target.scaling[adapter]))
lora_A_deltas.append(current_adapter_lora_A.data)
lora_B_deltas.append(current_adapter_lora_B.data)
valid_weights = torch.tensor(valid_weights).to(lora_A_deltas[0].device)
lora_deltas = [lora_A_deltas, lora_B_deltas]
dtype = lora_A_deltas[0].dtype
for (i, task_tensors) in enumerate(lora_deltas):
if combination_type == 'linear':
lora_deltas[i] = task_arithmetic(task_tensors, valid_weights)
elif combination_type == 'ties':
lora_deltas[i] = ties(task_tensors, valid_weights, density, majority_sign_method)
elif combination_type == 'dare_linear':
lora_deltas[i] = dare_linear(task_tensors, valid_weights, density)
elif combination_type == 'dare_ties':
lora_deltas[i] = dare_ties(task_tensors, valid_weights, density, majority_sign_method)
elif combination_type == 'magnitude_prune':
lora_deltas[i] = magnitude_prune(task_tensors, valid_weights, density)
else:
raise ValueError('Invalid combination type')
lora_deltas = [delta.to(dtype) for delta in lora_deltas]
return lora_deltas
def delete_adapter(self, adapter_name: str) -> None:
if adapter_name not in list(self.peft_config.keys()):
raise ValueError(f'Adapter {adapter_name} does not exist')
del self.peft_config[adapter_name]
key_list = [key for (key, _) in self.model.named_modules() if self.prefix not in key]
new_adapter = None
for key in key_list:
(_, target, _) = _get_submodules(self.model, key)
if isinstance(target, LoraLayer):
target.delete_adapter(adapter_name)
if new_adapter is None:
new_adapter = target.active_adapters[:]
self.active_adapter = new_adapter or []
def merge_and_unload(self, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> torch.nn.Module:
return self._unload_and_optionally_merge(progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names)
def unload(self) -> torch.nn.Module:
return self._unload_and_optionally_merge(merge=False)
def subtract_mutated_init(self, output_state_dict: dict[str, torch.Tensor], adapter_name: str, kwargs=None):
for (name, param) in self.model.named_parameters():
if (param.data.dtype != torch.float32 and param.data.dtype != torch.float16 and (param.data.dtype != torch.bfloat16)) and adapter_name.startswith('pissa'):
warnings.warn('Note that Quant(W_res) + AB != Quant(W) + \\Delta(AB); the converted LoRA, when combined with W or Quant(W), may introduce a certain gap in the fine-tuned model. Therefore, we recommend directly using the Quant(W_res) in conjunction with the PiSSA adapter. ')
mutated_init_state_dict = get_peft_model_state_dict(self, state_dict=kwargs.get('state_dict', None), adapter_name=adapter_name)
tensors_lora = {}
for name in output_state_dict.keys():
if 'lora_A' in name:
tensors_lora[name] = torch.cat([output_state_dict[name], mutated_init_state_dict['.'.join(name.split('.')[1:])]], dim=0)
elif 'lora_B' in name:
tensors_lora[name] = torch.cat([output_state_dict[name], -mutated_init_state_dict['.'.join(name.split('.')[1:])]], dim=1)
return tensors_lora
# File: peft-main/src/peft/tuners/lora/tp_layer.py
from __future__ import annotations
import importlib
import math
import warnings
from typing import Any, Optional, Union
import torch
import torch.nn as nn
import torch.nn.init as init
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from peft.utils import transpose
from peft.utils.integrations import gather_params_ctx
from .layer import LoraLayer
class LoraParallelLinear(nn.Module, LoraLayer):
def __init__(self, base_layer, adapter_name: str, backend, r: int=0, lora_alpha: int=1, lora_dropout: float=0.0, fan_in_fan_out: bool=False, is_target_conv_1d_layer: bool=False, init_lora_weights: Union[bool, str]=True, use_rslora: bool=False, use_dora: bool=False, **kwargs):
super().__init__()
LoraLayer.__init__(self, base_layer=base_layer, **kwargs)
if use_dora:
raise ValueError(f'{self.__class__.__name__} does not support DoRA yet, please set it to False')
self.backend = backend
self.is_parallel_a = isinstance(base_layer, backend.RowParallelLinear)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
megatron_config = kwargs['megatron_config']
parallel_linear_kwargs = {'megatron_config': megatron_config}
init_method = init.xavier_normal_
if hasattr(megatron_config, 'init_method'):
init_method = megatron_config.init_method
input_is_parallel = True
gather_output = False
if isinstance(base_layer, self.backend.RowParallelLinear):
input_is_parallel = base_layer.input_is_parallel
else:
gather_output = base_layer.gather_output
self.update_layer(adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, init_method=init_method, input_is_parallel=input_is_parallel, gather_output=gather_output, **parallel_linear_kwargs)
if is_target_conv_1d_layer:
raise ValueError(f'{self.__class__.__name__} does not support target_conv_1d_layer yet, please set it to False')
self.is_target_conv_1d_layer = False
def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora=False, init_method=init.xavier_normal_, input_is_parallel=True, gather_output=False, **parallel_linear_kwargs):
if r <= 0:
raise ValueError(f'`r` should be a positive integer value but the value passed is {r}')
self.r[adapter_name] = r
self.lora_alpha[adapter_name] = lora_alpha
if lora_dropout > 0.0:
lora_dropout_layer = nn.Dropout(p=lora_dropout)
else:
lora_dropout_layer = nn.Identity()
self.lora_dropout[adapter_name] = lora_dropout_layer
megatron_config = parallel_linear_kwargs['megatron_config']
megatron_config.params_dtype = torch.float32
if self.is_parallel_a:
lora_a = self.backend.RowParallelLinear(input_size=self.in_features, output_size=r, bias=False, input_is_parallel=input_is_parallel, skip_bias_add=True, init_method=init_method, config=megatron_config)
lora_b = nn.Linear(in_features=r, out_features=self.out_features, bias=False, dtype=torch.float32)
else:
lora_a = nn.Linear(in_features=self.in_features, out_features=r, bias=False, dtype=torch.float32)
lora_b = self.backend.ColumnParallelLinear(input_size=r, output_size=self.out_features, bias=False, gather_output=gather_output, init_method=init_method, config=megatron_config)
self.lora_A[adapter_name] = lora_a
self.lora_B[adapter_name] = lora_b
if use_rslora:
self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
else:
self.scaling[adapter_name] = lora_alpha / r
if isinstance(init_lora_weights, str) and init_lora_weights.startswith('pissa'):
with gather_params_ctx(self.get_base_layer().weight):
self.pissa_init(adapter_name, init_lora_weights)
elif isinstance(init_lora_weights, str) and init_lora_weights.lower() == 'olora':
with gather_params_ctx(self.get_base_layer().weight):
self.olora_init(adapter_name)
elif init_lora_weights == 'loftq':
with gather_params_ctx(self.get_base_layer().weight):
self.loftq_init(adapter_name)
elif init_lora_weights:
self.reset_lora_parameters(adapter_name, init_lora_weights)
self._move_adapter_to_device_of_base_layer(adapter_name)
if use_dora:
self.dora_init(adapter_name)
self.use_dora[adapter_name] = True
else:
self.use_dora[adapter_name] = False
self.set_adapter(self.active_adapters)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any):
self._check_forward_args(x, *args, **kwargs)
adapter_names = kwargs.pop('adapter_names', None)
if self.disable_adapters:
if self.merged:
self.unmerge()
(result, bias) = self.base_layer(x, *args, **kwargs)
elif adapter_names is not None:
raise ValueError(f'{self.__class__.__name__} does not support mixed_batch_forward yet.')
elif self.merged:
(result, bias) = self.base_layer(x, *args, **kwargs)
else:
(result, bias) = self.base_layer(x, *args, **kwargs)
torch_result_dtype = result.dtype
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
x = x.to(lora_A.weight.dtype)
if not self.use_dora[active_adapter]:
lora_result = lora_A(dropout(x))
if isinstance(lora_result, tuple):
lora_result = lora_result[0]
lora_result = lora_B(lora_result)
if isinstance(lora_result, tuple):
lora_result = lora_result[0]
lora_result = lora_result * scaling
result = result + lora_result
else:
x = dropout(x)
result = result + self.lora_magnitude_vector[active_adapter](x, lora_A=lora_A, lora_B=lora_B, scaling=scaling, base_layer=self.get_base_layer())
result = result.to(torch_result_dtype)
return (result, bias)
def merge(self, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter in self.lora_A.keys():
base_layer = self.get_base_layer()
if safe_merge:
orig_weights = base_layer.weight.data.clone()
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
orig_weights = orig_weights + delta_weight
else:
weight_norm = self.lora_magnitude_vector[active_adapter].get_weight_norm(orig_weights, transpose(delta_weight, self.fan_in_fan_out), scaling=1).detach()
self._cache_store(f'{active_adapter}-weight_norm', weight_norm)
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
dora_factor = transpose(dora_factor.view(-1, 1), self.fan_in_fan_out)
orig_weights = dora_factor * (orig_weights + delta_weight)
if not torch.isfinite(orig_weights).all():
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
base_layer.weight.data = orig_weights
else:
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
base_layer.weight.data = base_layer.weight.data + delta_weight
else:
weight_norm = self.lora_magnitude_vector[active_adapter].get_weight_norm(base_layer.weight, transpose(delta_weight, self.fan_in_fan_out), scaling=1).detach()
self._cache_store(f'{active_adapter}-weight_norm', weight_norm)
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
dora_factor = transpose(dora_factor.view(-1, 1), self.fan_in_fan_out)
new_weight = dora_factor * (base_layer.weight.data + delta_weight)
base_layer.weight.data = new_weight
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.lora_A.keys():
weight = self.get_base_layer().weight
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
weight.data -= delta_weight
else:
weight_norm = self._cache_pop(f'{active_adapter}-weight_norm')
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
weight_orig = weight.data / dora_factor.view(-1, 1) - delta_weight
weight.data = weight_orig
def get_delta_weight(self, adapter) -> torch.Tensor:
device = self.lora_B[adapter].weight.device
dtype = self.lora_B[adapter].weight.dtype
cast_to_fp32 = device.type == 'cpu' and (dtype == torch.float16 or dtype == torch.bfloat16)
weight_A = self.lora_A[adapter].weight
weight_B = self.lora_B[adapter].weight
if cast_to_fp32:
weight_A = weight_A.float()
weight_B = weight_B.float()
output_tensor = transpose(weight_B @ weight_A, self.fan_in_fan_out) * self.scaling[adapter]
if cast_to_fp32:
output_tensor = output_tensor.to(dtype=dtype)
self.lora_A[adapter].weight.data = weight_A.to(dtype)
self.lora_B[adapter].weight.data = weight_B.to(dtype)
return output_tensor
def __repr__(self) -> str:
rep = super().__repr__()
return 'lora.' + rep
def dispatch_megatron(target: torch.nn.Module, adapter_name: str, lora_config, **kwargs: Any) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if lora_config.megatron_config:
megatron_core = importlib.import_module(lora_config.megatron_core)
else:
megatron_core = None
if megatron_core and isinstance(target_base_layer, (megatron_core.tensor_parallel.ColumnParallelLinear, megatron_core.tensor_parallel.RowParallelLinear)):
megatron_kwargs = kwargs.copy()
megatron_config = lora_config.megatron_config
if isinstance(megatron_config, dict):
transformer_config_class = megatron_core.transformer.transformer_config.TransformerConfig
megatron_config = transformer_config_class(**lora_config.megatron_config)
megatron_kwargs['megatron_config'] = megatron_config
if megatron_kwargs['fan_in_fan_out']:
warnings.warn('fan_in_fan_out is set to True but the target module is `ColumnParallelLinear` or `RowParallelLinear`. Setting fan_in_fan_out to False.')
megatron_kwargs['fan_in_fan_out'] = lora_config.fan_in_fan_out = False
new_module = LoraParallelLinear(base_layer=target, adapter_name=adapter_name, backend=megatron_core.tensor_parallel, **megatron_kwargs)
return new_module
# File: peft-main/src/peft/tuners/lycoris_utils.py
from __future__ import annotations
import warnings
from abc import abstractmethod
from dataclasses import dataclass, field
from typing import Any, Optional, Union
import torch
import torch.nn as nn
from tqdm import tqdm
from peft.config import PeftConfig
from peft.utils import ModulesToSaveWrapper, _get_submodules
from .tuners_utils import BaseTuner, BaseTunerLayer, check_adapters_to_merge, check_target_module_exists
@dataclass
class LycorisConfig(PeftConfig):
rank_pattern: Optional[dict] = field(default_factory=dict, metadata={'help': 'The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. For example, `{model.decoder.layers.0.encoder_attn.k_proj: 8`}'})
alpha_pattern: Optional[dict] = field(default_factory=dict, metadata={'help': 'The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `alpha`. For example, `{model.decoder.layers.0.encoder_attn.k_proj: 32`}'})
class LycorisLayer(BaseTunerLayer):
other_param_names = ('r', 'alpha', 'scaling', 'rank_dropout', 'module_dropout')
def __init__(self, base_layer: nn.Module) -> None:
self.base_layer = base_layer
self.r = {}
self.alpha = {}
self.scaling = {}
self.rank_dropout = {}
self.module_dropout = {}
self._disable_adapters = False
self.merged_adapters = []
@property
@abstractmethod
def _available_adapters(self) -> set[str]:
...
def _init_empty_weights(self, cls, *args, **kwargs) -> None:
kwargs = kwargs.copy()
final_device = kwargs.pop('device', 'cpu')
cls.__init__(self, *args, device='meta', **kwargs)
self.to_empty(device=final_device)
@abstractmethod
def create_adapter_parameters(self, adapter_name: str, r: int, **kwargs):
...
@abstractmethod
def _get_delta_activations(self, adapter_name: str, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
@abstractmethod
def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
...
def merge(self, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter in self._available_adapters:
base_layer = self.get_base_layer()
if safe_merge:
orig_weights = base_layer.weight.data.clone()
orig_weights += self.get_delta_weight(active_adapter)
if not torch.isfinite(orig_weights).all():
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
base_layer.weight.data = orig_weights
else:
base_layer.weight.data += self.get_delta_weight(active_adapter)
self.merged_adapters.append(active_adapter)
@abstractmethod
def reset_adapter_parameters(self, adapter_name: str):
...
def set_scale(self, adapter, scale):
if adapter not in self._available_adapters:
return
self.scaling[adapter] = scale * self.alpha[adapter] / self.r[adapter]
def scale_layer(self, scale: float) -> None:
if scale == 1:
return
for active_adapter in self.active_adapters:
if active_adapter not in self._available_adapters:
continue
self.scaling[active_adapter] *= scale
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self._available_adapters:
self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
def unscale_layer(self, scale=None) -> None:
for active_adapter in self.active_adapters:
if active_adapter not in self._available_adapters:
continue
if scale is None:
self.scaling[active_adapter] = self.alpha[active_adapter] / self.r[active_adapter]
else:
self.scaling[active_adapter] /= scale
@abstractmethod
def update_layer(self, adapter_name: str, r: int, alpha: float, **kwargs):
...
class LycorisTuner(BaseTuner):
prefix: str
layers_mapping: dict[type[torch.nn.Module], type[LycorisLayer]]
def __init__(self, model, config, adapter_name):
super().__init__(model, config, adapter_name)
def __getattr__(self, name: str):
try:
return super().__getattr__(name)
except AttributeError:
if name == 'model':
raise
return getattr(self.model, name)
@staticmethod
def _check_target_module_exists(config, key):
return check_target_module_exists(config, key)
@abstractmethod
def _create_and_replace(self, config: LycorisConfig, adapter_name: str, target: Union[LycorisLayer, nn.Module], target_name, parent, current_key):
...
@classmethod
def _create_new_module(cls, config: LycorisConfig, adapter_name: str, target: nn.Module, **kwargs) -> LycorisLayer:
new_module_cls = None
for (subtype, target_cls) in cls.layers_mapping.items():
if hasattr(target, 'base_layer') and isinstance(target.get_base_layer(), subtype) and isinstance(target, BaseTunerLayer):
new_module_cls = target_cls
break
elif isinstance(target, subtype):
new_module_cls = target_cls
break
if new_module_cls is None:
supported_modules = ', '.join((layer.__name__ for layer in cls.layers_mapping.keys()))
raise ValueError(f'Target module of type {type(target)} not supported, currently only adapters for {supported_modules} are supported')
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Conv2d):
new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs)
elif isinstance(target_base_layer, torch.nn.Linear):
new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs)
else:
supported_modules = ', '.join((layer.__name__ for layer in cls.layers_mapping.keys()))
raise ValueError(f'Target module of type {type(target)} not supported, currently only adapters for {supported_modules} are supported')
return new_module
def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
for (n, p) in model.named_parameters():
if self.prefix not in n:
p.requires_grad = False
@staticmethod
def _prepare_adapter_config(peft_config, model_config):
if peft_config.target_modules is None:
raise ValueError('Please specify `target_modules` in `peft_config`')
return peft_config
def _replace_module(self, parent, child_name, new_module, child):
setattr(parent, child_name, new_module)
if not hasattr(new_module, 'base_layer'):
new_module.weight = child.weight
if hasattr(child, 'bias'):
new_module.bias = child.bias
if getattr(child, 'state', None) is not None:
if hasattr(new_module, 'base_layer'):
new_module.base_layer.state = child.state
else:
new_module.state = child.state
new_module.to(child.weight.device)
for (name, module) in new_module.named_modules():
if self.prefix in name:
module.to(child.weight.device)
def _set_adapter_layers(self, enabled=True):
for module in self.model.modules():
if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
module.enable_adapters(enabled)
def _unload_and_optionally_merge(self, merge: bool=True, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[list[str]]=None):
if merge:
if getattr(self.model, 'quantization_method', None) == 'gptq':
raise ValueError('Cannot merge LOHA layers when the model is gptq quantized')
self._unloading_checks(adapter_names)
key_list = [key for (key, _) in self.model.named_modules() if self.prefix not in key]
desc = 'Unloading ' + ('and merging ' if merge else '') + 'model'
for key in tqdm(key_list, disable=not progressbar, desc=desc):
try:
(parent, target, target_name) = _get_submodules(self.model, key)
except AttributeError:
continue
if hasattr(target, 'base_layer'):
if merge:
target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
self._replace_module(parent, target_name, target.get_base_layer(), target)
elif isinstance(target, ModulesToSaveWrapper):
new_module = target.modules_to_save[target.active_adapter]
if hasattr(new_module, 'base_layer'):
if merge:
new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
new_module = new_module.get_base_layer()
setattr(parent, target_name, new_module)
return self.model
def enable_adapter_layers(self) -> None:
self._set_adapter_layers(enabled=True)
def disable_adapter_layers(self) -> None:
self._set_adapter_layers(enabled=False)
def merge_and_unload(self, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> torch.nn.Module:
return self._unload_and_optionally_merge(progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names)
def unload(self) -> torch.nn.Module:
return self._unload_and_optionally_merge(merge=False)
def set_adapter(self, adapter_name: str | list[str]) -> None:
for module in self.model.modules():
if isinstance(module, LycorisLayer):
if module.merged:
warnings.warn('Adapter cannot be set when the model is merged. Unmerging the model first.')
module.unmerge()
module.set_adapter(adapter_name)
self.active_adapter = adapter_name
def delete_adapter(self, adapter_name: str) -> None:
if adapter_name not in list(self.peft_config.keys()):
raise ValueError(f'Adapter {adapter_name} does not exist')
del self.peft_config[adapter_name]
key_list = [key for (key, _) in self.model.named_modules() if self.prefix not in key]
new_adapter = None
for key in key_list:
(_, target, _) = _get_submodules(self.model, key)
if isinstance(target, LycorisLayer):
target.delete_adapter(adapter_name)
if new_adapter is None:
new_adapter = target.active_adapters[:]
self.active_adapter = new_adapter or []
# File: peft-main/src/peft/tuners/mixed/model.py
from __future__ import annotations
import warnings
from typing import Any, Optional, Union
from torch import nn
from tqdm import tqdm
from peft.tuners import adalora, loha, lokr, lora, oft
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
from peft.utils import TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, PeftType, _get_submodules, get_auto_gptq_quant_linear
COMPATIBLE_TUNER_TYPES = (PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.ADALORA, PeftType.OFT)
PREFIXES = [lora.LoraModel.prefix, lokr.LoKrModel.prefix, loha.LoHaModel.prefix, oft.OFTModel.prefix]
Configs = Union[lora.LoraConfig, loha.LoHaConfig, lokr.LoKrConfig, adalora.AdaLoraConfig, oft.OFTConfig]
Layers = (lora.layer.LoraLayer, loha.layer.LoHaLayer, lokr.layer.LoKrLayer, adalora.layer.AdaLoraLayer, oft.OFTLayer)
class MixedModel(BaseTuner):
def __init__(self, model: nn.Module, config: Configs, adapter_name: str) -> None:
super().__init__(model, config, adapter_name)
def _check_new_adapter_config(self, config: Configs) -> None:
if not isinstance(config, Configs.__args__):
raise ValueError(f'{self.__class__.__name__} only supports {COMPATIBLE_TUNER_TYPES} configs, but got {type(config)}.')
biases = (getattr(config, 'bias', None) for config in self.peft_config)
biases = [bias for bias in biases if bias not in (None, 'none')]
if len(biases) > 1:
raise ValueError(f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.")
@staticmethod
def _check_target_module_exists(config: Configs, key: str):
return check_target_module_exists(config, key)
def _create_and_replace(self, config: Configs, *args: Any, **kwargs: Any) -> None:
if isinstance(config, adalora.AdaLoraConfig):
adalora.AdaLoraModel._create_and_replace(self, config, *args, **kwargs)
elif isinstance(config, lora.LoraConfig):
lora.LoraModel._create_and_replace(self, config, *args, **kwargs)
elif isinstance(config, loha.LoHaConfig):
loha.LoHaModel._create_and_replace(self, config, *args, **kwargs)
elif isinstance(config, lokr.LoKrConfig):
lokr.LoKrModel._create_and_replace(self, config, *args, **kwargs)
elif isinstance(config, oft.OFTConfig):
oft.OFTModel._create_and_replace(self, config, *args, **kwargs)
else:
raise ValueError(f'Unsupported config type {type(config)}, should be one of {COMPATIBLE_TUNER_TYPES}.')
def _replace_module(self, parent, child_name, new_module, child) -> None:
setattr(parent, child_name, new_module)
if hasattr(child, 'base_layer'):
child = child.get_base_layer()
elif hasattr(child, 'quant_linear_module'):
child = child.quant_linear_module
if not hasattr(new_module, 'base_layer'):
new_module.weight = child.weight
if hasattr(child, 'bias'):
new_module.bias = child.bias
if getattr(child, 'state', None) is not None:
if hasattr(new_module, 'base_layer'):
new_module.base_layer.state = child.state
else:
new_module.state = child.state
new_module.to(child.weight.device)
for (name, module) in new_module.named_modules():
if any((prefix in name for prefix in PREFIXES)):
module.to(child.weight.device)
if 'ranknum' in name:
module.to(child.weight.device)
def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
for (n, p) in model.named_parameters():
if not any((prefix in n for prefix in PREFIXES)):
p.requires_grad = False
for active_adapter in self.active_adapters:
bias = getattr(self.peft_config[active_adapter], 'bias', 'none')
if bias == 'none':
continue
if bias == 'all':
for (n, p) in model.named_parameters():
if 'bias' in n:
p.requires_grad = True
elif bias == 'lora_only':
for m in model.modules():
if isinstance(m, Layers) and hasattr(m, 'bias') and (m.bias is not None):
m.bias.requires_grad = True
else:
raise ValueError(f'Requested bias: {bias}, is not implemented.')
@staticmethod
def _create_new_module(config, adapter_name, target, **kwargs):
gptq_quantization_config = kwargs.get('gptq_quantization_config', None)
AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config)
if gptq_quantization_config is not None or AutoGPTQQuantLinear is not None:
raise ValueError(f'GPTQ quantization not supported for {config.peft_type.value} (yet).')
loaded_in_8bit = kwargs.pop('loaded_in_8bit', False)
loaded_in_4bit = kwargs.pop('loaded_in_4bit', False)
if loaded_in_8bit or loaded_in_4bit:
raise ValueError(f'8bit and 4bit quantization not supported for {config.peft_type.value} (yet).')
if isinstance(config, adalora.AdaLoraConfig):
new_module = adalora.AdaLoraModel._create_new_module(config, adapter_name, target, **kwargs)
elif isinstance(config, lora.LoraConfig):
new_module = lora.LoraModel._create_new_module(config, adapter_name, target, **kwargs)
elif isinstance(config, loha.LoHaConfig):
new_module = loha.LoHaModel._create_new_module(config, adapter_name, target, **kwargs)
elif isinstance(config, lokr.LoKrConfig):
new_module = lokr.LoKrModel._create_new_module(config, adapter_name, target, **kwargs)
elif isinstance(config, oft.OFTConfig):
new_module = oft.OFTModel._create_new_module(config, adapter_name, target, **kwargs)
else:
raise ValueError(f'Unknown config type {type(config)}, should be one of {COMPATIBLE_TUNER_TYPES}.')
return new_module
def __getattr__(self, name: str):
try:
return super().__getattr__(name)
except AttributeError:
if name == 'model':
raise
return getattr(self.model, name)
def _set_adapter_layers(self, enabled=True):
for module in self.model.modules():
if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
module.enable_adapters(enabled)
def enable_adapter_layers(self):
self._set_adapter_layers(enabled=True)
def disable_adapter_layers(self):
for active_adapter in self.active_adapters:
val = getattr(self.peft_config[active_adapter], 'bias', 'none')
if val != 'none':
msg = f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same output as the the base model would without adaption."
warnings.warn(msg)
self._set_adapter_layers(enabled=False)
def set_adapter(self, adapter_name: Union[str, list[str]]) -> None:
for module in self.model.modules():
if isinstance(module, Layers):
if module.merged:
warnings.warn('Adapter cannot be set when the model is merged. Unmerging the model first.')
module.unmerge()
module.set_adapter(adapter_name)
self.active_adapter = adapter_name
@staticmethod
def _prepare_adapter_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config['model_type'] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:
raise ValueError('Please specify `target_modules` in `peft_config`')
peft_config.target_modules = set(TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config['model_type']])
return peft_config
def _unload_and_optionally_merge(self, merge=True, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[list[str]]=None):
if merge:
if getattr(self.model, 'quantization_method', None) == 'gptq':
raise ValueError('Cannot merge layers when the model is gptq quantized')
def merge_recursively(module):
path = []
layer = module
while hasattr(layer, 'base_layer'):
path.append(layer)
layer = layer.base_layer
for (layer_before, layer_after) in zip(path[:-1], path[1:]):
layer_after.merge(safe_merge=safe_merge, adapter_names=adapter_names)
layer_before.base_layer = layer_after.base_layer
module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
key_list = [key for (key, _) in self.model.named_modules() if not any((prefix in key for prefix in PREFIXES))]
desc = 'Unloading ' + ('and merging ' if merge else '') + 'model'
for key in tqdm(key_list, disable=not progressbar, desc=desc):
try:
(parent, target, target_name) = _get_submodules(self.model, key)
except AttributeError:
continue
if hasattr(target, 'base_layer'):
if merge:
merge_recursively(target)
self._replace_module(parent, target_name, target.get_base_layer(), target)
elif isinstance(target, ModulesToSaveWrapper):
new_module = target.modules_to_save[target.active_adapter]
if hasattr(new_module, 'base_layer'):
if merge:
new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
new_module = new_module.get_base_layer()
setattr(parent, target_name, new_module)
return self.model
def add_weighted_adapter(self, *args: Any, **kwargs: Any) -> None:
raise NotImplementedError(f'Weighted adapters are not supported for {self.__class__.__name__} (yet).')
def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None:
if isinstance(adapter_name, str):
adapter_names = [adapter_name]
else:
adapter_names = adapter_name
mismatched = set(adapter_names) - set(self.peft_config.keys())
if mismatched:
raise ValueError(f'Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}')
for adapter_name in adapter_names:
del self.peft_config[adapter_name]
key_list = [key for (key, _) in self.model.named_modules() if not any((prefix in key for prefix in PREFIXES))]
new_adapter = None
for key in key_list:
(_, target, _) = _get_submodules(self.model, key)
if isinstance(target, BaseTunerLayer):
target.delete_adapter(adapter_name)
if new_adapter is None:
new_adapter = target.active_adapters[:]
self.active_adapter = new_adapter or []
def merge_and_unload(self, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> nn.Module:
return self._unload_and_optionally_merge(progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names)
def unload(self) -> nn.Module:
return self._unload_and_optionally_merge(merge=False)
def generate(self, *args: Any, **kwargs: Any):
return self.model.generate(*args, **kwargs)
# File: peft-main/src/peft/tuners/multitask_prompt_tuning/config.py
import enum
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.tuners.prompt_tuning import PromptTuningConfig
from peft.utils import PeftType
class MultitaskPromptTuningInit(str, enum.Enum):
TEXT = 'TEXT'
RANDOM = 'RANDOM'
AVERAGE_SOURCE_TASKS = 'AVERAGE_SOURCE_TASKS'
EXACT_SOURCE_TASK = 'EXACT_SOURCE_TASK'
ONLY_SOURCE_SHARED = 'ONLY_SOURCE_SHARED'
@dataclass
class MultitaskPromptTuningConfig(PromptTuningConfig):
prompt_tuning_init: Union[MultitaskPromptTuningInit, str] = field(default=MultitaskPromptTuningInit.RANDOM, metadata={'help': 'How to initialize the prompt tuning parameters. Can be one of TEXT, RANDOM, AVERAGE_SOURCE_TASKS, EXACT_SOURCE_TASK, ONLY_SOURCE_SHARED.'})
prompt_tuning_init_state_dict_path: Optional[str] = field(default=None, metadata={'help': 'The path of source state dict. This is required when training the downstream target prompt from the pretrained source prompt'})
prompt_tuning_init_task: Optional[int] = field(default=0, metadata={'help': 'source task id for initialization'})
num_ranks: Optional[int] = field(default=1, metadata={'help': 'ranks'})
num_tasks: Optional[int] = field(default=1, metadata={'help': 'number of tasks'})
def __post_init__(self):
self.peft_type = PeftType.MULTITASK_PROMPT_TUNING
# File: peft-main/src/peft/tuners/multitask_prompt_tuning/model.py
import torch
from peft.tuners.prompt_tuning import PromptEmbedding
from peft.utils import TaskType
from peft.utils.save_and_load import torch_load
from .config import MultitaskPromptTuningConfig, MultitaskPromptTuningInit
class MultitaskPromptEmbedding(PromptEmbedding):
def __init__(self, config: MultitaskPromptTuningConfig, word_embeddings):
super().__init__(config, word_embeddings)
self.num_tasks = config.num_tasks
self.num_ranks = config.num_ranks
self.num_virtual_tokens = config.num_virtual_tokens
self.num_transformer_submodules = config.num_transformer_submodules
if self.num_transformer_submodules is None:
self.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1
self.token_dim = config.token_dim
total_virtual_tokens = self.num_virtual_tokens * self.num_transformer_submodules
self.prefix_task_cols = torch.nn.Parameter(torch.normal(mean=0, std=0.02, size=(self.num_tasks, total_virtual_tokens, self.num_ranks)))
self.prefix_task_rows = torch.nn.Parameter(torch.normal(mean=0, std=0.02, size=(self.num_tasks, self.num_ranks, self.token_dim)))
if config.prompt_tuning_init in [MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS, MultitaskPromptTuningInit.EXACT_SOURCE_TASK, MultitaskPromptTuningInit.ONLY_SOURCE_SHARED]:
if config.prompt_tuning_init_state_dict_path is None:
raise ValueError(f'prompt_tuning_init_state_dict_path needs to be specified with {config.prompt_tuning_init} init method')
if config.prompt_tuning_init_state_dict_path.endswith('.safetensors'):
from safetensors.torch import load_file
state_dict: dict = load_file(config.prompt_tuning_init_state_dict_path)
else:
state_dict: dict = torch_load(config.prompt_tuning_init_state_dict_path, map_location=word_embeddings.weight.device)
if config.prompt_tuning_init in [MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS, MultitaskPromptTuningInit.EXACT_SOURCE_TASK]:
prefix_task_cols_: torch.Tensor = state_dict['prefix_task_cols']
prefix_task_rows_: torch.Tensor = state_dict['prefix_task_rows']
if config.prompt_tuning_init == MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS:
prefix_task_cols_ = prefix_task_cols_.mean(0, keepdim=True)
prefix_task_rows_ = prefix_task_rows_.mean(0, keepdim=True)
elif config.prompt_tuning_init == MultitaskPromptTuningInit.EXACT_SOURCE_TASK:
prefix_task_cols_ = prefix_task_cols_[config.prompt_tuning_init_task, ...].unsqueeze(0)
prefix_task_rows_ = prefix_task_rows_[config.prompt_tuning_init_task, ...].unsqueeze(0)
state_dict = {'embedding.weight': state_dict['prompt_embeddings'], 'prefix_task_cols': prefix_task_cols_, 'prefix_task_rows': prefix_task_rows_}
self.load_state_dict(state_dict, strict=True)
elif config.prompt_tuning_init == MultitaskPromptTuningInit.ONLY_SOURCE_SHARED:
state_dict = {'embedding.weight': state_dict['prompt_embeddings']}
self.load_state_dict(state_dict, strict=False)
def forward(self, indices, task_ids):
if task_ids is None:
raise ValueError('task_ids cannot be None')
prompt_embeddings = self.embedding(indices)
task_cols = torch.index_select(self.prefix_task_cols, 0, task_ids)
task_rows = torch.index_select(self.prefix_task_rows, 0, task_ids)
task_prompts = torch.matmul(task_cols, task_rows)
prompt_embeddings *= task_prompts
return prompt_embeddings
# File: peft-main/src/peft/tuners/oft/config.py
from dataclasses import dataclass, field
from typing import List, Optional, Union
from peft.tuners.lycoris_utils import LycorisConfig
from peft.utils import PeftType
@dataclass
class OFTConfig(LycorisConfig):
r: int = field(default=8, metadata={'help': 'OFT rank'})
module_dropout: float = field(default=0.0, metadata={'help': 'The dropout probability for disabling OFT modules during training'})
target_modules: Optional[Union[List[str], str]] = field(default=None, metadata={'help': "List of module names or regex expression of the module names to replace with OFT.For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."})
init_weights: bool = field(default=True, metadata={'help': "Whether to initialize the weights of the OFT layers with their default initialization. Don't change this setting, except if you know exactly what you're doing."})
layers_to_transform: Optional[Union[List[int], int]] = field(default=None, metadata={'help': 'The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index.'})
layers_pattern: Optional[str] = field(default=None, metadata={'help': 'The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern.'})
modules_to_save: Optional[List[str]] = field(default=None, metadata={'help': 'List of modules apart from OFT layers to be set as trainable and saved in the final checkpoint. For example, in Sequence Classification or Token Classification tasks, the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.'})
coft: bool = field(default=False, metadata={'help': 'Whether to use the constrained variant of OFT or not.'})
eps: float = field(default=6e-05, metadata={'help': 'The control strength of COFT. The freedom of rotation. Only has an effect if `coft` is set to True.'})
block_share: bool = field(default=False, metadata={'help': 'Whether to share the OFT parameters between blocks or not.'})
def __post_init__(self):
self.peft_type = PeftType.OFT
self.target_modules = set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
# File: peft-main/src/peft/tuners/oft/layer.py
import math
import warnings
from typing import Any, List, Optional, Set, Tuple
import torch
import torch.nn as nn
from peft.tuners.lycoris_utils import LycorisLayer, check_adapters_to_merge
class OFTLayer(nn.Module, LycorisLayer):
adapter_layer_names = ('oft_r',)
def __init__(self, base_layer: nn.Module):
super().__init__()
LycorisLayer.__init__(self, base_layer)
self.oft_r = nn.ParameterDict({})
self.coft = {}
self.eps = {}
self.block_share = {}
@property
def _available_adapters(self) -> Set[str]:
return {*self.oft_r}
def create_adapter_parameters(self, adapter_name: str, r: int, shape: Tuple[int, ...], block_share: bool):
if block_share:
self.oft_r[adapter_name] = nn.Parameter(torch.empty(1, math.ceil(shape[0] / r), math.ceil(shape[0] / r)))
else:
self.oft_r[adapter_name] = nn.Parameter(torch.empty(r, math.ceil(shape[0] / r), math.ceil(shape[0] / r)))
def reset_adapter_parameters(self, adapter_name: str):
nn.init.zeros_(self.oft_r[adapter_name])
def reset_adapter_parameters_random(self, adapter_name: str):
nn.init.kaiming_uniform_(self.oft_r[adapter_name], a=math.sqrt(5))
def update_layer(self, adapter_name: str, r: int, module_dropout: float, init_weights: bool, coft: bool=False, eps: float=6e-05, block_share: bool=False, **kwargs) -> None:
if r <= 0:
raise ValueError(f'`r` should be a positive integer value but the value passed is {r}')
self.r[adapter_name] = r
self.module_dropout[adapter_name] = module_dropout
self.coft[adapter_name] = coft
self.block_share[adapter_name] = block_share
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
shape = tuple(base_layer.weight.shape)
elif isinstance(base_layer, nn.Conv2d):
shape = (base_layer.out_channels, base_layer.in_channels * base_layer.kernel_size[0] * base_layer.kernel_size[1])
else:
raise TypeError(f'OFT is not implemented for base layers of type {type(base_layer).__name__}')
self.eps[adapter_name] = eps * math.ceil(shape[0] / r) * math.ceil(shape[0] / r)
self.create_adapter_parameters(adapter_name, r, shape, block_share)
if init_weights:
self.reset_adapter_parameters(adapter_name)
else:
self.reset_adapter_parameters_random(adapter_name)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def unscale_layer(self, scale=None) -> None:
pass
def merge(self, safe_merge: bool=False, adapter_names: Optional[List[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter in self._available_adapters:
base_layer = self.get_base_layer()
orig_weights = base_layer.weight.data
if isinstance(base_layer, nn.Linear):
orig_weights = torch.transpose(orig_weights, 0, 1)
elif isinstance(base_layer, nn.Conv2d):
orig_weights = orig_weights.view([base_layer.out_channels, base_layer.in_channels * base_layer.kernel_size[0] * base_layer.kernel_size[1]])
orig_weights = torch.transpose(orig_weights, 0, 1)
delta_weight = self.get_delta_weight(active_adapter)
if orig_weights.shape[1] != delta_weight.shape[1]:
delta_weight = delta_weight[:orig_weights.shape[1], :orig_weights.shape[1]]
new_weights = torch.mm(orig_weights, delta_weight)
if isinstance(base_layer, nn.Linear):
new_weights = torch.transpose(new_weights, 0, 1)
elif isinstance(base_layer, nn.Conv2d):
new_weights = torch.transpose(new_weights, 0, 1)
new_weights = new_weights.view([base_layer.out_channels, base_layer.in_channels, base_layer.kernel_size[0], base_layer.kernel_size[1]])
if safe_merge and (not torch.isfinite(new_weights).all()):
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
base_layer.weight.data = new_weights.contiguous()
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self._available_adapters:
base_layer = self.get_base_layer()
new_weights = base_layer.weight.data
if isinstance(base_layer, nn.Linear):
new_weights = torch.transpose(new_weights, 0, 1)
elif isinstance(base_layer, nn.Conv2d):
new_weights = new_weights.view([base_layer.out_channels, base_layer.in_channels * base_layer.kernel_size[0] * base_layer.kernel_size[1]])
new_weights = torch.transpose(new_weights, 0, 1)
delta_weight = self.get_delta_weight(active_adapter)
if new_weights.shape[1] != delta_weight.shape[1]:
delta_weight = delta_weight[:new_weights.shape[1], :new_weights.shape[1]]
delta_inv = torch.inverse(delta_weight)
orig_weights = torch.mm(new_weights, delta_inv)
if isinstance(base_layer, nn.Linear):
orig_weights = torch.transpose(orig_weights, 0, 1)
elif isinstance(base_layer, nn.Conv2d):
orig_weights = torch.transpose(orig_weights, 0, 1)
orig_weights = orig_weights.reshape([base_layer.out_channels, base_layer.in_channels, base_layer.kernel_size[0], base_layer.kernel_size[1]])
base_layer.weight.data = orig_weights.contiguous()
def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
rank = self.r[adapter_name]
coft = self.coft[adapter_name]
eps = self.eps[adapter_name]
opt_r = self.oft_r[adapter_name]
if coft:
with torch.no_grad():
opt_r.copy_(self._project_batch(opt_r, eps=eps))
orth_rotate = self._cayley_batch(opt_r)
weight = self._block_diagonal(orth_rotate, rank)
return weight
def _cayley_batch(self, data: torch.Tensor) -> torch.Tensor:
(b, r, c) = data.shape
skew = 0.5 * (data - data.transpose(1, 2))
I = torch.eye(r, device=data.device).unsqueeze(0).expand(b, r, c)
Q = torch.bmm(I - skew, torch.inverse(I + skew))
return Q
def _block_diagonal(self, oft_r: torch.Tensor, rank: int) -> torch.Tensor:
if oft_r.shape[0] == 1:
blocks = [oft_r[0, ...] for i in range(rank)]
else:
blocks = [oft_r[i, ...] for i in range(rank)]
A = torch.block_diag(*blocks)
return A
def _project_batch(self, oft_r, eps=1e-05):
eps = eps * 1 / torch.sqrt(torch.tensor(oft_r.shape[0]))
I = torch.zeros((oft_r.size(1), oft_r.size(1)), device=oft_r.device, dtype=oft_r.dtype).unsqueeze(0).expand_as(oft_r)
diff = oft_r - I
norm_diff = torch.norm(oft_r - I, dim=(1, 2), keepdim=True)
mask = (norm_diff <= eps).bool()
out = torch.where(mask, oft_r, I + eps * (diff / norm_diff))
return out
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
if len(result.shape) == 4:
result = result.permute(0, 2, 3, 1)
base_layer = self.get_base_layer()
base_bias = base_layer.bias
if base_bias is not None:
result = result - base_bias.data
for active_adapter in self.active_adapters:
if active_adapter not in self._available_adapters:
continue
module_dropout = self.module_dropout[active_adapter]
if not self.training or (self.training and torch.rand(1) > module_dropout):
result = self._get_delta_activations(active_adapter, result, *args, **kwargs)
if base_bias is not None:
result = result + base_bias.data
if len(result.shape) == 4:
result = result.permute(0, 3, 1, 2)
result = result.to(previous_dtype)
return result
class Linear(OFTLayer):
def __init__(self, base_layer: nn.Module, adapter_name: str='default', r: int=0, module_dropout: float=0.0, init_weights: bool=True, **kwargs):
super().__init__(base_layer)
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, module_dropout, init_weights, **kwargs)
def _get_delta_activations(self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
delta_weight = self.get_delta_weight(adapter_name)
base_layer = self.get_base_layer()
base_weight = base_layer.weight.data
delta_weight = delta_weight[:base_weight.shape[0], :base_weight.shape[0]]
return torch.matmul(input, delta_weight)
def __repr__(self) -> str:
rep = super().__repr__()
return 'oft.' + rep
class Conv2d(OFTLayer):
def __init__(self, base_layer: nn.Module, adapter_name: str='default', r: int=0, module_dropout: float=0.0, init_weights: bool=True, **kwargs):
super().__init__(base_layer)
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, module_dropout, init_weights, **kwargs)
def _get_delta_activations(self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
delta_weight = self.get_delta_weight(adapter_name)
base_layer = self.get_base_layer()
base_weight = base_layer.weight.data
delta_weight = delta_weight[:base_weight.shape[0], :base_weight.shape[0]]
return torch.matmul(input, delta_weight)
def __repr__(self) -> str:
rep = super().__repr__()
return 'oft.' + rep
# File: peft-main/src/peft/tuners/oft/model.py
import re
from typing import Dict, Type, Union
import torch
from torch import nn
from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner
from .layer import Conv2d, Linear, OFTLayer
class OFTModel(LycorisTuner):
prefix: str = 'oft_'
layers_mapping: Dict[Type[torch.nn.Module], Type[OFTLayer]] = {torch.nn.Conv2d: Conv2d, torch.nn.Linear: Linear}
def _create_and_replace(self, config: LycorisConfig, adapter_name: str, target: Union[OFTLayer, nn.Module], target_name: str, parent: nn.Module, current_key: str) -> None:
pattern_keys = list(config.rank_pattern.keys())
target_name_key = next(filter(lambda key: re.match(f'(.*\\.)?{key}$', current_key), pattern_keys), target_name)
kwargs = config.to_dict()
kwargs['r'] = config.rank_pattern.get(target_name_key, config.r)
if isinstance(target, OFTLayer):
target.update_layer(adapter_name, **kwargs)
else:
new_module = self._create_new_module(config, adapter_name, target, **kwargs)
self._replace_module(parent, target_name, new_module, target)
# File: peft-main/src/peft/tuners/p_tuning/config.py
import enum
from dataclasses import dataclass, field
from typing import Union
from peft.config import PromptLearningConfig
from peft.utils import PeftType
class PromptEncoderReparameterizationType(str, enum.Enum):
MLP = 'MLP'
LSTM = 'LSTM'
@dataclass
class PromptEncoderConfig(PromptLearningConfig):
encoder_reparameterization_type: Union[str, PromptEncoderReparameterizationType] = field(default=PromptEncoderReparameterizationType.MLP, metadata={'help': 'How to reparameterize the prompt encoder'})
encoder_hidden_size: int = field(default=None, metadata={'help': 'The hidden size of the prompt encoder'})
encoder_num_layers: int = field(default=2, metadata={'help': 'The number of layers of the prompt encoder'})
encoder_dropout: float = field(default=0.0, metadata={'help': 'The dropout of the prompt encoder'})
def __post_init__(self):
self.peft_type = PeftType.P_TUNING
# File: peft-main/src/peft/tuners/p_tuning/model.py
import warnings
import torch
from .config import PromptEncoderConfig, PromptEncoderReparameterizationType
class PromptEncoder(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.token_dim = config.token_dim
self.input_size = self.token_dim
self.output_size = self.token_dim
self.hidden_size = config.encoder_hidden_size
self.total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules
self.encoder_type = config.encoder_reparameterization_type
self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim)
if not config.inference_mode:
if self.encoder_type == PromptEncoderReparameterizationType.LSTM:
lstm_dropout = config.encoder_dropout
num_layers = config.encoder_num_layers
self.lstm_head = torch.nn.LSTM(input_size=self.input_size, hidden_size=self.hidden_size, num_layers=num_layers, dropout=lstm_dropout, bidirectional=True, batch_first=True)
self.mlp_head = torch.nn.Sequential(torch.nn.Linear(self.hidden_size * 2, self.hidden_size * 2), torch.nn.ReLU(), torch.nn.Linear(self.hidden_size * 2, self.output_size))
elif self.encoder_type == PromptEncoderReparameterizationType.MLP:
encoder_num_layers_default = PromptEncoderConfig.encoder_num_layers
if config.encoder_num_layers != encoder_num_layers_default:
warnings.warn(f'for {self.encoder_type.value}, the argument `encoder_num_layers` is ignored. Exactly {encoder_num_layers_default} MLP layers are used.')
layers = [torch.nn.Linear(self.input_size, self.hidden_size), torch.nn.ReLU(), torch.nn.Linear(self.hidden_size, self.hidden_size), torch.nn.ReLU(), torch.nn.Linear(self.hidden_size, self.output_size)]
self.mlp_head = torch.nn.Sequential(*layers)
else:
raise ValueError('Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.')
def forward(self, indices):
input_embeds = self.embedding(indices)
if self.encoder_type == PromptEncoderReparameterizationType.LSTM:
output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0])
elif self.encoder_type == PromptEncoderReparameterizationType.MLP:
output_embeds = self.mlp_head(input_embeds)
else:
raise ValueError('Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.')
return output_embeds
# File: peft-main/src/peft/tuners/poly/config.py
from dataclasses import dataclass, field
from typing import List, Literal, Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class PolyConfig(PeftConfig):
r: int = field(default=8, metadata={'help': 'Lora attention dimension'})
target_modules: Optional[Union[List[str], str]] = field(default=None, metadata={'help': "List of module names or regex expression of the module names to replace with Poly.For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "})
modules_to_save: Optional[List[str]] = field(default=None, metadata={'help': 'List of modules apart from Poly layers to be set as trainable and saved in the final checkpoint. For example, in Sequence Classification or Token Classification tasks, the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.'})
init_weights: bool = field(default=True, metadata={'help': "Whether to initialize the weights of the Poly layers with their default initialization. Don't change this setting, except if you know exactly what you're doing."})
poly_type: Literal['poly'] = field(default='poly', metadata={'help': 'Type of Poly modules to be used. Currently only "poly" is supported.'})
n_tasks: int = field(default=1, metadata={'help': 'Number of tasks in multitasking scenario.'})
n_skills: int = field(default=4, metadata={'help': 'Number of skills (LoRA) in each Poly layer.'})
n_splits: int = field(default=1, metadata={'help': 'Number of splits within each LoRA of a Poly layer.'})
def __post_init__(self):
self.peft_type = PeftType.POLY
self.target_modules = set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
# File: peft-main/src/peft/tuners/poly/layer.py
import math
from typing import Any
import torch
import torch.nn as nn
from peft.tuners.tuners_utils import BaseTunerLayer
from .config import PolyConfig
from .router import get_router
class PolyLayer(BaseTunerLayer):
adapter_layer_names = ('poly_lora_A', 'poly_lora_B', 'poly_router')
other_param_names = ('r', 'n_tasks', 'n_skills', 'n_splits')
def __init__(self, base_layer: nn.Module, **kwargs):
self.base_layer = base_layer
self.r = {}
self.n_tasks = {}
self.n_skills = {}
self.n_splits = {}
self.poly_type = {}
self.poly_router = nn.ModuleDict()
self.poly_lora_A = nn.ParameterDict()
self.poly_lora_B = nn.ParameterDict()
self.kwargs = kwargs
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
(in_features, out_features) = (base_layer.in_features, base_layer.out_features)
else:
raise ValueError(f'Unsupported layer type {type(base_layer)}')
self.in_features = in_features
self.out_features = out_features
def update_layer(self, adapter_name, poly_config):
if poly_config.r <= 0:
raise ValueError(f'`r` should be a positive integer value but the value passed is {poly_config.r}')
self.r[adapter_name] = poly_config.r
self.n_tasks[adapter_name] = poly_config.n_tasks
self.n_skills[adapter_name] = poly_config.n_skills
self.n_splits[adapter_name] = poly_config.n_splits
self.poly_type[adapter_name] = poly_config.poly_type
self.poly_lora_A[adapter_name] = nn.Parameter(torch.empty(poly_config.n_splits, poly_config.n_skills, self.in_features // poly_config.n_splits, poly_config.r))
self.poly_lora_B[adapter_name] = nn.Parameter(torch.empty(poly_config.n_splits, poly_config.n_skills, poly_config.r, self.out_features // poly_config.n_splits))
self.poly_router[adapter_name] = get_router(poly_config)
self.reset_poly_parameters(adapter_name, init_weights=poly_config.init_weights)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def reset_poly_parameters(self, adapter_name, init_weights):
if adapter_name in self.poly_lora_A.keys():
(n_splits, n_skills, d, r) = self.poly_lora_A[adapter_name].shape
for skill in range(n_skills):
for split in range(n_splits):
param = torch.empty((r, d))
torch.nn.init.kaiming_uniform_(param, a=math.sqrt(5))
self.poly_lora_A[adapter_name].data[split, skill, :, :] = param.T
if init_weights:
torch.nn.init.zeros_(self.poly_lora_B[adapter_name])
else:
(n_splits, n_skills, r, d) = self.poly_lora_B[adapter_name].shape
for skill in range(n_skills):
for split in range(n_splits):
param = torch.empty((d, r))
torch.nn.init.kaiming_uniform_(param, a=math.sqrt(5))
self.poly_lora_B[adapter_name].data[split, skill, :, :] = param.T
self.poly_router[adapter_name].reset()
class Linear(nn.Module, PolyLayer):
def __init__(self, base_layer, adapter_name: str, poly_config: PolyConfig, **kwargs) -> None:
super().__init__()
PolyLayer.__init__(self, base_layer, **kwargs)
self._active_adapter = adapter_name
self.update_layer(adapter_name, poly_config)
def forward(self, x: torch.Tensor, *args: Any, task_ids: torch.Tensor=None, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.poly_lora_A.keys():
continue
r = self.r[active_adapter]
poly_router = self.poly_router[active_adapter]
poly_lora_A = self.poly_lora_A[active_adapter]
poly_lora_B = self.poly_lora_B[active_adapter]
mixing_weights = poly_router(task_ids=task_ids, input_ids=x)
(bs, n_splits, n_skills) = mixing_weights.size()
A = torch.einsum('bqs,qsdr->bqdr', (mixing_weights, poly_lora_A))
B = torch.einsum('bqs,qsrd->bqrd', (mixing_weights, poly_lora_B))
A = A.reshape(bs, self.in_features, r)
B = B.transpose(1, 2).reshape(bs, r, self.out_features)
x = x.to(A.dtype)
result += x.bmm(A).bmm(B) / r
result = result.to(previous_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'poly.' + rep
# File: peft-main/src/peft/tuners/poly/model.py
from contextlib import contextmanager
from dataclasses import asdict
from enum import Enum
from typing import Any
import torch
from torch import nn
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
from peft.utils import TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper
from .config import PolyConfig
from .layer import Linear, PolyLayer
class PolyModel(BaseTuner):
prefix: str = 'poly_'
def __init__(self, model, config, adapter_name) -> None:
super().__init__(model, config, adapter_name)
@staticmethod
def _check_target_module_exists(poly_config, key):
return check_target_module_exists(poly_config, key)
def _create_and_replace(self, poly_config: PolyConfig, adapter_name: str, target: nn.Module, target_name: str, parent: nn.Module, **optional_kwargs: Any):
if isinstance(target, PolyLayer):
target.update_layer(adapter_name, poly_config)
else:
new_module = self._create_new_module(poly_config, adapter_name, target)
if adapter_name not in self.active_adapters:
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
def _replace_module(self, parent, child_name, new_module, child):
setattr(parent, child_name, new_module)
if hasattr(child, 'base_layer'):
child = child.base_layer
if not hasattr(new_module, 'base_layer'):
new_module.weight = child.weight
if hasattr(child, 'bias'):
new_module.bias = child.bias
if getattr(child, 'state', None) is not None:
if hasattr(new_module, 'base_layer'):
new_module.base_layer.state = child.state
else:
new_module.state = child.state
new_module.to(child.weight.device)
for (name, module) in new_module.named_modules():
if self.prefix in name or 'ranknum' in name:
weight = child.qweight if hasattr(child, 'qweight') else child.weight
module.to(weight.device)
def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
for (n, p) in model.named_parameters():
if self.prefix not in n:
p.requires_grad = False
@staticmethod
def _create_new_module(poly_config, adapter_name, target, **kwargs):
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Linear):
return Linear(target, adapter_name, poly_config, **kwargs)
else:
raise ValueError(f'Target module {target} is not supported. Currently, only the following modules are supported: `torch.nn.Linear`.')
def __getattr__(self, name: str):
try:
return super().__getattr__(name)
except AttributeError:
if name == 'model':
raise
return getattr(self.model, name)
def get_peft_config_as_dict(self, inference: bool=False):
config_dict = {}
for (key, value) in self.peft_config.items():
config = {k: v.value if isinstance(v, Enum) else v for (k, v) in asdict(value).items()}
if inference:
config['inference_mode'] = True
config_dict[key] = config
return config
def _set_adapter_layers(self, enabled=True):
for module in self.model.modules():
if isinstance(module, (PolyLayer, ModulesToSaveWrapper)):
module.enable_adapters(enabled)
def enable_adapter_layers(self):
self._set_adapter_layers(enabled=True)
def disable_adapter_layers(self):
self._set_adapter_layers(enabled=False)
def set_adapter(self, adapter_name):
for module in self.model.modules():
if isinstance(module, PolyLayer):
module.set_adapter(adapter_name)
def _prepare_adapter_config(self, peft_config, model_config):
if peft_config.target_modules is None:
if model_config['model_type'] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:
raise ValueError('Please specify `target_modules` in `peft_config`')
peft_config.target_modules = set(TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config['model_type']])
return peft_config
def _register_pre_hooks(self, task_ids):
if task_ids is None:
return []
def pre_hook(_, args, kwargs):
kwargs['task_ids'] = task_ids
return (args, kwargs)
handles = []
for module in self.model.modules():
if isinstance(module, Linear):
handle = module.register_forward_pre_hook(pre_hook, with_kwargs=True)
handles.append(handle)
return handles
@contextmanager
def _manage_pre_hooks(self, task_ids):
handles = self._register_pre_hooks(task_ids)
try:
yield
finally:
for handle in handles:
handle.remove()
def forward(self, *args, task_ids=None, **kwargs):
with self._manage_pre_hooks(task_ids):
return self.model(*args, **kwargs)
def generate(self, *args, task_ids=None, **kwargs):
with self._manage_pre_hooks(task_ids):
return self.model.generate(*args, **kwargs)
# File: peft-main/src/peft/tuners/poly/router.py
from abc import ABC, abstractmethod
import torch
from torch import nn
from torch.distributions.relaxed_bernoulli import RelaxedBernoulli
from .config import PolyConfig
EPS = 1e-12
def get_router(poly_config: PolyConfig) -> nn.Module:
if poly_config.poly_type == 'poly':
return PolyRouter(poly_config)
else:
raise ValueError(f'Unsupported poly_type: {poly_config.poly_type}. Currently, only the following types are supported: `poly`.')
class Router(nn.Module, ABC):
@abstractmethod
def reset(self):
...
@abstractmethod
def forward(self, task_ids: torch.Tensor, input_ids: torch.Tensor):
...
class PolyRouter(Router):
def __init__(self, poly_config: PolyConfig):
super().__init__()
self.poly_type = poly_config.poly_type
self.n_tasks = poly_config.n_tasks
self.n_skills = poly_config.n_skills
self.n_splits = poly_config.n_splits
self.module_logits = nn.Parameter(torch.empty((self.n_tasks, self.n_splits * self.n_skills)))
def reset(self):
torch.nn.init.uniform_(self.module_logits, -0.001, 0.001)
def forward(self, task_ids: torch.Tensor, input_ids: torch.Tensor):
if task_ids is None:
raise ValueError('task_ids should not be None.')
if task_ids.max().item() >= self.n_tasks:
raise ValueError(f'Only {self.n_tasks} tasks available. Found task id = {task_ids.max().item()}')
task_ids = task_ids.to(self.module_logits.device)
module_logits = self.module_logits[task_ids]
module_logits = module_logits.view(-1, self.n_splits, self.n_skills)
if self.training:
module_logits = RelaxedBernoulli(temperature=1.0, logits=module_logits).rsample()
else:
module_logits = torch.sigmoid(module_logits)
module_weights = module_logits / (module_logits.sum(dim=-1, keepdim=True) + EPS)
return module_weights
# File: peft-main/src/peft/tuners/prefix_tuning/config.py
from dataclasses import dataclass, field
from peft.config import PromptLearningConfig
from peft.utils import PeftType
@dataclass
class PrefixTuningConfig(PromptLearningConfig):
encoder_hidden_size: int = field(default=None, metadata={'help': 'The hidden size of the encoder'})
prefix_projection: bool = field(default=False, metadata={'help': 'Whether to project the prefix tokens'})
def __post_init__(self):
self.peft_type = PeftType.PREFIX_TUNING
# File: peft-main/src/peft/tuners/prefix_tuning/model.py
import torch
class PrefixEncoder(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.prefix_projection = config.prefix_projection
token_dim = config.token_dim
num_layers = config.num_layers
encoder_hidden_size = config.encoder_hidden_size
num_virtual_tokens = config.num_virtual_tokens
if self.prefix_projection and (not config.inference_mode):
self.embedding = torch.nn.Embedding(num_virtual_tokens, token_dim)
self.transform = torch.nn.Sequential(torch.nn.Linear(token_dim, encoder_hidden_size), torch.nn.Tanh(), torch.nn.Linear(encoder_hidden_size, num_layers * 2 * token_dim))
else:
self.embedding = torch.nn.Embedding(num_virtual_tokens, num_layers * 2 * token_dim)
def forward(self, prefix: torch.Tensor):
if self.prefix_projection:
prefix_tokens = self.embedding(prefix)
past_key_values = self.transform(prefix_tokens)
else:
past_key_values = self.embedding(prefix)
return past_key_values
# File: peft-main/src/peft/tuners/prompt_tuning/config.py
import enum
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.config import PromptLearningConfig
from peft.utils import PeftType
class PromptTuningInit(str, enum.Enum):
TEXT = 'TEXT'
RANDOM = 'RANDOM'
@dataclass
class PromptTuningConfig(PromptLearningConfig):
prompt_tuning_init: Union[PromptTuningInit, str] = field(default=PromptTuningInit.RANDOM, metadata={'help': 'How to initialize the prompt tuning parameters'})
prompt_tuning_init_text: Optional[str] = field(default=None, metadata={'help': 'The text to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`'})
tokenizer_name_or_path: Optional[str] = field(default=None, metadata={'help': 'The tokenizer to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`'})
tokenizer_kwargs: Optional[dict] = field(default=None, metadata={'help': 'The keyword arguments to pass to `AutoTokenizer.from_pretrained`. Only used if prompt_tuning_init is `TEXT`'})
def __post_init__(self):
self.peft_type = PeftType.PROMPT_TUNING
if self.prompt_tuning_init == PromptTuningInit.TEXT and (not self.tokenizer_name_or_path):
raise ValueError(f"When prompt_tuning_init='{PromptTuningInit.TEXT.value}', tokenizer_name_or_path can't be {self.tokenizer_name_or_path}.")
if self.prompt_tuning_init == PromptTuningInit.TEXT and self.prompt_tuning_init_text is None:
raise ValueError(f"When prompt_tuning_init='{PromptTuningInit.TEXT.value}', prompt_tuning_init_text can't be {self.prompt_tuning_init_text}.")
if self.tokenizer_kwargs and self.prompt_tuning_init != PromptTuningInit.TEXT:
raise ValueError(f"tokenizer_kwargs only valid when using prompt_tuning_init='{PromptTuningInit.TEXT.value}'.")
# File: peft-main/src/peft/tuners/prompt_tuning/model.py
import math
import torch
from peft.utils.integrations import gather_params_ctx
from .config import PromptTuningInit
class PromptEmbedding(torch.nn.Module):
def __init__(self, config, word_embeddings):
super().__init__()
total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules
self.embedding = torch.nn.Embedding(total_virtual_tokens, config.token_dim)
if config.prompt_tuning_init == PromptTuningInit.TEXT and (not config.inference_mode):
from transformers import AutoTokenizer
tokenizer_kwargs = config.tokenizer_kwargs or {}
tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name_or_path, **tokenizer_kwargs)
init_text = config.prompt_tuning_init_text
init_token_ids = tokenizer(init_text)['input_ids']
num_text_tokens = len(init_token_ids)
if num_text_tokens > total_virtual_tokens:
init_token_ids = init_token_ids[:total_virtual_tokens]
elif num_text_tokens < total_virtual_tokens:
num_reps = math.ceil(total_virtual_tokens / num_text_tokens)
init_token_ids = init_token_ids * num_reps
init_token_ids = init_token_ids[:total_virtual_tokens]
init_token_ids = torch.LongTensor(init_token_ids).to(word_embeddings.weight.device)
with gather_params_ctx(word_embeddings.parameters()):
word_embedding_weights = word_embeddings(init_token_ids).detach().clone()
word_embedding_weights = word_embedding_weights.to(torch.float32)
self.embedding.weight = torch.nn.Parameter(word_embedding_weights)
def forward(self, indices):
prompt_embeddings = self.embedding(indices)
return prompt_embeddings
# File: peft-main/src/peft/tuners/tuners_utils.py
from __future__ import annotations
import copy
import logging
import os
import re
import textwrap
import warnings
from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import Any, Optional, Union
import torch
from accelerate.hooks import AlignDevicesHook
from accelerate.utils import named_module_tensors, offload_state_dict
from torch import nn
from transformers import PreTrainedModel
from transformers.pytorch_utils import Conv1D
from peft.utils import INCLUDE_LINEAR_LAYERS_SHORTHAND
from peft.utils.constants import DUMMY_MODEL_CONFIG, DUMMY_TARGET_MODULES, EMBEDDING_LAYER_NAMES, MIN_TARGET_MODULES_FOR_OPTIMIZATION, SEQ_CLS_HEAD_NAMES
from peft.utils.peft_types import PeftType, TaskType
from ..config import PeftConfig
from ..utils import ModulesToSaveWrapper, _get_submodules
from ._buffer_dict import BufferDict
logger = logging.getLogger(__name__)
@contextmanager
def onload_layer(layer):
offloaded_modules = []
for (name, module) in layer.named_modules():
if name in ['', 'base_layer']:
continue
if hasattr(module, '_hf_hook') and isinstance(module._hf_hook, AlignDevicesHook) and module._hf_hook.offload:
module._hf_hook.pre_forward(module)
offloaded_modules.append(module)
base_layer_offload = False
if hasattr(layer, 'base_layer') and (hasattr(layer.base_layer, '_hf_hook') and isinstance(layer.base_layer._hf_hook, AlignDevicesHook) and layer.base_layer._hf_hook.offload):
if torch.device('meta') in layer.base_layer._hf_hook.original_devices.values() and hasattr(layer.base_layer._hf_hook.weights_map, 'dataset'):
index = layer.base_layer._hf_hook.weights_map.dataset.index
module_name = list(dict(layer.base_layer._hf_hook.weights_map.dataset).keys())[0]
file_name = index[module_name]['safetensors_file']
base_name_arr = []
for i in os.path.split(file_name):
if '--' in i:
base_name_arr.append(i)
break
base_name_arr.append(i)
base_name = os.path.join(*base_name_arr)
safetensors_filename = base_name + '-merged'
layer.base_layer._hf_hook.pre_forward(layer.base_layer)
base_layer_offload = True
yield
for module in offloaded_modules:
module._hf_hook.post_forward(module, torch.tensor([]))
if base_layer_offload:
layer.base_layer._hf_hook.weights_map = {name: param.to('cpu') for (name, param) in named_module_tensors(layer.base_layer)}
if torch.device('meta') in layer.base_layer._hf_hook.original_devices.values() and hasattr(layer.base_layer._hf_hook.weights_map, 'dataset'):
offload_state_dict(safetensors_filename, layer.base_layer._hf_hook.weights_map)
layer.base_layer._hf_hook.post_forward(layer.base_layer, torch.tensor([]))
class BaseTuner(nn.Module, ABC):
def __init__(self, model, peft_config: Union[PeftConfig, dict[str, PeftConfig]], adapter_name: str) -> None:
super().__init__()
self.model = model
self.targeted_module_names: list[str] = []
if not hasattr(self, 'peft_config'):
self.peft_config = {adapter_name: peft_config} if isinstance(peft_config, PeftConfig) else peft_config
else:
logger.info('Already found a `peft_config` attribute in the model. This will lead to having multiple adapters in the model. Make sure to know what you are doing!')
if isinstance(peft_config, PeftConfig):
self.peft_config[adapter_name] = peft_config
else:
self.peft_config.update(peft_config)
self.active_adapter: str | list[str] = adapter_name
self._pre_injection_hook(self.model, self.peft_config[adapter_name], adapter_name)
if peft_config != PeftType.XLORA or peft_config[adapter_name] != PeftType.XLORA:
self.inject_adapter(self.model, adapter_name)
self.model.peft_config = self.peft_config
@property
def active_adapters(self) -> list[str]:
if isinstance(self.active_adapter, str):
return [self.active_adapter]
return self.active_adapter
def forward(self, *args: Any, **kwargs: Any):
return self.model.forward(*args, **kwargs)
def _pre_injection_hook(self, model: nn.Module, config: PeftConfig, adapter_name: str) -> None:
pass
@abstractmethod
def _prepare_adapter_config(self, peft_config: PeftConfig, model_config: dict) -> PeftConfig:
...
def _prepare_model(self, peft_config: PeftConfig, model: nn.Module):
pass
@abstractmethod
def _check_target_module_exists(peft_config: PeftConfig, key: str) -> bool:
...
@abstractmethod
def _create_and_replace(self, peft_config: PeftConfig, adapter_name: str, target: nn.Module, target_name: str, parent: nn.Module, current_key: str) -> None:
...
@abstractmethod
def _mark_only_adapters_as_trainable(self, model: nn.Module):
...
@abstractmethod
def disable_adapter_layers(self) -> None:
...
@abstractmethod
def enable_adapter_layers(self) -> None:
...
def _check_new_adapter_config(self, config: PeftConfig) -> None:
pass
def _cast_adapter_dtype(self, adapter_name: str, autocast_adapter_dtype: bool=True) -> None:
if not autocast_adapter_dtype:
return
dtypes_to_convert_to_fp32 = {torch.float16, torch.bfloat16}
for module in self.model.modules():
if not isinstance(module, BaseTunerLayer):
continue
for submodule in module.modules():
if not isinstance(submodule, (nn.ModuleDict, nn.ParameterDict, BufferDict)):
continue
if adapter_name not in submodule:
continue
if isinstance(submodule[adapter_name], nn.Parameter):
if submodule[adapter_name].dtype in dtypes_to_convert_to_fp32:
submodule[adapter_name].data = submodule[adapter_name].data.to(torch.float32)
continue
if isinstance(submodule[adapter_name], torch.Tensor):
if submodule[adapter_name].dtype in dtypes_to_convert_to_fp32:
submodule[adapter_name] = submodule[adapter_name].to(torch.float32)
continue
for param in submodule[adapter_name].parameters():
if param.dtype in dtypes_to_convert_to_fp32:
param.data = param.data.to(torch.float32)
def _check_merge_allowed(self):
example_code = textwrap.dedent('\n ```python\n from transformers import AutoModelForCausalLM\n\n # Load original tied model\n model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b-it", tie_word_embeddings=False)\n\n # Set the randomly initialized lm_head to the previously tied embeddings\n model.lm_head.weight.data = model.model.embed_tokens.weight.data.clone()\n\n # Save the untied model\n untied_model_dir = "dir/for/untied/model"\n model.save_pretrained(untied_model_dir)\n model.config.save_pretrained(untied_model_dir)\n\n # Now use the original model but in untied format\n model = AutoModelForCausalLM.from_pretrained(untied_model_dir)\n ```\n ')
tied_target_modules = self._get_tied_target_modules(self.model)
if tied_target_modules:
warnings.warn(f'Model with `tie_word_embeddings=True` and the tied_target_modules={tied_target_modules!r} are part of the adapter. This can lead to complications. You can opt to merge the adapter after cloning the weights (to untie the embeddings). You can untie the embeddings by loading the model with `tie_word_embeddings=False`. For example:' + example_code)
def inject_adapter(self, model: nn.Module, adapter_name: str, autocast_adapter_dtype: bool=True) -> None:
peft_config = self.peft_config[adapter_name]
self._check_new_adapter_config(peft_config)
_check_for_modules_to_save = getattr(peft_config, 'modules_to_save', None) is not None
_has_modules_to_save = False
model_config = self.get_model_config(model)
peft_config = self._prepare_adapter_config(peft_config, model_config)
self._prepare_model(peft_config, model)
is_target_modules_in_base_model = False
key_list = [key for (key, _) in model.named_modules()]
if getattr(peft_config, 'target_modules', None) == DUMMY_TARGET_MODULES:
key_list = []
is_target_modules_in_base_model = True
peft_config = _maybe_include_all_linear_layers(peft_config, model)
if isinstance(peft_config.target_modules, (list, set)) and len(peft_config.target_modules) >= MIN_TARGET_MODULES_FOR_OPTIMIZATION:
names_no_target = [name for name in key_list if not any((name.endswith(suffix) for suffix in peft_config.target_modules))]
new_target_modules = _find_minimal_target_modules(peft_config.target_modules, names_no_target)
if len(new_target_modules) < len(peft_config.target_modules):
peft_config.target_modules = new_target_modules
for key in key_list:
if _check_for_modules_to_save and any((key.endswith(f'{module_to_save}') for module_to_save in peft_config.modules_to_save)):
(parent, target, target_name) = _get_submodules(model, key)
if not isinstance(target, ModulesToSaveWrapper):
new_module = ModulesToSaveWrapper(target, adapter_name)
setattr(parent, target_name, new_module)
else:
target.update(adapter_name)
_has_modules_to_save = True
continue
if not self._check_target_module_exists(peft_config, key):
continue
self.targeted_module_names.append(key)
is_target_modules_in_base_model = True
(parent, target, target_name) = _get_submodules(model, key)
self._create_and_replace(peft_config, adapter_name, target, target_name, parent, current_key=key)
tied_target_modules = self._get_tied_target_modules(model=model)
if tied_target_modules:
warnings.warn(f'Model with `tie_word_embeddings=True` and the tied_target_modules={tied_target_modules!r} are part of the adapter. This can lead to complications, for example when merging the adapter or converting your model to formats other than safetensors. See for example https://github.com/huggingface/peft/issues/2018.')
if not is_target_modules_in_base_model and hasattr(peft_config, 'target_modules'):
raise ValueError(f'Target modules {peft_config.target_modules} not found in the base model. Please check the target modules and try again.')
self.set_adapter(self.active_adapters)
self._mark_only_adapters_as_trainable(model)
if self.peft_config[adapter_name].inference_mode:
for (n, p) in model.named_parameters():
if adapter_name in n:
p.requires_grad = False
if _has_modules_to_save:
if not hasattr(model, 'modules_to_save'):
model.modules_to_save = set(peft_config.modules_to_save)
else:
model.modules_to_save.update(set(peft_config.modules_to_save))
def merge_adapter(self, adapter_names: Optional[list[str]]=None) -> None:
self._check_merge_allowed()
for module in self.model.modules():
if isinstance(module, BaseTunerLayer):
with onload_layer(module):
module.merge(adapter_names=adapter_names)
def unmerge_adapter(self):
for module in self.model.modules():
if isinstance(module, BaseTunerLayer):
with onload_layer(module):
module.unmerge()
def _unloading_checks(self, adapter_names: Optional[list[str]]):
adapters_to_consider = adapter_names or self.active_adapters
is_modules_to_save_available = any((self.peft_config[adapter].modules_to_save for adapter in adapters_to_consider))
if is_modules_to_save_available and len(adapters_to_consider) > 1:
raise ValueError('Cannot unload multiple adapters that specify `modules_to_save`.')
@staticmethod
def get_model_config(model: nn.Module) -> dict:
model_config = getattr(model, 'config', DUMMY_MODEL_CONFIG)
if hasattr(model_config, 'to_dict'):
model_config = model_config.to_dict()
return model_config
def _get_tied_target_modules(self, model: nn.Module) -> list[str]:
tied_target_modules = []
model_config = self.get_model_config(model)
if model_config.get('tie_word_embeddings'):
for target_module in self.targeted_module_names:
if target_module in EMBEDDING_LAYER_NAMES:
tied_target_modules.append(target_module)
return tied_target_modules
class BaseTunerLayer(ABC):
adapter_layer_names: tuple[str, ...] = ()
other_param_names: tuple[str, ...] = ()
_disable_adapters: bool = False
_active_adapter: str | list[str] = 'default'
merged_adapters: list[str] = []
def get_base_layer(self) -> nn.Module:
base_layer = self
while hasattr(base_layer, 'base_layer'):
base_layer = base_layer.base_layer
return base_layer
@property
def weight(self) -> torch.Tensor:
base_layer = self.get_base_layer()
if hasattr(base_layer, 'qweight'):
weight = base_layer.qweight
else:
weight = base_layer.weight
return weight
@property
def bias(self) -> torch.Tensor:
base_layer = self.get_base_layer()
return base_layer.bias
def merge(self, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> None:
raise NotImplementedError
def unmerge(self) -> None:
raise NotImplementedError
@property
def merged(self) -> bool:
return bool(self.merged_adapters)
@property
def disable_adapters(self) -> bool:
return self._disable_adapters
@property
def active_adapter(self) -> str | list[str]:
return self._active_adapter
def _get_available_adapters(self) -> set[str]:
adapters = set()
for layer_name in self.adapter_layer_names:
module = getattr(self, layer_name)
if not isinstance(module, (nn.ModuleDict, nn.ParameterDict)):
continue
adapters.update(set(module.keys()))
return adapters
@property
def active_adapters(self):
if isinstance(self.active_adapter, str):
return [self.active_adapter]
return self.active_adapter
def enable_adapters(self, enabled: bool) -> None:
if enabled:
self.set_adapter(self.active_adapters)
self._disable_adapters = False
else:
for layer_name in self.adapter_layer_names:
layer = getattr(self, layer_name)
layer.requires_grad_(False)
self._disable_adapters = True
def set_adapter(self, adapter_names: str | list[str]) -> None:
if isinstance(adapter_names, str):
adapter_names = [adapter_names]
for layer_name in self.adapter_layer_names:
module_dict = getattr(self, layer_name)
for (key, layer) in module_dict.items():
if key in adapter_names:
layer.requires_grad_(True)
else:
layer.requires_grad_(False)
self._active_adapter = adapter_names
def _all_available_adapter_names(self) -> list[str]:
adapter_names = set()
for name in self.adapter_layer_names + self.other_param_names:
attr = getattr(self, name)
if hasattr(attr, 'keys'):
adapter_names.update(attr.keys())
return sorted(adapter_names)
def delete_adapter(self, adapter_name: str) -> None:
for attr in self.adapter_layer_names + self.other_param_names:
if adapter_name in getattr(self, attr):
del getattr(self, attr)[adapter_name]
if adapter_name in self.active_adapters:
active_adapters = self.active_adapters[:]
active_adapters.remove(adapter_name)
if active_adapters:
self.set_adapter(active_adapters)
else:
remaining_adapters = self._all_available_adapter_names()
if not remaining_adapters:
self.set_adapter([])
else:
new_active_adapter = remaining_adapters[0]
warnings.warn(f'Adapter {adapter_name} was active which is now deleted. Setting active adapter to {new_active_adapter}.')
self.set_adapter(remaining_adapters[0])
def _move_adapter_to_device_of_base_layer(self, adapter_name: str, device: Optional[torch.device]=None) -> None:
if device is None:
for weight_name in ('weight', 'qweight'):
weight = getattr(self.get_base_layer(), weight_name, None)
if weight is not None:
device = weight.device
dtype = weight.dtype
break
else:
return
for adapter_layer_name in self.adapter_layer_names + self.other_param_names:
adapter_layer = getattr(self, adapter_layer_name, None)
if not isinstance(adapter_layer, (nn.ModuleDict, nn.ParameterDict, BufferDict)):
continue
if adapter_name not in adapter_layer:
continue
if weight.dtype.is_floating_point or weight.dtype.is_complex:
adapter_layer[adapter_name] = adapter_layer[adapter_name].to(device, dtype=dtype)
else:
adapter_layer[adapter_name] = adapter_layer[adapter_name].to(device)
def _find_minimal_target_modules(target_modules: list[str] | set[str], other_module_names: list[str] | set[str]) -> set[str]:
if isinstance(target_modules, str) or not target_modules:
raise ValueError('target_modules should be a list or set of strings.')
target_modules = set(target_modules)
if '' in target_modules:
raise ValueError('target_modules should not contain an empty string.')
other_module_names = set(other_module_names)
if not target_modules.isdisjoint(other_module_names):
msg = 'target_modules and other_module_names contain common elements, this should not happen, please open a GitHub issue at https://github.com/huggingface/peft/issues with the code to reproduce this issue'
raise ValueError(msg)
def generate_suffixes(s):
parts = s.split('.')
return ['.'.join(parts[i:]) for i in range(len(parts))][::-1]
other_module_suffixes = {suffix for item in other_module_names for suffix in generate_suffixes(item)}
target_modules_suffix_map = {item: generate_suffixes(item) for item in target_modules}
required_suffixes = set()
for (item, suffixes) in target_modules_suffix_map.items():
for suffix in suffixes:
if suffix in required_suffixes or suffix in other_module_suffixes:
continue
if not any((item.endswith(req_suffix) for req_suffix in required_suffixes)):
required_suffixes.add(suffix)
break
if not required_suffixes:
return set(target_modules)
return required_suffixes
def check_target_module_exists(config, key: str) -> bool | re.Match[str] | None:
if isinstance(config.target_modules, str):
target_module_found = re.fullmatch(config.target_modules, key)
elif key in config.target_modules:
target_module_found = True
else:
target_module_found = any((key.endswith(f'.{target_key}') for target_key in config.target_modules))
layer_indexes = getattr(config, 'layers_to_transform', None)
layers_pattern = getattr(config, 'layers_pattern', None)
is_using_layer_indexes = layer_indexes is not None and (len(layer_indexes) != 0 if isinstance(layer_indexes, list) else True)
if is_using_layer_indexes and target_module_found:
layer_index = None
if layers_pattern is None or len(layers_pattern) == 0:
layer_index = re.match('.*\\.[^.]*\\.(\\d+)\\.', key)
else:
layers_pattern = [layers_pattern] if isinstance(layers_pattern, str) else layers_pattern
for pattern in layers_pattern:
layer_index = re.match(f'.*\\.{pattern}\\.(\\d+)\\.', key)
if layer_index is not None:
break
if layer_index is None:
target_module_found = False
else:
layer_index = int(layer_index.group(1))
if isinstance(layer_indexes, int):
target_module_found = layer_index == layer_indexes
else:
target_module_found = layer_index in layer_indexes
return target_module_found
def inspect_matched_modules(tuner: BaseTuner, adapter_name: str='default') -> dict:
config = tuner.peft_config[adapter_name]
key_list = [key for (key, _) in tuner.model.named_modules()]
module_dict = {'matched': [], 'unmatched': []}
for key in key_list:
if tuner._check_target_module_exists(config, key):
module_dict['matched'].append(key)
else:
module_dict['unmatched'].append(key)
return module_dict
def _maybe_include_all_linear_layers(peft_config: PeftConfig, model: nn.Module) -> PeftConfig:
if not hasattr(peft_config, 'target_modules'):
return peft_config
if not (isinstance(peft_config.target_modules, str) and peft_config.target_modules.lower() == INCLUDE_LINEAR_LAYERS_SHORTHAND):
return peft_config
if not isinstance(model, PreTrainedModel):
raise ValueError(f'Only instances of PreTrainedModel support `target_modules={INCLUDE_LINEAR_LAYERS_SHORTHAND!r}`')
linear_classes = (torch.nn.Linear, Conv1D)
linear_module_names = set()
for (name, module) in model.named_modules():
if isinstance(module, linear_classes):
names = name.rsplit('.', 1)[-1]
linear_module_names.add(names)
module_names_to_exclude = set()
output_emb = model.get_output_embeddings()
if output_emb is not None:
last_module_name = [name for (name, module) in model.named_modules() if module is output_emb][0]
module_names_to_exclude.add(last_module_name)
elif peft_config.task_type == TaskType.SEQ_CLS:
for name in SEQ_CLS_HEAD_NAMES:
cls_head = getattr(model, name, None)
if cls_head is not None:
last_module_name = [name for (name, module) in model.named_modules() if module is cls_head][0]
module_names_to_exclude.add(last_module_name)
break
linear_module_names -= module_names_to_exclude
peft_config.target_modules = linear_module_names
return peft_config
def check_adapters_to_merge(module: BaseTunerLayer, adapter_names: Optional[list[str]]=None) -> list[str]:
if adapter_names is None:
adapter_names = module.active_adapters
if isinstance(adapter_names, str):
raise ValueError(f'adapter_names should be a list of strings, got {adapter_names!r}.')
if module.merged:
merged_adapters = set(module.merged_adapters)
adapter_names = [name for name in adapter_names if name not in merged_adapters]
if adapter_names:
warnings.warn(f"Already following adapters were merged {','.join(module.merged_adapters)}. You are now additionally merging {','.join(adapter_names)}.")
else:
warnings.warn('All adapters are already merged, nothing to do.')
return adapter_names
def clone_module(module: nn.Module, share_weights=False):
clone = copy.deepcopy(module)
def _share_weights(src: nn.Module, dst: nn.Module):
for (name, param) in src.named_parameters(recurse=False):
dst.register_parameter(name, param)
if share_weights:
for (name, submodule) in module.named_modules():
_share_weights(submodule, clone.get_submodule(name))
return clone
def replicate_layers(model: nn.Module, layer_map: list[tuple[int, int]]):
while hasattr(model, 'model'):
model = model.model
if hasattr(model, 'bert'):
model = model.bert
model_type = None
layers: nn.ModuleList = None
if hasattr(model, 'layers'):
model_type = 'llama'
layers = model.layers
elif hasattr(model, 'encoder') and hasattr(model.encoder, 'layer'):
model_type = 'bert'
layers = model.encoder.layer
elif hasattr(model, 'h'):
model_type = 'falcon'
layers = model.h
if not model_type or not isinstance(layers, nn.ModuleList):
raise ValueError('Could not locate the layers attribute in the model. Expected Llama, Bert or Falcon compatible architectures.')
new_layers = []
for (start, end) in layer_map:
for i in range(start, end):
current_idx = len(new_layers)
new_layers.append(clone_module(layers[i], share_weights=True))
for submodule in new_layers[-1].modules():
if hasattr(submodule, 'layer_idx'):
submodule.layer_idx = current_idx
layers = nn.ModuleList(new_layers)
if model_type == 'llama':
model.layers = layers
elif model_type == 'bert':
model.encoder.layer = layers
elif model_type == 'falcon':
model.h = layers
else:
raise ValueError('Unexpected model type, need to handle post-processing of layers.')
if hasattr(model.config, 'num_hidden_layers'):
model.config.num_hidden_layers = len(new_layers)
# File: peft-main/src/peft/tuners/vblora/config.py
from dataclasses import dataclass, field
from typing import List, Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class VBLoRAConfig(PeftConfig):
r: int = field(default=4, metadata={'help': 'The rank of incremental matrices.'})
num_vectors: int = field(default=256, metadata={'help': 'Number of vectors in the vector bank. Use higher values when the model size increases.'})
vector_length: int = field(default=256, metadata={'help': 'The length of the vectors in the vector bank. The length of the vectors should be divisible by the hidden dimension of the model.'})
topk: int = field(default=2, metadata={'help': 'The K value for top-K selection. A larger value of K increases the size of the saved model. In practice, setting K=2 typically provides the best performance and parameter efficiency. For more details, refer to the discussion in the paper.'})
target_modules: Optional[Union[List[str], str]] = field(default=None, metadata={'help': "List of module names or regex expression of the module names to replace with LoRA.For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'.This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer.If not specified, modules will be chosen according to the model architecture, If the architecture is not known, an error will be raised -- in this case, you should specify the target modules manually."})
save_only_topk_weights: bool = field(default=False, metadata={'help': 'Whether to only save the topk weights. Setting `save_only_topk_weights = True` significantly reduces storage space. However, models saved in this mode can be used for merging or inference only, not for resuming training.'})
vblora_dropout: float = field(default=0.0, metadata={'help': 'VBLoRA dropout'})
fan_in_fan_out: bool = field(default=False, metadata={'help': 'Set this to True if the layer to replace stores weight like (fan_in, fan_out)'})
bias: str = field(default='none', metadata={'help': "Bias type for VBLoRA. Can be 'none', 'all' or 'vblora_only'"})
modules_to_save: Optional[List[str]] = field(default=None, metadata={'help': 'List of modules apart from VBLoRA layers to be set as trainable and saved in the final checkpoint. For example, in Sequence Classification or Token Classification tasks, the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.'})
init_vector_bank_bound: float = field(default=0.02, metadata={'help': 'The vector bank is initialized with a uniform distribution between -init_vector_bank_bound and init_vector_bank_bound. Avoid initializing the vector bank with all zeros to prevent zero gradients. A small value, such as 0.02, is typically effective. Initializing with a large value may cause training instability.'})
init_logits_std: float = field(default=0.1, metadata={'help': 'The logits are initialized with a normal distribution with a standard deviation of init_logits_std. Default value 0.1 typically works well.'})
layers_to_transform: Optional[Union[List[int], int]] = field(default=None, metadata={'help': 'The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index. This only works when target_modules is a list of str.'})
layers_pattern: Optional[Union[List[str], str]] = field(default=None, metadata={'help': 'The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern.This only works when target_modules is a list of str.'})
def __post_init__(self):
self.peft_type = PeftType.VBLORA
self.target_modules = set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
# File: peft-main/src/peft/tuners/vblora/layer.py
import warnings
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from peft.utils.other import transpose
class VBLoRALayer(BaseTunerLayer):
adapter_layer_names = ('vblora_logits_A', 'vblora_logits_B', 'vblora_vector_bank')
def __init__(self, base_layer: nn.Module, **kwargs):
self.base_layer = base_layer
self.r = {}
self.topk = {}
self.vblora_dropout = nn.ModuleDict({})
self.vblora_logits_A = nn.ParameterDict({})
self.vblora_logits_B = nn.ParameterDict({})
self._disable_adapters = False
self.merged_adapters = []
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
(in_features, out_features) = (base_layer.in_features, base_layer.out_features)
elif isinstance(base_layer, Conv1D):
(in_features, out_features) = base_layer.weight.ds_shape if hasattr(base_layer.weight, 'ds_shape') else base_layer.weight.shape
self.in_features = in_features
self.out_features = out_features
self.kwargs = kwargs
@property
def merged(self) -> bool:
return bool(self.merged_adapters)
def update_layer(self, adapter_name: str, vblora_vector_bank, r: int, topk: int, num_vectors: int, vector_length: float, vblora_dropout: float=0.0, init_logits_std: float=0.01):
if r <= 0:
raise ValueError(f'`r` {r} should be a positive integer value')
if topk <= 0:
raise ValueError(f'`topk` {topk} should be a positive integer value')
if self.in_features % vector_length != 0:
raise ValueError(f'`in_features` {self.in_features} must be divisible by `vector_length` {vector_length}')
if self.out_features % vector_length != 0:
raise ValueError(f'`out_features` {self.out_features} must be divisible by `vector_length` {vector_length}')
self.r[adapter_name] = r
self.topk[adapter_name] = topk
if vblora_dropout > 0.0:
vblora_dropout_layer = nn.Dropout(p=vblora_dropout)
else:
vblora_dropout_layer = nn.Identity()
self.vblora_dropout.update(nn.ModuleDict({adapter_name: vblora_dropout_layer}))
self.vblora_logits_A[adapter_name] = nn.Parameter(torch.zeros(r, self.in_features // vector_length, num_vectors), requires_grad=True)
self.vblora_logits_B[adapter_name] = nn.Parameter(torch.zeros(self.out_features // vector_length, r, num_vectors), requires_grad=True)
self.vblora_vector_bank = vblora_vector_bank
self.reset_vblora_logits(adapter_name, init_logits_std)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def reset_vblora_logits(self, adapter_name, init_logits_std):
if adapter_name in self.vblora_logits_A.keys():
with torch.no_grad():
nn.init.normal_(self.vblora_logits_A[adapter_name], 0, init_logits_std)
nn.init.normal_(self.vblora_logits_B[adapter_name], 0, init_logits_std)
class Linear(nn.Linear, VBLoRALayer):
def __init__(self, base_layer, vblora_vector_bank, adapter_name: str, r: int, num_vectors: int, vector_length: int, topk: int=2, vblora_dropout: float=0.0, init_logits_std: float=0.01, fan_in_fan_out: bool=False, is_target_conv_1d_layer: bool=False, **kwargs) -> None:
super(nn.Linear, self).__init__()
VBLoRALayer.__init__(self, base_layer, **kwargs)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(adapter_name, vblora_vector_bank, r, topk, num_vectors, vector_length, vblora_dropout, init_logits_std)
self.is_target_conv_1d_layer = is_target_conv_1d_layer
def merge(self, safe_merge: bool=False, adapter_names: Optional[List[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter in self.vblora_logits_A.keys():
base_layer = self.get_base_layer()
if safe_merge:
orig_weights = base_layer.weight.data.clone()
orig_weights += self.get_delta_weight(active_adapter)
if not torch.isfinite(orig_weights).all():
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
base_layer.weight.data = orig_weights
else:
base_layer.weight.data += self.get_delta_weight(active_adapter)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.vblora_logits_A.keys():
self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
def _get_low_rank_matrix(self, logits: torch.tensor, vblora_vector_bank, topk) -> torch.Tensor:
(top_k_logits, indices) = logits.topk(topk, dim=-1)
topk_weights = F.softmax(top_k_logits, dim=-1)
return (topk_weights.unsqueeze(-1) * vblora_vector_bank[indices]).sum(-2)
def _get_lora_matrices(self, adapter, cast_to_fp32=False) -> Tuple[torch.Tensor, torch.Tensor]:
vblora_logits_A = self.vblora_logits_A[adapter]
vblora_logits_B = self.vblora_logits_B[adapter]
if self.training and vblora_logits_A[0, 0].isinf().any():
raise RuntimeError('Found infinity values in VB-LoRA logits. Ensure training was not resumed from a `save_only_topk_weights` model.')
vblora_vector_bank = self.vblora_vector_bank[adapter].to(vblora_logits_A.device)
topk = self.topk[adapter]
if cast_to_fp32:
vblora_logits_A = vblora_logits_A.float()
vblora_logits_B = vblora_logits_B.float()
vblora_vector_bank = vblora_vector_bank.float()
A = self._get_low_rank_matrix(vblora_logits_A, vblora_vector_bank, topk).reshape(vblora_logits_A.shape[0], -1)
B = self._get_low_rank_matrix(vblora_logits_B, vblora_vector_bank, topk).transpose(1, 2).reshape(-1, vblora_logits_B.shape[1])
return (A, B)
def get_delta_weight(self, adapter) -> torch.Tensor:
device = self.vblora_logits_A[adapter].device
dtype = self.vblora_logits_A[adapter].dtype
cast_to_fp32 = device.type == 'cpu' and dtype == torch.float16
(A, B) = self._get_lora_matrices(adapter, cast_to_fp32)
output_tensor = transpose(B @ A, self.fan_in_fan_out)
return output_tensor
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.vblora_logits_A.keys():
continue
(A, B) = self._get_lora_matrices(active_adapter)
x = x.to(self.vblora_vector_bank[active_adapter].dtype)
dropout = self.vblora_dropout[active_adapter]
result = result + F.linear(F.linear(dropout(x), A), B)
result = result.to(previous_dtype)
return result
# File: peft-main/src/peft/tuners/vblora/model.py
from __future__ import annotations
import warnings
from dataclasses import asdict
from enum import Enum
from typing import Optional
import torch
import torch.nn as nn
from tqdm import tqdm
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
from peft.utils import TRANSFORMERS_MODELS_TO_VBLORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, _get_submodules
from .config import VBLoRAConfig
from .layer import Linear, VBLoRALayer
class VBLoRAModel(BaseTuner):
prefix: str = 'vblora_'
def __init__(self, model, config, adapter_name) -> None:
super().__init__(model, config, adapter_name)
def _init_vblora_vector_bank(self, config: VBLoRAConfig, adapter_name: str) -> None:
vblora_vector_bank = torch.zeros(config.num_vectors, config.vector_length)
torch.nn.init.uniform_(vblora_vector_bank, -config.init_vector_bank_bound, config.init_vector_bank_bound)
self.vblora_vector_bank[adapter_name] = vblora_vector_bank
def _pre_injection_hook(self, model: nn.Module, config: VBLoRAConfig, adapter_name: str) -> None:
self.vblora_vector_bank = nn.ParameterDict({})
def _check_new_adapter_config(self, config: VBLoRAConfig) -> None:
if len(self.peft_config) > 1 and config.bias != 'none':
raise ValueError(f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.")
@staticmethod
def _check_target_module_exists(vblora_config, key):
return check_target_module_exists(vblora_config, key)
def _create_and_replace(self, vblora_config, adapter_name, target, target_name, parent, current_key):
if current_key is None:
raise ValueError("Current Key shouldn't be `None`")
bias = hasattr(target, 'bias') and target.bias is not None
kwargs = {'fan_in_fan_out': vblora_config.fan_in_fan_out, 'bias': bias}
self._init_vblora_vector_bank(vblora_config, adapter_name)
if isinstance(target, Linear):
target.update_layer(adapter_name=adapter_name, vblora_vector_bank=self.vblora_vector_bank, r=vblora_config.r, topk=vblora_config.topk, num_vectors=vblora_config.num_vectors, vector_length=vblora_config.vector_length, vblora_dropout=vblora_config.vblora_dropout, init_logits_std=vblora_config.init_logits_std)
else:
new_module = self._create_new_module(vblora_config=vblora_config, vblora_vector_bank=self.vblora_vector_bank, adapter_name=adapter_name, target=target, **kwargs)
if adapter_name not in self.active_adapter:
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
@staticmethod
def _replace_module(parent, child_name, new_module, child):
setattr(parent, child_name, new_module)
if hasattr(child, 'base_layer'):
child = child.base_layer
if not hasattr(new_module, 'base_layer'):
new_module.weight = child.weight
if hasattr(child, 'bias'):
new_module.bias = child.bias
if getattr(child, 'state', None) is not None:
if hasattr(new_module, 'base_layer'):
new_module.base_layer.state = child.state
else:
new_module.state = child.state
new_module.to(child.weight.device)
for (name, module) in new_module.named_modules():
if 'vblora_' in name:
module.to(child.weight.device)
def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
for (n, p) in model.named_parameters():
if self.prefix not in n:
p.requires_grad = False
for active_adapter in self.active_adapters:
bias = self.peft_config[active_adapter].bias
if bias == 'none':
continue
if bias == 'all':
for (n, p) in model.named_parameters():
if 'bias' in n:
p.requires_grad = True
elif bias == 'vblora_only':
for m in model.modules():
if isinstance(m, VBLoRALayer) and hasattr(m, 'bias') and (m.bias is not None):
m.bias.requires_grad = True
else:
raise NotImplementedError(f'Requested bias: {bias}, is not implemented.')
@staticmethod
def _create_new_module(vblora_config, vblora_vector_bank, adapter_name, target, **kwargs):
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Linear):
if kwargs['fan_in_fan_out']:
warnings.warn('fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. Setting fan_in_fan_out to False.')
kwargs['fan_in_fan_out'] = vblora_config.fan_in_fan_out = False
elif isinstance(target_base_layer, Conv1D):
kwargs['is_target_conv_1d_layer'] = True
if not kwargs['fan_in_fan_out']:
warnings.warn('fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True.')
kwargs['fan_in_fan_out'] = vblora_config.fan_in_fan_out = True
else:
raise ValueError(f'Target module {target} is not supported. Currently, only the following modules are supported: `torch.nn.Linear`, `transformers.pytorch_utils.Conv1D`.')
new_module = Linear(base_layer=target, vblora_vector_bank=vblora_vector_bank, adapter_name=adapter_name, r=vblora_config.r, num_vectors=vblora_config.num_vectors, vector_length=vblora_config.vector_length, topk=vblora_config.topk, vblora_dropout=vblora_config.vblora_dropout, init_logits_std=vblora_config.init_logits_std, **kwargs)
return new_module
def __getattr__(self, name: str):
try:
return super().__getattr__(name)
except AttributeError:
if name == 'model':
raise
return getattr(self.model, name)
def get_peft_config_as_dict(self, inference: bool=False):
config_dict = {}
for (key, value) in self.peft_config.items():
config = {k: v.value if isinstance(v, Enum) else v for (k, v) in asdict(value).items()}
if inference:
config['inference_mode'] = True
config_dict[key] = config
return config
def _set_adapter_layers(self, enabled: bool=True) -> None:
for module in self.model.modules():
if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
module.enable_adapters(enabled)
def enable_adapter_layers(self) -> None:
self._set_adapter_layers(enabled=True)
def disable_adapter_layers(self) -> None:
for active_adapter in self.active_adapters:
val = self.peft_config[active_adapter].bias
if val != 'none':
msg = f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same output as the the base model would without adaption."
warnings.warn(msg)
self._set_adapter_layers(enabled=False)
def set_adapter(self, adapter_name: str | list[str]) -> None:
for module in self.model.modules():
if isinstance(module, VBLoRALayer):
if module.merged:
warnings.warn('Adapter cannot be set when the model is merged. Unmerging the model first.')
module.unmerge()
module.set_adapter(adapter_name)
self.active_adapter = adapter_name
@staticmethod
def _prepare_adapter_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config['model_type'] not in TRANSFORMERS_MODELS_TO_VBLORA_TARGET_MODULES_MAPPING:
raise ValueError('Please specify `target_modules` in `peft_config`')
peft_config.target_modules = set(TRANSFORMERS_MODELS_TO_VBLORA_TARGET_MODULES_MAPPING[model_config['model_type']])
return peft_config
def _unload_and_optionally_merge(self, merge=True, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[list[str]]=None):
key_list = [key for (key, _) in self.model.named_modules() if self.prefix not in key]
desc = 'Unloading ' + ('and merging ' if merge else '') + 'model'
for key in tqdm(key_list, disable=not progressbar, desc=desc):
try:
(parent, target, target_name) = _get_submodules(self.model, key)
except AttributeError:
continue
if hasattr(target, 'base_layer'):
if merge:
target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
self._replace_module(parent, target_name, target.get_base_layer(), target)
elif isinstance(target, ModulesToSaveWrapper):
setattr(parent, target_name, target.modules_to_save[target.active_adapter])
return self.model
def delete_adapter(self, adapter_name: str) -> None:
if adapter_name not in list(self.peft_config.keys()):
raise ValueError(f'Adapter {adapter_name} does not exist')
del self.peft_config[adapter_name]
key_list = [key for (key, _) in self.model.named_modules() if self.prefix not in key]
new_adapter = None
for key in key_list:
(_, target, _) = _get_submodules(self.model, key)
if isinstance(target, VBLoRALayer):
target.delete_adapter(adapter_name)
if new_adapter is None:
new_adapter = target.active_adapter[:]
self.active_adapter = new_adapter or []
def merge_and_unload(self, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[list[str]]=None) -> torch.nn.Module:
return self._unload_and_optionally_merge(progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names)
def unload(self):
return self._unload_and_optionally_merge(merge=False)
def get_nb_savable_parameters(self, adapter='default') -> tuple[int, int]:
logits_params = 0
vector_bank_params = 0
other_params = 0
for (name, param) in self.named_parameters():
if 'vblora_logits' in name:
logits_params += param.numel()
elif 'vblora_vector_bank' in name:
vector_bank_params += param.numel()
elif param.requires_grad:
other_params += param.numel()
if self.peft_config[adapter].save_only_topk_weights:
num_vectors = self.peft_config[adapter].num_vectors
factor = 1
if num_vectors < 2 ** 8:
factor = 0.25
elif num_vectors < 2 ** 15:
factor = 0.5
elif num_vectors < 2 ** 31:
factor = 1
else:
factor = 2
topk_weight_params = logits_params / self.peft_config[adapter].num_vectors * (self.peft_config[adapter].topk - 1)
topk_indices_params = logits_params / self.peft_config[adapter].num_vectors * self.peft_config[adapter].topk * factor
vblora_params = int(vector_bank_params + topk_weight_params + topk_indices_params)
else:
vblora_params = vector_bank_params + logits_params
return (vblora_params, other_params)
def print_savable_parameters(self) -> None:
(vblora_params, other_params) = self.get_nb_savable_parameters()
print(f'VB-LoRA params to-be-saved (float32-equivalent): {vblora_params:,d} || total params to-be-saved: {vblora_params + other_params:,d}')
# File: peft-main/src/peft/tuners/vera/config.py
import warnings
from dataclasses import dataclass, field
from typing import List, Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class VeraConfig(PeftConfig):
r: int = field(default=256, metadata={'help': 'Vera attention dimension'})
target_modules: Optional[Union[List[str], str]] = field(default=None, metadata={'help': "List of module names or regex expression of the module names to replace with Vera.For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'. Only linear layers are supported."})
projection_prng_key: int = field(default=0, metadata={'help': 'Vera PRNG init key. Used for initialising vera_A and vera_B for new models or when loading a checkpoint that did not include these projections.'})
save_projection: bool = field(default=True, metadata={'help': 'Whether to save the vera_A / vera_B projections in the state dict alongside per layer lambda_b / lambda_d weights. This will increase the size of the checkpoint, but guarantee that we can reload the checkpoint on all system configurations.'})
vera_dropout: float = field(default=0.0, metadata={'help': 'Vera dropout'})
d_initial: float = field(default=0.1, metadata={'help': 'Initial init value for d vector.'})
fan_in_fan_out: bool = field(default=False, metadata={'help': 'Set this to True if the layer to replace stores weight like (fan_in, fan_out)'})
bias: str = field(default='none', metadata={'help': "Bias type for Vera. Can be 'none', 'all' or 'vera_only'"})
modules_to_save: Optional[List[str]] = field(default=None, metadata={'help': 'List of modules apart from Vera layers to be set as trainable and saved in the final checkpoint. For example, in Sequence Classification or Token Classification tasks, the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.'})
init_weights: bool = field(default=True, metadata={'help': "Whether to initialize the weights of the Vera layers with their default initialization. Don't change this setting, except if you know exactly what you're doing."})
layers_to_transform: Optional[Union[List[int], int]] = field(default=None, metadata={'help': 'The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index.'})
layers_pattern: Optional[str] = field(default=None, metadata={'help': 'The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern.'})
def __post_init__(self):
self.peft_type = PeftType.VERA
self.target_modules = set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
if not self.save_projection:
warnings.warn('Specified to not save vera_A and vera_B within the state dictionary, instead they will be restored using the PRNG key store in `config.projection_prng_key`. Consider setting `config.save_projection` to `True` to guarantee restoring the checkpoint correctly on all system configurations.')
# File: peft-main/src/peft/tuners/vera/layer.py
import warnings
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from peft.utils.other import transpose
from .._buffer_dict import BufferDict
class VeraLayer(BaseTunerLayer):
adapter_layer_names = ('vera_lambda_b', 'vera_lambda_d')
other_param_names = ('vera_A', 'vera_B')
def __init__(self, base_layer: nn.Module, **kwargs):
self.base_layer = base_layer
self.r = {}
self.vera_dropout = nn.ModuleDict({})
self.vera_lambda_b = nn.ParameterDict({})
self.vera_lambda_d = nn.ParameterDict({})
self.vera_A: Optional[BufferDict] = None
self.vera_B: Optional[BufferDict] = None
self._disable_adapters = False
self.merged_adapters = []
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
(in_features, out_features) = (base_layer.in_features, base_layer.out_features)
elif isinstance(base_layer, Conv1D):
(in_features, out_features) = base_layer.weight.ds_shape if hasattr(base_layer.weight, 'ds_shape') else base_layer.weight.shape
self.in_features = in_features
self.out_features = out_features
self.kwargs = kwargs
@property
def merged(self) -> bool:
return bool(self.merged_adapters)
def update_layer(self, adapter_name, vera_A: BufferDict, vera_B: BufferDict, r, vera_dropout, init_weights, d_initial: float=0.1):
if r <= 0:
raise ValueError(f'`r` should be a positive integer value but the value passed is {r}')
self.r[adapter_name] = r
if vera_dropout > 0.0:
vera_dropout_layer = nn.Dropout(p=vera_dropout)
else:
vera_dropout_layer = nn.Identity()
self.vera_dropout.update(nn.ModuleDict({adapter_name: vera_dropout_layer}))
self.vera_lambda_b[adapter_name] = nn.Parameter(torch.ones(self.out_features), requires_grad=True)
self.vera_lambda_d[adapter_name] = nn.Parameter(torch.randn(r), requires_grad=True)
self.vera_A = vera_A
self.vera_B = vera_B
if adapter_name not in vera_A:
if len(self.vera_A) < 1:
raise ValueError('The `vera_A` and `vera_B` buffers are empty. This should not happen. Please report this issue.')
vera_A_param = list(self.vera_A.values())[0]
vera_B_param = list(self.vera_B.values())[0]
error_tmpl = '{} has a size of {} but {} or greater is required; this probably happened because an additional VeRA adapter was added after the first one with incompatible shapes.'
if vera_A_param.shape[1] < self.in_features:
raise ValueError(error_tmpl.format('vera_A', vera_A_param.shape[1], self.in_features))
if vera_B_param.shape[0] < self.out_features:
raise ValueError(error_tmpl.format('vera_B', vera_B_param.shape[0], self.out_features))
error_tmpl = '{} has a size of {} but {} or greater is required; this probably happened because an additional VeRA adapter with a lower rank was added after the first one; loading the adapters in reverse order may solve this.'
if vera_A_param.shape[0] < self.r[adapter_name]:
raise ValueError(error_tmpl.format('vera_A', vera_A_param.shape[0], self.r[adapter_name]))
if vera_B_param.shape[1] < self.r[adapter_name]:
raise ValueError(error_tmpl.format('vera_B', vera_B_param.shape[1], self.r[adapter_name]))
self.vera_A[adapter_name] = vera_A_param
self.vera_B[adapter_name] = vera_B_param
if init_weights:
self.reset_vera_parameters(adapter_name, d_initial=d_initial)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def reset_vera_parameters(self, adapter_name, d_initial: float=0.1):
if adapter_name in self.vera_lambda_d.keys():
with torch.no_grad():
nn.init.zeros_(self.vera_lambda_d[adapter_name]).fill_(d_initial)
nn.init.zeros_(self.vera_lambda_b[adapter_name])
class Linear(nn.Linear, VeraLayer):
def __init__(self, base_layer, vera_A: BufferDict, vera_B: BufferDict, adapter_name: str, r: int=0, vera_dropout: float=0.0, fan_in_fan_out: bool=False, is_target_conv_1d_layer: bool=False, init_weights: bool=True, d_initial: float=0.1, **kwargs) -> None:
super(nn.Linear, self).__init__()
VeraLayer.__init__(self, base_layer, **kwargs)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(adapter_name, vera_A, vera_B, r, vera_dropout, init_weights, d_initial=d_initial)
self.is_target_conv_1d_layer = is_target_conv_1d_layer
def merge(self, safe_merge: bool=False, adapter_names: Optional[List[str]]=None) -> None:
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter in self.vera_lambda_d.keys():
base_layer = self.get_base_layer()
if safe_merge:
orig_weights = base_layer.weight.data.clone()
orig_weights += self.get_delta_weight(active_adapter)
if not torch.isfinite(orig_weights).all():
raise ValueError(f'NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken')
base_layer.weight.data = orig_weights
else:
base_layer.weight.data += self.get_delta_weight(active_adapter)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn('Already unmerged. Nothing to do.')
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.vera_lambda_d.keys():
self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
def get_delta_weight(self, adapter) -> torch.Tensor:
vera_A = self.vera_A[adapter]
vera_B = self.vera_B[adapter]
device = vera_B.device
dtype = vera_B.dtype
cast_to_fp32 = device.type == 'cpu' and (dtype == torch.float16 or dtype == torch.bfloat16)
lambda_d = self.vera_lambda_d[adapter]
lambda_b = self.vera_lambda_b[adapter]
if cast_to_fp32:
vera_A = vera_A.float()
vera_B = vera_B.float()
lambda_d = lambda_d.float()
lambda_b = lambda_b.float()
sliced_A = vera_A[:, :self.in_features]
sliced_B = vera_B[:self.out_features, :]
lambda_b = lambda_b.unsqueeze(-1)
lambda_d = lambda_d.unsqueeze(-1)
output_tensor = transpose(lambda_b * sliced_B @ (lambda_d * sliced_A), self.fan_in_fan_out)
if cast_to_fp32:
output_tensor = output_tensor.to(dtype=dtype)
self.vera_lambda_d[adapter].data = lambda_d.to(dtype)
self.vera_lambda_b[adapter].data = lambda_b.to(dtype)
return output_tensor
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.vera_lambda_d.keys():
continue
lambda_d = self.vera_lambda_d[active_adapter]
lambda_b = self.vera_lambda_b[active_adapter]
vera_A = self.vera_A[active_adapter]
vera_B = self.vera_B[active_adapter]
sliced_A = vera_A[:, :self.in_features]
sliced_B = vera_B[:self.out_features, :]
dropout = self.vera_dropout[active_adapter]
x = x.to(lambda_d.dtype)
result = result + lambda_b * F.linear(lambda_d * F.linear(dropout(x), sliced_A), sliced_B)
result = result.to(previous_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return 'vera.' + rep
# File: peft-main/src/peft/tuners/vera/model.py
from __future__ import annotations
import math
import warnings
from dataclasses import asdict
from enum import Enum
from typing import Optional, Union
import torch
import torch.nn as nn
from torch.nn.init import _calculate_correct_fan
from tqdm import tqdm
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
from peft.utils import TRANSFORMERS_MODELS_TO_VERA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, _get_submodules
from .._buffer_dict import BufferDict
from ..tuners_utils import _maybe_include_all_linear_layers
from .config import VeraConfig
from .layer import Linear, VeraLayer
def _kaiming_init(tensor_or_shape: Union[torch.Tensor, tuple[int, ...]], generator: torch.Generator) -> torch.Tensor:
if isinstance(tensor_or_shape, tuple):
tensor = torch.empty(tensor_or_shape)
else:
tensor = tensor_or_shape
fan = _calculate_correct_fan(tensor, 'fan_in')
gain = math.sqrt(2)
std = gain / math.sqrt(fan)
bound = math.sqrt(3.0) * std
with torch.no_grad():
return tensor.uniform_(-bound, bound, generator=generator)
class VeraModel(BaseTuner):
prefix: str = 'vera_lambda'
def __init__(self, model, config, adapter_name) -> None:
super().__init__(model, config, adapter_name)
def _find_dim(self, config) -> tuple[int, int]:
model_config = self.get_model_config(self.model)
peft_config = self._prepare_adapter_config(config, model_config)
peft_config = _maybe_include_all_linear_layers(peft_config, self.model)
largest_shape = None
for (key, module) in self.model.named_modules():
if not self._check_target_module_exists(peft_config, key):
continue
if isinstance(module, (nn.Linear, Conv1D)):
module_shape = tuple(module.weight.shape)
if isinstance(module, Conv1D):
module_shape = module_shape[::-1]
else:
continue
if largest_shape is None:
largest_shape = module_shape
continue
if module_shape != largest_shape:
largest_shape = tuple((max(a, b) for (a, b) in zip(largest_shape, module_shape)))
if largest_shape is None:
msg = 'No layers types compatible with VeRA were found. Please check `peft_config.target_modules`.'
raise ValueError(msg)
return largest_shape
def _init_vera_A_vera_B(self, config: VeraConfig, adapter_name: str) -> None:
(linear_out_dim, linear_in_dim) = self._find_dim(config)
self.vera_A = BufferDict({}, persistent=config.save_projection)
self.vera_B = BufferDict({}, persistent=config.save_projection)
generator = torch.Generator(device='cpu').manual_seed(config.projection_prng_key)
vera_A = _kaiming_init((config.r, linear_in_dim), generator=generator)
vera_B = _kaiming_init((linear_out_dim, config.r), generator=generator)
self.vera_A[adapter_name] = vera_A
self.vera_B[adapter_name] = vera_B
def _pre_injection_hook(self, model: nn.Module, config: VeraConfig, adapter_name: str) -> None:
self._init_vera_A_vera_B(config, adapter_name)
def _check_new_adapter_config(self, config: VeraConfig) -> None:
if len(self.peft_config) > 1 and config.bias != 'none':
raise ValueError(f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.")
for existing_config in self.peft_config.values():
if existing_config is config:
continue
if existing_config.projection_prng_key != config.projection_prng_key:
raise ValueError(f'Vera PRNG initialisation key must be the same for all adapters. Got config.projection_prng_key={config.projection_prng_key!r} but previous config had {existing_config.projection_prng_key}.')
save_project_unique_values = sorted({config.save_projection for config in self.peft_config.values()})
if len(save_project_unique_values) > 1:
raise ValueError(f'VeRA projection weights must be saved for all adapters or none, but got multiple different values: {save_project_unique_values}')
@staticmethod
def _check_target_module_exists(vera_config, key):
return check_target_module_exists(vera_config, key)
def _create_and_replace(self, vera_config, adapter_name, target, target_name, parent, current_key, **optional_kwargs):
if current_key is None:
raise ValueError("Current Key shouldn't be `None`")
r = vera_config.r
bias = hasattr(target, 'bias') and target.bias is not None
kwargs = {'r': r, 'vera_dropout': vera_config.vera_dropout, 'fan_in_fan_out': vera_config.fan_in_fan_out, 'init_weights': vera_config.init_weights}
kwargs['bias'] = bias
if isinstance(target, Linear):
target.update_layer(adapter_name, self.vera_A, self.vera_B, r, vera_config.vera_dropout, vera_config.init_weights, d_initial=vera_config.d_initial)
else:
new_module = self._create_new_module(vera_config, self.vera_A, self.vera_B, adapter_name, target, **kwargs)
if adapter_name not in self.active_adapter:
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
@staticmethod
def _replace_module(parent, child_name, new_module, child):
setattr(parent, child_name, new_module)
if hasattr(child, 'base_layer'):
child = child.base_layer
if not hasattr(new_module, 'base_layer'):
new_module.weight = child.weight
if hasattr(child, 'bias'):
new_module.bias = child.bias
if getattr(child, 'state', None) is not None:
if hasattr(new_module, 'base_layer'):
new_module.base_layer.state = child.state
else:
new_module.state = child.state
new_module.to(child.weight.device)
for (name, module) in new_module.named_modules():
if 'vera_' in name:
module.to(child.weight.device)
def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
for (n, p) in model.named_parameters():
if self.prefix not in n:
p.requires_grad = False
for active_adapter in self.active_adapters:
bias = self.peft_config[active_adapter].bias
if bias == 'none':
continue
if bias == 'all':
for (n, p) in model.named_parameters():
if 'bias' in n:
p.requires_grad = True
elif bias == 'vera_only':
for m in model.modules():
if isinstance(m, VeraLayer) and hasattr(m, 'bias') and (m.bias is not None):
m.bias.requires_grad = True
else:
raise NotImplementedError(f'Requested bias: {bias}, is not implemented.')
@staticmethod
def _create_new_module(vera_config, vera_A, vera_B, adapter_name, target, **kwargs):
bias = kwargs.pop('bias', False)
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Linear):
if kwargs['fan_in_fan_out']:
warnings.warn('fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. Setting fan_in_fan_out to False.')
kwargs['fan_in_fan_out'] = vera_config.fan_in_fan_out = False
elif isinstance(target_base_layer, Conv1D):
kwargs['is_target_conv_1d_layer'] = True
if not kwargs['fan_in_fan_out']:
warnings.warn('fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True.')
kwargs['fan_in_fan_out'] = vera_config.fan_in_fan_out = True
else:
raise ValueError(f'Target module {target} is not supported. Currently, only the following modules are supported: `torch.nn.Linear`, `transformers.pytorch_utils.Conv1D`.')
new_module = Linear(target, vera_A, vera_B, adapter_name, bias=bias, d_initial=vera_config.d_initial, **kwargs)
return new_module
def __getattr__(self, name: str):
try:
return super().__getattr__(name)
except AttributeError:
if name == 'model':
raise
return getattr(self.model, name)
def get_peft_config_as_dict(self, inference: bool=False):
config_dict = {}
for (key, value) in self.peft_config.items():
config = {k: v.value if isinstance(v, Enum) else v for (k, v) in asdict(value).items()}
if inference:
config['inference_mode'] = True
config_dict[key] = config
return config
def _set_adapter_layers(self, enabled=True):
for module in self.model.modules():
if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
module.enable_adapters(enabled)
def enable_adapter_layers(self):
self._set_adapter_layers(enabled=True)
def disable_adapter_layers(self):
for active_adapter in self.active_adapters:
val = self.peft_config[active_adapter].bias
if val != 'none':
msg = f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same output as the the base model would without adaption."
warnings.warn(msg)
self._set_adapter_layers(enabled=False)
def set_adapter(self, adapter_name):
for module in self.model.modules():
if isinstance(module, VeraLayer):
if module.merged:
warnings.warn('Adapter cannot be set when the model is merged. Unmerging the model first.')
module.unmerge()
module.set_adapter(adapter_name)
self.active_adapter = adapter_name
@staticmethod
def _prepare_adapter_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config['model_type'] not in TRANSFORMERS_MODELS_TO_VERA_TARGET_MODULES_MAPPING:
raise ValueError('Please specify `target_modules` in `peft_config`')
peft_config.target_modules = set(TRANSFORMERS_MODELS_TO_VERA_TARGET_MODULES_MAPPING[model_config['model_type']])
return peft_config
def _unload_and_optionally_merge(self, merge=True, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[list[str]]=None):
key_list = [key for (key, _) in self.model.named_modules() if 'vera' not in key]
desc = 'Unloading ' + ('and merging ' if merge else '') + 'model'
for key in tqdm(key_list, disable=not progressbar, desc=desc):
try:
(parent, target, target_name) = _get_submodules(self.model, key)
except AttributeError:
continue
if hasattr(target, 'base_layer'):
if merge:
target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
self._replace_module(parent, target_name, target.get_base_layer(), target)
elif isinstance(target, ModulesToSaveWrapper):
setattr(parent, target_name, target.modules_to_save[target.active_adapter])
return self.model
def delete_adapter(self, adapter_name: str):
if adapter_name not in list(self.peft_config.keys()):
raise ValueError(f'Adapter {adapter_name} does not exist')
del self.peft_config[adapter_name]
key_list = [key for (key, _) in self.model.named_modules() if 'vera' not in key]
new_adapter = None
for key in key_list:
(_, target, _) = _get_submodules(self.model, key)
if isinstance(target, VeraLayer):
target.delete_adapter(adapter_name)
if new_adapter is None:
new_adapter = target.active_adapter[:]
self.active_adapter = new_adapter or []
def merge_and_unload(self, progressbar: bool=False, safe_merge: bool=False, adapter_names: Optional[list[str]]=None):
return self._unload_and_optionally_merge(progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names)
def unload(self):
return self._unload_and_optionally_merge(merge=False)
# File: peft-main/src/peft/tuners/xlora/classifier.py
from __future__ import annotations
import builtins
from typing import Optional, Union
import torch
import torch.nn as nn
from .config import XLoraConfig
Number = Union[builtins.int, builtins.float, builtins.bool]
class TemperatureScaledSoftmax(nn.Module):
def __init__(self, temperature=1.0):
super().__init__()
self.temperature = temperature
self.softmax = nn.Softmax(dim=-1)
def forward(self, logits):
scaled_logits = logits / self.temperature
return self.softmax(scaled_logits)
class XLoraClassifier(nn.Module):
def __init__(self, model: nn.Module, config: XLoraConfig, n_classes: int, n_layers: int, device: torch.device):
super().__init__()
self.n_classes = n_classes
self.n_layers = n_layers
self.config = config
self.log_scalings = []
self.softmax = TemperatureScaledSoftmax(temperature=self.config.softmax_temperature)
self.override_scaling_pass_value: Number = config.scaling_pass_value
self.scalings_logging = False
self.dtype = next(model.parameters()).dtype
add_dropout = config.xlora_dropout_p > 0.0
layers = []
if self.config.xlora_depth == 1:
if config.layerwise_scalings:
last = nn.Linear(config.hidden_size, n_classes * n_layers, bias=True).to(device).to(self.dtype)
else:
last = nn.Linear(config.hidden_size, n_classes, bias=True).to(device).to(self.dtype)
else:
if self.config.xlora_depth <= 0:
raise ValueError('X-LoRA depth must be strictly positive.')
layers.append(nn.Linear(config.hidden_size, config.xlora_size, bias=True).to(device).to(self.dtype))
layers.append(nn.ReLU())
if add_dropout:
layers.append(nn.Dropout(p=config.xlora_dropout_p))
for _ in range(config.xlora_depth - 2):
layers.append(nn.Linear(config.xlora_size, config.xlora_size, bias=True).to(device).to(self.dtype))
layers.append(nn.ReLU())
if add_dropout:
layers.append(nn.Dropout(p=config.xlora_dropout_p))
if config.layerwise_scalings:
last = nn.Linear(config.xlora_size, n_classes * n_layers, bias=True).to(device).to(self.dtype)
else:
last = nn.Linear(config.xlora_size, n_classes, bias=True).to(device).to(self.dtype)
self.layers = nn.Sequential(*layers, last)
def make_dummy_scalings(self, input_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, *args, **kwargs) -> torch.Tensor:
if input_ids is not None:
batch_size = input_ids.shape[0]
device = input_ids.device
seq_len = input_ids.shape[1]
else:
batch_size = inputs_embeds.shape[0]
device = inputs_embeds.device
seq_len = inputs_embeds.shape[1]
return torch.full((batch_size, seq_len, self.n_layers, self.n_classes), self.override_scaling_pass_value).to(device=device, dtype=self.dtype)
def forward(self, result, input_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, *args, **kwargs) -> torch.Tensor:
if input_ids is not None:
batch_size = input_ids.shape[0]
seq_len = input_ids.shape[1]
else:
batch_size = inputs_embeds.shape[0]
seq_len = inputs_embeds.shape[1]
hidden_states = result.hidden_states
hidden_state = hidden_states[-1]
logits = self.layers.forward(hidden_state)
if not self.config.layerwise_scalings:
logits = logits.unsqueeze(2)
logits = logits.expand(-1, -1, self.n_layers, -1)
scalings = logits.reshape(batch_size, seq_len, self.n_layers, self.n_classes)
if self.config.enable_softmax:
scalings = self.softmax(scalings)
if self.scalings_logging:
self.log_scalings.append(scalings)
return scalings
def _get_bucketed_scalings(self) -> dict[int, tuple[list[int], list[torch.Tensor]]]:
seqlens_map: dict[int, tuple[list[int], list[torch.Tensor]]] = {}
for (i, scaling) in enumerate(self.log_scalings):
seq_len = scaling.shape[1]
if seq_len not in seqlens_map:
seqlens_map[seq_len] = ([i], [scaling])
else:
seqlens_map[seq_len][0].append(i)
seqlens_map[seq_len][1].append(scaling)
return seqlens_map
def _set_override_scaling_pass_value(self, value: Union[Number, None]):
if value is None:
self.override_scaling_pass_value = 1 / self.n_classes
else:
self.override_scaling_pass_value = value
self.config.scaling_pass_value = self.override_scaling_pass_value
# File: peft-main/src/peft/tuners/xlora/config.py
from __future__ import annotations
import warnings
from dataclasses import dataclass
from typing import Optional
from peft.config import PeftConfig
from peft.utils.peft_types import PeftType
@dataclass
class XLoraConfig(PeftConfig):
hidden_size: int = None
adapters: dict[str, str] = None
enable_softmax: bool = True
enable_softmax_topk: bool = False
layerwise_scalings: bool = False
xlora_depth: int = 1
xlora_size: int = 2048
xlora_dropout_p: float = 0.2
use_trainable_adapters: bool = False
softmax_temperature: float = 1.0
top_k_lora: Optional[int] = None
scaling_pass_value: float = 0.0
global_scaling_weight: float = 1.0
def __post_init__(self):
self.peft_type = PeftType.XLORA
if self.hidden_size is None:
warnings.warn('No value was provided for `hidden_size`. This will be set to 4096 by default, please ensure that this is correct.')
self.hidden_size = 4096
if self.adapters is None:
warnings.warn('No value was provided for for `adapters`. This will be set to empty, please ensure that this is correct.')
self.adapters = {}
if self.enable_softmax_topk and self.top_k_lora is None:
warnings.warn('`enable_softmax_topk` enabled `top_k_lora` is not set')
if self.enable_softmax_topk and self.enable_softmax:
warnings.warn('`enable_softmax_topk` and `enable_softmax` are both enabled. This will result in worse performance.')
if self.top_k_lora is not None and self.top_k_lora < 1:
warnings.warn('`top_k_lora` value must be at least 1.')
# File: peft-main/src/peft/tuners/xlora/layer.py
from __future__ import annotations
from typing import Any, Callable, Optional
import torch
import torch.nn as nn
from torch import Tensor
from peft.tuners import lora
from .config import XLoraConfig
class XLoraLayer:
def __init__(self, model: nn.Module, target: lora.LoraLayer, target_forward: Callable[..., Any], layer_number: int, config: XLoraConfig) -> None:
self.model = model
self.target_forward = target_forward
self.target = target
self.layer_number = layer_number
self.config = config
''
@staticmethod
def apply_scalings_to_x(x: torch.Tensor, scalings_layer: torch.Tensor, adapter: int) -> torch.Tensor:
scalings = scalings_layer[:, :, adapter].unsqueeze(-1)
return x * scalings
''
def get_maybe_topk_scalings(self, scalings) -> torch.Tensor:
xlora_scalings: Tensor = scalings[:, :, self.layer_number, :]
if self.config.top_k_lora is not None:
(_, topk_indices) = torch.topk(xlora_scalings, k=self.config.top_k_lora, dim=-1)
mask = torch.zeros_like(xlora_scalings, dtype=torch.bool)
mask.scatter_(-1, topk_indices, True)
xlora_scalings = xlora_scalings * mask.to(xlora_scalings.dtype)
if self.config.enable_softmax_topk:
nonzero_mask = xlora_scalings != 0
softmax_res_nonzero = torch.softmax(xlora_scalings[nonzero_mask], dim=-1)
xlora_scalings[nonzero_mask] = softmax_res_nonzero
return xlora_scalings
class XLoraLinearLayer(XLoraLayer):
def __init__(self, model: nn.Module, target: lora.Linear, target_forward: Callable[..., Any], layer_number: int, config: XLoraConfig) -> None:
super().__init__(model, target, target_forward, layer_number, config)
def forward(self, x: Tensor, *args: Any, scalings: Optional[Tensor]=None, **kwargs: Any) -> Tensor:
previous_dtype = x.dtype
if scalings is not None:
xlora_scalings = self.get_maybe_topk_scalings(scalings)
result = self.target.base_layer(x, *args, **kwargs)
if not self.target.merged:
for (adapter_n, active_adapter) in enumerate(self.target.active_adapters):
if self.target.use_dora[active_adapter]:
raise ValueError('X-LoRA currently does not support LoRA layers with DoRA')
if active_adapter not in self.target.lora_A.keys():
continue
lora_A = self.target.lora_A[active_adapter]
lora_B = self.target.lora_B[active_adapter]
dropout = self.target.lora_dropout[active_adapter]
scaling = self.target.scaling[active_adapter]
x = x.to(lora_A.weight.dtype)
if scalings is not None:
x_mod = self.apply_scalings_to_x(x, xlora_scalings, adapter_n)
scaling_weight = self.config.global_scaling_weight
else:
x_mod = x
scaling_weight = 1
result += lora_B(lora_A(dropout(x_mod))) * scaling * scaling_weight
result = result.to(previous_dtype)
return result
class XLoraEmbeddingLayer(XLoraLayer):
def __init__(self, model: nn.Module, target: lora.Embedding, target_forward: Callable[..., Any], layer_number: int, config: XLoraConfig) -> None:
super().__init__(model, target, target_forward, layer_number, config)
def forward(self, x: Tensor, *args: Any, scalings: Optional[Tensor]=None, **kwargs: Any) -> Tensor:
if scalings is not None:
xlora_scalings = self.get_maybe_topk_scalings(scalings)
result = self.target.base_layer(x, *args, **kwargs)
if not self.target.merged:
for (adapter_n, active_adapter) in enumerate(self.target.active_adapters):
if self.target.use_dora.get(active_adapter, False):
raise ValueError('X-LoRA currently does not support LoRA layers with DoRA')
if active_adapter not in self.target.lora_embedding_A:
continue
embedding_A = self.target.lora_embedding_A[active_adapter].T
embedding_B = self.target.lora_embedding_B[active_adapter].T
scaling = self.target.scaling[active_adapter]
after_A = self.target._embed(x, embedding_A)
if scalings is not None:
after_A_mod = self.apply_scalings_to_x(after_A, xlora_scalings, adapter_n)
scaling_weight = self.config.global_scaling_weight
else:
after_A_mod = after_A
scaling_weight = 1
result += after_A_mod @ embedding_B * scaling * scaling_weight
return result
class XLoraConv2dLayer(XLoraLayer):
def __init__(self, model: nn.Module, target: lora.Conv2d, target_forward: Callable[..., Any], layer_number: int, config: XLoraConfig) -> None:
super().__init__(model, target, target_forward, layer_number, config)
def forward(self, x: Tensor, *args: Any, scalings: Optional[Tensor]=None, **kwargs: Any) -> Tensor:
previous_dtype = x.dtype
if scalings is not None:
xlora_scalings = self.get_maybe_topk_scalings(scalings)
result = self.target.base_layer(x, *args, **kwargs)
if not self.target.merged:
for (adapter_n, active_adapter) in enumerate(self.target.active_adapters):
if self.target.use_dora[active_adapter]:
raise ValueError('X-LoRA currently does not support LoRA layers with DoRA')
if active_adapter not in self.target.lora_A.keys():
continue
lora_A = self.target.lora_A[active_adapter]
lora_B = self.target.lora_B[active_adapter]
dropout = self.target.lora_dropout[active_adapter]
scaling = self.target.scaling[active_adapter]
x = x.to(lora_A.weight.dtype)
if scalings is not None:
x_mod = self.apply_scalings_to_x(x, xlora_scalings, adapter_n)
scaling_weight = self.config.global_scaling_weight
else:
x_mod = x
scaling_weight = 1
result += lora_B(lora_A(dropout(x_mod))) * scaling * scaling_weight
result = result.to(previous_dtype)
return result
# File: peft-main/src/peft/tuners/xlora/model.py
from __future__ import annotations
import copy
from contextlib import contextmanager
from functools import partial
from typing import Optional, Union
import torch
import torch.nn as nn
from peft.tuners.lora.layer import LoraLayer
from peft.tuners.lora.model import LoraModel
from peft.tuners.tuners_utils import BaseTuner
from peft.utils.constants import DUMMY_TARGET_MODULES
from peft.utils.save_and_load import set_peft_model_state_dict
from .. import lora
from .classifier import XLoraClassifier
from .config import XLoraConfig
from .layer import XLoraConv2dLayer, XLoraEmbeddingLayer, XLoraLinearLayer
def convert_layers_to_xlora(base: nn.Module, xloramodel: nn.Module, config: XLoraConfig) -> tuple[int, torch.device | None]:
total_swapped = 0
all_layers = []
device = None
for module in base.modules():
if isinstance(module, lora.Linear):
device = module.lora_A[next(iter(module.lora_A))].weight.device
new_layer = XLoraLinearLayer(model=xloramodel, target=module, target_forward=module.forward, layer_number=total_swapped, config=config)
all_layers.append(new_layer)
module.forward = new_layer.forward
total_swapped += 1
elif isinstance(module, lora.Embedding):
device = module.lora_embedding_A[next(iter(module.lora_embedding_A))].device
new_layer = XLoraEmbeddingLayer(model=xloramodel, target=module, target_forward=module.forward, layer_number=total_swapped, config=config)
all_layers.append(new_layer)
module.forward = new_layer.forward
total_swapped += 1
elif isinstance(module, lora.Conv2d):
device = module.lora_A[next(iter(module.lora_A))].weight.device
new_layer = XLoraConv2dLayer(model=xloramodel, target=module, target_forward=module.forward, layer_number=total_swapped, config=config)
all_layers.append(new_layer)
module.forward = new_layer.forward
total_swapped += 1
return (total_swapped, device)
def _load_adapter_into_lora_model(lora_model: LoraModel, adapter_name: str, model_id: str, torch_device: Optional[str]=None, ephemeral_gpu_offload: bool=False, autocast_adapter_dtype: bool=True, subfolder: Optional[str]=None, **kwargs):
from peft.peft_model import PeftModel
from peft.tuners.lora.config import LoraConfig
from peft.utils.other import infer_device
from peft.utils.save_and_load import load_peft_weights
(hf_hub_download_kwargs, kwargs) = PeftModel._split_kwargs(kwargs)
if torch_device is None:
torch_device = infer_device()
if adapter_name not in lora_model.peft_config:
lora_peft_config = LoraConfig.from_pretrained(model_id, ephemeral_gpu_offload=ephemeral_gpu_offload, subfolder=subfolder, **hf_hub_download_kwargs)
lora_peft_config.inference_mode = False
lora_model.peft_config[adapter_name] = lora_peft_config
lora_model.inject_adapter(lora_model.model, adapter_name)
adapter_weights = load_peft_weights(model_id, device=torch_device, subfolder=subfolder, **hf_hub_download_kwargs)
new_adapter_weights = {}
for old_key in adapter_weights.keys():
key: str = old_key
while not (key.startswith('model.') and (not key.startswith('model.model.'))):
key = key[key.find('.') + 1:]
key = 'model.' + key
new_adapter_weights[key] = adapter_weights[old_key]
ignore_mismatched_sizes = kwargs.get('ignore_mismatched_sizes', False)
load_result = set_peft_model_state_dict(lora_model, new_adapter_weights, adapter_name=adapter_name, ignore_mismatched_sizes=ignore_mismatched_sizes)
if len(load_result.unexpected_keys) > 0:
raise ValueError(f'Got unexpected keys! Please raise an issue and tag @EricLBuehler.\n\nunexpected_keys={load_result.unexpected_keys}')
if hasattr(lora_model, '_cast_adapter_dtype'):
lora_model._cast_adapter_dtype(adapter_name=adapter_name, autocast_adapter_dtype=autocast_adapter_dtype)
class XLoraModel(BaseTuner):
def __init__(self, model: nn.Module, config: Union[dict[str, XLoraConfig], XLoraConfig], adapter_name: str, torch_device: Optional[str]=None, ephemeral_gpu_offload: bool=False, autocast_adapter_dtype: bool=True, **kwargs) -> None:
nn.Module.__init__(self)
if isinstance(config, dict):
conf = config[adapter_name]
else:
conf = config
base_lora_config = copy.copy(conf)
base_lora_config.target_modules = DUMMY_TARGET_MODULES
base_lora_config.layer_replication = None
base_lora_config.bias = 'none'
lora_model = LoraModel(model, base_lora_config, adapter_name)
self.xlora_config = conf
self.lora_model = lora_model
peft_config = conf
if hasattr(model.config, 'use_cache') and model.config.use_cache:
raise ValueError('`use_cache` must be False')
adapters_items = peft_config.adapters.items()
if hasattr(self.xlora_config, '_subfolders'):
adapters_items = zip(peft_config.adapters.items(), self.xlora_config._subfolders)
else:
adapters_items = peft_config.adapters.items()
if hasattr(self.xlora_config, '_subfolders'):
for (i, (_adapter_name, model_id), subfolder) in enumerate(adapters_items):
_load_adapter_into_lora_model(lora_model=self.lora_model, adapter_name=str(i), model_id=model_id, torch_device=torch_device, ephemeral_gpu_offload=ephemeral_gpu_offload, autocast_adapter_dtype=autocast_adapter_dtype, subfolder=subfolder, **kwargs)
else:
for (i, (_adapter_name, model_id)) in enumerate(adapters_items):
_load_adapter_into_lora_model(lora_model=self.lora_model, adapter_name=str(i), model_id=model_id, torch_device=torch_device, ephemeral_gpu_offload=ephemeral_gpu_offload, autocast_adapter_dtype=autocast_adapter_dtype, subfolder=None, **kwargs)
self.lora_model.set_adapter(list(peft_config.adapters.keys()))
self._maybe_freeze_all_adapters()
(total_swapped, device) = convert_layers_to_xlora(model, self, peft_config)
n_classes = len(peft_config.adapters)
xlora_classifier = XLoraClassifier(model, peft_config, n_classes, total_swapped, device)
self.internal_xlora_classifier = xlora_classifier
self.internal_xlora_scalings = None
self.disabled = False
def _maybe_freeze_all_adapters(self):
self.eval()
if not self.xlora_config.use_trainable_adapters:
for (name, param) in self.named_parameters():
if 'lora_' in name:
param.requires_grad = False
def generate(self, *args, **kwargs):
res = self.lora_model.generate(*args, **kwargs)
self._maybe_freeze_all_adapters()
return res
@contextmanager
def _enable_peft_forward_hooks(self, *generate_args, **generate_kwargs):
def scalings_injection_hook(target, args, kwargs, scalings):
kwargs['scalings'] = scalings
return (args, kwargs)
handles_to_remove = None
def pre_forward(module, *args, **kwargs):
nonlocal handles_to_remove
args_real = args[0]
kwargs_real = args[1]
kwargs_real.update(kwargs)
dummy_scalings = self.internal_xlora_classifier.make_dummy_scalings(*args_real, **kwargs_real)
hook_handles = []
for module in self.modules():
if isinstance(module, LoraLayer):
pre_forward = partial(scalings_injection_hook, scalings=dummy_scalings)
handle = module.register_forward_pre_hook(pre_forward, with_kwargs=True)
hook_handles.append(handle)
with torch.no_grad():
self.lora_model.disable_adapter_layers()
try:
scaling_pass_kwargs = kwargs_real.copy()
scaling_pass_kwargs['output_hidden_states'] = True
scaling_pass_kwargs['return_dict'] = True
try:
base_output = self.lora_model.model.forward(*args_real, **scaling_pass_kwargs)
finally:
for handle in hook_handles:
handle.remove()
finally:
self.lora_model.enable_adapter_layers()
xlora_scalings = self.internal_xlora_classifier(*args_real, result=base_output, **kwargs_real)
hook_handles = []
for module in self.modules():
if isinstance(module, LoraLayer):
pre_forward = partial(scalings_injection_hook, scalings=xlora_scalings)
handle = module.register_forward_pre_hook(pre_forward, with_kwargs=True)
hook_handles.append(handle)
handles_to_remove = hook_handles
if not self.disabled:
forward_handle = self.lora_model.model.register_forward_pre_hook(pre_forward, with_kwargs=True)
yield
if not self.disabled:
for handle in handles_to_remove:
handle.remove()
forward_handle.remove()
def __getattr__(self, name: str):
try:
return super().__getattr__(name)
except AttributeError:
if name == 'lora_model':
raise
return getattr(self.lora_model, name)
@staticmethod
def _prepare_adapter_config(peft_config, _model_config):
return peft_config
''
def _mark_only_adapters_as_trainable(self) -> None:
...
''
def enable_adapter_layers(self) -> None:
self.disabled = False
''
def disable_adapter_layers(self) -> None:
self.disabled = True
def _create_and_replace(self, lora_config, adapter_name, target, target_name, parent, current_key):
pass
@staticmethod
def _check_target_module_exists(lora_config, key):
return False
def forward(self, *args, **kwargs):
return self.lora_model.model(*args, **kwargs)
def set_topk_lora(self, value: Optional[int]):
classifier: XLoraClassifier = self.internal_xlora_classifier
classifier.config.top_k_lora = value
def set_global_scaling_weight(self, weight: float):
classifier: XLoraClassifier = self.internal_xlora_classifier
classifier.config.global_scaling_weight = weight
def set_scaling_pass_value(self, value: float | None):
classifier: XLoraClassifier = self.internal_xlora_classifier
classifier._set_override_scaling_pass_value(value)
def get_global_scaling_weight(self) -> float:
classifier: XLoraClassifier = self.internal_xlora_classifier
return classifier.config.global_scaling_weight
def get_latest_scalings(self) -> Optional[torch.Tensor]:
return self.internal_xlora_scalings
def get_scalings_log(self) -> list[torch.Tensor]:
classifier: XLoraClassifier = self.internal_xlora_classifier
return classifier.log_scalings.copy()
def enable_scalings_logging(self):
classifier: XLoraClassifier = self.internal_xlora_classifier
classifier.scalings_logging = True
def disable_scalings_logging(self):
classifier: XLoraClassifier = self.internal_xlora_classifier
classifier.scalings_logging = False
def clear_scalings_log(self):
classifier: XLoraClassifier = self.internal_xlora_classifier
classifier.log_scalings.clear()
def get_bucketed_scalings_log(self) -> dict[int, tuple[list[int], list[torch.Tensor]]]:
classifier: XLoraClassifier = self.internal_xlora_classifier
return classifier._get_bucketed_scalings()