# File: trl-main/benchmark/benchmark.py
# Runs `--command` once per random seed, either through a local thread pool or by filling in a Slurm template.
import argparse
import math
import os
import shlex
import subprocess
import uuid
from distutils.util import strtobool

import requests


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--command', type=str, default='', help='the command to run')
    parser.add_argument('--num-seeds', type=int, default=3, help='the number of random seeds')
    parser.add_argument('--start-seed', type=int, default=1, help='the number of the starting seed')
    parser.add_argument('--workers', type=int, default=0, help='the number of workers to run benchmark experiments')
    parser.add_argument('--auto-tag', type=lambda x: bool(strtobool(x)), default=True, nargs='?', const=True, help='if toggled, the runs will be tagged with git tags, commit, and pull request number if possible')
    parser.add_argument('--slurm-template-path', type=str, default=None, help='the path to the slurm template file (see docs for more details)')
    parser.add_argument('--slurm-gpus-per-task', type=int, default=1, help='the number of gpus per task to use for slurm jobs')
    parser.add_argument('--slurm-total-cpus', type=int, default=50, help='the total number of cpus to use for slurm jobs')
    parser.add_argument('--slurm-ntasks', type=int, default=1, help='the number of tasks to use for slurm jobs')
    parser.add_argument('--slurm-nodes', type=int, default=None, help='the number of nodes to use for slurm jobs')
    args = parser.parse_args()
    return args


def run_experiment(command: str):
    command_list = shlex.split(command)
    print(f'running {command}')
    fd = subprocess.Popen(command_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, errors = fd.communicate()
    return_code = fd.returncode
    assert return_code == 0, f"Command failed with error: {errors.decode('utf-8')}"
    return output.decode('utf-8').strip()


def autotag() -> str:
    wandb_tag = ''
    print('autotag feature is enabled')
    git_tag = ''
    try:
        git_tag = subprocess.check_output(['git', 'describe', '--tags']).decode('ascii').strip()
        print(f'identified git tag: {git_tag}')
    except subprocess.CalledProcessError as e:
        print(e)
    if len(git_tag) == 0:
        try:
            count = int(subprocess.check_output(['git', 'rev-list', '--count', 'HEAD']).decode('ascii').strip())
            hash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode('ascii').strip()
            git_tag = f'no-tag-{count}-g{hash}'
            print(f'identified git tag: {git_tag}')
        except subprocess.CalledProcessError as e:
            print(e)
    wandb_tag = f'{git_tag}'
    git_commit = subprocess.check_output(['git', 'rev-parse', '--verify', 'HEAD']).decode('ascii').strip()
    try:
        prs = requests.get(f'https://api.github.com/search/issues?q=repo:huggingface/trl+is:pr+{git_commit}')
        if prs.status_code == 200:
            prs = prs.json()
            if len(prs['items']) > 0:
                pr = prs['items'][0]
                pr_number = pr['number']
                wandb_tag += f',pr-{pr_number}'
                print(f'identified github pull request: {pr_number}')
    except Exception as e:
        print(e)
    return wandb_tag


if __name__ == '__main__':
    args = parse_args()
    if args.auto_tag:
        existing_wandb_tag = os.environ.get('WANDB_TAGS', '')
        wandb_tag = autotag()
        if len(wandb_tag) > 0:
            if len(existing_wandb_tag) > 0:
                os.environ['WANDB_TAGS'] = ','.join([existing_wandb_tag, wandb_tag])
            else:
                os.environ['WANDB_TAGS'] = wandb_tag
        print('WANDB_TAGS: ', os.environ.get('WANDB_TAGS', ''))
    commands = []
    for seed in range(0, args.num_seeds):
        commands += [' '.join([args.command, '--seed', str(args.start_seed + seed)])]
    print('======= commands to run:')
    for command in commands:
        print(command)
    if args.workers > 0 and args.slurm_template_path is None:
        from concurrent.futures import ThreadPoolExecutor

        executor = ThreadPoolExecutor(max_workers=args.workers, thread_name_prefix='cleanrl-benchmark-worker-')
        for command in commands:
            executor.submit(run_experiment, command)
        executor.shutdown(wait=True)
    else:
        print('not running the experiments because --workers is set to 0; just printing the commands to run')
    if args.slurm_template_path is not None:
        if not os.path.exists('slurm'):
            os.makedirs('slurm')
        if not os.path.exists('slurm/logs'):
            os.makedirs('slurm/logs')
        print('======= slurm commands to run:')
        with open(args.slurm_template_path) as f:
            slurm_template = f.read()
        slurm_template = slurm_template.replace('{{array}}', f'0-{len(commands) - 1}%{args.workers}')
        slurm_template = slurm_template.replace('{{seeds}}', f"({' '.join([str(args.start_seed + int(seed)) for seed in range(args.num_seeds)])})")
        slurm_template = slurm_template.replace('{{len_seeds}}', f'{args.num_seeds}')
        slurm_template = slurm_template.replace('{{command}}', args.command)
        slurm_template = slurm_template.replace('{{gpus_per_task}}', f'{args.slurm_gpus_per_task}')
        total_gpus = args.slurm_gpus_per_task * args.slurm_ntasks
        slurm_cpus_per_gpu = math.ceil(args.slurm_total_cpus / total_gpus)
        slurm_template = slurm_template.replace('{{cpus_per_gpu}}', f'{slurm_cpus_per_gpu}')
        slurm_template = slurm_template.replace('{{ntasks}}', f'{args.slurm_ntasks}')
        if args.slurm_nodes is not None:
            slurm_template = slurm_template.replace('{{nodes}}', f'#SBATCH --nodes={args.slurm_nodes}')
        else:
            slurm_template = slurm_template.replace('{{nodes}}', '')
        filename = str(uuid.uuid4())
        open(os.path.join('slurm', f'{filename}.slurm'), 'w').write(slurm_template)
        slurm_path = os.path.join('slurm', f'{filename}.slurm')
        print(f'saving command in {slurm_path}')
        if args.workers > 0:
            job_id = run_experiment(f'sbatch --parsable {slurm_path}')
            print(f'Job ID: {job_id}')

# File: trl-main/benchmark/post_github_comment.py
import json
import os

from ghapi.all import GhApi

FOLDER_STRING = os.environ.get('FOLDER_STRING', '')
folder = f'benchmark/trl/{FOLDER_STRING}'
host_url = f'https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/benchmark/{FOLDER_STRING}'
github_context = json.loads(os.environ['GITHUB_CONTEXT'])
token = os.environ['PERSONAL_ACCESS_TOKEN_GITHUB']
status_message = '**[COSTA BENCHMARK BOT]**: Here are the results'
body = status_message
repo = github_context['repository']
owner, repo = repo.split('/')
api = GhApi(owner=owner, repo=repo, token=token)
for file in os.listdir(folder):
    if file.endswith('.png'):
        body += f'\n![{file}]({host_url}/{file})'
api.issues.create_comment(issue_number=github_context['event']['issue']['number'], body=body)

# File: trl-main/benchmark/upload_benchmark.py
from dataclasses import dataclass

import tyro
from huggingface_hub import HfApi


@dataclass
class Args:
    folder_path: str = 'benchmark/trl'
    path_in_repo: str = 'images/benchmark'
    repo_id: str = 'trl-internal-testing/example-images'
    repo_type: str = 'dataset'


args = tyro.cli(Args)
api = HfApi()
api.upload_folder(folder_path=args.folder_path, path_in_repo=args.path_in_repo, repo_id=args.repo_id, repo_type=args.repo_type)

# File: trl-main/trl/__init__.py
__version__ = '0.11.0.dev0'

from typing import TYPE_CHECKING

from .import_utils import _LazyModule, is_diffusers_available, OptionalDependencyNotAvailable

_import_structure = {
    'core': ['set_seed'],
    'environment': ['TextEnvironment', 'TextHistory'],
    'extras': ['BestOfNSampler'],
    'import_utils':
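    # Illustrative note: `_import_structure` is consumed by `_LazyModule` (defined in
    # trl/import_utils.py) so that submodules are only imported on first attribute access.
    # A minimal usage sketch:
    #     import trl
    #     print(trl.__version__)       # resolved from `extra_objects`
    #     from trl import set_seed     # lazily imports `trl.core` at this point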
['is_bitsandbytes_available', 'is_diffusers_available', 'is_npu_available', 'is_peft_available', 'is_pil_available', 'is_wandb_available', 'is_xpu_available', 'is_llmblender_available', 'is_openai_available', 'is_liger_available'], 'models': ['AutoModelForCausalLMWithValueHead', 'AutoModelForSeq2SeqLMWithValueHead', 'PreTrainedModelWrapper', 'create_reference_model', 'setup_chat_format', 'SUPPORTED_ARCHITECTURES'], 'trainer': ['DataCollatorForCompletionOnlyLM', 'DPOConfig', 'DPOTrainer', 'CPOConfig', 'CPOTrainer', 'AlignPropConfig', 'AlignPropTrainer', 'IterativeSFTTrainer', 'KTOConfig', 'KTOTrainer', 'BCOConfig', 'BCOTrainer', 'ModelConfig', 'OnlineDPOConfig', 'OnlineDPOTrainer', 'XPOConfig', 'XPOTrainer', 'ORPOConfig', 'ORPOTrainer', 'PPOConfig', 'PPOTrainer', 'PPOv2Config', 'PPOv2Trainer', 'RewardConfig', 'RewardTrainer', 'RLOOConfig', 'RLOOTrainer', 'SFTConfig', 'SFTTrainer', 'FDivergenceConstants', 'FDivergenceType', 'GKDTrainer', 'GKDConfig', 'WinRateCallback', 'BaseJudge', 'BaseRankJudge', 'BasePairwiseJudge', 'RandomRankJudge', 'RandomPairwiseJudge', 'PairRMJudge', 'HfPairwiseJudge', 'OpenAIPairwiseJudge', 'LogCompletionsCallback'], 'commands': [], 'commands.cli_utils': ['init_zero_verbose', 'SFTScriptArguments', 'DPOScriptArguments', 'TrlParser'], 'trainer.callbacks': ['RichProgressCallback', 'SyncRefModelCallback'], 'trainer.utils': ['get_kbit_device_map', 'get_peft_config', 'get_quantization_config'], 'multitask_prompt_tuning': ['MultitaskPromptEmbedding', 'MultitaskPromptTuningConfig', 'MultitaskPromptTuningInit'], 'data_utils': ['apply_chat_template', 'extract_prompt', 'is_conversational', 'maybe_apply_chat_template', 'maybe_extract_prompt', 'maybe_unpair_preference_dataset', 'unpair_preference_dataset']} try: if not is_diffusers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['models'].extend(['DDPOPipelineOutput', 'DDPOSchedulerOutput', 'DDPOStableDiffusionPipeline', 'DefaultDDPOStableDiffusionPipeline']) _import_structure['trainer'].extend(['DDPOConfig', 'DDPOTrainer']) if TYPE_CHECKING: from .core import set_seed from .environment import TextEnvironment, TextHistory from .extras import BestOfNSampler from .import_utils import is_bitsandbytes_available, is_diffusers_available, is_npu_available, is_peft_available, is_pil_available, is_wandb_available, is_xpu_available, is_llmblender_available, is_openai_available, is_liger_available from .models import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead, PreTrainedModelWrapper, create_reference_model, setup_chat_format, SUPPORTED_ARCHITECTURES from .trainer import DataCollatorForCompletionOnlyLM, DPOConfig, DPOTrainer, CPOConfig, CPOTrainer, AlignPropConfig, AlignPropTrainer, IterativeSFTTrainer, KTOConfig, KTOTrainer, BCOConfig, BCOTrainer, ModelConfig, OnlineDPOConfig, OnlineDPOTrainer, XPOConfig, XPOTrainer, ORPOConfig, ORPOTrainer, PPOConfig, PPOTrainer, PPOv2Config, PPOv2Trainer, RewardConfig, RewardTrainer, RLOOConfig, RLOOTrainer, SFTConfig, SFTTrainer, FDivergenceConstants, FDivergenceType, GKDTrainer, GKDConfig, WinRateCallback, BaseJudge, BaseRankJudge, BasePairwiseJudge, RandomRankJudge, RandomPairwiseJudge, PairRMJudge, HfPairwiseJudge, OpenAIPairwiseJudge, LogCompletionsCallback from .trainer.callbacks import RichProgressCallback, SyncRefModelCallback from .trainer.utils import get_kbit_device_map, get_peft_config, get_quantization_config from .commands.cli_utils import init_zero_verbose, SFTScriptArguments, 
DPOScriptArguments, TrlParser from .data_utils import apply_chat_template, extract_prompt, is_conversational, maybe_apply_chat_template, maybe_extract_prompt, maybe_unpair_preference_dataset, unpair_preference_dataset try: if not is_diffusers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .models import DDPOPipelineOutput, DDPOSchedulerOutput, DDPOStableDiffusionPipeline, DefaultDDPOStableDiffusionPipeline from .trainer import DDPOConfig, DDPOTrainer else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__, extra_objects={'__version__': __version__}) # File: trl-main/trl/commands/__init__.py from typing import TYPE_CHECKING from ..import_utils import _LazyModule, OptionalDependencyNotAvailable _import_structure = {'cli_utils': ['SFTScriptArguments', 'init_zero_verbose', 'DPOScriptArguments', 'TrlParser', 'YamlConfigParser']} if TYPE_CHECKING: from .cli_utils import SFTScriptArguments, init_zero_verbose, DPOScriptArguments, TrlParser, YamlConfigParser else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: trl-main/trl/commands/cli.py import os import subprocess import sys from subprocess import CalledProcessError from rich.console import Console SUPPORTED_COMMANDS = ['sft', 'dpo', 'chat', 'kto'] def main(): console = Console() with console.status('[bold purple]Welcome! Initializing the TRL CLI...'): from trl.commands.cli_utils import init_zero_verbose init_zero_verbose() command_name = sys.argv[1] if command_name not in SUPPORTED_COMMANDS: raise ValueError(f'Please use one of the supported commands, got {command_name} - supported commands are {SUPPORTED_COMMANDS}') trl_examples_dir = os.path.dirname(__file__) if command_name == 'chat': command = f"\n python {trl_examples_dir}/scripts/{command_name}.py {' '.join(sys.argv[2:])}\n " else: command = f"\n accelerate launch {trl_examples_dir}/scripts/{command_name}.py {' '.join(sys.argv[2:])}\n " try: subprocess.run(command.split(), text=True, check=True, encoding='utf-8', cwd=os.getcwd(), env=os.environ.copy()) except (CalledProcessError, ChildProcessError) as exc: console.log(f'TRL - {command_name.upper()} failed on ! See the logs above for further details.') raise ValueError('TRL CLI failed! 
Check the traceback above..') from exc if __name__ == '__main__': main() # File: trl-main/trl/commands/cli_utils.py import logging import os import sys from argparse import Namespace from dataclasses import dataclass, field import yaml from transformers import HfArgumentParser logger = logging.getLogger(__name__) class YamlConfigParser: def parse_and_set_env(self, config_path): with open(config_path) as yaml_file: config = yaml.safe_load(yaml_file) if 'env' in config: env_vars = config.pop('env') if isinstance(env_vars, dict): for (key, value) in env_vars.items(): os.environ[key] = str(value) else: raise ValueError('`env` field should be a dict in the YAML file.') return config def to_string(self, config): final_string = '' for (key, value) in config.items(): if isinstance(value, (dict, list)): if len(value) != 0: value = str(value) value = value.replace("'", '"') value = f"'{value}'" else: continue final_string += f'--{key} {value} ' return final_string def init_zero_verbose(): import logging import warnings from rich.logging import RichHandler FORMAT = '%(message)s' logging.basicConfig(format=FORMAT, datefmt='[%X]', handlers=[RichHandler()], level=logging.ERROR) def warning_handler(message, category, filename, lineno, file=None, line=None): logging.warning(f'{filename}:{lineno}: {category.__name__}: {message}') warnings.showwarning = warning_handler @dataclass class SFTScriptArguments: dataset_name: str = field(default='timdettmers/openassistant-guanaco', metadata={'help': 'the dataset name'}) dataset_train_split: str = field(default='train', metadata={'help': 'The dataset split to train on'}) dataset_test_split: str = field(default='test', metadata={'help': 'The dataset split to evaluate on'}) config: str = field(default=None, metadata={'help': 'Path to the optional config file'}) gradient_checkpointing_use_reentrant: bool = field(default=False, metadata={'help': 'Whether to apply `use_reentrant` for gradient_checkpointing'}) @dataclass class RewardScriptArguments: dataset_name: str = field(default='trl-lib/ultrafeedback_binarized', metadata={'help': 'the dataset name'}) dataset_train_split: str = field(default='train', metadata={'help': 'The dataset split to train on'}) dataset_test_split: str = field(default='test', metadata={'help': 'The dataset split to evaluate on'}) config: str = field(default=None, metadata={'help': 'Path to the optional config file'}) gradient_checkpointing_use_reentrant: bool = field(default=False, metadata={'help': 'Whether to apply `use_reentrant` for gradient_checkpointing'}) @dataclass class DPOScriptArguments: dataset_name: str = field(default=None, metadata={'help': 'the dataset name'}) dataset_train_split: str = field(default='train', metadata={'help': 'The dataset split to use for training'}) dataset_test_split: str = field(default='test', metadata={'help': 'The dataset split to use for evaluation'}) ignore_bias_buffers: bool = field(default=False, metadata={'help': 'debug argument for distributed training;fix for DDP issues with LM bias/mask buffers - invalid scalar type,`inplace operation. 
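# Usage sketch (illustrative, not exhaustive): the CLI above dispatches to the bundled example
# scripts, wrapping every command except `chat` in `accelerate launch`. Typical invocations
# (dataset/model names here are placeholders or the defaults from the dataclasses below):
#     trl sft --model_name_or_path facebook/opt-350m --dataset_name timdettmers/openassistant-guanaco
#     trl dpo --model_name_or_path facebook/opt-350m --dataset_name trl-lib/ultrafeedback_binarized
#     trl chat --model_name_or_path Qwen/Qwen2-0.5B-Instruct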
Seehttps://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992'}) config: str = field(default=None, metadata={'help': 'Path to the optional config file'}) gradient_checkpointing_use_reentrant: bool = field(default=False, metadata={'help': 'Whether to apply `use_reentrant` for gradient_checkpointing'}) @dataclass class ChatArguments: model_name_or_path: str = field(metadata={'help': 'Name of the pre-trained model'}) user: str = field(default=None, metadata={'help': 'Username to display in chat interface'}) system_prompt: str = field(default=None, metadata={'help': 'System prompt'}) save_folder: str = field(default='./chat_history/', metadata={'help': 'Folder to save chat history'}) device: str = field(default='cpu', metadata={'help': 'device to use for inference.'}) config: str = field(default='default', metadata={'help': 'Config file used for setting the configs. If `default` uses examples/scripts/config/default_chat_config.yaml'}) examples: str = field(default=None, metadata={'help': 'Empty placeholder needs to be set via config.'}) max_new_tokens: int = field(default=256, metadata={'help': 'Maximum number of tokens to generate'}) do_sample: bool = field(default=True, metadata={'help': 'Whether to sample outputs during generation'}) num_beams: int = field(default=1, metadata={'help': 'Number of beams for beam search'}) temperature: float = field(default=1.0, metadata={'help': 'Temperature parameter for generation'}) top_k: int = field(default=50, metadata={'help': 'Value of k for top-k sampling'}) top_p: float = field(default=1.0, metadata={'help': 'Value of p for nucleus sampling'}) repetition_penalty: float = field(default=1.0, metadata={'help': 'Repetition penalty'}) eos_tokens: str = field(default=None, metadata={'help': 'EOS tokens to stop the generation. If multiple they should be comma separated'}) eos_token_ids: str = field(default=None, metadata={'help': 'EOS token IDs to stop the generation. If multiple they should be comma separated'}) model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}) torch_dtype: str = field(default=None, metadata={'help': "Override the default `torch.dtype` and load the model under this dtype. 
If `auto` is passed, the dtype will be automatically derived from the model's weights.", 'choices': ['auto', 'bfloat16', 'float16', 'float32']}) trust_remote_code: bool = field(default=False, metadata={'help': 'Trust remote code when loading a model.'}) attn_implementation: str = field(default=None, metadata={'help': 'Which attention implementation to use; you can run --attn_implementation=flash_attention_2, in which case you must install this manually by running `pip install flash-attn --no-build-isolation`'}) load_in_8bit: bool = field(default=False, metadata={'help': 'use 8 bit precision for the base model - works only with LoRA'}) load_in_4bit: bool = field(default=False, metadata={'help': 'use 4 bit precision for the base model - works only with LoRA'}) bnb_4bit_quant_type: str = field(default='nf4', metadata={'help': 'precise the quantization type (fp4 or nf4)'}) use_bnb_nested_quant: bool = field(default=False, metadata={'help': 'use nested quantization'}) class TrlParser(HfArgumentParser): def __init__(self, parsers, ignore_extra_args=False): super().__init__(parsers) self.yaml_parser = YamlConfigParser() self.ignore_extra_args = ignore_extra_args def post_process_dataclasses(self, dataclasses): training_args = trl_args = None training_args_index = None for (i, dataclass_obj) in enumerate(dataclasses): if dataclass_obj.__class__.__name__ == 'TrainingArguments': training_args = dataclass_obj training_args_index = i elif dataclass_obj.__class__.__name__ in ('SFTScriptArguments', 'DPOScriptArguments'): trl_args = dataclass_obj else: ... if trl_args is not None and training_args is not None: training_args.gradient_checkpointing_kwargs = dict(use_reentrant=trl_args.gradient_checkpointing_use_reentrant) dataclasses[training_args_index] = training_args return dataclasses def parse_args_and_config(self, return_remaining_strings=False): yaml_config = None if '--config' in sys.argv: config_index = sys.argv.index('--config') _ = sys.argv.pop(config_index) config_path = sys.argv.pop(config_index) yaml_config = self.yaml_parser.parse_and_set_env(config_path) self.set_defaults_with_config(**yaml_config) outputs = self.parse_args_into_dataclasses(return_remaining_strings=return_remaining_strings) if yaml_config is None: return outputs if return_remaining_strings: remaining_strings = outputs[-1] + [f'{key}: {value}' for (key, value) in vars(outputs[-2]).items()] return (outputs[:-2], remaining_strings) else: if isinstance(outputs[-1], Namespace) and (not self.ignore_extra_args): remaining_args = vars(outputs[-1]) raise ValueError(f'Some specified config arguments are not used by the TrlParser: {remaining_args}') return outputs def set_defaults_with_config(self, **kwargs): self._defaults.update(kwargs) for action in self._actions: if action.dest in kwargs: action.default = kwargs[action.dest] action.required = False # File: trl-main/trl/core.py import gc import random import warnings from contextlib import contextmanager from typing import Dict, List, Optional, Tuple, Union import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.rnn import pad_sequence from transformers.generation import TopKLogitsWarper, TopPLogitsWarper from .import_utils import is_npu_available, is_xpu_available try: from collections.abc import Mapping except ImportError: from collections.abc import Mapping WANDB_PADDING = -1 def top_k_top_p_filtering(logits: torch.FloatTensor, top_k: int=0, top_p: float=1.0, filter_value: float=-float('Inf'), min_tokens_to_keep: int=1) -> 
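# Example (illustrative sketch): parsing script + training arguments with `TrlParser` above.
# Field names follow the dataclasses defined in cli_utils.py; the YAML path is hypothetical.
def _example_trl_parser():
    from transformers import TrainingArguments

    from trl.commands.cli_utils import SFTScriptArguments, TrlParser

    parser = TrlParser((SFTScriptArguments, TrainingArguments))
    # Passing `--config path/to/config.yaml` on the command line is intercepted by
    # `parse_args_and_config`; the YAML keys become argument defaults and its `env`
    # section is exported to `os.environ`.
    script_args, training_args = parser.parse_args_and_config()
    return script_args, training_args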
torch.FloatTensor: if top_k > 0: logits = TopKLogitsWarper(top_k=top_k, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)(None, logits) if 0 <= top_p <= 1.0: logits = TopPLogitsWarper(top_p=top_p, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)(None, logits) return logits def flatten_dict(nested: Dict, sep: str='/') -> Dict: def recurse(nest: Dict, prefix: str, into: Dict) -> None: for (k, v) in nest.items(): if sep in k: raise ValueError(f"separator '{sep}' not allowed to be in key '{k}'") if isinstance(v, Mapping): recurse(v, prefix + k + sep, into) else: into[prefix + k] = v flat = {} recurse(nested, '', flat) return flat def convert_to_scalar(stats: Dict) -> Dict: tensorboard_stats = {} for (k, v) in stats.items(): if (isinstance(v, torch.Tensor) or isinstance(v, np.ndarray)) and (len(v.shape) == 0 or (len(v.shape) == 1 and v.shape[0] == 1)): v = v.item() tensorboard_stats[k] = v return tensorboard_stats def stack_dicts(stats_dicts: List[Dict]) -> Dict: results = dict() for k in stats_dicts[0]: stats_list = [torch.flatten(d[k]) for d in stats_dicts] results[k] = pad_sequence(stats_list, batch_first=True, padding_value=WANDB_PADDING) return results def logprobs_from_logits(logits: torch.Tensor, labels: torch.Tensor, gather: bool=True) -> torch.Tensor: logp = F.log_softmax(logits, dim=2) if not gather: return logp logpy = torch.gather(logp, 2, labels.unsqueeze(2)).squeeze(-1) return logpy def whiten(values: torch.Tensor, shift_mean: bool=True) -> torch.Tensor: (mean, var) = (torch.mean(values), torch.var(values)) whitened = (values - mean) * torch.rsqrt(var + 1e-08) if not shift_mean: whitened += mean return whitened def masked_mean(values: torch.Tensor, mask: torch.Tensor, axis: Optional[bool]=None) -> torch.Tensor: if axis is not None: return (values * mask).sum(axis=axis) / mask.sum(axis=axis) else: return (values * mask).sum() / mask.sum() def masked_var(values: torch.Tensor, mask: torch.Tensor, unbiased: bool=True) -> torch.Tensor: mean = masked_mean(values, mask) centered_values = values - mean variance = masked_mean(centered_values ** 2, mask) if unbiased: mask_sum = mask.sum() if mask_sum == 0: raise ValueError('The sum of the mask is zero, which can happen when `mini_batch_size=1`;try increase the `mini_batch_size` or `gradient_accumulation_steps`') bessel_correction = mask_sum / (mask_sum - 1) variance = variance * bessel_correction return variance def masked_whiten(values: torch.Tensor, mask: torch.Tensor, shift_mean: bool=True) -> torch.Tensor: (mean, var) = (masked_mean(values, mask), masked_var(values, mask)) whitened = (values - mean) * torch.rsqrt(var + 1e-08) if not shift_mean: whitened += mean return whitened def clip_by_value(x: torch.Tensor, tensor_min: float, tensor_max: float) -> torch.Tensor: clipped = torch.max(torch.min(x, tensor_max), tensor_min) return clipped def entropy_from_logits(logits: torch.Tensor) -> torch.Tensor: pd = torch.nn.functional.softmax(logits, dim=-1) entropy = torch.logsumexp(logits, axis=-1) - torch.sum(pd * logits, axis=-1) return entropy def stats_to_np(stats_dict: Dict) -> Dict: new_dict = dict() for (k, v) in stats_dict.items(): if isinstance(v, torch.Tensor): new_dict[k] = v.detach().cpu() if new_dict[k].dtype == torch.bfloat16: new_dict[k] = new_dict[k].float() new_dict[k] = new_dict[k].numpy() else: new_dict[k] = v if np.isscalar(new_dict[k]): new_dict[k] = float(new_dict[k]) return new_dict def respond_to_batch(model: nn.Module, queries: List[torch.LongTensor], txt_len: int=20, top_k: int=0, top_p: 
float=1.0) -> torch.LongTensor: input_ids = queries for _i in range(txt_len): outputs = model(input_ids) next_token_logits = outputs[0][:, -1, :] next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p) probs = F.softmax(next_token_logits, dim=-1) next_token = torch.multinomial(probs, num_samples=1).squeeze(1) input_ids = torch.cat([input_ids, next_token.unsqueeze(-1)], dim=-1) return input_ids[:, -txt_len:] def set_seed(seed: int) -> None: random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if is_xpu_available(): torch.xpu.manual_seed_all(seed) elif is_npu_available(): torch.npu.manual_seed_all(seed) else: torch.cuda.manual_seed_all(seed) class LengthSampler: def __init__(self, min_value: int, max_value: int): self.values = list(range(min_value, max_value)) def __call__(self) -> int: return np.random.choice(self.values) class PPODecorators: optimize_device_cache = False @classmethod @contextmanager def empty_device_cache(cls): yield if cls.optimize_device_cache: if is_xpu_available(): gc.collect() torch.xpu.empty_cache() gc.collect() elif is_npu_available(): gc.collect() torch.npu.empty_cache() gc.collect() elif torch.cuda.is_available(): gc.collect() torch.cuda.empty_cache() gc.collect() def randn_tensor(shape: Union[Tuple, List], generator: Optional[Union[List[torch.Generator], torch.Generator]]=None, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None, layout: Optional[torch.layout]=None) -> torch.Tensor: rand_device = device batch_size = shape[0] layout = layout or torch.strided device = device or torch.device('cpu') if generator is not None: gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type if gen_device_type != device.type and gen_device_type == 'cpu': rand_device = 'cpu' if device != 'mps': warnings.warn(f"The passed generator was created on 'cpu' even though a tensor on {device} was expected. Tensors will be created on 'cpu' and then moved to {device}. 
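# Example (illustrative): the log-prob and masking helpers defined above, on dummy tensors.
def _example_core_utils():
    import torch

    from trl.core import logprobs_from_logits, masked_mean, masked_whiten

    logits = torch.randn(2, 5, 10)         # (batch, seq, vocab)
    labels = torch.randint(0, 10, (2, 5))  # token ids actually taken
    logps = logprobs_from_logits(logits, labels)  # (batch, seq) log-probs of the labels

    mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 0]], dtype=torch.float)
    mean_logp = masked_mean(logps, mask)   # padding positions ignored
    whitened = masked_whiten(logps, mask)  # normalized over unmasked entries only
    return mean_logp, whitened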
Note that one can probably slighly speed up this function by passing a generator that was created on the {device} device.") elif gen_device_type != device.type and gen_device_type == 'cuda': raise ValueError(f'Cannot generate a {device} tensor from a generator of type {gen_device_type}.') if isinstance(generator, list) and len(generator) == 1: generator = generator[0] if isinstance(generator, list): shape = (1,) + shape[1:] latents = [torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout) for i in range(batch_size)] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device) return latents # File: trl-main/trl/data_utils.py from typing import Any, Dict, List, Optional, TypeVar from datasets import Dataset, DatasetDict from transformers import PreTrainedTokenizer DatasetType = TypeVar('DatasetType', Dataset, DatasetDict) def is_conversational(example: Dict[str, Any]) -> bool: supported_keys = ['prompt', 'chosen', 'rejected', 'completion', 'messages'] example_keys = {key for key in example.keys() if key in supported_keys} if example_keys: key = example_keys.pop() maybe_messages = example[key] if isinstance(maybe_messages, list): maybe_message = maybe_messages[0] if isinstance(maybe_message, dict) and 'role' in maybe_message and ('content' in maybe_message): return True return False def apply_chat_template(example: Dict[str, List[Dict[str, str]]], tokenizer: PreTrainedTokenizer) -> Dict[str, str]: supported_keys = ['prompt', 'chosen', 'rejected', 'completion', 'messages', 'label'] example_keys = {key for key in example.keys() if key in supported_keys} if example_keys not in [{'messages'}, {'prompt'}, {'prompt', 'completion'}, {'prompt', 'chosen', 'rejected'}, {'chosen', 'rejected'}, {'prompt', 'completion', 'label'}]: raise KeyError(f'Invalid keys in the example: {example_keys}') if 'messages' in example: messages = tokenizer.apply_chat_template(example['messages'], tokenize=False) if 'prompt' in example: prompt = tokenizer.apply_chat_template(example['prompt'], tokenize=False, add_generation_prompt=True) if 'prompt' in example: if 'chosen' in example: prompt_chosen = tokenizer.apply_chat_template(example['prompt'] + example['chosen'], tokenize=False) chosen = prompt_chosen[len(prompt):] if 'rejected' in example and 'prompt' in example: prompt_rejected = tokenizer.apply_chat_template(example['prompt'] + example['rejected'], tokenize=False) rejected = prompt_rejected[len(prompt):] if 'completion' in example: prompt_completion = tokenizer.apply_chat_template(example['prompt'] + example['completion'], tokenize=False) completion = prompt_completion[len(prompt):] else: if 'chosen' in example: chosen = tokenizer.apply_chat_template(example['chosen'], tokenize=False) if 'rejected' in example: rejected = tokenizer.apply_chat_template(example['rejected'], tokenize=False) if 'prompt' in example: error_message = 'The chat template applied to the prompt + completion does not start with the chat template applied to the prompt alone. 
This can indicate that the chat template is not supported by TRL.\n**Prompt**:\n{}\n\n**Prompt + Completion**:\n{}' if 'chosen' in example and (not prompt_chosen.startswith(prompt)): raise ValueError(error_message.format(prompt, prompt_chosen)) if 'rejected' in example and (not prompt_rejected.startswith(prompt)): raise ValueError(error_message.format(prompt, prompt_rejected)) if 'completion' in example and (not prompt_completion.startswith(prompt)): raise ValueError(error_message.format(prompt, prompt_completion)) output = {} if 'messages' in example: output['text'] = messages if 'prompt' in example: output['prompt'] = prompt if 'chosen' in example: output['chosen'] = chosen if 'rejected' in example: output['rejected'] = rejected if 'completion' in example: output['completion'] = completion if 'label' in example: output['label'] = example['label'] return output def maybe_apply_chat_template(example: Dict[str, List[Dict[str, str]]], tokenizer: PreTrainedTokenizer) -> Dict[str, str]: if is_conversational(example): return apply_chat_template(example, tokenizer) else: return example def _unpair_row(examples: List[Dict[str, List[Dict[str, str]]]]) -> List[Dict[str, List[Dict[str, str]]]]: batch_size = len(examples['chosen']) new_rows = {'completion': examples['chosen'] + examples['rejected'], 'label': [True] * batch_size + [False] * batch_size} if 'prompt' in examples: new_rows['prompt'] = examples['prompt'] + examples['prompt'] return new_rows def unpair_preference_dataset(dataset: DatasetType, num_proc: Optional[int]=None) -> DatasetType: return dataset.map(_unpair_row, batched=True, remove_columns=['chosen', 'rejected'], num_proc=num_proc) def maybe_unpair_preference_dataset(dataset: DatasetType, num_proc: Optional[int]=None) -> DatasetType: if isinstance(dataset, DatasetDict): column_names = dataset[list(dataset.keys())[0]].column_names else: column_names = dataset.column_names if 'chosen' in column_names and 'rejected' in column_names: return unpair_preference_dataset(dataset, num_proc=num_proc) else: return dataset def extract_prompt(example: Dict[str, List]) -> Dict[str, List]: for idx in range(min(len(example['chosen']), len(example['rejected']))): if example['chosen'][idx]['content'] != example['rejected'][idx]['content']: break return {'prompt': example['chosen'][:idx], 'chosen': example['chosen'][idx:], 'rejected': example['rejected'][idx:]} def maybe_extract_prompt(example: Dict[str, List]) -> Dict[str, List]: if 'prompt' in example and is_conversational({'prompt': example['prompt']}): return example else: return extract_prompt({'chosen': example['chosen'], 'rejected': example['rejected']}) # File: trl-main/trl/env_utils.py def strtobool(val: str) -> bool: val = val.lower() if val in ('y', 'yes', 't', 'true', 'on', '1'): return True if val in ('n', 'no', 'f', 'false', 'off', '0'): return False raise ValueError(f'Invalid truth value, it should be a string but {val} was provided instead.') # File: trl-main/trl/environment/__init__.py from typing import TYPE_CHECKING from ..import_utils import _LazyModule _import_structure = {'base_environment': ['TextEnvironment', 'TextHistory']} if TYPE_CHECKING: from .base_environment import TextEnvironment, TextHistory else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: trl-main/trl/environment/base_environment.py import re import warnings from typing import Optional import torch from accelerate.utils import extract_model_from_parallel from transformers import 
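# Example (illustrative): the preference-data helpers above applied to a toy record.
def _example_data_utils():
    from datasets import Dataset

    from trl.data_utils import extract_prompt, unpair_preference_dataset

    row = {
        'chosen': [{'role': 'user', 'content': 'What color is the sky?'},
                   {'role': 'assistant', 'content': 'It is blue.'}],
        'rejected': [{'role': 'user', 'content': 'What color is the sky?'},
                     {'role': 'assistant', 'content': 'It is green.'}],
    }
    with_prompt = extract_prompt(row)  # shared leading turns become the 'prompt' field

    paired = Dataset.from_list([with_prompt])
    unpaired = unpair_preference_dataset(paired)  # -> 'completion' rows with a boolean 'label'
    return with_prompt, unpaired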
StoppingCriteria, StoppingCriteriaList from ..import_utils import is_rich_available if is_rich_available(): from rich import print from rich.text import Text class StringStoppingCriteria(StoppingCriteria): def __init__(self, stop_strings, tokenizer): self.stop_strings = stop_strings self.tokenizer = tokenizer self.first_call = True def __call__(self, input_ids, scores, **kwargs): if self.first_call: self.generated_tokens = [1 for _ in range(input_ids.shape[0])] self.start_length = input_ids.shape[-1] - 1 self.first_call = False decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length:]) done = [] for (i, decoded_generation) in enumerate(decoded_generations): sequence_complete = any((stop_string in decoded_generation for stop_string in self.stop_strings)) done.append(sequence_complete) if not sequence_complete: self.generated_tokens[i] += 1 if all(done): self.first_call = True return all(done) class TextHistory: def __init__(self, text, tokens, system=True): self.system_spans = [] self.text_spans = [] self.token_spans = [] self.token_masks = torch.tensor([], dtype=torch.long).to(tokens.device) self.text = '' self.tokens = torch.tensor([], dtype=torch.long).to(tokens.device) self.completed = False self.truncated = False self.reward = 0.0 self.prompt_color = 'black on grey85' self.system_color = 'black on cyan3' self.model_color = 'black on deep_sky_blue1' self.reward_color = 'black on plum1' self.append_segment(text, tokens, system=system) def append_segment(self, text, tokens, system=True): if len(text) == 0 or len(tokens) == 0: raise ValueError("Can't append empty text or token list to history.") original_text_length = len(self.text) self.text += text self.text_spans.append((original_text_length, len(self.text))) self.system_spans.append(system) original_token_length = len(self.tokens) self.tokens = torch.cat((self.tokens, tokens)) if system: self.token_masks = torch.cat((self.token_masks, torch.zeros_like(tokens))) else: self.token_masks = torch.cat((self.token_masks, torch.ones_like(tokens))) self.token_spans.append((original_token_length, len(self.tokens))) def complete(self, truncated=False): self.completed = True self.truncated = truncated @property def last_text_segment(self): (start, end) = self.text_spans[-1] return self.text[start:end] def split_query_response_tokens(self): split_index = self.token_spans[0][1] query = self.tokens[:split_index] response = self.tokens[split_index:] mask = self.token_masks[split_index:] return (query, response, mask) def show_text(self, show_legend=False): if not is_rich_available(): warnings.warn('install rich to display text') return text = Text(self.text) text.stylize(self.prompt_color, self.text_spans[0][0], self.text_spans[1][0]) for (i, (start, end)) in enumerate(self.text_spans[1:]): if self.system_spans[i + 1]: text.stylize(self.system_color, start, end) else: text.stylize(self.model_color, start, end) text.append(f'\n\nReward: {self.reward}', style=self.reward_color) print(text) if show_legend: self.show_colour_legend() def show_tokens(self, tokenizer, show_legend=False): if not is_rich_available(): warnings.warn('install rich to display tokens') return text = Text() prompt_end = self.token_spans[0][1] for (i, (token, mask)) in enumerate(zip(self.tokens, self.token_masks)): if i < prompt_end: text.append(tokenizer.convert_ids_to_tokens(token.item()), style=self.prompt_color) text.append(' ') elif mask == 0: text.append(tokenizer.convert_ids_to_tokens(token.item()), style=self.system_color) text.append(' ') else: 
                text.append(tokenizer.convert_ids_to_tokens(token.item()), style=self.model_color)
                text.append(' ')
        text.append(f'\n\nReward: {self.reward}', style=self.reward_color)
        print(text)
        if show_legend:
            self.show_colour_legend()

    def show_colour_legend(self):
        if not is_rich_available():
            warnings.warn('install rich to display colour legend')
            return
        text = Text('\n\n(Colour Legend: ')
        text.append('Prompt', style=self.prompt_color)
        text.append('|')
        text.append('System', style=self.system_color)
        text.append('|')
        text.append('Model', style=self.model_color)
        text.append('|')
        text.append('Reward', style=self.reward_color)
        text.append(')')
        print(text)


class TextEnvironment:
    def __init__(self, model=None, tokenizer=None, tools=None, reward_fn=None, prompt=None, max_turns=4, max_tool_reponse=100, max_length=None, generation_kwargs=None):
        self.model = model
        self.tokenizer = tokenizer
        self.prompt = prompt
        if isinstance(tools, dict):
            self.tools = tools
        else:
            self.tools = {tool.__class__.__name__: tool for tool in tools}
        self.reward_fn = reward_fn
        self.max_length = max_length
        # control tokens that delimit tool requests, calls, responses and submissions in the generated text
        self.request_token = '<request>'
        self.call_token = '<call>'
        self.response_token = '<response>'
        self.submit_token = '<submit>'
        self.max_turns = max_turns
        self.max_tool_response = max_tool_reponse
        if generation_kwargs is None:
            self.generation_kwargs = dict()
        else:
            self.generation_kwargs = generation_kwargs
        self.is_encoder_decoder = hasattr(self.model, 'is_encoder_decoder')
        self.current_device = extract_model_from_parallel(self.model).pretrained_model.device

    def run(self, queries, **rewards_kwargs):
        turns = 0
        queries = [self.prompt + task for task in queries]
        queries_tokens = [self.tokenizer(query, return_tensors='pt').input_ids[0].to(self.model.pretrained_model.device) for query in queries]
        histories = [TextHistory(q, qt, system=True) for q, qt in zip(queries, queries_tokens)]
        while any(not history.completed for history in histories) and turns < self.max_turns:
            histories = self.generate(histories)
            histories = self.tasks_end_check(histories)
            for i in range(len(histories)):
                histories[i] = self.step(histories[i])
            histories = self.tasks_end_check(histories, model_turn=False)
            turns += 1
        self.compute_reward(histories, **rewards_kwargs)
        queries, responses, masks = map(list, zip(*[history.split_query_response_tokens() for history in histories]))
        rewards = [history.reward for history in histories]
        return (queries, responses, masks, rewards, histories)

    def step(self, history):
        truncated, ended = self.task_end_check(history)
        if ended:
            history.complete(truncated=truncated)
        if history.completed:
            return history
        tool, query = self.parse_tool_call(history.last_text_segment)
        if tool is None or query is None:
            response = f'Unknown tool call: {history.last_text_segment}'
        else:
            if tool not in self.tools:
                response = f'Unknown tool {tool}.'
            else:
                try:
                    response = self.tools[tool](query)
                except Exception as error:
                    response = f'Tool error: {str(error)}'
        if len(response) > self.max_tool_response:
            response = response[:self.max_tool_response - 3] + '...'
history.append_segment(response + self.response_token, self.tokenizer(response + self.response_token, return_tensors='pt').input_ids[0].to(self.model.pretrained_model.device), system=True) return history def parse_tool_call(self, text): result = re.search(f'(?<={self.request_token}).*?(?={self.call_token})', text, re.DOTALL) if result is None: return (None, None) else: extracted_text = result.group() result = re.search('<(.*?)>', extracted_text) if result is None: return (None, None) else: tool = result.group(1) query = '>'.join(extracted_text.split('>')[1:]) return (tool, query) def compute_reward(self, histories, **reward_kwargs): rewards = self.reward_fn([history.last_text_segment for history in histories], **reward_kwargs) for (history, reward) in zip(histories, rewards): history.reward = reward return histories def generate(self, histories): active_histories = [i for (i, history) in enumerate(histories) if not history.completed] query_tensors = [histories[i].tokens for i in active_histories] response_tensors = self._generate_batched(query_tensors) response_texts = self.tokenizer.batch_decode(response_tensors) for (i, response_text, response_tensor) in zip(active_histories, response_texts, response_tensors): histories[i].append_segment(response_text, response_tensor, system=False) return histories def tasks_end_check(self, histories, model_turn=True): for history in histories: if not history.completed: (truncated, ended) = self.task_end_check(history, model_turn=model_turn) if ended: history.complete(truncated=truncated) return histories def task_end_check(self, history, model_turn=True): truncated = False ended = False if history.completed: return (truncated, ended) if self.max_length is not None and len(self.tokenizer(history.text).input_ids[0]) > self.max_length: truncated = True ended = True elif self.tokenizer.eos_token in history.text: ended = True elif model_turn and (not (self.request_token in history.last_text_segment and self.call_token in history.last_text_segment or self.submit_token in history.last_text_segment)): ended = True elif self.submit_token in history.last_text_segment: ended = True return (truncated, ended) def _generate_batched(self, query_tensors, batch_size: int=16, pad_to_multiple_of: Optional[int]=None): outputs = [] padding_side_default = self.tokenizer.padding_side if not self.is_encoder_decoder: self.tokenizer.padding_side = 'left' batch_size = min(len(query_tensors), batch_size) for i in range(0, len(query_tensors), batch_size): end_index = min(len(query_tensors), i + batch_size) batch = query_tensors[i:end_index] batch_mask = [torch.ones_like(element) for element in batch] inputs = {'input_ids': batch, 'attention_mask': batch_mask} padded_inputs = self.tokenizer.pad(inputs, padding=True, max_length=None, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt').to(self.current_device) stopping_criteria = StringStoppingCriteria([self.call_token, self.submit_token], self.tokenizer) self.generation_kwargs['stopping_criteria'] = StoppingCriteriaList([stopping_criteria]) generations = extract_model_from_parallel(self.model).generate(**padded_inputs, **self.generation_kwargs) for (generation, mask, generated_tokens) in zip(generations, padded_inputs['attention_mask'], stopping_criteria.generated_tokens): if not self.is_encoder_decoder: output = generation[(1 - mask).sum():] else: output = generation if not self.is_encoder_decoder: output = output[mask.sum():] outputs.append(output[:generated_tokens]) self.tokenizer.padding_side = padding_side_default return 
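# Usage sketch (illustrative; the tool and reward below are placeholders):
#     from transformers import AutoTokenizer
#     from trl import AutoModelForCausalLMWithValueHead, TextEnvironment
#
#     model = AutoModelForCausalLMWithValueHead.from_pretrained('gpt2')
#     tokenizer = AutoTokenizer.from_pretrained('gpt2')
#     def reward_fn(responses, answers):
#         return [float(answer in response) for response, answer in zip(responses, answers)]
#     env = TextEnvironment(model, tokenizer, tools={'Calculator': str}, reward_fn=reward_fn,
#                           prompt='...', max_turns=2)
#     queries, responses, masks, rewards, histories = env.run(['What is 13 - 3?'], answers=['10'])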
outputs # File: trl-main/trl/extras/__init__.py from typing import TYPE_CHECKING from ..import_utils import _LazyModule _import_structure = {'best_of_n_sampler': ['BestOfNSampler']} if TYPE_CHECKING: from .best_of_n_sampler import BestOfNSampler else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: trl-main/trl/extras/best_of_n_sampler.py from typing import Any, Callable, List, Optional, Union import torch from transformers import GenerationConfig, PreTrainedTokenizer, PreTrainedTokenizerFast from ..core import set_seed from ..models import SUPPORTED_ARCHITECTURES, PreTrainedModelWrapper class BestOfNSampler: def __init__(self, model: PreTrainedModelWrapper, tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], queries_to_scores: Callable[[List[str]], List[float]], length_sampler: Any, sample_size: int=4, seed: Optional[int]=None, n_candidates: int=1, generation_config: Optional[GenerationConfig]=None) -> None: if seed is not None: set_seed(seed) if not isinstance(tokenizer, (PreTrainedTokenizer, PreTrainedTokenizerFast)): raise ValueError(f'tokenizer must be a PreTrainedTokenizer or PreTrainedTokenizerFast, got {type(tokenizer)}') if not isinstance(model, SUPPORTED_ARCHITECTURES): raise ValueError(f'model must be a PreTrainedModelWrapper, got {type(model)} - supported architectures are: {SUPPORTED_ARCHITECTURES}') self.model = model self.tokenizer = tokenizer self.queries_to_scores = queries_to_scores self.length_sampler = length_sampler self.gen_config = generation_config self.sample_size = sample_size self.n_candidates = n_candidates def generate(self, tokenized_query: Union[List[int], torch.Tensor, List[torch.Tensor], List[List[int]]], skip_special_tokens: bool=True, device: Optional[Union[str, torch.device]]=None, **generation_kwargs) -> List[List[str]]: queries = None if isinstance(tokenized_query, torch.Tensor) and tokenized_query.ndim == 1: queries = tokenized_query.unsqueeze(0) elif isinstance(tokenized_query, List): element_type = type(tokenized_query[0]) if element_type is int: queries = torch.tensor(tokenized_query).unsqueeze(0) elif element_type is torch.Tensor: queries = [tensor.reshape((1, -1)) for tensor in tokenized_query] else: queries = [torch.tensor(query).reshape((1, -1)) for query in tokenized_query] result = [] for query in queries: queries = query.repeat((self.sample_size, 1)) output = self.model.generate(queries.to(device), max_new_tokens=self.length_sampler(), generation_config=self.gen_config, **generation_kwargs).squeeze() output = self.tokenizer.batch_decode(output, skip_special_tokens=skip_special_tokens) scores = torch.tensor(self.queries_to_scores(output)) output = [output[i] for i in scores.topk(self.n_candidates).indices] result.append(output) return result # File: trl-main/trl/extras/dataset_formatting.py import logging from typing import Callable, Literal, Optional, Union from datasets import Dataset, Value from transformers import AutoTokenizer from ..trainer.utils import ConstantLengthDataset FORMAT_MAPPING = {'chatml': [{'content': Value(dtype='string', id=None), 'role': Value(dtype='string', id=None)}], 'instruction': {'completion': Value(dtype='string', id=None), 'prompt': Value(dtype='string', id=None)}} def conversations_formatting_function(tokenizer: AutoTokenizer, messages_field: Literal['messages', 'conversations']): def format_dataset(examples): if isinstance(examples[messages_field][0], list): output_texts = [] for i in 
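# Example (illustrative; the scoring function is a stand-in for a real reward model):
def _example_best_of_n():
    from transformers import AutoTokenizer

    from trl import AutoModelForCausalLMWithValueHead
    from trl.core import LengthSampler
    from trl.extras import BestOfNSampler

    model = AutoModelForCausalLMWithValueHead.from_pretrained('gpt2')
    tokenizer = AutoTokenizer.from_pretrained('gpt2')
    tokenizer.pad_token = tokenizer.eos_token

    def queries_to_scores(texts):  # placeholder: longer completions score higher
        return [float(len(text)) for text in texts]

    sampler = BestOfNSampler(model, tokenizer, queries_to_scores,
                             length_sampler=LengthSampler(16, 32), sample_size=4, n_candidates=1)
    query_ids = tokenizer('The best thing about TRL is', return_tensors='pt').input_ids[0]
    return sampler.generate(query_ids, device='cpu')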
range(len(examples[messages_field])): output_texts.append(tokenizer.apply_chat_template(examples[messages_field][i], tokenize=False)) return output_texts else: return tokenizer.apply_chat_template(examples[messages_field], tokenize=False) return format_dataset def instructions_formatting_function(tokenizer: AutoTokenizer): def format_dataset(examples): if isinstance(examples['prompt'], list): output_texts = [] for i in range(len(examples['prompt'])): converted_sample = [{'role': 'user', 'content': examples['prompt'][i]}, {'role': 'assistant', 'content': examples['completion'][i]}] output_texts.append(tokenizer.apply_chat_template(converted_sample, tokenize=False)) return output_texts else: converted_sample = [{'role': 'user', 'content': examples['prompt']}, {'role': 'assistant', 'content': examples['completion']}] return tokenizer.apply_chat_template(converted_sample, tokenize=False) return format_dataset def get_formatting_func_from_dataset(dataset: Union[Dataset, ConstantLengthDataset], tokenizer: AutoTokenizer) -> Optional[Callable]: if isinstance(dataset, Dataset): if 'messages' in dataset.features: if dataset.features['messages'] == FORMAT_MAPPING['chatml']: logging.info('Formatting dataset with chatml format') return conversations_formatting_function(tokenizer, 'messages') if 'conversations' in dataset.features: if dataset.features['conversations'] == FORMAT_MAPPING['chatml']: logging.info('Formatting dataset with chatml format') return conversations_formatting_function(tokenizer, 'conversations') elif dataset.features == FORMAT_MAPPING['instruction']: logging.info('Formatting dataset with instruction format') return instructions_formatting_function(tokenizer) return None # File: trl-main/trl/import_utils.py import importlib import os import sys from importlib.util import find_spec from itertools import chain from types import ModuleType from typing import Any if sys.version_info < (3, 8): _is_python_greater_3_8 = False else: _is_python_greater_3_8 = True def is_peft_available() -> bool: return find_spec('peft') is not None def is_liger_available() -> bool: return find_spec('liger_kernel') is not None def is_unsloth_available() -> bool: return find_spec('unsloth') is not None def is_accelerate_greater_20_0() -> bool: if _is_python_greater_3_8: from importlib.metadata import version accelerate_version = version('accelerate') else: import pkg_resources accelerate_version = pkg_resources.get_distribution('accelerate').version return accelerate_version >= '0.20.0' def is_transformers_greater_than(current_version: str) -> bool: if _is_python_greater_3_8: from importlib.metadata import version _transformers_version = version('transformers') else: import pkg_resources _transformers_version = pkg_resources.get_distribution('transformers').version return _transformers_version > current_version def is_torch_greater_2_0() -> bool: if _is_python_greater_3_8: from importlib.metadata import version torch_version = version('torch') else: import pkg_resources torch_version = pkg_resources.get_distribution('torch').version return torch_version >= '2.0' def is_diffusers_available() -> bool: return find_spec('diffusers') is not None def is_pil_available() -> bool: return find_spec('PIL') is not None def is_bitsandbytes_available() -> bool: import torch return find_spec('bitsandbytes') is not None and torch.cuda.is_available() def is_torchvision_available() -> bool: return find_spec('torchvision') is not None def is_rich_available() -> bool: return find_spec('rich') is not None def is_wandb_available() 
-> bool: return find_spec('wandb') is not None def is_sklearn_available() -> bool: return find_spec('sklearn') is not None def is_llmblender_available() -> bool: return find_spec('llm_blender') is not None def is_openai_available() -> bool: return find_spec('openai') is not None def is_xpu_available() -> bool: if is_accelerate_greater_20_0(): import accelerate return accelerate.utils.is_xpu_available() else: if find_spec('intel_extension_for_pytorch') is None: return False try: import torch return hasattr(torch, 'xpu') and torch.xpu.is_available() except RuntimeError: return False def is_npu_available() -> bool: if find_spec('torch') is None or find_spec('torch_npu') is None: return False import torch import torch_npu return hasattr(torch, 'npu') and torch.npu.is_available() class _LazyModule(ModuleType): def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None): super().__init__(name) self._modules = set(import_structure.keys()) self._class_to_module = {} for (key, values) in import_structure.items(): for value in values: self._class_to_module[value] = key self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values())) self.__file__ = module_file self.__spec__ = module_spec self.__path__ = [os.path.dirname(module_file)] self._objects = {} if extra_objects is None else extra_objects self._name = name self._import_structure = import_structure def __dir__(self): result = super().__dir__() for attr in self.__all__: if attr not in result: result.append(attr) return result def __getattr__(self, name: str) -> Any: if name in self._objects: return self._objects[name] if name in self._modules: value = self._get_module(name) elif name in self._class_to_module.keys(): module = self._get_module(self._class_to_module[name]) value = getattr(module, name) else: raise AttributeError(f'module {self.__name__} has no attribute {name}') setattr(self, name, value) return value def _get_module(self, module_name: str): try: return importlib.import_module('.' 
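# Example (illustrative): guarding optional integrations with the availability helpers above.
def _example_optional_deps():
    from trl.import_utils import is_peft_available, is_wandb_available

    peft_config = None
    if is_peft_available():
        from peft import LoraConfig

        peft_config = LoraConfig(r=16, lora_alpha=32, task_type='CAUSAL_LM')
    report_to = 'wandb' if is_wandb_available() else 'none'
    return peft_config, report_to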
                + module_name, self.__name__)
        except Exception as e:
            raise RuntimeError(f'Failed to import {self.__name__}.{module_name} because of the following error (look up to see its traceback):\n{e}') from e

    def __reduce__(self):
        return (self.__class__, (self._name, self.__file__, self._import_structure))


class OptionalDependencyNotAvailable(BaseException):
    """Raised internally when an optional dependency (e.g. `diffusers`) is not installed."""

# File: trl-main/trl/models/__init__.py
from typing import TYPE_CHECKING

from ..import_utils import _LazyModule, is_diffusers_available, OptionalDependencyNotAvailable

_import_structure = {
    'modeling_base': ['PreTrainedModelWrapper', 'create_reference_model'],
    'modeling_value_head': ['AutoModelForCausalLMWithValueHead', 'AutoModelForSeq2SeqLMWithValueHead'],
    'utils': ['setup_chat_format', 'SUPPORTED_ARCHITECTURES', 'unwrap_model_for_generation'],
}

try:
    if not is_diffusers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_sd_base'] = ['DDPOPipelineOutput', 'DDPOSchedulerOutput', 'DDPOStableDiffusionPipeline', 'DefaultDDPOStableDiffusionPipeline']

if TYPE_CHECKING:
    from .modeling_base import PreTrainedModelWrapper, create_reference_model
    from .modeling_value_head import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead
    from .utils import setup_chat_format, SUPPORTED_ARCHITECTURES

    try:
        if not is_diffusers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sd_base import DDPOPipelineOutput, DDPOSchedulerOutput, DDPOStableDiffusionPipeline, DefaultDDPOStableDiffusionPipeline
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: trl-main/trl/models/auxiliary_modules.py
import os

import torch
import torch.nn as nn
import torchvision
from huggingface_hub import hf_hub_download
from huggingface_hub.utils import EntryNotFoundError
from transformers import CLIPModel

from trl.import_utils import is_npu_available, is_xpu_available


class MLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.Sequential(nn.Linear(768, 1024), nn.Dropout(0.2), nn.Linear(1024, 128), nn.Dropout(0.2), nn.Linear(128, 64), nn.Dropout(0.1), nn.Linear(64, 16), nn.Linear(16, 1))

    def forward(self, embed):
        return self.layers(embed)


class AestheticScorer(torch.nn.Module):
    def __init__(self, *, dtype, model_id, model_filename):
        super().__init__()
        self.clip = CLIPModel.from_pretrained('openai/clip-vit-large-patch14')
        self.normalize = torchvision.transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711])
        self.target_size = 224
        self.mlp = MLP()
        try:
            cached_path = hf_hub_download(model_id, model_filename)
        except EntryNotFoundError:
            cached_path = os.path.join(model_id, model_filename)
        state_dict = torch.load(cached_path, map_location=torch.device('cpu'), weights_only=True)
        self.mlp.load_state_dict(state_dict)
        self.dtype = dtype
        self.eval()

    def __call__(self, images):
        device = next(self.parameters()).device
        images = torchvision.transforms.Resize(self.target_size)(images)
        images = self.normalize(images).to(self.dtype).to(device)
        embed = self.clip.get_image_features(pixel_values=images)
        embed = embed / torch.linalg.vector_norm(embed, dim=-1, keepdim=True)
        reward = self.mlp(embed).squeeze(1)
        return reward


def aesthetic_scorer(hub_model_id, model_filename):
    scorer = AestheticScorer(model_id=hub_model_id, model_filename=model_filename, dtype=torch.float32)
    if is_npu_available():
        scorer = scorer.npu()
elif is_xpu_available(): scorer = scorer.xpu() else: scorer = scorer.cuda() def _fn(images, prompts, metadata): images = images.clamp(0, 1) scores = scorer(images) return (scores, {}) return _fn # File: trl-main/trl/models/modeling_base.py import json import logging import os from copy import deepcopy from typing import Optional import torch import torch.nn as nn from accelerate import PartialState from huggingface_hub import hf_hub_download from huggingface_hub.utils import EntryNotFoundError, HFValidationError, LocalEntryNotFoundError, RepositoryNotFoundError from safetensors.torch import load_file as safe_load_file from transformers import PreTrainedModel from ..import_utils import is_npu_available, is_peft_available, is_transformers_greater_than, is_xpu_available if is_peft_available(): from peft import PeftConfig, PeftModel, PeftModelForCausalLM, PeftModelForSeq2SeqLM, PromptLearningConfig, get_peft_model, prepare_model_for_kbit_training if is_transformers_greater_than('4.33.0'): from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled else: from transformers.deepspeed import is_deepspeed_zero3_enabled LAYER_PATTERNS = ['transformer.h.{layer}', 'model.decoder.layers.{layer}', 'gpt_neox.layers.{layer}', 'model.layers.{layer}'] class PreTrainedModelWrapper(nn.Module): transformers_parent_class = None supported_args = None supported_modules = ('v_head',) supported_rm_modules = ('score',) supported_pretrained_model_architectures = PreTrainedModel if not is_peft_available() else (PreTrainedModel, PeftModelForCausalLM, PeftModelForSeq2SeqLM) def __init__(self, pretrained_model=None, score_module=None, supports_rm_adapter=False, rm_adapter_name=None, **kwargs): super().__init__() self.pretrained_model = pretrained_model self.config = pretrained_model.config self.prepare_inputs_for_generation = pretrained_model.prepare_inputs_for_generation self.is_loaded_in_8bit = getattr(pretrained_model, 'is_loaded_in_8bit', False) self.is_loaded_in_4bit = getattr(pretrained_model, 'is_loaded_in_4bit', False) self.is_sequential_parallel = False if hasattr(pretrained_model, 'gradient_checkpointing_disable'): self.gradient_checkpointing_disable = pretrained_model.gradient_checkpointing_disable if hasattr(pretrained_model, 'gradient_checkpointing_enable'): self.gradient_checkpointing_enable = pretrained_model.gradient_checkpointing_enable if hasattr(pretrained_model, 'enable_input_require_grads'): self.enable_input_require_grads = pretrained_model.enable_input_require_grads self.supports_rm_adapter = supports_rm_adapter self.rm_adapter_name = rm_adapter_name self.policy_adapter_name = 'default' if score_module is not None: self.score = score_module @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): if kwargs is not None: peft_config = kwargs.pop('peft_config', None) reward_adapter = kwargs.pop('reward_adapter', None) reward_adapter_name = kwargs.pop('reward_adapter_name', 'reward_adapter') is_trainable = kwargs.pop('is_trainable', False) (trl_model_args, pretrained_kwargs, peft_quantization_kwargs) = cls._split_kwargs(kwargs) token = pretrained_kwargs.get('token', None) else: peft_config = None is_trainable = False trl_model_args = {} pretrained_kwargs = {} peft_quantization_kwargs = {} token = None if reward_adapter is not None and (not isinstance(reward_adapter, str)): raise ValueError('The `reward_adapter` argument should be a string representing a local path or the Hub id of the Reward Modeling adapter.') is_peft_model = False
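# From this point, from_pretrained resolves the underlying model: it detects 8-bit/4-bit
# quantization (pinning the model to the current device when no device_map is given), loads an
# existing PEFT adapter or wraps the model with a fresh one when `peft_config` is passed, and
# validates the input type before building the wrapper and optionally restoring v_head weights.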
current_device = cls._get_current_device() if isinstance(pretrained_model_name_or_path, str): is_loaded_in_8bit = pretrained_kwargs['load_in_8bit'] if 'load_in_8bit' in pretrained_kwargs else False is_loaded_in_4bit = pretrained_kwargs['load_in_4bit'] if 'load_in_4bit' in pretrained_kwargs else False else: is_loaded_in_8bit = getattr(pretrained_model_name_or_path, 'is_loaded_in_8bit', False) is_loaded_in_4bit = getattr(pretrained_model_name_or_path, 'is_loaded_in_4bit', False) if (is_loaded_in_8bit or is_loaded_in_4bit) and 'device_map' not in pretrained_kwargs: logging.warning('The `device_map` argument is not provided. We will override the `device_map` argument to set the entire model on the current device. If you want to set the model on multiple devices, please provide a custom `device_map` argument.') pretrained_kwargs['device_map'] = {'': current_device} if is_peft_available() and peft_config is not None and (not isinstance(peft_config, PeftConfig)): raise ValueError('The `peft_config` argument should be an instance of `peft.PeftConfig` class.') if isinstance(pretrained_model_name_or_path, str): if is_peft_available(): try: remote_adapter_config = hf_hub_download(pretrained_model_name_or_path, 'adapter_config.json', token=token) except (EntryNotFoundError, LocalEntryNotFoundError, HFValidationError, RepositoryNotFoundError): remote_adapter_config = None else: remote_adapter_config = None local_adapter_present = os.path.exists(os.path.join(pretrained_model_name_or_path, 'adapter_config.json')) if (local_adapter_present or remote_adapter_config is not None) and is_peft_available(): if peft_config is not None: logging.warning(f'`peft_config` argument ignored since a peft config file was found in {pretrained_model_name_or_path}') if local_adapter_present: trained_adapter_config = PeftConfig.from_pretrained(pretrained_model_name_or_path) else: remote_adapter_dir = os.path.dirname(remote_adapter_config) trained_adapter_config = PeftConfig.from_pretrained(remote_adapter_dir) pretrained_model = cls.transformers_parent_class.from_pretrained(trained_adapter_config.base_model_name_or_path, *model_args, **pretrained_kwargs) pretrained_model = PeftModel.from_pretrained(pretrained_model, pretrained_model_name_or_path, is_trainable=is_trainable, token=token) logging.info('Trained peft adapter loaded') else: pretrained_model = cls.transformers_parent_class.from_pretrained(pretrained_model_name_or_path, *model_args, **pretrained_kwargs) if peft_config is not None: if is_loaded_in_8bit or is_loaded_in_4bit: pretrained_model = prepare_model_for_kbit_training(pretrained_model, **peft_quantization_kwargs) pretrained_model = get_peft_model(pretrained_model, peft_config) logging.info('peft adapter initialised') elif isinstance(pretrained_model_name_or_path, cls.supported_pretrained_model_architectures): pretrained_model = pretrained_model_name_or_path if peft_config is not None and isinstance(pretrained_model, PreTrainedModel): if is_loaded_in_8bit or is_loaded_in_4bit: pretrained_model = prepare_model_for_kbit_training(pretrained_model, **peft_quantization_kwargs) pretrained_model = get_peft_model(pretrained_model, peft_config) logging.info('peft adapter initialised') else: raise ValueError(f'pretrained_model_name_or_path should be a string or a PreTrainedModel, but is {type(pretrained_model_name_or_path)}') if is_peft_available(): if isinstance(pretrained_model, PeftModel): is_peft_model = True if hasattr(pretrained_model, 'active_peft_config') and isinstance(pretrained_model.active_peft_config,
PromptLearningConfig): raise ValueError('PromptLearningConfig is not supported for PPO training.') if not is_peft_model and reward_adapter is not None: raise ValueError('reward_adapter can only be used with a PeftModel. ') elif is_peft_model and reward_adapter is not None: score_module = cls.add_and_load_reward_modeling_adapter(pretrained_model, reward_adapter, reward_adapter_name, token=token) multi_adapter_args = {'score_module': score_module, 'supports_rm_adapter': True, 'rm_adapter_name': reward_adapter_name} else: multi_adapter_args = {'supports_rm_adapter': False} model = cls(pretrained_model, **multi_adapter_args, **trl_model_args) is_resuming_training = True if isinstance(pretrained_model_name_or_path, str): safe_filename = os.path.join(pretrained_model_name_or_path, 'model.safetensors') filename = os.path.join(pretrained_model_name_or_path, 'pytorch_model.bin') sharded_index_filename = os.path.join(pretrained_model_name_or_path, 'pytorch_model.bin.index.json') safe_sharded_index_filename = os.path.join(pretrained_model_name_or_path, 'model.safetensors.index.json') is_sharded = False use_safe = os.path.exists(safe_filename) if not (os.path.exists(filename) or os.path.exists(safe_filename)): (filename, files_to_download, is_sharded, is_resuming_training) = cls._get_checkpoint_from_hub(pretrained_model, pretrained_model_name_or_path, sharded_index_filename, token=token) if filename is None and files_to_download is None: (safe_filename, files_to_download, is_sharded, is_resuming_training) = cls._get_checkpoint_from_hub(pretrained_model, pretrained_model_name_or_path, safe_sharded_index_filename, token=token, model_name='model.safetensors', model_index_name='model.safetensors.index.json') use_safe = True else: use_safe = False loading_func = safe_load_file if use_safe else torch.load load_kwargs = {} if use_safe else {'map_location': 'cpu', 'weights_only': True} if is_resuming_training: if is_sharded: state_dict = {} for shard_file in files_to_download: filename = hf_hub_download(pretrained_model_name_or_path, shard_file, token=token) state_dict.update(loading_func(filename, **load_kwargs)) else: state_dict = loading_func(filename if not use_safe else safe_filename, **load_kwargs) else: state_dict = pretrained_model_name_or_path.state_dict() model.is_peft_model = is_peft_model model.current_device = current_device if is_resuming_training: model.post_init(state_dict=state_dict) return model @classmethod def _get_checkpoint_from_hub(cls, pretrained_model, pretrained_model_name_or_path, index_filename, token=None, model_name='pytorch_model.bin', model_index_name='pytorch_model.bin.index.json'): files_to_download = None filename = None is_resuming_training = True is_sharded = False try: filename = hf_hub_download(pretrained_model_name_or_path, model_name, token=token) except (EntryNotFoundError, LocalEntryNotFoundError, HFValidationError, RepositoryNotFoundError): if os.path.exists(index_filename): index_file_name = index_filename else: try: index_file_name = hf_hub_download(pretrained_model_name_or_path, model_index_name, token=token) except (EntryNotFoundError, LocalEntryNotFoundError, HFValidationError, RepositoryNotFoundError): is_resuming_training = False logging.warning(f"A {type(pretrained_model)} model is loaded from '{pretrained_model_name_or_path}', and no v_head weight is found. 
This IS expected if you are not resuming PPO training.") if is_resuming_training: with open(index_file_name) as f: index = json.load(f) files_to_download = set() for (k, v) in index['weight_map'].items(): if any((module in k for module in cls.supported_modules)): files_to_download.add(v) is_sharded = True return (filename, files_to_download, is_sharded, is_resuming_training) @classmethod def _get_current_device(cls): state = PartialState() if is_xpu_available(): return f'xpu:{state.local_process_index}' elif is_npu_available(): return f'npu:{state.local_process_index}' else: return state.local_process_index if torch.cuda.is_available() else 'cpu' @classmethod def _split_kwargs(cls, kwargs): check_peft_kwargs = False if is_peft_available(): from peft import prepare_model_for_kbit_training check_peft_kwargs = True supported_kwargs = {} unsupported_kwargs = {} peft_kwargs = {} for (key, value) in kwargs.items(): if key in cls.supported_args: supported_kwargs[key] = value else: unsupported_kwargs[key] = value if check_peft_kwargs: if key in prepare_model_for_kbit_training.__code__.co_varnames: peft_kwargs[key] = value if key in unsupported_kwargs: unsupported_kwargs.pop(key) return (supported_kwargs, unsupported_kwargs, peft_kwargs) @classmethod def add_and_load_reward_modeling_adapter(cls, pretrained_model, adapter_model_id, adapter_name='reward_model_adapter', token=None): pretrained_model.load_adapter(adapter_model_id, adapter_name, is_trainable=False) pretrained_model.train() filename = os.path.join(adapter_model_id, 'adapter_model.bin') safe_loading = False if not os.path.exists(filename): try: local_filename = hf_hub_download(adapter_model_id, 'adapter_model.bin', token=token) except Exception: filename = os.path.join(adapter_model_id, 'adapter_model.safetensors') safe_loading = True if not os.path.exists(filename): try: local_filename = hf_hub_download(adapter_model_id, 'adapter_model.safetensors', token=token) except Exception as exc: raise ValueError('Could not find adapter model in the Hub, make sure you have the correct adapter model id.') from exc else: local_filename = filename else: local_filename = filename loading_func = safe_load_file if safe_loading else torch.load load_kwargs = {} if safe_loading else {'map_location': 'cpu', 'weights_only': True} adapter_state_dict = loading_func(local_filename, **load_kwargs) for score_name_candidate in cls.supported_rm_modules: if any((score_name_candidate in name for name in adapter_state_dict.keys())): score_name = score_name_candidate break score_dict = {} for (name, param) in adapter_state_dict.items(): if score_name in name: key_name = '.'.join(name.split('.')[-1:]) score_dict[key_name] = param.to(cls._get_current_device()) (num_labels, hidden_dim) = score_dict['weight'].shape has_bias = any(('bias' in name for name in adapter_state_dict.keys())) score = nn.Linear(hidden_dim, num_labels, bias=has_bias).to(device=cls._get_current_device(), dtype=pretrained_model.dtype) score.load_state_dict(score_dict) for param in score.parameters(): param.requires_grad = False return score def push_to_hub(self, *args, **kwargs): raise NotImplementedError def save_pretrained(self, *args, **kwargs): state_dict = kwargs.get('state_dict') if state_dict is None: state_dict = self.state_dict() kwargs['state_dict'] = state_dict if self.is_peft_model: save_path = args[0] save_path = os.path.join(save_path, 'pytorch_model.bin') torch.save(state_dict, save_path) _ = kwargs.pop('state_dict', None) return self.pretrained_model.save_pretrained(*args, **kwargs) 
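# A minimal sketch of the multi-adapter reward flow implemented by this class, assuming the
# causal-LM subclass defined in modeling_value_head.py; both repo ids below are placeholders:
# >>> model = AutoModelForCausalLMWithValueHead.from_pretrained(
# ...     "path/to/peft-policy-adapter", reward_adapter="path/to/reward-adapter"
# ... )
# >>> scores = model.compute_reward_score(input_ids, attention_mask)  # switches to the RM adapter and applies the score head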
def state_dict(self, *args, **kwargs): raise NotImplementedError def post_init(self, *args, **kwargs): raise NotImplementedError def compute_reward_score(self, input_ids, attention_mask=None, **kwargs): if not self.supports_rm_adapter: raise ValueError('This model does not support reward modeling adapter.') self.pretrained_model.set_adapter(self.rm_adapter_name) self.pretrained_model.eval() with torch.no_grad(): base_model_output = self.pretrained_model(input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True, return_dict=True, **kwargs) last_hidden_states = base_model_output.hidden_states[-1] scores = self.score(last_hidden_states) self.pretrained_model.set_adapter(self.policy_adapter_name) self.pretrained_model.eval() return scores def create_reference_model(model: PreTrainedModelWrapper, num_shared_layers: Optional[int]=None, pattern: Optional[str]=None) -> PreTrainedModelWrapper: if is_deepspeed_zero3_enabled(): raise ValueError('DeepSpeed ZeRO-3 is enabled and is not compatible with `create_reference_model()`. Please instantiate your reference model directly with `AutoCausalLM.from_pretrained()`.') parameter_names = [n for (n, _) in model.named_parameters()] ref_model = deepcopy(model) if num_shared_layers is None: for param_name in parameter_names: param = ref_model.get_parameter(param_name) param.requires_grad = False return ref_model.eval() if pattern is not None: pattern = pattern.format(layer=num_shared_layers) else: for pattern_candidate in LAYER_PATTERNS: pattern_candidate = pattern_candidate.format(layer=num_shared_layers) if any((pattern_candidate in name for name in parameter_names)): pattern = pattern_candidate break if pattern is None: raise ValueError('Layer pattern could not be matched.') shared_param_list = [] unshared_param_list = [] shared_parameter = True for (name, _param) in model.named_parameters(): if pattern in name: shared_parameter = False if shared_parameter: shared_param_list.append(name) else: unshared_param_list.append(name) for param_name in shared_param_list: param = model.get_parameter(param_name) param.requires_grad = False _ref_param = ref_model.get_parameter(param_name) for param_name in unshared_param_list: param = ref_model.get_parameter(param_name) param.requires_grad = False if pattern is not None and len(unshared_param_list) == 0: logging.warning('Pattern passed or found, but no layers matched in the model. 
Check for a typo.') return ref_model.eval() # File: trl-main/trl/models/modeling_sd_base.py import contextlib import os import random import warnings from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import torch import torch.utils.checkpoint as checkpoint from diffusers import DDIMScheduler, StableDiffusionPipeline, UNet2DConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import rescale_noise_cfg from ..core import randn_tensor from ..import_utils import is_peft_available from .sd_utils import convert_state_dict_to_diffusers if is_peft_available(): from peft import LoraConfig from peft.utils import get_peft_model_state_dict @dataclass class DDPOPipelineOutput: images: torch.Tensor latents: torch.Tensor log_probs: torch.Tensor @dataclass class DDPOSchedulerOutput: latents: torch.Tensor log_probs: torch.Tensor class DDPOStableDiffusionPipeline: def __call__(self, *args, **kwargs) -> DDPOPipelineOutput: raise NotImplementedError def scheduler_step(self, *args, **kwargs) -> DDPOSchedulerOutput: raise NotImplementedError @property def unet(self): raise NotImplementedError @property def vae(self): raise NotImplementedError @property def tokenizer(self): raise NotImplementedError @property def scheduler(self): raise NotImplementedError @property def text_encoder(self): raise NotImplementedError @property def autocast(self): raise NotImplementedError def set_progress_bar_config(self, *args, **kwargs): raise NotImplementedError def save_pretrained(self, *args, **kwargs): raise NotImplementedError def get_trainable_layers(self, *args, **kwargs): raise NotImplementedError def save_checkpoint(self, *args, **kwargs): raise NotImplementedError def load_checkpoint(self, *args, **kwargs): raise NotImplementedError def _left_broadcast(input_tensor, shape): input_ndim = input_tensor.ndim if input_ndim > len(shape): raise ValueError('The number of dimensions of the tensor to broadcast cannot be greater than the length of the shape to broadcast to') return input_tensor.reshape(input_tensor.shape + (1,) * (len(shape) - input_ndim)).broadcast_to(shape) def _get_variance(self, timestep, prev_timestep): alpha_prod_t = torch.gather(self.alphas_cumprod, 0, timestep.cpu()).to(timestep.device) alpha_prod_t_prev = torch.where(prev_timestep.cpu() >= 0, self.alphas_cumprod.gather(0, prev_timestep.cpu()), self.final_alpha_cumprod).to(timestep.device) beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev variance = beta_prod_t_prev / beta_prod_t * (1 - alpha_prod_t / alpha_prod_t_prev) return variance def scheduler_step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float=0.0, use_clipped_model_output: bool=False, generator=None, prev_sample: Optional[torch.FloatTensor]=None) -> DDPOSchedulerOutput: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps prev_timestep = torch.clamp(prev_timestep, 0, self.config.num_train_timesteps - 1) alpha_prod_t = self.alphas_cumprod.gather(0, timestep.cpu()) alpha_prod_t_prev = torch.where(prev_timestep.cpu() >= 0, self.alphas_cumprod.gather(0, prev_timestep.cpu()), self.final_alpha_cumprod) alpha_prod_t = _left_broadcast(alpha_prod_t, sample.shape).to(sample.device) alpha_prod_t_prev = _left_broadcast(alpha_prod_t_prev, 
sample.shape).to(sample.device) beta_prod_t = 1 - alpha_prod_t if self.config.prediction_type == 'epsilon': pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 pred_epsilon = model_output elif self.config.prediction_type == 'sample': pred_original_sample = model_output pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == 'v_prediction': pred_original_sample = alpha_prod_t ** 0.5 * sample - beta_prod_t ** 0.5 * model_output pred_epsilon = alpha_prod_t ** 0.5 * model_output + beta_prod_t ** 0.5 * sample else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`') if self.config.thresholding: pred_original_sample = self._threshold_sample(pred_original_sample) elif self.config.clip_sample: pred_original_sample = pred_original_sample.clamp(-self.config.clip_sample_range, self.config.clip_sample_range) variance = _get_variance(self, timestep, prev_timestep) std_dev_t = eta * variance ** 0.5 std_dev_t = _left_broadcast(std_dev_t, sample.shape).to(sample.device) if use_clipped_model_output: pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t ** 2) ** 0.5 * pred_epsilon prev_sample_mean = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if prev_sample is not None and generator is not None: raise ValueError('Cannot pass both generator and prev_sample. Please make sure that either `generator` or `prev_sample` stays `None`.') if prev_sample is None: variance_noise = randn_tensor(model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype) prev_sample = prev_sample_mean + std_dev_t * variance_noise log_prob = -(prev_sample.detach() - prev_sample_mean) ** 2 / (2 * std_dev_t ** 2) - torch.log(std_dev_t) - torch.log(torch.sqrt(2 * torch.as_tensor(np.pi))) log_prob = log_prob.mean(dim=tuple(range(1, log_prob.ndim))) return DDPOSchedulerOutput(prev_sample.type(sample.dtype), log_prob) @torch.no_grad() def pipeline_step(self, prompt: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.FloatTensor]=None, prompt_embeds: Optional[torch.FloatTensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None 
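# scheduler_step above evaluates the Gaussian log-density of the sampled latent,
# -(x - mean)^2 / (2 * std^2) - log(std) - log(sqrt(2 * pi)), averaged over all non-batch
# dimensions. The remainder of pipeline_step encodes the prompt, prepares the initial latents,
# and runs the DDIM denoising loop, storing every intermediate latent and its log-probability
# so that DDPO can later compute per-step policy gradients.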
prompt_embeds = self._encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order all_latents = [latents] all_log_probs = [] with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if do_classifier_free_guidance and guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) scheduler_output = scheduler_step(self.scheduler, noise_pred, t, latents, eta) latents = scheduler_output.latents log_prob = scheduler_output.log_probs all_latents.append(latents) all_log_probs.append(log_prob) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: self.final_offload_hook.offload() return DDPOPipelineOutput(image, all_latents, all_log_probs) def pipeline_step_with_grad(pipeline, prompt: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, truncated_backprop: bool=True, truncated_backprop_rand: bool=True, gradient_checkpoint: bool=True, truncated_backprop_timestep: int=49, truncated_rand_backprop_minmax: tuple=(0, 50), negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.FloatTensor]=None, prompt_embeds: Optional[torch.FloatTensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0): height = height or pipeline.unet.config.sample_size * pipeline.vae_scale_factor width = width or 
pipeline.unet.config.sample_size * pipeline.vae_scale_factor with torch.no_grad(): pipeline.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = pipeline._execution_device do_classifier_free_guidance = guidance_scale > 1.0 text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None prompt_embeds = pipeline._encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale) pipeline.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = pipeline.scheduler.timesteps num_channels_latents = pipeline.unet.config.in_channels latents = pipeline.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) num_warmup_steps = len(timesteps) - num_inference_steps * pipeline.scheduler.order all_latents = [latents] all_log_probs = [] with pipeline.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = pipeline.scheduler.scale_model_input(latent_model_input, t) if gradient_checkpoint: noise_pred = checkpoint.checkpoint(pipeline.unet, latent_model_input, t, prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, use_reentrant=False)[0] else: noise_pred = pipeline.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] if truncated_backprop: if truncated_backprop_rand: rand_timestep = random.randint(truncated_rand_backprop_minmax[0], truncated_rand_backprop_minmax[1]) if i < rand_timestep: noise_pred = noise_pred.detach() elif i < truncated_backprop_timestep: noise_pred = noise_pred.detach() if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if do_classifier_free_guidance and guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) scheduler_output = scheduler_step(pipeline.scheduler, noise_pred, t, latents, eta) latents = scheduler_output.latents log_prob = scheduler_output.log_probs all_latents.append(latents) all_log_probs.append(log_prob) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % pipeline.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, latents) if not output_type == 'latent': image = pipeline.vae.decode(latents / pipeline.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = pipeline.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = pipeline.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if hasattr(pipeline, 'final_offload_hook') and pipeline.final_offload_hook is not None: 
pipeline.final_offload_hook.offload() return DDPOPipelineOutput(image, all_latents, all_log_probs) class DefaultDDPOStableDiffusionPipeline(DDPOStableDiffusionPipeline): def __init__(self, pretrained_model_name: str, *, pretrained_model_revision: str='main', use_lora: bool=True): self.sd_pipeline = StableDiffusionPipeline.from_pretrained(pretrained_model_name, revision=pretrained_model_revision) self.use_lora = use_lora self.pretrained_model = pretrained_model_name self.pretrained_revision = pretrained_model_revision try: self.sd_pipeline.load_lora_weights(pretrained_model_name, weight_name='pytorch_lora_weights.safetensors', revision=pretrained_model_revision) self.use_lora = True except OSError: if use_lora: warnings.warn('If you are aware that the pretrained model has no LoRA weights, ignore this message. Otherwise please check if `pytorch_lora_weights.safetensors` exists in the model folder.') self.sd_pipeline.scheduler = DDIMScheduler.from_config(self.sd_pipeline.scheduler.config) self.sd_pipeline.safety_checker = None self.sd_pipeline.vae.requires_grad_(False) self.sd_pipeline.text_encoder.requires_grad_(False) self.sd_pipeline.unet.requires_grad_(not self.use_lora) def __call__(self, *args, **kwargs) -> DDPOPipelineOutput: return pipeline_step(self.sd_pipeline, *args, **kwargs) def rgb_with_grad(self, *args, **kwargs) -> DDPOPipelineOutput: return pipeline_step_with_grad(self.sd_pipeline, *args, **kwargs) def scheduler_step(self, *args, **kwargs) -> DDPOSchedulerOutput: return scheduler_step(self.sd_pipeline.scheduler, *args, **kwargs) @property def unet(self): return self.sd_pipeline.unet @property def vae(self): return self.sd_pipeline.vae @property def tokenizer(self): return self.sd_pipeline.tokenizer @property def scheduler(self): return self.sd_pipeline.scheduler @property def text_encoder(self): return self.sd_pipeline.text_encoder @property def autocast(self): return contextlib.nullcontext if self.use_lora else None def save_pretrained(self, output_dir): if self.use_lora: state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(self.sd_pipeline.unet)) self.sd_pipeline.save_lora_weights(save_directory=output_dir, unet_lora_layers=state_dict) self.sd_pipeline.save_pretrained(output_dir) def set_progress_bar_config(self, *args, **kwargs): self.sd_pipeline.set_progress_bar_config(*args, **kwargs) def get_trainable_layers(self): if self.use_lora: lora_config = LoraConfig(r=4, lora_alpha=4, init_lora_weights='gaussian', target_modules=['to_k', 'to_q', 'to_v', 'to_out.0']) self.sd_pipeline.unet.add_adapter(lora_config) for param in self.sd_pipeline.unet.parameters(): if param.requires_grad: param.data = param.to(torch.float32) return self.sd_pipeline.unet else: return self.sd_pipeline.unet def save_checkpoint(self, models, weights, output_dir): if len(models) != 1: raise ValueError('Given how the trainable params were set, this should be of length 1') if self.use_lora and hasattr(models[0], 'peft_config') and (getattr(models[0], 'peft_config', None) is not None): state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(models[0])) self.sd_pipeline.save_lora_weights(save_directory=output_dir, unet_lora_layers=state_dict) elif not self.use_lora and isinstance(models[0], UNet2DConditionModel): models[0].save_pretrained(os.path.join(output_dir, 'unet')) else: raise ValueError(f'Unknown model type {type(models[0])}') def load_checkpoint(self, models, input_dir): if len(models) != 1: raise ValueError('Given how the trainable params were set, this
should be of length 1') if self.use_lora: (lora_state_dict, network_alphas) = self.sd_pipeline.lora_state_dict(input_dir, weight_name='pytorch_lora_weights.safetensors') self.sd_pipeline.load_lora_into_unet(lora_state_dict, network_alphas=network_alphas, unet=models[0]) elif not self.use_lora and isinstance(models[0], UNet2DConditionModel): load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder='unet') models[0].register_to_config(**load_model.config) models[0].load_state_dict(load_model.state_dict()) del load_model else: raise ValueError(f'Unknown model type {type(models[0])}') # File: trl-main/trl/models/modeling_value_head.py import torch import torch.nn as nn from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM from ..import_utils import is_npu_available, is_xpu_available from .modeling_base import PreTrainedModelWrapper class ValueHead(nn.Module): def __init__(self, config, **kwargs): super().__init__() if not hasattr(config, 'summary_dropout_prob'): summary_dropout_prob = kwargs.pop('summary_dropout_prob', 0.1) else: summary_dropout_prob = config.summary_dropout_prob self.dropout = nn.Dropout(summary_dropout_prob) if summary_dropout_prob else nn.Identity() if hasattr(config, 'hidden_size'): hidden_size = config.hidden_size if hasattr(config, 'word_embed_proj_dim'): hidden_size = config.word_embed_proj_dim elif hasattr(config, 'is_encoder_decoder'): if config.is_encoder_decoder and hasattr(config, 'decoder'): if hasattr(config.decoder, 'hidden_size'): hidden_size = config.decoder.hidden_size self.summary = nn.Linear(hidden_size, 1) self.flatten = nn.Flatten() def forward(self, hidden_states): output = self.dropout(hidden_states) if output.dtype != self.summary.weight.dtype: output = output.to(self.summary.weight.dtype) output = self.summary(output) return output class AutoModelForCausalLMWithValueHead(PreTrainedModelWrapper): transformers_parent_class = AutoModelForCausalLM lm_head_namings = ['lm_head', 'embed_out'] supported_args = ('summary_dropout_prob', 'v_head_initializer_range', 'v_head_init_strategy') def __init__(self, pretrained_model, **kwargs): super().__init__(pretrained_model, **kwargs) (v_head_kwargs, _, _) = self._split_kwargs(kwargs) if not any((hasattr(self.pretrained_model, attribute) for attribute in self.lm_head_namings)): raise ValueError('The model does not have a language model head, please use a model that has one.') self.v_head = ValueHead(self.pretrained_model.config, **v_head_kwargs) self._init_weights(**v_head_kwargs) def _init_weights(self, **kwargs): initializer_range = kwargs.pop('v_head_initializer_range', 0.2) init_strategy = kwargs.pop('v_head_init_strategy', None) if init_strategy is None: pass elif init_strategy == 'normal': self.v_head.summary.weight.data.normal_(mean=0.0, std=initializer_range) self.v_head.summary.bias.data.zero_() def forward(self, input_ids=None, past_key_values=None, attention_mask=None, return_past_key_values=False, **kwargs): kwargs['output_hidden_states'] = True kwargs['past_key_values'] = past_key_values if self.is_peft_model and self.pretrained_model.active_peft_config.peft_type == 'PREFIX_TUNING': kwargs.pop('past_key_values') base_model_output = self.pretrained_model(input_ids=input_ids, attention_mask=attention_mask, **kwargs) last_hidden_state = base_model_output.hidden_states[-1] lm_logits = base_model_output.logits loss = base_model_output.loss if last_hidden_state.device != self.v_head.summary.weight.device: last_hidden_state = last_hidden_state.to(self.v_head.summary.weight.device) 
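# The value head projects the final hidden state to one scalar per token, so a single forward
# pass yields (lm_logits, loss, value) for PPO-style training. A minimal call sketch with
# placeholder tensors:
# >>> logits, loss, values = model(input_ids, attention_mask=attention_mask)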
value = self.v_head(last_hidden_state).squeeze(-1) if lm_logits.dtype != torch.float32: lm_logits = lm_logits.float() if return_past_key_values: return (lm_logits, loss, value, base_model_output.past_key_values) else: return (lm_logits, loss, value) def generate(self, *args, **kwargs): return self.pretrained_model.generate(*args, **kwargs) def state_dict(self, *args, **kwargs): if not self.is_peft_model: pretrained_model_state_dict = self.pretrained_model.state_dict(*args, **kwargs) else: pretrained_model_state_dict = {} v_head_state_dict = self.v_head.state_dict(*args, **kwargs) for (k, v) in v_head_state_dict.items(): pretrained_model_state_dict[f'v_head.{k}'] = v return pretrained_model_state_dict def push_to_hub(self, *args, **kwargs): self.pretrained_model.v_head = self.v_head return self.pretrained_model.push_to_hub(*args, **kwargs) def post_init(self, state_dict): for k in list(state_dict.keys()): if 'v_head.' in k: state_dict[k.replace('v_head.', '')] = state_dict.pop(k) self.v_head.load_state_dict(state_dict, strict=False) del state_dict if hasattr(self.pretrained_model, 'hf_device_map'): if 'cpu' in self.pretrained_model.hf_device_map.values() or 'disk' in self.pretrained_model.hf_device_map.values(): raise ValueError('The model is offloaded on CPU or disk - CPU & disk offloading is not supported for ValueHead models.') first_device = list(set(self.pretrained_model.hf_device_map.values()))[0] if isinstance(first_device, int): if is_npu_available(): first_device = f'npu:{first_device}' elif is_xpu_available(): first_device = f'xpu:{first_device}' else: first_device = f'cuda:{first_device}' self.v_head = self.v_head.to(first_device) def set_device_hook(module, input, outputs): new_output = () for output in outputs: if isinstance(output, torch.Tensor): new_output += (output.to(first_device),) else: new_output += (output,) return new_output self.register_forward_hook(set_device_hook) self.is_sequential_parallel = True class AutoModelForSeq2SeqLMWithValueHead(PreTrainedModelWrapper): transformers_parent_class = AutoModelForSeq2SeqLM lm_head_namings = ['lm_head', 'embed_out', 'output_projection'] supported_args = ('summary_dropout_prob', 'v_head_initializer_range', 'v_head_init_strategy') def __init__(self, pretrained_model, **kwargs): super().__init__(pretrained_model, **kwargs) (v_head_kwargs, _, _) = self._split_kwargs(kwargs) self.is_encoder_decoder = True if not self._has_lm_head(): raise ValueError('The model does not have a language model head, please use a model that has one.') self.v_head = ValueHead(self.pretrained_model.config, **v_head_kwargs) self._init_weights(**v_head_kwargs) def _has_lm_head(self): for (name, _module) in self.pretrained_model.named_modules(): if any((attribute in name for attribute in self.lm_head_namings)): return True return False def post_init(self, state_dict): for k in list(state_dict.keys()): if 'v_head.' 
in k: state_dict[k.replace('v_head.', '')] = state_dict.pop(k) self.v_head.load_state_dict(state_dict, strict=False) del state_dict if hasattr(self.pretrained_model, 'hf_device_map'): if 'cpu' in self.pretrained_model.hf_device_map.values() or 'disk' in self.pretrained_model.hf_device_map.values(): raise ValueError('The model is offloaded on CPU or disk - CPU & disk offloading is not supported for ValueHead models.') for (name, module) in self.pretrained_model.named_modules(): if any((attribute in name for attribute in self.lm_head_namings)): lm_head_device = module.weight.device break self.v_head = self.v_head.to(lm_head_device) def set_device_hook(module, input, outputs): new_output = () for output in outputs: if isinstance(output, torch.Tensor): new_output += (output.to(lm_head_device),) else: new_output += (output,) return new_output self.register_forward_hook(set_device_hook) self.is_sequential_parallel = True def state_dict(self, *args, **kwargs): if not self.is_peft_model: pretrained_model_state_dict = self.pretrained_model.state_dict(*args, **kwargs) else: pretrained_model_state_dict = {} v_head_state_dict = self.v_head.state_dict(*args, **kwargs) for (k, v) in v_head_state_dict.items(): pretrained_model_state_dict[f'v_head.{k}'] = v return pretrained_model_state_dict def push_to_hub(self, *args, **kwargs): self.pretrained_model.v_head = self.v_head return self.pretrained_model.push_to_hub(*args, **kwargs) def _init_weights(self, **kwargs): initializer_range = kwargs.pop('v_head_initializer_range', 0.2) init_strategy = kwargs.pop('v_head_init_strategy', None) if init_strategy is None: pass elif init_strategy == 'normal': self.v_head.summary.weight.data.normal_(mean=0.0, std=initializer_range) self.v_head.summary.bias.data.zero_() def forward(self, input_ids=None, past_key_values=None, attention_mask=None, return_past_key_values=False, **kwargs): kwargs['past_key_values'] = past_key_values if self.is_peft_model and self.pretrained_model.active_peft_config.peft_type == 'PREFIX_TUNING': kwargs.pop('past_key_values') base_model_output = self.pretrained_model(input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True, **kwargs) last_hidden_state = base_model_output.decoder_hidden_states[-1] lm_logits = base_model_output.logits loss = base_model_output.loss value = self.v_head(last_hidden_state).squeeze(-1) if lm_logits.dtype != torch.float32: lm_logits = lm_logits.float() if return_past_key_values: return (lm_logits, loss, value, base_model_output.past_key_values) else: return (lm_logits, loss, value) def generate(self, *args, **kwargs): return self.pretrained_model.generate(*args, **kwargs) # File: trl-main/trl/models/sd_utils.py """""" import enum class StateDictType(enum.Enum): DIFFUSERS_OLD = 'diffusers_old' PEFT = 'peft' PEFT_TO_DIFFUSERS = {'.q_proj.lora_B': '.q_proj.lora_linear_layer.up', '.q_proj.lora_A': '.q_proj.lora_linear_layer.down', '.k_proj.lora_B': '.k_proj.lora_linear_layer.up', '.k_proj.lora_A': '.k_proj.lora_linear_layer.down', '.v_proj.lora_B': '.v_proj.lora_linear_layer.up', '.v_proj.lora_A': '.v_proj.lora_linear_layer.down', '.out_proj.lora_B': '.out_proj.lora_linear_layer.up', '.out_proj.lora_A': '.out_proj.lora_linear_layer.down', 'to_k.lora_A': 'to_k.lora.down', 'to_k.lora_B': 'to_k.lora.up', 'to_q.lora_A': 'to_q.lora.down', 'to_q.lora_B': 'to_q.lora.up', 'to_v.lora_A': 'to_v.lora.down', 'to_v.lora_B': 'to_v.lora.up', 'to_out.0.lora_A': 'to_out.0.lora.down', 'to_out.0.lora_B': 'to_out.0.lora.up'} DIFFUSERS_OLD_TO_DIFFUSERS = {'.to_q_lora.up': 
'.q_proj.lora_linear_layer.up', '.to_q_lora.down': '.q_proj.lora_linear_layer.down', '.to_k_lora.up': '.k_proj.lora_linear_layer.up', '.to_k_lora.down': '.k_proj.lora_linear_layer.down', '.to_v_lora.up': '.v_proj.lora_linear_layer.up', '.to_v_lora.down': '.v_proj.lora_linear_layer.down', '.to_out_lora.up': '.out_proj.lora_linear_layer.up', '.to_out_lora.down': '.out_proj.lora_linear_layer.down'} DIFFUSERS_STATE_DICT_MAPPINGS = {StateDictType.DIFFUSERS_OLD: DIFFUSERS_OLD_TO_DIFFUSERS, StateDictType.PEFT: PEFT_TO_DIFFUSERS} KEYS_TO_ALWAYS_REPLACE = {'.processor.': '.'} def convert_state_dict(state_dict, mapping): converted_state_dict = {} for (k, v) in state_dict.items(): for pattern in KEYS_TO_ALWAYS_REPLACE.keys(): if pattern in k: new_pattern = KEYS_TO_ALWAYS_REPLACE[pattern] k = k.replace(pattern, new_pattern) for pattern in mapping.keys(): if pattern in k: new_pattern = mapping[pattern] k = k.replace(pattern, new_pattern) break converted_state_dict[k] = v return converted_state_dict def convert_state_dict_to_diffusers(state_dict, original_type=None, **kwargs): peft_adapter_name = kwargs.pop('adapter_name', None) if peft_adapter_name is not None: peft_adapter_name = '.' + peft_adapter_name else: peft_adapter_name = '' if original_type is None: if any(('to_out_lora' in k for k in state_dict.keys())): original_type = StateDictType.DIFFUSERS_OLD elif any((f'.lora_A{peft_adapter_name}.weight' in k for k in state_dict.keys())): original_type = StateDictType.PEFT elif any(('lora_linear_layer' in k for k in state_dict.keys())): return state_dict else: raise ValueError('Could not automatically infer state dict type') if original_type not in DIFFUSERS_STATE_DICT_MAPPINGS.keys(): raise ValueError(f'Original type {original_type} is not supported') mapping = DIFFUSERS_STATE_DICT_MAPPINGS[original_type] return convert_state_dict(state_dict, mapping) # File: trl-main/trl/models/utils.py import itertools from contextlib import contextmanager from dataclasses import dataclass from typing import TYPE_CHECKING, Literal, Optional, Tuple, Union from accelerate.utils import is_deepspeed_available from transformers import PreTrainedModel, PreTrainedTokenizer from .modeling_value_head import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead SUPPORTED_ARCHITECTURES = (AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead) if is_deepspeed_available(): import deepspeed if TYPE_CHECKING: from accelerate import Accelerator from deepspeed.runtime.engine import DeepSpeedEngine from torch.nn.parallel.distributed import DistributedDataParallel from .modeling_base import PreTrainedModelWrapper @dataclass class ChatMlSpecialTokens: bos_token: str = '<|im_start|>' eos_token: str = '<|im_end|>' pad_token: str = '<|im_end|>' @property def system(self): return f'{self.bos_token}system' @property def user(self): return f'{self.bos_token}user' @property def assistant(self): return f'{self.bos_token}assistant' @property def chat_template(self): return f"{{% for message in messages %}}{{{{'{self.bos_token}' + message['role'] + '\n' + message['content'] + '{self.eos_token}' + '\n'}}}}{{% endfor %}}{{% if add_generation_prompt %}}{{{{ '{self.assistant}\n' }}}}{{% endif %}}" FORMAT_MAPPING = {'chatml': ChatMlSpecialTokens} def setup_chat_format(model: PreTrainedModel, tokenizer: PreTrainedTokenizer, format: Optional[Literal['chatml']]='chatml', resize_to_multiple_of: Optional[int]=None) -> Tuple[PreTrainedModel, PreTrainedTokenizer]: if format not in FORMAT_MAPPING: raise ValueError(f'Format 
{format} not available. Please use one of {FORMAT_MAPPING.keys()}') chat_format = FORMAT_MAPPING[format]() tokenizer.eos_token = chat_format.eos_token tokenizer.pad_token = chat_format.pad_token tokenizer.bos_token = chat_format.bos_token tokenizer.add_special_tokens({'additional_special_tokens': [chat_format.bos_token, chat_format.eos_token]}) tokenizer.chat_template = chat_format.chat_template model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=resize_to_multiple_of if resize_to_multiple_of is not None else None) if getattr(model, 'config', None) is not None: model.config.pad_token_id = tokenizer.pad_token_id model.config.bos_token_id = tokenizer.bos_token_id model.config.eos_token_id = tokenizer.eos_token_id if getattr(model, 'generation_config', None) is not None: model.generation_config.bos_token_id = tokenizer.bos_token_id model.generation_config.eos_token_id = tokenizer.eos_token_id model.generation_config.pad_token_id = tokenizer.pad_token_id return (model, tokenizer) def remove_hooks(model: 'DeepSpeedEngine') -> None: if model.optimizer is not None and hasattr(model.optimizer, 'parameter_offload'): optimizer_offload = model.optimizer.parameter_offload elif model.optimizer is not None: optimizer_offload = model.optimizer for param in iter_params(optimizer_offload.module, recurse=True): param.ds_active_sub_modules.clear() for hook in optimizer_offload.forward_hooks: hook.remove() for hook in optimizer_offload.backward_hooks: hook.remove() optimizer_offload.forward_hooks = [] optimizer_offload.backward_hooks = [] def get_all_parameters(sub_module, recurse=False): return itertools.chain(sub_module.named_parameters(recurse=recurse), sub_module.ds_external_parameters()) def iter_params(module, recurse=False): return [param for (_, param) in get_all_parameters(module, recurse)] def add_hooks(model: 'DeepSpeedEngine') -> None: if model.optimizer is not None and hasattr(model.optimizer, 'parameter_offload'): optimizer_offload = model.optimizer.parameter_offload elif model.optimizer is not None: optimizer_offload = model.optimizer optimizer_offload._register_hooks_recursively(optimizer_offload.module) @contextmanager def unwrap_model_for_generation(model: Union['DistributedDataParallel', 'DeepSpeedEngine'], accelerator: 'Accelerator', is_peft_model: bool=False) -> Union['PreTrainedModelWrapper', 'DeepSpeedEngine']: unwrapped_model = accelerator.unwrap_model(model) if is_peft_model: unwrapped_model.pretrained_model.disable_adapter() if accelerator.state.deepspeed_plugin is not None and accelerator.state.deepspeed_plugin.zero_stage == 3: with deepspeed.zero.GatheredParameters(model.parameters()): remove_hooks(model) yield accelerator.unwrap_model(model) add_hooks(model) else: yield unwrapped_model # File: trl-main/trl/trainer/__init__.py from typing import TYPE_CHECKING from ..import_utils import _LazyModule, is_diffusers_available, OptionalDependencyNotAvailable _import_structure = {'callbacks': ['RichProgressCallback', 'SyncRefModelCallback'], 'utils': ['AdaptiveKLController', 'FixedKLController', 'ConstantLengthDataset', 'DataCollatorForCompletionOnlyLM', 'RunningMoments', 'disable_dropout_in_model', 'peft_module_casting_to_bf16'], 'dpo_config': ['DPOConfig', 'FDivergenceConstants', 'FDivergenceType'], 'dpo_trainer': ['DPOTrainer'], 'cpo_config': ['CPOConfig'], 'cpo_trainer': ['CPOTrainer'], 'alignprop_config': ['AlignPropConfig'], 'alignprop_trainer': ['AlignPropTrainer'], 'iterative_sft_trainer': ['IterativeSFTTrainer'], 'kto_config': ['KTOConfig'], 'kto_trainer': 
['KTOTrainer'], 'bco_config': ['BCOConfig'], 'bco_trainer': ['BCOTrainer'], 'model_config': ['ModelConfig'], 'online_dpo_config': ['OnlineDPOConfig'], 'online_dpo_trainer': ['OnlineDPOTrainer'], 'xpo_config': ['XPOConfig'], 'xpo_trainer': ['XPOTrainer'], 'orpo_config': ['ORPOConfig'], 'orpo_trainer': ['ORPOTrainer'], 'ppo_config': ['PPOConfig'], 'ppo_trainer': ['PPOTrainer'], 'ppov2_config': ['PPOv2Config'], 'ppov2_trainer': ['PPOv2Trainer'], 'reward_config': ['RewardConfig'], 'reward_trainer': ['RewardTrainer', 'compute_accuracy'], 'rloo_config': ['RLOOConfig'], 'rloo_trainer': ['RLOOTrainer'], 'sft_config': ['SFTConfig'], 'sft_trainer': ['SFTTrainer'], 'base': ['BaseTrainer'], 'ddpo_config': ['DDPOConfig'], 'gkd_trainer': ['GKDTrainer'], 'gkd_config': ['GKDConfig'], 'callbacks': ['RichProgressCallback', 'SyncRefModelCallback', 'WinRateCallback', 'LogCompletionsCallback'], 'judges': ['BaseJudge', 'BaseRankJudge', 'BasePairwiseJudge', 'RandomRankJudge', 'RandomPairwiseJudge', 'PairRMJudge', 'HfPairwiseJudge', 'OpenAIPairwiseJudge']} try: if not is_diffusers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['ddpo_trainer'] = ['DDPOTrainer'] if TYPE_CHECKING: from .callbacks import RichProgressCallback, SyncRefModelCallback from .utils import AdaptiveKLController, FixedKLController, ConstantLengthDataset, DataCollatorForCompletionOnlyLM, RunningMoments, disable_dropout_in_model, peft_module_casting_to_bf16, empty_cache from .base import BaseTrainer from .ddpo_config import DDPOConfig from .dpo_config import DPOConfig, FDivergenceConstants, FDivergenceType from .dpo_trainer import DPOTrainer from .iterative_sft_trainer import IterativeSFTTrainer from .cpo_config import CPOConfig from .cpo_trainer import CPOTrainer from .alignprop_config import AlignPropConfig from .alignprop_trainer import AlignPropTrainer from .kto_config import KTOConfig from .kto_trainer import KTOTrainer from .bco_config import BCOConfig from .bco_trainer import BCOTrainer from .model_config import ModelConfig from .online_dpo_config import OnlineDPOConfig from .online_dpo_trainer import OnlineDPOTrainer from .xpo_config import XPOConfig from .xpo_trainer import XPOTrainer from .orpo_config import ORPOConfig from .orpo_trainer import ORPOTrainer from .ppo_config import PPOConfig from .ppo_trainer import PPOTrainer from .ppov2_config import PPOv2Config from .ppov2_trainer import PPOv2Trainer from .reward_config import RewardConfig from .reward_trainer import RewardTrainer, compute_accuracy from .rloo_config import RLOOConfig from .rloo_trainer import RLOOTrainer from .sft_config import SFTConfig from .sft_trainer import SFTTrainer from .gkd_trainer import GKDTrainer from .gkd_config import GKDConfig from .callbacks import RichProgressCallback, SyncRefModelCallback, WinRateCallback, LogCompletionsCallback from .judges import BaseJudge, BaseRankJudge, BasePairwiseJudge, RandomRankJudge, RandomPairwiseJudge, PairRMJudge, HfPairwiseJudge, OpenAIPairwiseJudge try: if not is_diffusers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .ddpo_trainer import DDPOTrainer else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: trl-main/trl/trainer/alignprop_config.py import os import sys import warnings from dataclasses import dataclass, field from typing import Any, Dict, Literal, Optional, Tuple from ..core import flatten_dict 
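# The AlignPropConfig dataclass defined below groups logging, sampling, and optimizer
# hyperparameters for AlignProp training. A minimal construction sketch (values are purely
# illustrative, not recommendations):
# >>> config = AlignPropConfig(num_epochs=10, train_learning_rate=1e-3, log_with="wandb")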
from ..import_utils import is_bitsandbytes_available, is_torchvision_available @dataclass class AlignPropConfig: exp_name: str = os.path.basename(sys.argv[0])[:-len('.py')] run_name: str = '' seed: int = 0 log_with: Optional[Literal['wandb', 'tensorboard']] = None log_image_freq: int = 1 tracker_kwargs: Dict[str, Any] = field(default_factory=dict) accelerator_kwargs: Dict[str, Any] = field(default_factory=dict) project_kwargs: Dict[str, Any] = field(default_factory=dict) tracker_project_name: str = 'trl' logdir: str = 'logs' num_epochs: int = 100 save_freq: int = 1 num_checkpoint_limit: int = 5 mixed_precision: str = 'fp16' allow_tf32: bool = True resume_from: str = '' sample_num_steps: int = 50 sample_eta: float = 1.0 sample_guidance_scale: float = 5.0 train_batch_size: int = 1 train_use_8bit_adam: bool = False train_learning_rate: float = 0.001 train_adam_beta1: float = 0.9 train_adam_beta2: float = 0.999 train_adam_weight_decay: float = 0.0001 train_adam_epsilon: float = 1e-08 train_gradient_accumulation_steps: int = 1 train_max_grad_norm: float = 1.0 negative_prompts: Optional[str] = None truncated_backprop_rand: bool = True truncated_backprop_timestep: int = 49 truncated_rand_backprop_minmax: Tuple[int, int] = (0, 50) def to_dict(self): output_dict = {} for (key, value) in self.__dict__.items(): output_dict[key] = value return flatten_dict(output_dict) def __post_init__(self): if self.log_with not in ['wandb', 'tensorboard']: warnings.warn("Accelerator tracking only supports image logging if `log_with` is set to 'wandb' or 'tensorboard'.") if self.log_with == 'wandb' and (not is_torchvision_available()): warnings.warn('Wandb image logging requires torchvision to be installed') if self.train_use_8bit_adam and (not is_bitsandbytes_available()): raise ImportError('You need to install bitsandbytes to use 8bit Adam. You can install it with `pip install bitsandbytes`.') # File: trl-main/trl/trainer/alignprop_trainer.py import os import warnings from collections import defaultdict from typing import Any, Callable, Optional, Tuple from warnings import warn import torch from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import whoami from ..models import DDPOStableDiffusionPipeline from . import AlignPropConfig, BaseTrainer logger = get_logger(__name__) MODEL_CARD_TEMPLATE = '---\nlicense: apache-2.0\nlibrary_name: transformers\ntags:\n- trl\n- alignprop\n- diffusers\n- reinforcement-learning\n- text-to-image\n- stable-diffusion\n---\n\n# {model_name}\n\nThis is a pipeline that finetunes a diffusion model with reward backpropagation while using randomized truncation (https://huggingface.co/papers/2310.03739). 
The model can be used for image generation conditioned with text.\n\n' class AlignPropTrainer(BaseTrainer): _tag_names = ['trl', 'alignprop'] def __init__(self, config: AlignPropConfig, reward_function: Callable[[torch.Tensor, Tuple[str], Tuple[Any]], torch.Tensor], prompt_function: Callable[[], Tuple[str, Any]], sd_pipeline: DDPOStableDiffusionPipeline, image_samples_hook: Optional[Callable[[Any, Any, Any], Any]]=None): if image_samples_hook is None: warn('No image_samples_hook provided; no images will be logged') self.prompt_fn = prompt_function self.reward_fn = reward_function self.config = config self.image_samples_callback = image_samples_hook accelerator_project_config = ProjectConfiguration(**self.config.project_kwargs) if self.config.resume_from: self.config.resume_from = os.path.normpath(os.path.expanduser(self.config.resume_from)) if 'checkpoint_' not in os.path.basename(self.config.resume_from): checkpoints = list(filter(lambda x: 'checkpoint_' in x, os.listdir(self.config.resume_from))) if len(checkpoints) == 0: raise ValueError(f'No checkpoints found in {self.config.resume_from}') checkpoint_numbers = sorted([int(x.split('_')[-1]) for x in checkpoints]) self.config.resume_from = os.path.join(self.config.resume_from, f'checkpoint_{checkpoint_numbers[-1]}') accelerator_project_config.iteration = checkpoint_numbers[-1] + 1 self.accelerator = Accelerator(log_with=self.config.log_with, mixed_precision=self.config.mixed_precision, project_config=accelerator_project_config, gradient_accumulation_steps=self.config.train_gradient_accumulation_steps, **self.config.accelerator_kwargs) is_using_tensorboard = config.log_with is not None and config.log_with == 'tensorboard' if self.accelerator.is_main_process: self.accelerator.init_trackers(self.config.tracker_project_name, config=dict(alignprop_trainer_config=config.to_dict()) if not is_using_tensorboard else config.to_dict(), init_kwargs=self.config.tracker_kwargs) logger.info(f'\n{config}') set_seed(self.config.seed, device_specific=True) self.sd_pipeline = sd_pipeline self.sd_pipeline.set_progress_bar_config(position=1, disable=not self.accelerator.is_local_main_process, leave=False, desc='Timestep', dynamic_ncols=True) if self.accelerator.mixed_precision == 'fp16': inference_dtype = torch.float16 elif self.accelerator.mixed_precision == 'bf16': inference_dtype = torch.bfloat16 else: inference_dtype = torch.float32 self.sd_pipeline.vae.to(self.accelerator.device, dtype=inference_dtype) self.sd_pipeline.text_encoder.to(self.accelerator.device, dtype=inference_dtype) self.sd_pipeline.unet.to(self.accelerator.device, dtype=inference_dtype) trainable_layers = self.sd_pipeline.get_trainable_layers() self.accelerator.register_save_state_pre_hook(self._save_model_hook) self.accelerator.register_load_state_pre_hook(self._load_model_hook) if self.config.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True self.optimizer = self._setup_optimizer(trainable_layers.parameters() if not isinstance(trainable_layers, list) else trainable_layers) self.neg_prompt_embed = self.sd_pipeline.text_encoder(self.sd_pipeline.tokenizer([''] if self.config.negative_prompts is None else self.config.negative_prompts, return_tensors='pt', padding='max_length', truncation=True, max_length=self.sd_pipeline.tokenizer.model_max_length).input_ids.to(self.accelerator.device))[0] self.autocast = self.sd_pipeline.autocast or self.accelerator.autocast if hasattr(self.sd_pipeline, 'use_lora') and self.sd_pipeline.use_lora: (unet, self.optimizer) = 
self.accelerator.prepare(trainable_layers, self.optimizer) self.trainable_layers = list(filter(lambda p: p.requires_grad, unet.parameters())) else: (self.trainable_layers, self.optimizer) = self.accelerator.prepare(trainable_layers, self.optimizer) if config.resume_from: logger.info(f'Resuming from {config.resume_from}') self.accelerator.load_state(config.resume_from) self.first_epoch = int(config.resume_from.split('_')[-1]) + 1 else: self.first_epoch = 0 def compute_rewards(self, prompt_image_pairs): (reward, reward_metadata) = self.reward_fn(prompt_image_pairs['images'], prompt_image_pairs['prompts'], prompt_image_pairs['prompt_metadata']) return reward def step(self, epoch: int, global_step: int): info = defaultdict(list) self.sd_pipeline.unet.train() for _ in range(self.config.train_gradient_accumulation_steps): with self.accelerator.accumulate(self.sd_pipeline.unet), self.autocast(), torch.enable_grad(): prompt_image_pairs = self._generate_samples(batch_size=self.config.train_batch_size) rewards = self.compute_rewards(prompt_image_pairs) prompt_image_pairs['rewards'] = rewards rewards_vis = self.accelerator.gather(rewards).detach().cpu().numpy() loss = self.calculate_loss(rewards) self.accelerator.backward(loss) if self.accelerator.sync_gradients: self.accelerator.clip_grad_norm_(self.trainable_layers.parameters() if not isinstance(self.trainable_layers, list) else self.trainable_layers, self.config.train_max_grad_norm) self.optimizer.step() self.optimizer.zero_grad() info['reward_mean'].append(rewards_vis.mean()) info['reward_std'].append(rewards_vis.std()) info['loss'].append(loss.item()) if self.accelerator.sync_gradients: info = {k: torch.mean(torch.tensor(v)) for (k, v) in info.items()} info = self.accelerator.reduce(info, reduction='mean') info.update({'epoch': epoch}) self.accelerator.log(info, step=global_step) global_step += 1 info = defaultdict(list) else: raise ValueError('Optimization step should have been performed by this point. 
Please check calculated gradient accumulation settings.') if self.image_samples_callback is not None and global_step % self.config.log_image_freq == 0: self.image_samples_callback(prompt_image_pairs, global_step, self.accelerator.trackers[0]) if epoch != 0 and epoch % self.config.save_freq == 0 and self.accelerator.is_main_process: self.accelerator.save_state() return global_step def calculate_loss(self, rewards): loss = 10.0 - rewards.mean() return loss def loss(self, advantages: torch.Tensor, clip_range: float, ratio: torch.Tensor): unclipped_loss = -advantages * ratio clipped_loss = -advantages * torch.clamp(ratio, 1.0 - clip_range, 1.0 + clip_range) return torch.mean(torch.maximum(unclipped_loss, clipped_loss)) def _setup_optimizer(self, trainable_layers_parameters): if self.config.train_use_8bit_adam: import bitsandbytes optimizer_cls = bitsandbytes.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW return optimizer_cls(trainable_layers_parameters, lr=self.config.train_learning_rate, betas=(self.config.train_adam_beta1, self.config.train_adam_beta2), weight_decay=self.config.train_adam_weight_decay, eps=self.config.train_adam_epsilon) def _save_model_hook(self, models, weights, output_dir): self.sd_pipeline.save_checkpoint(models, weights, output_dir) weights.pop() def _load_model_hook(self, models, input_dir): self.sd_pipeline.load_checkpoint(models, input_dir) models.pop() def _generate_samples(self, batch_size, with_grad=True, prompts=None): prompt_image_pairs = {} sample_neg_prompt_embeds = self.neg_prompt_embed.repeat(batch_size, 1, 1) if prompts is None: (prompts, prompt_metadata) = zip(*[self.prompt_fn() for _ in range(batch_size)]) else: prompt_metadata = [{} for _ in range(batch_size)] prompt_ids = self.sd_pipeline.tokenizer(prompts, return_tensors='pt', padding='max_length', truncation=True, max_length=self.sd_pipeline.tokenizer.model_max_length).input_ids.to(self.accelerator.device) prompt_embeds = self.sd_pipeline.text_encoder(prompt_ids)[0] if with_grad: sd_output = self.sd_pipeline.rgb_with_grad(prompt_embeds=prompt_embeds, negative_prompt_embeds=sample_neg_prompt_embeds, num_inference_steps=self.config.sample_num_steps, guidance_scale=self.config.sample_guidance_scale, eta=self.config.sample_eta, truncated_backprop_rand=self.config.truncated_backprop_rand, truncated_backprop_timestep=self.config.truncated_backprop_timestep, truncated_rand_backprop_minmax=self.config.truncated_rand_backprop_minmax, output_type='pt') else: sd_output = self.sd_pipeline(prompt_embeds=prompt_embeds, negative_prompt_embeds=sample_neg_prompt_embeds, num_inference_steps=self.config.sample_num_steps, guidance_scale=self.config.sample_guidance_scale, eta=self.config.sample_eta, output_type='pt') images = sd_output.images prompt_image_pairs['images'] = images prompt_image_pairs['prompts'] = prompts prompt_image_pairs['prompt_metadata'] = prompt_metadata return prompt_image_pairs def train(self, epochs: Optional[int]=None): global_step = 0 if epochs is None: epochs = self.config.num_epochs for epoch in range(self.first_epoch, epochs): global_step = self.step(epoch, global_step) def create_model_card(self, path: str, model_name: Optional[str]='TRL AlignProp Model') -> None: try: user = whoami()['name'] except Exception: warnings.warn('Cannot retrieve user information assuming you are running in offline mode.') return if not os.path.exists(path): os.makedirs(path) model_card_content = MODEL_CARD_TEMPLATE.format(model_name=model_name, model_id=f'{user}/{path}') with open(os.path.join(path, 
'README.md'), 'w', encoding='utf-8') as f: f.write(model_card_content) def _save_pretrained(self, save_directory): self.sd_pipeline.save_pretrained(save_directory) self.create_model_card(save_directory) # File: trl-main/trl/trainer/base.py from huggingface_hub import PyTorchModelHubMixin class BaseTrainer(PyTorchModelHubMixin): def __init__(self, config): self.config = config def step(self, *args): raise NotImplementedError('Not implemented') def loss(self, *args): raise NotImplementedError('Not implemented') def compute_rewards(self, *args): raise NotImplementedError('Not implemented') def _save_pretrained(self, save_directory): raise NotImplementedError('Not implemented') # File: trl-main/trl/trainer/bco_config.py from dataclasses import dataclass from typing import Any, Dict, Optional from transformers import TrainingArguments @dataclass class BCOConfig(TrainingArguments): max_length: Optional[int] = None max_prompt_length: Optional[int] = None max_completion_length: Optional[int] = None beta: float = 0.1 label_pad_token_id: int = -100 padding_value: Optional[int] = None truncation_mode: str = 'keep_end' generate_during_eval: bool = False is_encoder_decoder: Optional[bool] = None precompute_ref_log_probs: bool = False model_init_kwargs: Optional[Dict[str, Any]] = None ref_model_init_kwargs: Optional[Dict[str, Any]] = None dataset_num_proc: Optional[int] = None prompt_sample_size: int = 1024 min_density_ratio: float = 0.5 max_density_ratio: float = 10.0 # File: trl-main/trl/trainer/bco_trainer.py import inspect import os import random import warnings from collections import defaultdict from contextlib import contextmanager, nullcontext from copy import deepcopy from functools import wraps from operator import itemgetter from typing import TYPE_CHECKING, Any, Callable, Dict, List, Literal, Optional, Tuple, Union import numpy as np import torch import torch.amp as amp import torch.nn as nn import torch.nn.functional as F from accelerate import PartialState from accelerate.utils import is_deepspeed_available, tqdm from datasets import Dataset from torch.utils.data import DataLoader, SequentialSampler from transformers import AutoModelForCausalLM, DataCollator, PreTrainedModel, PreTrainedTokenizerBase, Trainer, TrainingArguments from transformers.trainer_callback import TrainerCallback from transformers.trainer_utils import EvalLoopOutput, has_length from ..import_utils import is_peft_available, is_sklearn_available, is_wandb_available from ..models import PreTrainedModelWrapper, create_reference_model from .bco_config import BCOConfig from .utils import DPODataCollatorWithPadding, RunningMoments, disable_dropout_in_model, pad_to_length, peft_module_casting_to_bf16, trl_sanitze_kwargs_for_tagging if is_peft_available(): from peft import PeftModel, get_peft_model, prepare_model_for_kbit_training if is_wandb_available(): import wandb if is_sklearn_available(): from sklearn.linear_model import LogisticRegression if is_deepspeed_available(): import deepspeed if TYPE_CHECKING: from transformers import PreTrainedModel, PreTrainedTokenizer RUNNING_NAME = 'running.json' CLF_NAME = 'clf.pt' def _tokenize(batch: Dict[str, List[Any]], tokenizer: 'PreTrainedTokenizer', embedding_tokenizer: Optional['PreTrainedTokenizer']=None) -> Dict[str, List[Any]]: prompt_tokenized = tokenizer(batch['prompt'], add_special_tokens=False) prompt_input_ids = prompt_tokenized['input_ids'] prompt_attention_mask = prompt_tokenized['attention_mask'] prompt_and_completion = [prompt + completion for (prompt, completion) in 
zip(batch['prompt'], batch['completion'])] full_tokenized = tokenizer(prompt_and_completion, add_special_tokens=False) full_input_ids = full_tokenized['input_ids'] full_attention_mask = full_tokenized['attention_mask'] answer_input_ids = [f[len(p):] for (f, p) in zip(full_input_ids, prompt_input_ids)] answer_attention_mask = [f[len(p):] for (f, p) in zip(full_attention_mask, prompt_attention_mask)] full_concat_input_ids = [np.concatenate([p, a]) for (p, a) in zip(prompt_input_ids, answer_input_ids)] full_input_ids = [np.array(f) for f in full_input_ids] for (full, concat) in zip(full_input_ids, full_concat_input_ids): if len(full) != len(concat): raise ValueError('Prompt input ids and answer input ids should have the same length.') response_token_ids_start_idx = [len(p) for p in prompt_input_ids] for (idx, (p, f, r)) in enumerate(zip(prompt_input_ids, full_input_ids, response_token_ids_start_idx)): if not np.array_equal(p, f[:r]): response_token_ids_start_idx[idx] -= 1 prompt_input_ids = [f[:r] for (f, r) in zip(full_input_ids, response_token_ids_start_idx)] prompt_attention_mask = [f[:r] for (f, r) in zip(full_attention_mask, response_token_ids_start_idx)] for (p, m) in zip(prompt_input_ids, prompt_attention_mask): if len(p) != len(m): raise ValueError('Prompt input ids and attention mask should have the same length.') answer_input_ids = [f[r:] for (f, r) in zip(full_input_ids, response_token_ids_start_idx)] answer_attention_mask = [f[r:] for (f, r) in zip(full_attention_mask, response_token_ids_start_idx)] output = dict(prompt_input_ids=prompt_input_ids, prompt_attention_mask=prompt_attention_mask, answer_input_ids=answer_input_ids, answer_attention_mask=answer_attention_mask) if embedding_tokenizer is not None: embedding_tokenized = embedding_tokenizer(batch['prompt'], truncation=True, add_special_tokens=False) output.update({'embedding_input_ids': embedding_tokenized['input_ids'], 'embedding_attention_mask': embedding_tokenized['attention_mask']}) return output def _process_tokens(example: Dict[str, Any], model: 'PreTrainedModel'=None, **kwargs) -> Dict: prompt = example['prompt'] completion = example['completion'] batch = {f"{kwargs['prefix']}prompt": prompt, f"{kwargs['prefix']}completion": completion, f"{kwargs['prefix']}label": example['label']} if not kwargs['is_encoder_decoder']: if not isinstance(prompt, str): raise ValueError(f'prompt should be an str but got {type(prompt)}') if not isinstance(completion, str): raise ValueError(f'completion should be an str but got {type(completion)}') all_tokens = {'prompt_input_ids': example['prompt_input_ids'], 'prompt_attention_mask': example['prompt_attention_mask'], 'answer_input_ids': example['answer_input_ids'], 'answer_attention_mask': example['answer_attention_mask']} max_length = kwargs['max_length'] bos_token_id = kwargs['tokenizer'].bos_token_id eos_token_id = kwargs['tokenizer'].eos_token_id if bos_token_id != all_tokens['prompt_input_ids'][0]: max_length -= 1 if eos_token_id != all_tokens['answer_input_ids'][-1]: max_length -= 1 if len(all_tokens['prompt_input_ids']) + len(all_tokens['answer_input_ids']) > max_length: for k in ['prompt_input_ids', 'prompt_attention_mask']: if kwargs['truncation_mode'] == 'keep_start': all_tokens[k] = all_tokens[k][:kwargs['max_prompt_length']] elif kwargs['truncation_mode'] == 'keep_end': all_tokens[k] = all_tokens[k][-kwargs['max_prompt_length']:] else: raise ValueError(f"Unknown truncation mode: {kwargs['truncation_mode']}") if len(all_tokens['prompt_input_ids']) + 
len(all_tokens['answer_input_ids']) > max_length: for k in ['answer_input_ids', 'answer_attention_mask']: all_tokens[k] = all_tokens[k][:max_length - kwargs['max_prompt_length']] batch[f"{kwargs['prefix']}prompt_input_ids"] = all_tokens['prompt_input_ids'] batch[f"{kwargs['prefix']}prompt_attention_mask"] = all_tokens['prompt_attention_mask'] batch[f"{kwargs['prefix']}completion_input_ids"] = all_tokens['prompt_input_ids'] + all_tokens['answer_input_ids'] batch[f"{kwargs['prefix']}completion_attention_mask"] = all_tokens['prompt_attention_mask'] + all_tokens['answer_attention_mask'] if len(all_tokens['prompt_input_ids']) == 0 or bos_token_id != all_tokens['prompt_input_ids'][0]: batch[f"{kwargs['prefix']}prompt_input_ids"] = [bos_token_id] + batch[f"{kwargs['prefix']}prompt_input_ids"] batch[f"{kwargs['prefix']}prompt_attention_mask"] = [1] + batch[f"{kwargs['prefix']}prompt_attention_mask"] batch[f"{kwargs['prefix']}completion_input_ids"] = [bos_token_id] + batch[f"{kwargs['prefix']}completion_input_ids"] batch[f"{kwargs['prefix']}completion_attention_mask"] = [1] + batch[f"{kwargs['prefix']}completion_attention_mask"] if len(all_tokens['answer_input_ids']) == 0 or eos_token_id != all_tokens['answer_input_ids'][-1]: batch[f"{kwargs['prefix']}completion_input_ids"] = batch[f"{kwargs['prefix']}completion_input_ids"] + [eos_token_id] batch[f"{kwargs['prefix']}completion_attention_mask"] = batch[f"{kwargs['prefix']}completion_attention_mask"] + [1] batch[f"{kwargs['prefix']}completion_labels"] = batch[f"{kwargs['prefix']}completion_input_ids"][:] batch[f"{kwargs['prefix']}completion_labels"][:len(batch[f"{kwargs['prefix']}prompt_input_ids"])] = [kwargs['label_pad_token_id']] * len(batch[f"{kwargs['prefix']}prompt_input_ids"]) else: completion_tokens = kwargs['tokenizer'](completion, truncation=True, max_length=kwargs['max_completion_length'], add_special_tokens=True) prompt_tokens = kwargs['tokenizer'](prompt, truncation=True, max_length=kwargs['max_prompt_length'], add_special_tokens=True) batch[f"{kwargs['prefix']}prompt_input_ids"] = prompt_tokens['input_ids'] batch[f"{kwargs['prefix']}prompt_attention_mask"] = prompt_tokens['attention_mask'] batch[f"{kwargs['prefix']}completion_labels"] = completion_tokens['input_ids'] batch[f"{kwargs['prefix']}completion_attention_mask"] = completion_tokens['attention_mask'] if model is not None and hasattr(model, 'prepare_decoder_input_ids_from_labels'): batch[f"{kwargs['prefix']}completion_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(labels=torch.tensor(batch['completion_labels'])) return batch class BCOTrainer(Trainer): _tag_names = ['trl', 'bco'] def __init__(self, model: Union[PreTrainedModel, nn.Module, str]=None, ref_model: Optional[Union[PreTrainedModel, nn.Module, str]]=None, args: BCOConfig=None, train_dataset: Optional[Dataset]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]]=None, tokenizer: Optional[PreTrainedTokenizerBase]=None, data_collator: Optional[DataCollator]=None, model_init: Optional[Callable[[], PreTrainedModel]]=None, callbacks: Optional[List[TrainerCallback]]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None, peft_config: Optional[Dict]=None, compute_metrics: Optional[Callable[[EvalLoopOutput], Dict]]=None, model_adapter_name: Optional[str]=None, ref_adapter_name: Optional[str]=None, embedding_func: Optional[Callable]=None, embedding_tokenizer: 
Optional[PreTrainedTokenizerBase]=None): if not is_sklearn_available(): raise ImportError('BCOTrainer requires the scikit-learn library. Please install it with `pip install scikit-learn`.') if type(args) is TrainingArguments: raise ValueError('Please use `BCOConfig` instead of `TrainingArguments`.') if args.model_init_kwargs is None: model_init_kwargs = {} elif not isinstance(model, str): raise ValueError('You passed `model_init_kwargs` to the BCOTrainer, but your model is already instantiated.') else: model_init_kwargs = args.model_init_kwargs torch_dtype = model_init_kwargs.get('torch_dtype') if torch_dtype is not None: if isinstance(torch_dtype, str) and torch_dtype != 'auto': torch_dtype = getattr(torch, torch_dtype) if torch_dtype != 'auto' and (not isinstance(torch_dtype, torch.dtype)): raise ValueError(f"Invalid `torch_dtype` passed to the BCOConfig. Expected either a `torch.dtype`, a string representing one, or 'auto', but got {torch_dtype}.") model_init_kwargs['torch_dtype'] = torch_dtype if args.ref_model_init_kwargs is None: ref_model_init_kwargs = {} elif not isinstance(ref_model, str): raise ValueError('You passed `ref_model_init_kwargs` to the BCOTrainer, but your ref_model is already instantiated.') else: ref_model_init_kwargs = args.ref_model_init_kwargs torch_dtype = ref_model_init_kwargs.get('torch_dtype') if torch_dtype is not None: if isinstance(torch_dtype, str) and torch_dtype != 'auto': torch_dtype = getattr(torch, torch_dtype) if torch_dtype != 'auto' and (not isinstance(torch_dtype, torch.dtype)): raise ValueError(f"Invalid `torch_dtype` passed to the BCOConfig. Expected either a `torch.dtype`, a string representing one, or 'auto', but got {torch_dtype}.") ref_model_init_kwargs['torch_dtype'] = torch_dtype if isinstance(model, str): warnings.warn('You passed a model_id to the BCOTrainer. This will automatically create an `AutoModelForCausalLM` or a `PeftModel` (if you passed a `peft_config`) for you.') model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs) if isinstance(ref_model, str): warnings.warn('You passed a ref model_id to the BCOTrainer.
This will automatically create an `AutoModelForCausalLM`') ref_model = AutoModelForCausalLM.from_pretrained(ref_model, **ref_model_init_kwargs) self._peft_has_been_casted_to_bf16 = False if not is_peft_available() and peft_config is not None: raise ValueError("PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it with `pip install peft` to use the PEFT models") elif is_peft_available() and peft_config is not None: if isinstance(model, PeftModel): model = model.merge_and_unload() if getattr(model, 'is_loaded_in_8bit', False) or getattr(model, 'is_loaded_in_4bit', False): _support_gc_kwargs = hasattr(args, 'gradient_checkpointing_kwargs') and 'gradient_checkpointing_kwargs' in list(inspect.signature(prepare_model_for_kbit_training).parameters) prepare_model_kwargs = {'use_gradient_checkpointing': args.gradient_checkpointing} if _support_gc_kwargs: prepare_model_kwargs['gradient_checkpointing_kwargs'] = args.gradient_checkpointing_kwargs model = prepare_model_for_kbit_training(model, **prepare_model_kwargs) elif getattr(args, 'gradient_checkpointing', False): if hasattr(model, 'enable_input_require_grads'): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) model = get_peft_model(model, peft_config) if args.bf16 and getattr(model, 'is_loaded_in_4bit', False): peft_module_casting_to_bf16(model) self._peft_has_been_casted_to_bf16 = True elif getattr(args, 'gradient_checkpointing', False): if hasattr(model, 'enable_input_require_grads'): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) if args.generate_during_eval and (not is_wandb_available()): raise ValueError('`generate_during_eval=True` requires Weights and Biases to be installed. Please install with `pip install wandb` to resolve.') if model is not None: self.is_encoder_decoder = model.config.is_encoder_decoder elif args.is_encoder_decoder is None: raise ValueError('When no model is provided, you need to pass the parameter is_encoder_decoder.') else: self.is_encoder_decoder = args.is_encoder_decoder self.is_peft_model = is_peft_available() and isinstance(model, PeftModel) self.model_adapter_name = model_adapter_name self.ref_adapter_name = ref_adapter_name if ref_model: self.ref_model = ref_model elif self.is_peft_model or args.precompute_ref_log_probs: self.ref_model = None else: self.ref_model = create_reference_model(model) if tokenizer is None: raise ValueError('max_length or a tokenizer must be specified when using the default DPODataCollatorWithPadding') if args.max_length is None: warnings.warn('When using DPODataCollatorWithPadding, you should set `max_length` in the `BCOConfig`. It will be set to `512` by default, but you should do it yourself in the future.', UserWarning) max_length = 512 if args.max_length is not None: max_length = args.max_length if args.max_prompt_length is None: warnings.warn('When using DPODataCollatorWithPadding, you should set `max_prompt_length` in the `BCOConfig`. 
It will be set to `128` by default, but you should do it yourself in the future.', UserWarning) max_prompt_length = 128 if args.max_prompt_length is not None: max_prompt_length = args.max_prompt_length max_completion_length = None if args.max_completion_length is None and self.is_encoder_decoder: warnings.warn("When using DPODataCollatorWithPadding with an encoder-decoder architecture, you should set `max_completion_length` in the BCOTrainer's init. It will be set to `128` by default, but you should do it yourself in the future.", UserWarning) max_completion_length = 128 if args.max_completion_length is not None and self.is_encoder_decoder: max_completion_length = args.max_completion_length if data_collator is None: data_collator = DPODataCollatorWithPadding(pad_token_id=tokenizer.pad_token_id, label_pad_token_id=args.label_pad_token_id, is_encoder_decoder=self.is_encoder_decoder) if args.remove_unused_columns: args.remove_unused_columns = False warnings.warn('When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your BCOConfig. We have set it for you, but you should do it yourself in the future.', UserWarning) self.use_dpo_data_collator = True else: self.use_dpo_data_collator = False disable_dropout_in_model(model) if self.ref_model is not None: disable_dropout_in_model(self.ref_model) self.max_length = max_length self.generate_during_eval = args.generate_during_eval self.label_pad_token_id = args.label_pad_token_id self.padding_value = args.padding_value if args.padding_value is not None else tokenizer.pad_token_id self.max_prompt_length = max_prompt_length self.truncation_mode = args.truncation_mode self.max_completion_length = max_completion_length self.tokenizer = tokenizer self.precompute_ref_log_probs = args.precompute_ref_log_probs self._precomputed_train_ref_log_probs = False self._precomputed_eval_ref_log_probs = False self._stored_metrics = defaultdict(lambda : defaultdict(list)) self.beta = args.beta self.aux_loss_enabled = getattr(model.config, 'output_router_logits', False) self.embedding_func = embedding_func self.embedding_tokenizer = embedding_tokenizer with PartialState().local_main_process_first(): train_dataset = train_dataset.shuffle(seed=args.data_seed) if eval_dataset is not None: eval_dataset = eval_dataset.shuffle(seed=args.data_seed) train_dataset = train_dataset.map(_tokenize, batched=True, fn_kwargs={'tokenizer': self.tokenizer, 'embedding_tokenizer': self.embedding_tokenizer}, num_proc=args.dataset_num_proc, desc='Tokenizing train dataset') fn_kwargs = {'prefix': '', 'is_encoder_decoder': self.is_encoder_decoder, 'tokenizer': self.tokenizer, 'max_length': self.max_length, 'truncation_mode': self.truncation_mode, 'label_pad_token_id': self.label_pad_token_id, 'max_prompt_length': self.max_prompt_length, 'max_completion_length': self.max_completion_length} train_dataset = train_dataset.map(_process_tokens, fn_kwargs=fn_kwargs, num_proc=args.dataset_num_proc, desc='Processing tokenized train dataset') if eval_dataset is not None: eval_dataset = eval_dataset.map(_tokenize, fn_kwargs={'tokenizer': self.tokenizer, 'embedding_tokenizer': self.embedding_tokenizer}, batched=True, num_proc=args.dataset_num_proc, desc='Tokenizing eval dataset') fn_kwargs = {'prefix': '', 'is_encoder_decoder': self.is_encoder_decoder, 'tokenizer': self.tokenizer, 'max_length': self.max_length, 'truncation_mode': self.truncation_mode, 'label_pad_token_id': self.label_pad_token_id, 'max_prompt_length': self.max_prompt_length, 'max_completion_length':
self.max_completion_length} eval_dataset = eval_dataset.map(_process_tokens, fn_kwargs=fn_kwargs, num_proc=args.dataset_num_proc, desc='Processing tokenized eval dataset') desirable = train_dataset.filter(lambda x: x['label'], num_proc=args.dataset_num_proc, desc='Filtering desirable examples') undesirable = train_dataset.filter(lambda x: not x['label'], num_proc=args.dataset_num_proc, desc='Filtering undesirable examples') desirable = desirable.shuffle(seed=args.data_seed) undesirable = undesirable.shuffle(seed=args.data_seed) super().__init__(model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, model_init=model_init, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics) if hasattr(self.model, 'add_model_tags'): self.model.add_model_tags(self._tag_names) if not hasattr(self, 'accelerator'): raise AttributeError('Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`.') if self.is_deepspeed_enabled: if self.accelerator.state.deepspeed_plugin.zero_stage == 3 and self.precompute_ref_log_probs: raise ValueError('You cannot use `precompute_ref_log_probs=True` with Deepspeed ZeRO-3. Please set `precompute_ref_log_probs=False`.') if self.ref_model is None: if not (self.is_peft_model or self.precompute_ref_log_probs): raise ValueError('No reference model and model is not a Peft model. Try setting `precompute_ref_log_probs=True`.') elif self.is_deepspeed_enabled: self.ref_model = self._prepare_deepspeed(self.ref_model) else: self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True) self.running = RunningMoments(accelerator=self.accelerator) if self.embedding_func is None: warnings.warn('You did not pass an `embedding_func`; the underlying distribution matching (UDM) feature is deactivated.') return chosen_embeddings = self._get_sample_prompt_embeddings(desirable, sample_size=self.args.prompt_sample_size) rejected_embeddings = self._get_sample_prompt_embeddings(undesirable, sample_size=self.args.prompt_sample_size) embeddings = torch.cat((chosen_embeddings, rejected_embeddings), dim=0) labels = torch.cat((torch.ones_like(chosen_embeddings[:, 0]), torch.zeros_like(rejected_embeddings[:, 0])), dim=0) self.clf = LogisticRegression(class_weight='balanced').fit(embeddings.cpu().float().numpy(), labels.cpu().numpy()) @property def match_underlying_distribution(self): return self.embedding_func is not None and self.embedding_tokenizer is not None def _get_chosen_prob(self, prompt_embeddings: torch.FloatTensor) -> torch.FloatTensor: dtype = prompt_embeddings.dtype device = prompt_embeddings.device rank = self.accelerator.process_index padded_prompt_embeddings = self.accelerator.pad_across_processes(prompt_embeddings, pad_index=self.embedding_tokenizer.pad_token_id) sample_size = padded_prompt_embeddings.shape[0] nonzero = padded_prompt_embeddings.mean(dim=1) != self.embedding_tokenizer.pad_token_id prompt_embeddings = self.accelerator.gather(padded_prompt_embeddings) if prompt_embeddings.shape[0] == 0: return torch.tensor([], device=device, dtype=dtype) prob = self.clf.predict_proba(prompt_embeddings.cpu().float().numpy())[:, 1] prob = torch.as_tensor(prob, dtype=dtype, device=device) prob = self.accelerator.reduce(prob, reduction='mean') prob = prob[sample_size * rank:sample_size * (rank + 1)] prob = prob[nonzero] return prob def _vectorize_prompt(self, input_ids: torch.LongTensor, attention_mask:
torch.LongTensor) -> torch.FloatTensor: input_ids = torch.where(input_ids == self.tokenizer.pad_token_id, self.embedding_tokenizer.pad_token_id, input_ids) with torch.no_grad(): embeddings = self.embedding_func(input_ids=input_ids, attention_mask=attention_mask) return embeddings def _get_prompt_embeddings(self, batch: Dict[str, Union[List, torch.LongTensor]]) -> Tuple[torch.FloatTensor, torch.FloatTensor]: if not self.match_underlying_distribution: return (None, None) embeddings = self._vectorize_prompt(input_ids=batch['embedding_input_ids'], attention_mask=batch['embedding_attention_mask']) chosen_idx = [i for i in range(len(batch['label'])) if batch['label'][i] is True] rejected_idx = [i for i in range(len(batch['label'])) if batch['label'][i] is False] chosen_embeddings = embeddings[chosen_idx, ...] rejected_embeddings = embeddings[rejected_idx, ...] return (chosen_embeddings, rejected_embeddings) def _get_sample_prompt_embeddings(self, dataset: Dataset, sample_size: int=512) -> torch.FloatTensor: n_samples = min(len(dataset), sample_size) rand_indices = np.random.choice(len(dataset), size=(n_samples,)) embedding_dataset = dataset.select(rand_indices) dataloader_params = {'batch_size': self.args.per_device_train_batch_size, 'collate_fn': self.data_collator, 'num_workers': self.args.dataloader_num_workers, 'pin_memory': self.args.dataloader_pin_memory, 'shuffle': False} data_loader = self.accelerator.prepare(DataLoader(embedding_dataset, **dataloader_params)) with torch.no_grad(): all_embeddings = torch.empty(0) for padded_batch in tqdm(iterable=data_loader, desc='Building sample prompt embeddings'): embeddings = self._vectorize_prompt(input_ids=padded_batch['embedding_input_ids'], attention_mask=padded_batch['embedding_attention_mask']) embeddings = self.accelerator.gather_for_metrics(embeddings) all_embeddings = torch.cat((all_embeddings, embeddings.cpu())) return all_embeddings def _prepare_deepspeed(self, model: PreTrainedModelWrapper): deepspeed_plugin = self.accelerator.state.deepspeed_plugin config_kwargs = deepcopy(deepspeed_plugin.deepspeed_config) if model is not None: if hasattr(model, 'config'): hidden_size = max(model.config.hidden_sizes) if getattr(model.config, 'hidden_sizes', None) else getattr(model.config, 'hidden_size', None) if hidden_size is not None and config_kwargs['zero_optimization']['stage'] == 3: config_kwargs.update({'zero_optimization.reduce_bucket_size': hidden_size * hidden_size, 'zero_optimization.stage3_param_persistence_threshold': 10 * hidden_size, 'zero_optimization.stage3_prefetch_bucket_size': 0.9 * hidden_size * hidden_size}) if config_kwargs['zero_optimization']['stage'] != 3: config_kwargs['zero_optimization']['stage'] = 0 (model, *_) = deepspeed.initialize(model=model, config=config_kwargs) model.eval() return model def _save_optimizer_and_scheduler(self, output_dir): super()._save_optimizer_and_scheduler(output_dir) output_dir = output_dir if output_dir is not None else self.args.output_dir self.running.save_to_json(os.path.join(output_dir, RUNNING_NAME)) if self.match_underlying_distribution: torch.save(self.clf.get_params(), os.path.join(output_dir, CLF_NAME)) def _load_optimizer_and_scheduler(self, checkpoint): super()._load_optimizer_and_scheduler(checkpoint) if checkpoint is None: return running_file = os.path.join(checkpoint, RUNNING_NAME) if not os.path.isfile(running_file): warnings.warn(f'Missing file {running_file}. 
Will use a new running delta value for BCO loss calculation') else: self.running = RunningMoments.load_from_json(self.accelerator, running_file) if self.match_underlying_distribution: clf_file = os.path.join(checkpoint, CLF_NAME) if not os.path.isfile(clf_file): warnings.warn(f'Missing file {clf_file}. Will use a new UDM classifier for BCO loss calculation') else: self.clf.set_params(**torch.load(clf_file, weights_only=True, map_location='cpu')) @contextmanager def null_ref_context(self): with self.accelerator.unwrap_model(self.model).disable_adapter() if self.is_peft_model and (not self.ref_adapter_name) else nullcontext(): if self.ref_adapter_name: self.model.set_adapter(self.ref_adapter_name) yield if self.ref_adapter_name: self.model.set_adapter(self.model_adapter_name or 'default') def get_train_dataloader(self) -> DataLoader: if self.precompute_ref_log_probs and (not self._precomputed_train_ref_log_probs): dataloader_params = {'batch_size': self.args.per_device_train_batch_size, 'collate_fn': self.data_collator, 'num_workers': self.args.dataloader_num_workers, 'pin_memory': self.args.dataloader_pin_memory, 'shuffle': False} data_loader = self.accelerator.prepare(DataLoader(self.train_dataset, **dataloader_params)) reference_completion_logps = [] for padded_batch in tqdm(iterable=data_loader, desc='Train dataset reference log probs'): reference_completion_logp = self.compute_reference_log_probs(padded_batch) reference_completion_logp = self.accelerator.gather_for_metrics(reference_completion_logp) reference_completion_logps.append(reference_completion_logp.cpu()) self.train_dataset = self.train_dataset.add_column(name='reference_logps', column=torch.cat(reference_completion_logps).float().numpy()) self._precomputed_train_ref_log_probs = True return super().get_train_dataloader() def get_eval_dataloader(self, eval_dataset: Optional[Dataset]=None) -> DataLoader: if eval_dataset is None and self.eval_dataset is None: raise ValueError('Trainer: evaluation requires an eval_dataset.') eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset if self.precompute_ref_log_probs and (not self._precomputed_eval_ref_log_probs): dataloader_params = {'batch_size': self.args.per_device_eval_batch_size, 'collate_fn': self.data_collator, 'num_workers': self.args.dataloader_num_workers, 'pin_memory': self.args.dataloader_pin_memory, 'shuffle': False} data_loader = self.accelerator.prepare(DataLoader(eval_dataset, **dataloader_params)) reference_completion_logps = [] for padded_batch in tqdm(iterable=data_loader, desc='Eval dataset reference log probs'): reference_completion_logp = self.compute_reference_log_probs(padded_batch) reference_completion_logp = self.accelerator.gather_for_metrics(reference_completion_logp) reference_completion_logps.append(reference_completion_logp.cpu()) eval_dataset = eval_dataset.add_column(name='reference_logps', column=torch.cat(reference_completion_logps).float().numpy()) if self.eval_dataset is not None: self.eval_dataset = eval_dataset self._precomputed_eval_ref_log_probs = True return super().get_eval_dataloader(eval_dataset=eval_dataset) def compute_reference_log_probs(self, padded_batch: Dict) -> Dict: with torch.no_grad(): if self.ref_model is None: with self.null_ref_context(): if self.is_encoder_decoder: completion_logits = self.model(padded_batch['prompt_input_ids'], attention_mask=padded_batch['prompt_attention_mask'], decoder_input_ids=padded_batch.get('completion_decoder_input_ids'), labels=padded_batch['completion_labels']).logits
else: completion_logits = self.model(padded_batch['completion_input_ids'], attention_mask=padded_batch['completion_attention_mask']).logits elif self.is_encoder_decoder: completion_logits = self.ref_model(padded_batch['prompt_input_ids'], attention_mask=padded_batch['prompt_attention_mask'], decoder_input_ids=padded_batch.get('completion_decoder_input_ids'), labels=padded_batch['completion_labels']).logits else: completion_logits = self.ref_model(padded_batch['completion_input_ids'], attention_mask=padded_batch['completion_attention_mask']).logits completion_logps = self.get_batch_logps(completion_logits, padded_batch['completion_labels'], average_log_prob=False, is_encoder_decoder=self.is_encoder_decoder, label_pad_token_id=self.label_pad_token_id) return completion_logps @staticmethod def get_batch_logps(logits: torch.FloatTensor, labels: torch.LongTensor, average_log_prob: bool=False, label_pad_token_id: int=-100, is_encoder_decoder: bool=False) -> torch.FloatTensor: if logits.shape[:-1] != labels.shape: raise ValueError('Logits (batch and sequence length dim) and labels must have the same shape.') if not is_encoder_decoder: labels = labels[:, 1:].clone() logits = logits[:, :-1, :] else: labels = labels.clone() loss_mask = labels != label_pad_token_id labels[labels == label_pad_token_id] = 0 per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2) if average_log_prob: return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1) else: return (per_token_logps * loss_mask).sum(-1) def forward(self, model: nn.Module, batch: Dict[str, Union[List, torch.LongTensor]]) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: model_kwargs = {'labels': batch['completion_labels'], 'decoder_input_ids': batch.get('completion_decoder_input_ids')} if self.is_encoder_decoder else {} if self.aux_loss_enabled: model_kwargs['output_router_logits'] = True outputs = model(batch['completion_input_ids'], attention_mask=batch['completion_attention_mask'], **model_kwargs) completion_logits = outputs.logits completion_logps = self.get_batch_logps(completion_logits, batch['completion_labels'], average_log_prob=False, is_encoder_decoder=self.is_encoder_decoder, label_pad_token_id=self.label_pad_token_id) if completion_logps.shape[0] != len(batch['label']): raise ValueError('There is a mismatch between the number of examples in this batch and the number of examples for which an output sequence was predicted.') chosen_idx = [i for i in range(completion_logps.shape[0]) if batch['label'][i] is True] rejected_idx = [i for i in range(completion_logps.shape[0]) if batch['label'][i] is False] chosen_logps = completion_logps[chosen_idx, ...] rejected_logps = completion_logps[rejected_idx, ...] chosen_logits = completion_logits[chosen_idx, ...] rejected_logits = completion_logits[rejected_idx, ...] 
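# --- Editor's illustrative sketch (not part of the TRL sources) --------------
# Toy-sized check of the `get_batch_logps` logic defined above for a
# decoder-only model: shift logits and labels, gather the log-probability of
# each label token, mask out `label_pad_token_id` positions, and sum per
# sequence. All shapes and values below are made up purely for illustration.
import torch

batch_size, seq_len, vocab_size = 2, 5, 11
logits = torch.randn(batch_size, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (batch_size, seq_len))
labels[1, -2:] = -100  # pretend the last two tokens of the second sequence are padding

shifted_labels = labels[:, 1:].clone()
shifted_logits = logits[:, :-1, :]
loss_mask = shifted_labels != -100
shifted_labels[shifted_labels == -100] = 0  # dummy index so gather stays in range
per_token_logps = torch.gather(shifted_logits.log_softmax(-1), dim=2, index=shifted_labels.unsqueeze(2)).squeeze(2)
sequence_logps = (per_token_logps * loss_mask).sum(-1)  # one summed log-prob per sequence
print(sequence_logps.shape)  # torch.Size([2])
# ------------------------------------------------------------------------------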
if self.aux_loss_enabled: return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, outputs.aux_loss) else: return (chosen_logps, rejected_logps, chosen_logits, rejected_logits) def _get_udm_weight(self, rejected_embeddings: torch.FloatTensor) -> torch.FloatTensor: prob_desirable = self._get_chosen_prob(rejected_embeddings) min_ratio = self.args.min_density_ratio max_ratio = self.args.max_density_ratio weight = (prob_desirable / (1 - prob_desirable + 1e-08)).clamp(min=min_ratio, max=max_ratio) return weight def bco_loss(self, policy_chosen_logps: torch.FloatTensor, policy_rejected_logps: torch.FloatTensor, reference_chosen_logps: torch.FloatTensor, reference_rejected_logps: torch.FloatTensor, chosen_embeddings: Optional[torch.FloatTensor], rejected_embeddings: Optional[torch.FloatTensor]) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: if policy_chosen_logps.shape[0] != 0 or reference_chosen_logps.shape[0] != 0: chosen_logratios = policy_chosen_logps - reference_chosen_logps chosen_rewards = self.beta * chosen_logratios else: chosen_losses = torch.Tensor([]).to(self.accelerator.device) chosen_rewards = torch.Tensor([]).to(self.accelerator.device) if policy_rejected_logps.shape[0] != 0 or reference_rejected_logps.shape[0] != 0: rejected_logratios = policy_rejected_logps - reference_rejected_logps rejected_rewards = self.beta * rejected_logratios else: rejected_losses = torch.Tensor([]).to(self.accelerator.device) rejected_rewards = torch.Tensor([]).to(self.accelerator.device) rewards = torch.cat((chosen_rewards, rejected_rewards), 0).mean().detach() self.running.update(rewards) delta = self.running.mean if policy_chosen_logps.shape[0] != 0 or reference_chosen_logps.shape[0] != 0: chosen_losses = -F.logsigmoid(chosen_rewards - delta) if policy_rejected_logps.shape[0] != 0 or reference_rejected_logps.shape[0] != 0: rejected_losses = -F.logsigmoid(-(rejected_rewards - delta)) if self.match_underlying_distribution: chosen_weight = torch.ones_like(chosen_losses) rejected_weight = self._get_udm_weight(rejected_embeddings) losses = torch.cat((chosen_weight * chosen_losses, rejected_weight * rejected_losses), dim=0) else: losses = torch.cat((chosen_losses, rejected_losses), dim=0) return (losses, chosen_rewards, rejected_rewards, torch.as_tensor(delta)) def get_batch_loss_metrics(self, model, batch: Dict[str, Union[List, torch.LongTensor]]): metrics = {} batch = {k: v.to(self.accelerator.device) if isinstance(v, torch.Tensor) else v for (k, v) in batch.items()} forward_output = self.forward(model, batch) (policy_chosen_logps, policy_rejected_logps, policy_chosen_logits, policy_rejected_logits) = forward_output[:4] if self.aux_loss_enabled: aux_loss = forward_output[4] if 'reference_logps' in batch: chosen_idx = [i for i in range(batch['reference_logps'].shape[0]) if batch['label'][i] is True] rejected_idx = [i for i in range(batch['reference_logps'].shape[0]) if batch['label'][i] is False] reference_chosen_logps = batch['reference_logps'][chosen_idx, ...] reference_rejected_logps = batch['reference_logps'][rejected_idx, ...] 
else: with torch.no_grad(): if self.ref_model is None: with self.null_ref_context(): (reference_chosen_logps, reference_rejected_logps, _, _) = self.forward(self.model, batch)[:4] else: (reference_chosen_logps, reference_rejected_logps, _, _) = self.forward(self.ref_model, batch)[:4] (chosen_embeddings, rejected_embeddings) = self._get_prompt_embeddings(batch) (losses, chosen_rewards, rejected_rewards, delta) = self.bco_loss(policy_chosen_logps, policy_rejected_logps, reference_chosen_logps, reference_rejected_logps, chosen_embeddings, rejected_embeddings) metrics['delta'] = delta.item() num_chosen = torch.Tensor([len(chosen_rewards)]).to(self.accelerator.device) num_rejected = torch.Tensor([len(rejected_rewards)]).to(self.accelerator.device) all_num_chosen = self.accelerator.gather(num_chosen).sum().item() all_num_rejected = self.accelerator.gather(num_rejected).sum().item() if all_num_chosen > 0: metrics['rewards/chosen_sum'] = self.accelerator.gather(chosen_rewards.nansum()).nansum().item() metrics['logps/chosen_sum'] = self.accelerator.gather(policy_chosen_logps.nansum()).nansum().item() metrics['count/chosen'] = all_num_chosen if all_num_rejected > 0: metrics['rewards/rejected_sum'] = self.accelerator.gather(rejected_rewards.nansum()).nansum().item() metrics['logps/rejected_sum'] = self.accelerator.gather(policy_rejected_logps.nansum()).nansum().item() metrics['count/rejected'] = all_num_rejected loss = losses.nanmean() if self.aux_loss_enabled: loss += getattr(model.config, 'router_aux_loss_coef', 0.0) * aux_loss return (loss, metrics) def compute_loss(self, model: Union[PreTrainedModel, nn.Module], inputs: Dict[str, Union[torch.Tensor, Any]], return_outputs=False) -> Union[torch.Tensor, Tuple[torch.Tensor, Dict[str, torch.Tensor]]]: if not self.use_dpo_data_collator: warnings.warn('compute_loss is only implemented for DPODataCollatorWithPadding, and you passed a datacollator that is different than DPODataCollatorWithPadding - you might see unexpected behavior. 
Alternatively, you can implement your own prediction_step method if you are using a custom data collator') compute_loss_context_manager = amp.autocast('cuda') if self._peft_has_been_casted_to_bf16 else nullcontext() with compute_loss_context_manager: (loss, metrics) = self.get_batch_loss_metrics(model, inputs) loss = loss.to(self.args.device) if self.accelerator.is_main_process: self.store_metrics(metrics, train_eval='train') if return_outputs: return (loss, metrics) return loss def store_metrics(self, metrics: Dict[str, float], train_eval: Literal['train', 'eval']='train') -> None: for (key, value) in metrics.items(): self._stored_metrics[train_eval][key].append(value) def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]: if self.train_dataset is None or not has_length(self.train_dataset): return None return SequentialSampler(self.train_dataset) def get_batch_samples(self, model, batch: Dict[str, torch.LongTensor]) -> Tuple[str, str]: generate_context_manager = amp.autocast('cuda') if self._peft_has_been_casted_to_bf16 else nullcontext() with generate_context_manager: policy_output = model.generate(input_ids=batch['prompt_input_ids'], attention_mask=batch['prompt_attention_mask'], max_length=self.max_length, do_sample=True, pad_token_id=self.tokenizer.pad_token_id) if 'reference_output' in batch: reference_output = batch['reference_output'] elif self.ref_model is None: with self.null_ref_context(): reference_output = self.model.generate(input_ids=batch['prompt_input_ids'], attention_mask=batch['prompt_attention_mask'], max_length=self.max_length, do_sample=True, pad_token_id=self.tokenizer.pad_token_id) else: reference_output = self.ref_model.generate(input_ids=batch['prompt_input_ids'], attention_mask=batch['prompt_attention_mask'], max_length=self.max_length, do_sample=True, pad_token_id=self.tokenizer.pad_token_id) policy_output = pad_to_length(policy_output, self.max_length, self.tokenizer.pad_token_id) policy_output_decoded = self.tokenizer.batch_decode(policy_output, skip_special_tokens=True) reference_output = pad_to_length(reference_output, self.max_length, self.tokenizer.pad_token_id) reference_output_decoded = self.tokenizer.batch_decode(reference_output, skip_special_tokens=True) return (policy_output_decoded, reference_output_decoded) def prediction_step(self, model: Union[PreTrainedModel, nn.Module], inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]]=None): if not self.use_dpo_data_collator: warnings.warn('prediction_step is only implemented for DPODataCollatorWithPadding, and you passed a datacollator that is different than DPODataCollatorWithPadding - you might see unexpected behavior. 
Alternatively, you can implement your own prediction_step method if you are using a custom data collator') if ignore_keys is None: if hasattr(model, 'config'): ignore_keys = getattr(model.config, 'keys_to_ignore_at_inference', []) else: ignore_keys = [] prediction_context_manager = amp.autocast('cuda') if self._peft_has_been_casted_to_bf16 else nullcontext() with torch.no_grad(), prediction_context_manager: (loss, metrics) = self.get_batch_loss_metrics(model, inputs) if self.accelerator.is_main_process: self.store_metrics(metrics, train_eval='eval') if prediction_loss_only: return (loss.detach(), None, None) logits_dict = {'eval_logits/chosen': metrics['logits/chosen'], 'eval_logits/rejected': metrics['logits/rejected']} logits = tuple((v.unsqueeze(dim=0) for (k, v) in logits_dict.items() if k not in ignore_keys)) logits = torch.stack(logits).mean(axis=1).to(self.accelerator.device) labels = torch.zeros(logits.shape[0], device=self.accelerator.device) return (loss.detach(), logits, labels) def evaluation_loop(self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool]=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str='eval') -> EvalLoopOutput: if self.generate_during_eval: num_samples = len(dataloader.dataset) random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size) random_batch_dataset = dataloader.dataset.select(random_indices) random_batch = self.data_collator(random_batch_dataset) random_batch = self._prepare_inputs(random_batch) target_indicies = [i for i in range(len(random_batch['delta'])) if random_batch['delta'][i] is False] target_batch = {'prompt_input_ids': itemgetter(*target_indicies)(random_batch['prompt_input_ids']), 'prompt_attention_mask': itemgetter(*target_indicies)(random_batch['prompt_attention_mask']), 'prompt': itemgetter(*target_indicies)(random_batch['prompt'])} (policy_output_decoded, ref_output_decoded) = self.get_batch_samples(self.model, target_batch) self.log({'game_log': wandb.Table(columns=['Prompt', 'Policy', 'Ref Model'], rows=[[prompt, pol[len(prompt):], ref[len(prompt):]] for (prompt, pol, ref) in zip(target_batch['prompt'], policy_output_decoded, ref_output_decoded)])}) self.state.log_history.pop() initial_output = super().evaluation_loop(dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix) return initial_output def log(self, logs: Dict[str, float]) -> None: train_eval = 'train' if 'loss' in logs else 'eval' prefix = 'eval_' if train_eval == 'eval' else '' for split in ['chosen', 'rejected']: if f'count/{split}' in self._stored_metrics[train_eval]: count_sum = torch.Tensor(self._stored_metrics[train_eval][f'count/{split}']).sum().item() logs[f'{prefix}rewards/{split}'] = torch.Tensor(self._stored_metrics[train_eval][f'rewards/{split}_sum']).sum().item() / count_sum logs[f'{prefix}logps/{split}'] = torch.Tensor(self._stored_metrics[train_eval][f'logps/{split}_sum']).sum().item() / count_sum for key in [f'count/{split}', f'rewards/{split}_sum', f'logps/{split}_sum']: del self._stored_metrics[train_eval][key] if f'{prefix}rewards/chosen' in logs and f'{prefix}rewards/rejected' in logs: logs[f'{prefix}rewards/margins'] = logs[f'{prefix}rewards/chosen'] - logs[f'{prefix}rewards/rejected'] for (key, metrics) in self._stored_metrics[train_eval].items(): logs[f'{prefix}{key}'] = torch.Tensor(metrics).mean().item() del self._stored_metrics[train_eval] return super().log(logs) @wraps(Trainer.push_to_hub) def push_to_hub(self, commit_message: Optional[str]='End of 
training', blocking: bool=True, **kwargs) -> str: kwargs = trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs) return super().push_to_hub(commit_message=commit_message, blocking=blocking, **kwargs) # File: trl-main/trl/trainer/callbacks.py from typing import List, Optional, Union import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.utils import gather_object, is_deepspeed_available from rich.console import Console, Group from rich.live import Live from rich.panel import Panel from rich.progress import Progress from transformers import GenerationConfig, PreTrainedModel, PreTrainedTokenizerBase, Trainer, TrainerCallback, TrainerControl, TrainerState, TrainingArguments from transformers.integrations import WandbCallback from transformers.trainer_utils import has_length from ..models.utils import unwrap_model_for_generation from .judges import BasePairwiseJudge if is_deepspeed_available(): import deepspeed def _generate_completions(prompts: List[str], model: PreTrainedModel, tokenizer: PreTrainedTokenizerBase, accelerator: Accelerator, generation_config: GenerationConfig, batch_size: int=1) -> List[str]: completions = [] with unwrap_model_for_generation(model, accelerator) as unwrapped_model: unwrapped_model.eval() for idx in range(0, len(prompts), batch_size): batch = prompts[idx:idx + batch_size] tokenized_batch = tokenizer(batch, return_tensors='pt', padding=True, truncation=True).to(model.device) generations = unwrapped_model.generate(**tokenized_batch, generation_config=generation_config) for (prompt, generation) in zip(tokenized_batch.input_ids, generations): generation = generation[len(prompt):] completion = tokenizer.decode(generation, skip_special_tokens=True) completions.append(completion) unwrapped_model.train() return completions class SyncRefModelCallback(TrainerCallback): def __init__(self, ref_model: Union[PreTrainedModel, torch.nn.Module], accelerator: Optional[Accelerator]): self.accelerator = accelerator self.ref_model = ref_model @staticmethod def _sync_target_model(model, target_model, alpha): for (target_param, copy_param) in zip(target_model.parameters(), model.parameters()): target_param.data.mul_(1.0 - alpha).add_(copy_param.data, alpha=alpha) @staticmethod def sync_target_model(model, target_model, alpha): deepspeed_plugin = AcceleratorState().deepspeed_plugin if deepspeed_plugin is not None and deepspeed_plugin.zero_stage == 3: with deepspeed.zero.GatheredParameters(list(model.parameters()) + list(target_model.parameters()), modifier_rank=0): if deepspeed.comm.get_rank() == 0: SyncRefModelCallback._sync_target_model(model, target_model, alpha) else: SyncRefModelCallback._sync_target_model(model, target_model, alpha) def on_step_end(self, args, state, control, **kwargs): model: PreTrainedModel = kwargs['model'] if self.ref_model is not None and state.global_step % args.ref_model_sync_steps == 0: if self.accelerator: model = self.accelerator.unwrap_model(model) self.sync_target_model(model, self.ref_model, args.ref_model_mixup_alpha) class RichProgressCallback(TrainerCallback): def __init__(self): self.training_bar = None self.prediction_bar = None self.training_task_id = None self.prediction_task_id = None self.rich_group = None self.rich_console = None self.training_status = None self.current_step = None def on_train_begin(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar = Progress() self.prediction_bar = Progress() self.rich_console 
= Console() self.training_status = self.rich_console.status('Nothing to log yet ...') self.rich_group = Live(Panel(Group(self.training_bar, self.prediction_bar, self.training_status))) self.rich_group.start() self.training_task_id = self.training_bar.add_task('[blue]Training the model', total=state.max_steps) self.current_step = 0 def on_step_end(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar.update(self.training_task_id, advance=state.global_step - self.current_step, update=True) self.current_step = state.global_step def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs): if state.is_world_process_zero and has_length(eval_dataloader): if self.prediction_task_id is None: self.prediction_task_id = self.prediction_bar.add_task('[blue]Predicting on the evaluation dataset', total=len(eval_dataloader)) self.prediction_bar.update(self.prediction_task_id, advance=1, update=True) def on_evaluate(self, args, state, control, **kwargs): if state.is_world_process_zero: if self.prediction_task_id is not None: self.prediction_bar.remove_task(self.prediction_task_id) self.prediction_task_id = None def on_predict(self, args, state, control, **kwargs): if state.is_world_process_zero: if self.prediction_task_id is not None: self.prediction_bar.remove_task(self.prediction_task_id) self.prediction_task_id = None def on_log(self, args, state, control, logs=None, **kwargs): if state.is_world_process_zero and self.training_bar is not None: _ = logs.pop('total_flos', None) self.training_status.update(f'[bold green]Status = {str(logs)}') def on_train_end(self, args, state, control, **kwargs): if state.is_world_process_zero: self.rich_group.stop() self.training_bar = None self.prediction_bar = None self.training_task_id = None self.prediction_task_id = None self.rich_group = None self.rich_console = None self.training_status = None self.current_step = None class WinRateCallback(TrainerCallback): def __init__(self, judge: BasePairwiseJudge, trainer: Trainer, generation_config: Optional[GenerationConfig]=None, num_prompts: int=None): self.judge = judge self.trainer = trainer self.generation_config = generation_config self.ref_completions = [] if self.trainer.eval_dataset is None: raise ValueError('Trainer must have an evaluation dataset to use the WinRateCallback.') else: self.eval_dataset = self.trainer.eval_dataset if num_prompts is not None: self.eval_dataset = self.eval_dataset.select(range(num_prompts)) def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): tokenizer = kwargs['tokenizer'] tokenizer.padding_side = 'left' accelerator = self.trainer.accelerator model = getattr(self.trainer, 'ref_model', kwargs['model']) with accelerator.split_between_processes(self.eval_dataset['prompt']) as prompts: self.ref_completions = _generate_completions(prompts, model=model, tokenizer=tokenizer, accelerator=accelerator, generation_config=self.generation_config, batch_size=args.per_device_eval_batch_size) def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): tokenizer = kwargs['tokenizer'] tokenizer.padding_side = 'left' accelerator = self.trainer.accelerator model = self.trainer.model_wrapped with accelerator.split_between_processes(self.eval_dataset['prompt']) as prompts: completions = _generate_completions(prompts, model=model, tokenizer=tokenizer, accelerator=accelerator, generation_config=self.generation_config, 
batch_size=args.per_device_eval_batch_size) completions = list(zip(self.ref_completions, completions)) winner_indices = self.judge.judge(prompts, completions) winner_indices = gather_object(winner_indices) if self.trainer.accelerator.is_main_process: win_rate = sum((winner_idx == 1 for winner_idx in winner_indices)) / len(winner_indices) self.trainer.log({'eval_win_rate': win_rate}) class LogCompletionsCallback(WandbCallback): def __init__(self, trainer: Trainer, generation_config: Optional[GenerationConfig]=None, num_prompts: int=None, freq: int=None): super().__init__() self.trainer = trainer self.generation_config = generation_config self.freq = freq self.table = [] self._last_logged_step = -1 if self.trainer.eval_dataset is None: raise ValueError('Trainer must have an evaluation dataset to use the LogCompletionsCallback.') else: self.eval_dataset = self.trainer.eval_dataset if num_prompts is not None: self.eval_dataset = self.eval_dataset.select(range(num_prompts)) def on_step_end(self, args, state, control, **kwargs): if state.global_step == self._last_logged_step: return freq = self.freq or state.logging_steps if state.global_step % freq != 0: return tokenizer = kwargs['tokenizer'] tokenizer.padding_side = 'left' accelerator = self.trainer.accelerator model = self.trainer.model_wrapped with accelerator.split_between_processes(self.eval_dataset['prompt']) as prompts: completions = _generate_completions(prompts, model=model, tokenizer=tokenizer, accelerator=accelerator, generation_config=self.generation_config, batch_size=args.per_device_eval_batch_size) completions = gather_object(completions) prompts = gather_object(prompts) if self.trainer.accelerator.is_main_process: global_step = [str(state.global_step)] * len(prompts) data = list(zip(global_step, prompts, completions)) self.table.extend(data) table = self._wandb.Table(columns=['step', 'prompt', 'completion'], data=self.table) self._wandb.log({'completions': table}) self._last_logged_step = state.global_step # File: trl-main/trl/trainer/cpo_config.py from dataclasses import dataclass from typing import Any, Dict, Literal, Optional from transformers import TrainingArguments @dataclass class CPOConfig(TrainingArguments): max_length: Optional[int] = None max_prompt_length: Optional[int] = None max_completion_length: Optional[int] = None beta: float = 0.1 label_smoothing: float = 0.0 loss_type: Literal['sigmoid', 'hinge', 'ipo', 'simpo'] = 'sigmoid' disable_dropout: bool = True cpo_alpha: float = 1.0 simpo_gamma: float = 0.5 label_pad_token_id: int = -100 padding_value: Optional[int] = None truncation_mode: str = 'keep_end' generate_during_eval: bool = False is_encoder_decoder: Optional[bool] = None model_init_kwargs: Optional[Dict[str, Any]] = None dataset_num_proc: Optional[int] = None # File: trl-main/trl/trainer/cpo_trainer.py import inspect import random import warnings from collections import defaultdict from contextlib import nullcontext from functools import wraps from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union import numpy as np import torch import torch.amp as amp import torch.nn as nn import torch.nn.functional as F from accelerate import PartialState from datasets import Dataset from torch.utils.data import DataLoader from transformers import AutoModelForCausalLM, DataCollator, PreTrainedModel, PreTrainedTokenizerBase, Trainer from transformers.trainer_callback import TrainerCallback from transformers.trainer_utils import EvalLoopOutput from transformers.utils import is_torch_fx_proxy from 
..import_utils import is_peft_available, is_wandb_available from .cpo_config import CPOConfig from .utils import DPODataCollatorWithPadding, add_bos_token_if_needed, add_eos_token_if_needed, disable_dropout_in_model, pad_to_length, peft_module_casting_to_bf16, trl_sanitze_kwargs_for_tagging if is_peft_available(): from peft import PeftModel, get_peft_model, prepare_model_for_kbit_training if is_wandb_available(): import wandb class CPOTrainer(Trainer): _tag_names = ['trl', 'cpo'] def __init__(self, model: Optional[Union[PreTrainedModel, nn.Module, str]]=None, args: Optional[CPOConfig]=None, data_collator: Optional[DataCollator]=None, train_dataset: Optional[Dataset]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]]=None, tokenizer: Optional[PreTrainedTokenizerBase]=None, model_init: Optional[Callable[[], PreTrainedModel]]=None, callbacks: Optional[List[TrainerCallback]]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None, peft_config: Optional[Dict]=None, compute_metrics: Optional[Callable[[EvalLoopOutput], Dict]]=None): if args.model_init_kwargs is None: model_init_kwargs = {} elif not isinstance(model, str): raise ValueError('You passed model_kwargs to the CPOTrainer. But your model is already instantiated.') else: model_init_kwargs = args.model_init_kwargs torch_dtype = model_init_kwargs.get('torch_dtype') if torch_dtype is not None: if isinstance(torch_dtype, str) and torch_dtype != 'auto': torch_dtype = getattr(torch, torch_dtype) if torch_dtype != 'auto' and (not isinstance(torch_dtype, torch.dtype)): raise ValueError(f"Invalid `torch_dtype` passed to the CPOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}.") model_init_kwargs['torch_dtype'] = torch_dtype if isinstance(model, str): warnings.warn('You passed a model_id to the CPOTrainer. 
This will automatically create an `AutoModelForCausalLM` or a `PeftModel` (if you passed a `peft_config`) for you.') model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs) self._peft_has_been_casted_to_bf16 = False if not is_peft_available() and peft_config is not None: raise ValueError("PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models") elif is_peft_available() and peft_config is not None: if isinstance(model, PeftModel): model = model.merge_and_unload() if getattr(model, 'is_loaded_in_8bit', False) or getattr(model, 'is_loaded_in_4bit', False): _support_gc_kwargs = hasattr(args, 'gradient_checkpointing_kwargs') and 'gradient_checkpointing_kwargs' in list(inspect.signature(prepare_model_for_kbit_training).parameters) prepare_model_kwargs = {'use_gradient_checkpointing': args.gradient_checkpointing} if _support_gc_kwargs: prepare_model_kwargs['gradient_checkpointing_kwargs'] = args.gradient_checkpointing_kwargs model = prepare_model_for_kbit_training(model, **prepare_model_kwargs) elif getattr(args, 'gradient_checkpointing', False): if hasattr(model, 'enable_input_require_grads'): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) model = get_peft_model(model, peft_config) if args.bf16 and getattr(model, 'is_loaded_in_4bit', False): peft_module_casting_to_bf16(model) self._peft_has_been_casted_to_bf16 = True elif getattr(args, 'gradient_checkpointing', False): if hasattr(model, 'enable_input_require_grads'): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) if args.generate_during_eval and (not is_wandb_available()): raise ValueError('`generate_during_eval=True` requires Weights and Biases to be installed. 
Please install `wandb` to resolve.') if model is not None: self.is_encoder_decoder = model.config.is_encoder_decoder elif args.is_encoder_decoder is None: raise ValueError('When no model is provided, you need to pass the parameter is_encoder_decoder.') else: self.is_encoder_decoder = args.is_encoder_decoder if self.is_encoder_decoder: self.decoder_start_token_id = model.config.decoder_start_token_id self.pad_token_id = model.config.pad_token_id if tokenizer is None: raise ValueError('tokenizer must be specified to tokenize a CPO dataset.') if args.max_length is None: warnings.warn("`max_length` is not set in the CPOConfig's init; it will default to `512`. You should set it yourself in the future.", UserWarning) max_length = 512 else: max_length = args.max_length if args.max_prompt_length is None: warnings.warn("`max_prompt_length` is not set in the CPOConfig's init; it will default to `128`. You should set it yourself in the future.", UserWarning) max_prompt_length = 128 else: max_prompt_length = args.max_prompt_length if args.max_completion_length is None and self.is_encoder_decoder: warnings.warn("When using an encoder-decoder architecture, you should set `max_completion_length` in the CPOConfig's init; it will default to `128`. You should set it yourself in the future.", UserWarning) max_completion_length = 128 else: max_completion_length = args.max_completion_length if data_collator is None: data_collator = DPODataCollatorWithPadding(pad_token_id=tokenizer.pad_token_id, label_pad_token_id=args.label_pad_token_id, is_encoder_decoder=self.is_encoder_decoder) if args.remove_unused_columns: args.remove_unused_columns = False warnings.warn('When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your TrainingArguments; we have set it for you, but you should do it yourself in the future.', UserWarning) self.use_dpo_data_collator = True else: self.use_dpo_data_collator = False if args.disable_dropout: disable_dropout_in_model(model) self.max_length = max_length self.generate_during_eval = args.generate_during_eval self.label_pad_token_id = args.label_pad_token_id self.padding_value = args.padding_value if args.padding_value is not None else tokenizer.pad_token_id self.max_prompt_length = max_prompt_length self.truncation_mode = args.truncation_mode self.max_completion_length = max_completion_length self.tokenizer = tokenizer if args.loss_type in ['hinge', 'ipo'] and args.label_smoothing > 0: warnings.warn('You are using a loss type that does not support label smoothing. Ignoring label_smoothing parameter.') if args.loss_type == 'kto_pair': raise ValueError('Support for kto_pair has been removed in CPOTrainer. Please use KTOTrainer.') self.beta = args.beta self.label_smoothing = args.label_smoothing self.loss_type = args.loss_type self.cpo_alpha = args.cpo_alpha self.aux_loss_enabled = getattr(model.config, 'output_router_logits', False) if args.loss_type == 'simpo': self.simpo_gamma = args.simpo_gamma if self.cpo_alpha > 0: warnings.warn('You are using the CPO-SimPO method because you set a non-zero cpo_alpha. This will result in the CPO-SimPO method (https://github.com/fe1ixxu/CPO_SIMPO/tree/main).
If you want to use a pure SimPO method, please set cpo_alpha to 0.') self._stored_metrics = defaultdict(lambda : defaultdict(list)) with PartialState().local_main_process_first(): train_dataset = train_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc) if eval_dataset is not None: eval_dataset = eval_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc) super().__init__(model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, model_init=model_init, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics) if hasattr(self.model, 'add_model_tags'): self.model.add_model_tags(self._tag_names) if not hasattr(self, 'accelerator'): raise AttributeError('Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`.') def build_tokenized_answer(self, prompt, answer): full_tokenized = self.tokenizer(prompt + answer, add_special_tokens=False) prompt_input_ids = self.tokenizer(prompt, add_special_tokens=False)['input_ids'] answer_input_ids = full_tokenized['input_ids'][len(prompt_input_ids):] answer_attention_mask = full_tokenized['attention_mask'][len(prompt_input_ids):] full_concat_input_ids = np.concatenate([prompt_input_ids, answer_input_ids]) full_input_ids = np.array(full_tokenized['input_ids']) if len(full_input_ids) != len(full_concat_input_ids): raise ValueError('Prompt input ids and answer input ids should have the same length.') response_token_ids_start_idx = len(prompt_input_ids) if prompt_input_ids != full_tokenized['input_ids'][:response_token_ids_start_idx]: response_token_ids_start_idx -= 1 prompt_input_ids = full_tokenized['input_ids'][:response_token_ids_start_idx] prompt_attention_mask = full_tokenized['attention_mask'][:response_token_ids_start_idx] if len(prompt_input_ids) != len(prompt_attention_mask): raise ValueError('Prompt input ids and attention mask should have the same length.') answer_input_ids = full_tokenized['input_ids'][response_token_ids_start_idx:] answer_attention_mask = full_tokenized['attention_mask'][response_token_ids_start_idx:] return dict(prompt_input_ids=prompt_input_ids, prompt_attention_mask=prompt_attention_mask, input_ids=answer_input_ids, attention_mask=answer_attention_mask) def tokenize_row(self, feature, model: Optional[Union[PreTrainedModel, nn.Module]]=None) -> Dict: batch = {} prompt = feature['prompt'] chosen = feature['chosen'] rejected = feature['rejected'] if not self.is_encoder_decoder: if not isinstance(prompt, str): raise ValueError(f'prompt should be an str but got {type(prompt)}') prompt_tokens = self.tokenizer(prompt, add_special_tokens=False) prompt_tokens = {f'prompt_{k}': v for (k, v) in prompt_tokens.items()} if not isinstance(chosen, str): raise ValueError(f'chosen should be an str but got {type(chosen)}') chosen_tokens = self.build_tokenized_answer(prompt, chosen) if not isinstance(rejected, str): raise ValueError(f'rejected should be an str but got {type(rejected)}') rejected_tokens = self.build_tokenized_answer(prompt, rejected) prompt_len_input_ids = len(prompt_tokens['prompt_input_ids']) chosen_prompt_len_input_ids = len(chosen_tokens['prompt_input_ids']) rejected_prompt_len_input_ids = len(rejected_tokens['prompt_input_ids']) prompt_len_input_ids = min(chosen_prompt_len_input_ids, rejected_prompt_len_input_ids) for (k, v) in prompt_tokens.items(): prompt_tokens[k] = v[:prompt_len_input_ids] num_diff_tokens = sum([a != b for (a, 
b) in zip(chosen_tokens['prompt_input_ids'], rejected_tokens['prompt_input_ids'])]) num_diff_len = abs(chosen_prompt_len_input_ids - rejected_prompt_len_input_ids) if num_diff_tokens > 1 or num_diff_len > 1: raise ValueError('Chosen and rejected prompt_input_ids might only differ on the last token due to tokenizer merge ops.') (prompt_tokens, chosen_tokens, rejected_tokens) = add_bos_token_if_needed(self.tokenizer.bos_token_id, prompt_len_input_ids, prompt_tokens, chosen_prompt_len_input_ids, chosen_tokens, rejected_prompt_len_input_ids, rejected_tokens) (chosen_tokens, rejected_tokens) = add_eos_token_if_needed(self.tokenizer.eos_token_id, chosen_tokens, rejected_tokens) longer_response_length = max(len(chosen_tokens['input_ids']), len(rejected_tokens['input_ids'])) for answer_tokens in [chosen_tokens, rejected_tokens, prompt_tokens]: if len(answer_tokens['prompt_input_ids']) + longer_response_length > self.max_length: if self.truncation_mode == 'keep_start': for k in ['prompt_input_ids', 'prompt_attention_mask']: answer_tokens[k] = answer_tokens[k][:self.max_prompt_length] elif self.truncation_mode == 'keep_end': for k in ['prompt_input_ids', 'prompt_attention_mask']: answer_tokens[k] = answer_tokens[k][-self.max_prompt_length:] else: raise ValueError(f'Unknown truncation mode: {self.truncation_mode}') for answer_tokens in [chosen_tokens, rejected_tokens]: if len(answer_tokens['prompt_input_ids']) + longer_response_length > self.max_length: for k in ['input_ids', 'attention_mask']: answer_tokens[k] = answer_tokens[k][:self.max_length - self.max_prompt_length] chosen_sequence_tokens = {k: chosen_tokens[f'prompt_{k}'] + chosen_tokens[k] for k in ['input_ids', 'attention_mask']} rejected_sequence_tokens = {k: rejected_tokens[f'prompt_{k}'] + rejected_tokens[k] for k in ['input_ids', 'attention_mask']} chosen_sequence_tokens['labels'] = chosen_sequence_tokens['input_ids'][:] chosen_sequence_tokens['labels'][:len(chosen_tokens['prompt_input_ids'])] = [self.label_pad_token_id] * len(chosen_tokens['prompt_input_ids']) rejected_sequence_tokens['labels'] = rejected_sequence_tokens['input_ids'][:] rejected_sequence_tokens['labels'][:len(rejected_tokens['prompt_input_ids'])] = [self.label_pad_token_id] * len(rejected_tokens['prompt_input_ids']) for (k, toks) in {'chosen_': chosen_sequence_tokens, 'rejected_': rejected_sequence_tokens, '': prompt_tokens}.items(): for (type_key, tokens) in toks.items(): if type_key == 'token_type_ids': continue batch[f'{k}{type_key}'] = tokens else: chosen_tokens = self.tokenizer(chosen, truncation=True, max_length=self.max_completion_length, add_special_tokens=True) rejected_tokens = self.tokenizer(rejected, truncation=True, max_length=self.max_completion_length, add_special_tokens=True) prompt_tokens = self.tokenizer(prompt, truncation=True, max_length=self.max_prompt_length, add_special_tokens=True) batch['chosen_labels'] = chosen_tokens['input_ids'] batch['rejected_labels'] = rejected_tokens['input_ids'] batch['prompt_input_ids'] = prompt_tokens['input_ids'] batch['prompt_attention_mask'] = prompt_tokens['attention_mask'] if model is not None and hasattr(model, 'prepare_decoder_input_ids_from_labels'): batch['rejected_decoder_input_ids'] = model.prepare_decoder_input_ids_from_labels(labels=torch.tensor(batch['rejected_labels'])) batch['chosen_decoder_input_ids'] = model.prepare_decoder_input_ids_from_labels(labels=torch.tensor(batch['chosen_labels'])) return batch @staticmethod def concatenated_inputs(batch: Dict[str, Union[List, torch.LongTensor]], 
is_encoder_decoder: bool=False, label_pad_token_id: int=-100, padding_value: int=0, device: Optional[torch.device]=None) -> Dict[str, torch.LongTensor]: concatenated_batch = {} if is_encoder_decoder: max_length = max(batch['chosen_labels'].shape[1], batch['rejected_labels'].shape[1]) else: max_length = max(batch['chosen_input_ids'].shape[1], batch['rejected_input_ids'].shape[1]) for k in batch: if k.startswith('chosen') and isinstance(batch[k], torch.Tensor): if 'labels' in k or is_encoder_decoder: pad_value = label_pad_token_id elif k.endswith('_input_ids'): pad_value = padding_value elif k.endswith('_attention_mask'): pad_value = 0 concatenated_key = k.replace('chosen', 'concatenated') concatenated_batch[concatenated_key] = pad_to_length(batch[k], max_length, pad_value=pad_value) for k in batch: if k.startswith('rejected') and isinstance(batch[k], torch.Tensor): if 'labels' in k or is_encoder_decoder: pad_value = label_pad_token_id elif k.endswith('_input_ids'): pad_value = padding_value elif k.endswith('_attention_mask'): pad_value = 0 concatenated_key = k.replace('rejected', 'concatenated') concatenated_batch[concatenated_key] = torch.cat((concatenated_batch[concatenated_key], pad_to_length(batch[k], max_length, pad_value=pad_value)), dim=0).to(device=device) if is_encoder_decoder: concatenated_batch['concatenated_input_ids'] = batch['prompt_input_ids'].repeat(2, 1).to(device=device) concatenated_batch['concatenated_attention_mask'] = batch['prompt_attention_mask'].repeat(2, 1).to(device=device) return concatenated_batch def cpo_loss(self, policy_chosen_logps: torch.FloatTensor, policy_rejected_logps: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: logits = (policy_chosen_logps - policy_rejected_logps).to(self.accelerator.device) if self.loss_type == 'simpo': gamma_logratios = self.simpo_gamma / self.beta logits = logits - gamma_logratios losses = -F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing) - F.logsigmoid(-self.beta * logits) * self.label_smoothing elif self.loss_type == 'sigmoid': losses = -F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing) - F.logsigmoid(-self.beta * logits) * self.label_smoothing elif self.loss_type == 'hinge': losses = torch.relu(1 - self.beta * logits) elif self.loss_type == 'ipo': losses = (logits - 1 / (2 * self.beta)) ** 2 else: raise ValueError(f"Unknown loss type: {self.loss_type}. 
Should be one of ['sigmoid', 'hinge', 'ipo', 'simpo']") chosen_rewards = self.beta * policy_chosen_logps.to(self.accelerator.device).detach() rejected_rewards = self.beta * policy_rejected_logps.to(self.accelerator.device).detach() return (losses, chosen_rewards, rejected_rewards) @staticmethod def get_batch_logps(logits: torch.FloatTensor, labels: torch.LongTensor, average_log_prob: bool=False, label_pad_token_id: int=-100, is_encoder_decoder: bool=False) -> torch.FloatTensor: if logits.shape[:-1] != labels.shape: raise ValueError('Logits (batch and sequence length dim) and labels must have the same shape.') if not is_encoder_decoder: labels = labels[:, 1:].clone() logits = logits[:, :-1, :] loss_mask = labels != label_pad_token_id labels[labels == label_pad_token_id] = 0 per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2) if average_log_prob: return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1) else: return (per_token_logps * loss_mask).sum(-1) def concatenated_forward(self, model: nn.Module, batch: Dict[str, Union[List, torch.LongTensor]]) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: concatenated_batch = self.concatenated_inputs(batch, is_encoder_decoder=self.is_encoder_decoder, label_pad_token_id=self.label_pad_token_id, padding_value=self.padding_value, device=self.accelerator.device) len_chosen = batch['chosen_labels'].shape[0] model_kwargs = {'decoder_input_ids': self._shift_right(concatenated_batch['concatenated_labels'])} if self.is_encoder_decoder else {} if self.aux_loss_enabled: model_kwargs['output_router_logits'] = True outputs = model(concatenated_batch['concatenated_input_ids'], attention_mask=concatenated_batch['concatenated_attention_mask'], use_cache=False, **model_kwargs) all_logits = outputs.logits def cross_entropy_loss(logits, labels): if not self.is_encoder_decoder: logits = logits[..., :-1, :].contiguous() labels = labels[..., 1:].contiguous() loss_fct = nn.CrossEntropyLoss() logits = logits.view(-1, logits.shape[-1]) labels = labels.view(-1) labels = labels.to(logits.device) loss = loss_fct(logits, labels) return loss labels = concatenated_batch['concatenated_labels'].clone() if self.cpo_alpha == 0: nll_loss = torch.tensor(0.0).to(self.accelerator.device) else: nll_loss = cross_entropy_loss(all_logits[:len_chosen], labels[:len_chosen]) all_logps = self.get_batch_logps(all_logits, concatenated_batch['concatenated_labels'], average_log_prob=self.loss_type in ['ipo', 'simpo'], is_encoder_decoder=self.is_encoder_decoder, label_pad_token_id=self.label_pad_token_id) chosen_logps = all_logps[:len_chosen] rejected_logps = all_logps[len_chosen:] chosen_logits = all_logits[:len_chosen] rejected_logits = all_logits[len_chosen:] if self.aux_loss_enabled: return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, nll_loss, outputs.aux_loss) return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, nll_loss) def get_batch_loss_metrics(self, model, batch: Dict[str, Union[List, torch.LongTensor]], train_eval: Literal['train', 'eval']='train'): metrics = {} forward_output = self.concatenated_forward(model, batch) (policy_chosen_logps, policy_rejected_logps, policy_chosen_logits, policy_rejected_logits, policy_nll_loss) = forward_output[:5] if self.aux_loss_enabled: aux_loss = forward_output[5] (losses, chosen_rewards, rejected_rewards) = self.cpo_loss(policy_chosen_logps, policy_rejected_logps) loss = losses.mean() + self.cpo_alpha * policy_nll_loss 
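# Summary of the objective assembled in the statement above (descriptive comment; beta, cpo_alpha, simpo_gamma and label_smoothing are the CPOConfig fields stored in __init__):
#   loss = mean(losses) + cpo_alpha * nll_loss
# where nll_loss is the cross-entropy of the chosen completions from `concatenated_forward`
# (zero when cpo_alpha == 0) and, with logits = logp_chosen - logp_rejected, the default
# loss_type='sigmoid' gives
#   losses = -logsigmoid(beta * logits) * (1 - label_smoothing) - logsigmoid(-beta * logits) * label_smoothing.
# 'simpo' first subtracts simpo_gamma / beta from logits, 'hinge' uses relu(1 - beta * logits),
# and 'ipo' uses (logits - 1 / (2 * beta)) ** 2, as implemented in `cpo_loss` above.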
reward_accuracies = (chosen_rewards > rejected_rewards).float() prefix = 'eval_' if train_eval == 'eval' else '' metrics[f'{prefix}rewards/chosen'] = chosen_rewards.mean().cpu() metrics[f'{prefix}rewards/rejected'] = rejected_rewards.mean().cpu() metrics[f'{prefix}rewards/accuracies'] = reward_accuracies.mean().cpu() metrics[f'{prefix}rewards/margins'] = (chosen_rewards - rejected_rewards).mean().cpu() metrics[f'{prefix}logps/rejected'] = policy_rejected_logps.detach().mean().cpu() metrics[f'{prefix}logps/chosen'] = policy_chosen_logps.detach().mean().cpu() metrics[f'{prefix}logits/rejected'] = policy_rejected_logits.detach().mean().cpu() metrics[f'{prefix}logits/chosen'] = policy_chosen_logits.detach().mean().cpu() metrics[f'{prefix}nll_loss'] = policy_nll_loss.detach().mean().cpu() if self.aux_loss_enabled: loss += getattr(model.config, 'router_aux_loss_coef', 0.0) * aux_loss return (loss, metrics) def compute_loss(self, model: Union[PreTrainedModel, nn.Module], inputs: Dict[str, Union[torch.Tensor, Any]], return_outputs=False) -> Union[torch.Tensor, Tuple[torch.Tensor, Dict[str, torch.Tensor]]]: if not self.use_dpo_data_collator: warnings.warn('compute_loss is only implemented for DPODataCollatorWithPadding, and you passed a data collator that is different from DPODataCollatorWithPadding - you might see unexpected behavior. Alternatively, you can implement your own compute_loss method if you are using a custom data collator') compute_loss_context_manager = amp.autocast('cuda') if self._peft_has_been_casted_to_bf16 else nullcontext() with compute_loss_context_manager: (loss, metrics) = self.get_batch_loss_metrics(model, inputs, train_eval='train') self.store_metrics(metrics, train_eval='train') if return_outputs: return (loss, metrics) return loss def get_batch_samples(self, model, batch: Dict[str, torch.LongTensor]) -> List[str]: generate_context_manager = amp.autocast('cuda') if self._peft_has_been_casted_to_bf16 else nullcontext() with generate_context_manager: policy_output = model.generate(input_ids=batch['prompt_input_ids'], attention_mask=batch['prompt_attention_mask'], max_length=self.max_length, do_sample=True, pad_token_id=self.tokenizer.pad_token_id) policy_output = pad_to_length(policy_output, self.max_length, self.tokenizer.pad_token_id) policy_output_decoded = self.tokenizer.batch_decode(policy_output, skip_special_tokens=True) return policy_output_decoded def prediction_step(self, model: Union[PreTrainedModel, nn.Module], inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]]=None): if not self.use_dpo_data_collator: warnings.warn('prediction_step is only implemented for DPODataCollatorWithPadding, and you passed a data collator that is different from DPODataCollatorWithPadding - you might see unexpected behavior.
Alternatively, you can implement your own prediction_step method if you are using a custom data collator') if ignore_keys is None: if hasattr(model, 'config'): ignore_keys = getattr(model.config, 'keys_to_ignore_at_inference', []) else: ignore_keys = [] prediction_context_manager = amp.autocast('cuda') if self._peft_has_been_casted_to_bf16 else nullcontext() with torch.no_grad(), prediction_context_manager: (loss, metrics) = self.get_batch_loss_metrics(model, inputs, train_eval='eval') self.store_metrics(metrics, train_eval='eval') if prediction_loss_only: return (loss.detach(), None, None) logits_dict = {'eval_logits/chosen': metrics['eval_logits/chosen'], 'eval_logits/rejected': metrics['eval_logits/rejected']} logits = tuple((v.unsqueeze(dim=0) for (k, v) in logits_dict.items() if k not in ignore_keys)) logits = torch.stack(logits).mean(axis=1).to(self.accelerator.device) labels = torch.zeros(logits.shape[0], device=self.accelerator.device) return (loss.detach(), logits, labels) def store_metrics(self, metrics: Dict[str, float], train_eval: Literal['train', 'eval']='train') -> None: for (key, value) in metrics.items(): self._stored_metrics[train_eval][key].append(value) def evaluation_loop(self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool]=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str='eval') -> EvalLoopOutput: if self.generate_during_eval: num_samples = len(dataloader.dataset) random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size) random_batch_dataset = dataloader.dataset.select(random_indices) random_batch = self.data_collator(random_batch_dataset) random_batch = self._prepare_inputs(random_batch) policy_output_decoded = self.get_batch_samples(self.model, random_batch) self.log({'game_log': wandb.Table(columns=['Prompt', 'Policy'], rows=[[prompt, pol[len(prompt):]] for (prompt, pol) in zip(random_batch['prompt'], policy_output_decoded)])}) self.state.log_history.pop() initial_output = super().evaluation_loop(dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix) return initial_output def log(self, logs: Dict[str, float]) -> None: train_eval = 'train' if 'loss' in logs else 'eval' for (key, metrics) in self._stored_metrics[train_eval].items(): logs[key] = torch.tensor(metrics).mean().item() del self._stored_metrics[train_eval] return super().log(logs) def _shift_right(self, input_ids): if self.decoder_start_token_id is None: raise ValueError('model.config.decoder_start_token_id has to be defined. 
It is usually set to the pad_token_id.') if is_torch_fx_proxy(input_ids): shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), self.decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = self.decoder_start_token_id if self.pad_token_id is None: raise ValueError('model.config.pad_token_id has to be defined.') shifted_input_ids.masked_fill_(shifted_input_ids == -100, self.pad_token_id) return shifted_input_ids @wraps(Trainer.push_to_hub) def push_to_hub(self, commit_message: Optional[str]='End of training', blocking: bool=True, **kwargs) -> str: kwargs = trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs) return super().push_to_hub(commit_message=commit_message, blocking=blocking, **kwargs) # File: trl-main/trl/trainer/ddpo_config.py import os import sys import warnings from dataclasses import dataclass, field from typing import Literal, Optional from ..core import flatten_dict from ..import_utils import is_bitsandbytes_available, is_torchvision_available @dataclass class DDPOConfig: exp_name: str = os.path.basename(sys.argv[0])[:-len('.py')] run_name: str = '' seed: int = 0 log_with: Optional[Literal['wandb', 'tensorboard']] = None tracker_kwargs: dict = field(default_factory=dict) accelerator_kwargs: dict = field(default_factory=dict) project_kwargs: dict = field(default_factory=dict) tracker_project_name: str = 'trl' logdir: str = 'logs' num_epochs: int = 100 save_freq: int = 1 num_checkpoint_limit: int = 5 mixed_precision: str = 'fp16' allow_tf32: bool = True resume_from: str = '' sample_num_steps: int = 50 sample_eta: float = 1.0 sample_guidance_scale: float = 5.0 sample_batch_size: int = 1 sample_num_batches_per_epoch: int = 2 train_batch_size: int = 1 train_use_8bit_adam: bool = False train_learning_rate: float = 0.0003 train_adam_beta1: float = 0.9 train_adam_beta2: float = 0.999 train_adam_weight_decay: float = 0.0001 train_adam_epsilon: float = 1e-08 train_gradient_accumulation_steps: int = 1 train_max_grad_norm: float = 1.0 train_num_inner_epochs: int = 1 train_cfg: bool = True train_adv_clip_max: float = 5.0 train_clip_range: float = 0.0001 train_timestep_fraction: float = 1.0 per_prompt_stat_tracking: bool = False per_prompt_stat_tracking_buffer_size: int = 16 per_prompt_stat_tracking_min_count: int = 16 async_reward_computation: bool = False max_workers: int = 2 negative_prompts: str = '' def to_dict(self): output_dict = {} for (key, value) in self.__dict__.items(): output_dict[key] = value return flatten_dict(output_dict) def __post_init__(self): if self.log_with not in ['wandb', 'tensorboard']: warnings.warn("Accelerator tracking only supports image logging if `log_with` is set to 'wandb' or 'tensorboard'.") if self.log_with == 'wandb' and (not is_torchvision_available()): warnings.warn('Wandb image logging requires torchvision to be installed') if self.train_use_8bit_adam and (not is_bitsandbytes_available()): raise ImportError('You need to install bitsandbytes to use 8bit Adam. 
You can install it with `pip install bitsandbytes`.') # File: trl-main/trl/trainer/ddpo_trainer.py import os import warnings from collections import defaultdict from concurrent import futures from typing import Any, Callable, Optional, Tuple from warnings import warn import torch from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import whoami from ..models import DDPOStableDiffusionPipeline from . import BaseTrainer, DDPOConfig from .utils import PerPromptStatTracker logger = get_logger(__name__) MODEL_CARD_TEMPLATE = '---\nlicense: apache-2.0\nlibrary_name: transformers\ntags:\n- trl\n- ddpo\n- diffusers\n- reinforcement-learning\n- text-to-image\n- stable-diffusion\n---\n\n# {model_name}\n\nThis is a diffusion model that has been fine-tuned with reinforcement learning to\n guide the model outputs according to a value, function, or human feedback. The model can be used for image generation conditioned with text.\n\n' class DDPOTrainer(BaseTrainer): _tag_names = ['trl', 'ddpo'] def __init__(self, config: DDPOConfig, reward_function: Callable[[torch.Tensor, Tuple[str], Tuple[Any]], torch.Tensor], prompt_function: Callable[[], Tuple[str, Any]], sd_pipeline: DDPOStableDiffusionPipeline, image_samples_hook: Optional[Callable[[Any, Any, Any], Any]]=None): if image_samples_hook is None: warn('No image_samples_hook provided; no images will be logged') self.prompt_fn = prompt_function self.reward_fn = reward_function self.config = config self.image_samples_callback = image_samples_hook accelerator_project_config = ProjectConfiguration(**self.config.project_kwargs) if self.config.resume_from: self.config.resume_from = os.path.normpath(os.path.expanduser(self.config.resume_from)) if 'checkpoint_' not in os.path.basename(self.config.resume_from): checkpoints = list(filter(lambda x: 'checkpoint_' in x, os.listdir(self.config.resume_from))) if len(checkpoints) == 0: raise ValueError(f'No checkpoints found in {self.config.resume_from}') checkpoint_numbers = sorted([int(x.split('_')[-1]) for x in checkpoints]) self.config.resume_from = os.path.join(self.config.resume_from, f'checkpoint_{checkpoint_numbers[-1]}') accelerator_project_config.iteration = checkpoint_numbers[-1] + 1 self.num_train_timesteps = int(self.config.sample_num_steps * self.config.train_timestep_fraction) self.accelerator = Accelerator(log_with=self.config.log_with, mixed_precision=self.config.mixed_precision, project_config=accelerator_project_config, gradient_accumulation_steps=self.config.train_gradient_accumulation_steps * self.num_train_timesteps, **self.config.accelerator_kwargs) (is_okay, message) = self._config_check() if not is_okay: raise ValueError(message) is_using_tensorboard = config.log_with is not None and config.log_with == 'tensorboard' if self.accelerator.is_main_process: self.accelerator.init_trackers(self.config.tracker_project_name, config=dict(ddpo_trainer_config=config.to_dict()) if not is_using_tensorboard else config.to_dict(), init_kwargs=self.config.tracker_kwargs) logger.info(f'\n{config}') set_seed(self.config.seed, device_specific=True) self.sd_pipeline = sd_pipeline self.sd_pipeline.set_progress_bar_config(position=1, disable=not self.accelerator.is_local_main_process, leave=False, desc='Timestep', dynamic_ncols=True) if self.accelerator.mixed_precision == 'fp16': inference_dtype = torch.float16 elif self.accelerator.mixed_precision == 'bf16': inference_dtype = torch.bfloat16 else: inference_dtype 
= torch.float32 self.sd_pipeline.vae.to(self.accelerator.device, dtype=inference_dtype) self.sd_pipeline.text_encoder.to(self.accelerator.device, dtype=inference_dtype) self.sd_pipeline.unet.to(self.accelerator.device, dtype=inference_dtype) trainable_layers = self.sd_pipeline.get_trainable_layers() self.accelerator.register_save_state_pre_hook(self._save_model_hook) self.accelerator.register_load_state_pre_hook(self._load_model_hook) if self.config.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True self.optimizer = self._setup_optimizer(trainable_layers.parameters() if not isinstance(trainable_layers, list) else trainable_layers) self.neg_prompt_embed = self.sd_pipeline.text_encoder(self.sd_pipeline.tokenizer([''] if self.config.negative_prompts is None else self.config.negative_prompts, return_tensors='pt', padding='max_length', truncation=True, max_length=self.sd_pipeline.tokenizer.model_max_length).input_ids.to(self.accelerator.device))[0] if config.per_prompt_stat_tracking: self.stat_tracker = PerPromptStatTracker(config.per_prompt_stat_tracking_buffer_size, config.per_prompt_stat_tracking_min_count) self.autocast = self.sd_pipeline.autocast or self.accelerator.autocast if hasattr(self.sd_pipeline, 'use_lora') and self.sd_pipeline.use_lora: (unet, self.optimizer) = self.accelerator.prepare(trainable_layers, self.optimizer) self.trainable_layers = list(filter(lambda p: p.requires_grad, unet.parameters())) else: (self.trainable_layers, self.optimizer) = self.accelerator.prepare(trainable_layers, self.optimizer) if self.config.async_reward_computation: self.executor = futures.ThreadPoolExecutor(max_workers=config.max_workers) if config.resume_from: logger.info(f'Resuming from {config.resume_from}') self.accelerator.load_state(config.resume_from) self.first_epoch = int(config.resume_from.split('_')[-1]) + 1 else: self.first_epoch = 0 def compute_rewards(self, prompt_image_pairs, is_async=False): if not is_async: rewards = [] for (images, prompts, prompt_metadata) in prompt_image_pairs: (reward, reward_metadata) = self.reward_fn(images, prompts, prompt_metadata) rewards.append((torch.as_tensor(reward, device=self.accelerator.device), reward_metadata)) else: rewards = self.executor.map(lambda x: self.reward_fn(*x), prompt_image_pairs) rewards = [(torch.as_tensor(reward.result(), device=self.accelerator.device), reward_metadata.result()) for (reward, reward_metadata) in rewards] return zip(*rewards) def step(self, epoch: int, global_step: int): (samples, prompt_image_data) = self._generate_samples(iterations=self.config.sample_num_batches_per_epoch, batch_size=self.config.sample_batch_size) samples = {k: torch.cat([s[k] for s in samples]) for k in samples[0].keys()} (rewards, rewards_metadata) = self.compute_rewards(prompt_image_data, is_async=self.config.async_reward_computation) for (i, image_data) in enumerate(prompt_image_data): image_data.extend([rewards[i], rewards_metadata[i]]) if self.image_samples_callback is not None: self.image_samples_callback(prompt_image_data, global_step, self.accelerator.trackers[0]) rewards = torch.cat(rewards) rewards = self.accelerator.gather(rewards).cpu().numpy() self.accelerator.log({'reward': rewards, 'epoch': epoch, 'reward_mean': rewards.mean(), 'reward_std': rewards.std()}, step=global_step) if self.config.per_prompt_stat_tracking: prompt_ids = self.accelerator.gather(samples['prompt_ids']).cpu().numpy() prompts = self.sd_pipeline.tokenizer.batch_decode(prompt_ids, skip_special_tokens=True) advantages = self.stat_tracker.update(prompts, 
rewards) else: advantages = (rewards - rewards.mean()) / (rewards.std() + 1e-08) samples['advantages'] = torch.as_tensor(advantages).reshape(self.accelerator.num_processes, -1)[self.accelerator.process_index].to(self.accelerator.device) del samples['prompt_ids'] (total_batch_size, num_timesteps) = samples['timesteps'].shape for inner_epoch in range(self.config.train_num_inner_epochs): perm = torch.randperm(total_batch_size, device=self.accelerator.device) samples = {k: v[perm] for (k, v) in samples.items()} perms = torch.stack([torch.randperm(num_timesteps, device=self.accelerator.device) for _ in range(total_batch_size)]) for key in ['timesteps', 'latents', 'next_latents', 'log_probs']: samples[key] = samples[key][torch.arange(total_batch_size, device=self.accelerator.device)[:, None], perms] original_keys = samples.keys() original_values = samples.values() reshaped_values = [v.reshape(-1, self.config.train_batch_size, *v.shape[1:]) for v in original_values] transposed_values = zip(*reshaped_values) samples_batched = [dict(zip(original_keys, row_values)) for row_values in transposed_values] self.sd_pipeline.unet.train() global_step = self._train_batched_samples(inner_epoch, epoch, global_step, samples_batched) if not self.accelerator.sync_gradients: raise ValueError('Optimization step should have been performed by this point. Please check calculated gradient accumulation settings.') if epoch != 0 and epoch % self.config.save_freq == 0 and self.accelerator.is_main_process: self.accelerator.save_state() return global_step def calculate_loss(self, latents, timesteps, next_latents, log_probs, advantages, embeds): with self.autocast(): if self.config.train_cfg: noise_pred = self.sd_pipeline.unet(torch.cat([latents] * 2), torch.cat([timesteps] * 2), embeds).sample (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.config.sample_guidance_scale * (noise_pred_text - noise_pred_uncond) else: noise_pred = self.sd_pipeline.unet(latents, timesteps, embeds).sample scheduler_step_output = self.sd_pipeline.scheduler_step(noise_pred, timesteps, latents, eta=self.config.sample_eta, prev_sample=next_latents) log_prob = scheduler_step_output.log_probs advantages = torch.clamp(advantages, -self.config.train_adv_clip_max, self.config.train_adv_clip_max) ratio = torch.exp(log_prob - log_probs) loss = self.loss(advantages, self.config.train_clip_range, ratio) approx_kl = 0.5 * torch.mean((log_prob - log_probs) ** 2) clipfrac = torch.mean((torch.abs(ratio - 1.0) > self.config.train_clip_range).float()) return (loss, approx_kl, clipfrac) def loss(self, advantages: torch.Tensor, clip_range: float, ratio: torch.Tensor): unclipped_loss = -advantages * ratio clipped_loss = -advantages * torch.clamp(ratio, 1.0 - clip_range, 1.0 + clip_range) return torch.mean(torch.maximum(unclipped_loss, clipped_loss)) def _setup_optimizer(self, trainable_layers_parameters): if self.config.train_use_8bit_adam: import bitsandbytes optimizer_cls = bitsandbytes.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW return optimizer_cls(trainable_layers_parameters, lr=self.config.train_learning_rate, betas=(self.config.train_adam_beta1, self.config.train_adam_beta2), weight_decay=self.config.train_adam_weight_decay, eps=self.config.train_adam_epsilon) def _save_model_hook(self, models, weights, output_dir): self.sd_pipeline.save_checkpoint(models, weights, output_dir) weights.pop() def _load_model_hook(self, models, input_dir): self.sd_pipeline.load_checkpoint(models, input_dir) models.pop() 
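# Usage sketch (illustrative comment only, not part of the trainer): `my_prompt_fn`, `my_reward_fn`
# and the checkpoint name below are user-supplied placeholders, and DefaultDDPOStableDiffusionPipeline
# is assumed to be exported by trl; DDPOTrainer itself only needs the four positional arguments shown.
#
#   import torch
#   from trl import DDPOConfig, DDPOTrainer, DefaultDDPOStableDiffusionPipeline
#
#   def my_prompt_fn():
#       return 'a photo of a cat', {}  # (prompt, prompt_metadata)
#
#   def my_reward_fn(images, prompts, prompt_metadata):
#       return torch.ones(len(images)), {}  # (rewards, reward_metadata)
#
#   pipeline = DefaultDDPOStableDiffusionPipeline('runwayml/stable-diffusion-v1-5')
#   trainer = DDPOTrainer(DDPOConfig(), my_reward_fn, my_prompt_fn, pipeline)
#   trainer.train()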
def _generate_samples(self, iterations, batch_size): samples = [] prompt_image_pairs = [] self.sd_pipeline.unet.eval() sample_neg_prompt_embeds = self.neg_prompt_embed.repeat(batch_size, 1, 1) for _ in range(iterations): (prompts, prompt_metadata) = zip(*[self.prompt_fn() for _ in range(batch_size)]) prompt_ids = self.sd_pipeline.tokenizer(prompts, return_tensors='pt', padding='max_length', truncation=True, max_length=self.sd_pipeline.tokenizer.model_max_length).input_ids.to(self.accelerator.device) prompt_embeds = self.sd_pipeline.text_encoder(prompt_ids)[0] with self.autocast(): sd_output = self.sd_pipeline(prompt_embeds=prompt_embeds, negative_prompt_embeds=sample_neg_prompt_embeds, num_inference_steps=self.config.sample_num_steps, guidance_scale=self.config.sample_guidance_scale, eta=self.config.sample_eta, output_type='pt') images = sd_output.images latents = sd_output.latents log_probs = sd_output.log_probs latents = torch.stack(latents, dim=1) log_probs = torch.stack(log_probs, dim=1) timesteps = self.sd_pipeline.scheduler.timesteps.repeat(batch_size, 1) samples.append({'prompt_ids': prompt_ids, 'prompt_embeds': prompt_embeds, 'timesteps': timesteps, 'latents': latents[:, :-1], 'next_latents': latents[:, 1:], 'log_probs': log_probs, 'negative_prompt_embeds': sample_neg_prompt_embeds}) prompt_image_pairs.append([images, prompts, prompt_metadata]) return (samples, prompt_image_pairs) def _train_batched_samples(self, inner_epoch, epoch, global_step, batched_samples): info = defaultdict(list) for (_i, sample) in enumerate(batched_samples): if self.config.train_cfg: embeds = torch.cat([sample['negative_prompt_embeds'], sample['prompt_embeds']]) else: embeds = sample['prompt_embeds'] for j in range(self.num_train_timesteps): with self.accelerator.accumulate(self.sd_pipeline.unet): (loss, approx_kl, clipfrac) = self.calculate_loss(sample['latents'][:, j], sample['timesteps'][:, j], sample['next_latents'][:, j], sample['log_probs'][:, j], sample['advantages'], embeds) info['approx_kl'].append(approx_kl) info['clipfrac'].append(clipfrac) info['loss'].append(loss) self.accelerator.backward(loss) if self.accelerator.sync_gradients: self.accelerator.clip_grad_norm_(self.trainable_layers.parameters() if not isinstance(self.trainable_layers, list) else self.trainable_layers, self.config.train_max_grad_norm) self.optimizer.step() self.optimizer.zero_grad() if self.accelerator.sync_gradients: info = {k: torch.mean(torch.stack(v)) for (k, v) in info.items()} info = self.accelerator.reduce(info, reduction='mean') info.update({'epoch': epoch, 'inner_epoch': inner_epoch}) self.accelerator.log(info, step=global_step) global_step += 1 info = defaultdict(list) return global_step def _config_check(self) -> Tuple[bool, str]: samples_per_epoch = self.config.sample_batch_size * self.accelerator.num_processes * self.config.sample_num_batches_per_epoch total_train_batch_size = self.config.train_batch_size * self.accelerator.num_processes * self.config.train_gradient_accumulation_steps if not self.config.sample_batch_size >= self.config.train_batch_size: return (False, f'Sample batch size ({self.config.sample_batch_size}) must be greater than or equal to the train batch size ({self.config.train_batch_size})') if not self.config.sample_batch_size % self.config.train_batch_size == 0: return (False, f'Sample batch size ({self.config.sample_batch_size}) must be divisible by the train batch size ({self.config.train_batch_size})') if not samples_per_epoch % total_train_batch_size == 0: return (False, f'Number of 
samples per epoch ({samples_per_epoch}) must be divisible by the total train batch size ({total_train_batch_size})') return (True, '') def train(self, epochs: Optional[int]=None): global_step = 0 if epochs is None: epochs = self.config.num_epochs for epoch in range(self.first_epoch, epochs): global_step = self.step(epoch, global_step) def create_model_card(self, path: str, model_name: Optional[str]='TRL DDPO Model') -> None: try: user = whoami()['name'] except Exception: warnings.warn('Cannot retrieve user information assuming you are running in offline mode.') return if not os.path.exists(path): os.makedirs(path) model_card_content = MODEL_CARD_TEMPLATE.format(model_name=model_name, model_id=f'{user}/{path}') with open(os.path.join(path, 'README.md'), 'w', encoding='utf-8') as f: f.write(model_card_content) def _save_pretrained(self, save_directory): self.sd_pipeline.save_pretrained(save_directory) self.create_model_card(save_directory) # File: trl-main/trl/trainer/dpo_config.py import warnings from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Literal, Optional from transformers import TrainingArguments class FDivergenceType(Enum): REVERSE_KL = 'reverse_kl' JS_DIVERGENCE = 'js_divergence' ALPHA_DIVERGENCE = 'alpha_divergence' class FDivergenceConstants: ALPHA_DIVERGENCE_COEF_KEY = 'alpha_divergence_coef' ALPHA_DIVERGENCE_COEF_DEFAULT = 1.0 @dataclass class DPOConfig(TrainingArguments): beta: float = 0.1 label_smoothing: float = 0.0 loss_type: Literal['sigmoid', 'hinge', 'ipo', 'exo_pair', 'nca_pair', 'robust', 'bco_pair', 'sppo_hard', 'aot', 'aot_pair', 'apo_zero', 'apo_down'] = 'sigmoid' label_pad_token_id: int = -100 padding_value: Optional[int] = None truncation_mode: str = 'keep_end' max_length: Optional[int] = None max_prompt_length: Optional[int] = None max_target_length: Optional[int] = None max_completion_length: Optional[int] = None is_encoder_decoder: Optional[bool] = None disable_dropout: bool = True generate_during_eval: bool = False precompute_ref_log_probs: bool = False dataset_num_proc: Optional[int] = None model_init_kwargs: Optional[Dict[str, Any]] = None ref_model_init_kwargs: Optional[Dict[str, Any]] = None model_adapter_name: Optional[str] = None ref_adapter_name: Optional[str] = None reference_free: bool = False force_use_ref_model: bool = False f_divergence_type: FDivergenceType = FDivergenceType.REVERSE_KL f_alpha_divergence_coef: float = 1.0 sync_ref_model: bool = False ref_model_mixup_alpha: float = 0.9 ref_model_sync_steps: int = 64 rpo_alpha: Optional[float] = None def __post_init__(self): if self.max_target_length is not None: warnings.warn('The `max_target_length` argument is deprecated in favor of `max_completion_length` and will be removed in a future version.', FutureWarning) if self.max_completion_length is None: self.max_completion_length = self.max_target_length return super().__post_init__() # File: trl-main/trl/trainer/dpo_trainer.py import inspect import random import warnings from collections import defaultdict from contextlib import contextmanager, nullcontext from copy import deepcopy from functools import wraps from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union import torch import torch.amp as amp import torch.nn as nn import torch.nn.functional as F from accelerate import PartialState from accelerate.utils import is_deepspeed_available, tqdm from datasets import Dataset from huggingface_hub.utils._deprecation import _deprecate_arguments from torch.utils.data import DataLoader from 
transformers import AutoModelForCausalLM, DataCollator, PreTrainedModel, PreTrainedTokenizerBase, Trainer from transformers.models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES from transformers.trainer_callback import TrainerCallback from transformers.trainer_utils import EvalLoopOutput from ..import_utils import is_peft_available, is_wandb_available from ..models import PreTrainedModelWrapper, create_reference_model from .callbacks import SyncRefModelCallback from .dpo_config import DPOConfig, FDivergenceConstants, FDivergenceType from .utils import DPODataCollatorWithPadding, RunningMoments, add_bos_token_if_needed, add_eos_token_if_needed, cap_exp, disable_dropout_in_model, pad_to_length, peft_module_casting_to_bf16, trl_sanitze_kwargs_for_tagging if is_peft_available(): from peft import PeftModel, get_peft_model, prepare_model_for_kbit_training if is_wandb_available(): import wandb if is_deepspeed_available(): import deepspeed def _tokenize(features: Dict[str, List], tokenizer: PreTrainedTokenizerBase, args: DPOConfig, processor: Optional[Callable]=None, model: Optional[PreTrainedModel]=None) -> Dict[str, List]: batch = defaultdict(list) if model is None: prompt = features['prompt'] images = features.get('images', [None] * len(features['prompt'])) prompt_tokens = _process_prompt(prompt, processor, tokenizer, images) chosen_tokens = _process_answer(prompt, features['chosen'], processor, tokenizer, images) rejected_tokens = _process_answer(prompt, features['rejected'], processor, tokenizer, images) prompt_len_input_ids = _adjust_prompt_length(prompt_tokens, chosen_tokens, rejected_tokens) (prompt_tokens, chosen_tokens, rejected_tokens) = _add_special_tokens(tokenizer, prompt_len_input_ids, prompt_tokens, chosen_tokens, rejected_tokens) _truncate_tokens(chosen_tokens, rejected_tokens, prompt_tokens, args) _build_sequence_tokens(batch, chosen_tokens, args, 'chosen') _build_sequence_tokens(batch, rejected_tokens, args, 'rejected') _append_prompt_tokens_to_batch(batch, prompt_tokens) else: _tokenize_encoder_decoder(batch, tokenizer, features['prompt'], features['chosen'], features['rejected'], args, model) return dict(batch) def _process_prompt(prompts: List[str], processor: Optional[Callable], tokenizer: PreTrainedTokenizerBase, images: List[Optional[Any]]) -> List[Dict[str, List[int]]]: if processor: processor_kwargs = {'add_special_tokens': False} if 'add_special_tokens' in inspect.signature(processor).parameters else {} prompt_tokens = [] for (prompt, image) in zip(prompts, images): tokens = processor(prompt, images=image, **processor_kwargs) tokens = {k: v[0] for (k, v) in tokens.items()} if not isinstance(tokens['input_ids'], list): tokens['input_ids'] = tokens['input_ids'].tolist() tokens['attention_mask'] = tokens['attention_mask'].tolist() prompt_tokens.append(tokens) else: prompt_tokens = [tokenizer(prompt, add_special_tokens=False) for prompt in prompts] return [{f'prompt_{k}': v for (k, v) in tokens.items()} for tokens in prompt_tokens] def _process_answer(prompts: List[str], answers: List[str], processor: Optional[Callable], tokenizer: PreTrainedTokenizerBase, images: List[Optional[Any]]) -> List[Dict[str, Any]]: return [_build_tokenized_answer(prompt, answer, image, processor=processor, tokenizer=tokenizer) for (prompt, answer, image) in zip(prompts, answers, images)] def _adjust_prompt_length(prompt_tokens: List[Dict[str, List[int]]], chosen_tokens: List[Dict[str, List[int]]], rejected_tokens: List[Dict[str, List[int]]]) -> List[int]: prompt_len_input_ids = 
[] for (p_tokens, c_tokens, r_tokens) in zip(prompt_tokens, chosen_tokens, rejected_tokens): c_len = len(c_tokens['prompt_input_ids']) r_len = len(r_tokens['prompt_input_ids']) min_len = min(c_len, r_len) for (k, v) in p_tokens.items(): p_tokens[k] = v[:min_len] num_diff_tokens = sum([a != b for (a, b) in zip(c_tokens['prompt_input_ids'], r_tokens['prompt_input_ids'])]) num_diff_len = abs(c_len - r_len) if num_diff_tokens > 1 or num_diff_len > 1: raise ValueError('Chosen and rejected prompt_input_ids might only differ on the last token due to tokenizer merge ops.') prompt_len_input_ids.append(min_len) return prompt_len_input_ids def _add_special_tokens(tokenizer: PreTrainedTokenizerBase, prompt_len_input_ids: List[int], prompt_tokens: List[Dict[str, List[int]]], chosen_tokens: List[Dict[str, List[int]]], rejected_tokens: List[Dict[str, List[int]]]) -> Tuple[List[Dict[str, List[int]]], List[Dict[str, List[int]]], List[Dict[str, List[int]]]]: for i in range(len(prompt_tokens)): (prompt_tokens[i], chosen_tokens[i], rejected_tokens[i]) = add_bos_token_if_needed(tokenizer.bos_token_id, prompt_len_input_ids[i], prompt_tokens[i], len(chosen_tokens[i]['prompt_input_ids']), chosen_tokens[i], len(rejected_tokens[i]['prompt_input_ids']), rejected_tokens[i]) (chosen_tokens[i], rejected_tokens[i]) = add_eos_token_if_needed(tokenizer.eos_token_id, chosen_tokens[i], rejected_tokens[i]) return (prompt_tokens, chosen_tokens, rejected_tokens) def _truncate_tokens(chosen_tokens: List[Dict[str, List[int]]], rejected_tokens: List[Dict[str, List[int]]], prompt_tokens: List[Dict[str, List[int]]], args: DPOConfig) -> None: if args.truncation_mode not in ['keep_start', 'keep_end']: raise ValueError(f'Invalid truncation mode: {args.truncation_mode}') for (c_tokens, r_tokens, p_tokens) in zip(chosen_tokens, rejected_tokens, prompt_tokens): longer_response_length = max(len(c_tokens['input_ids']), len(r_tokens['input_ids'])) for answer_tokens in [c_tokens, r_tokens, p_tokens]: if len(answer_tokens['prompt_input_ids']) + longer_response_length > args.max_length: if args.truncation_mode == 'keep_start': for k in ['prompt_input_ids', 'prompt_attention_mask']: answer_tokens[k] = answer_tokens[k][:args.max_prompt_length] elif args.truncation_mode == 'keep_end': for k in ['prompt_input_ids', 'prompt_attention_mask']: answer_tokens[k] = answer_tokens[k][-args.max_prompt_length:] for answer_tokens in [c_tokens, r_tokens]: if len(answer_tokens['prompt_input_ids']) + longer_response_length > args.max_length: for k in ['input_ids', 'attention_mask']: answer_tokens[k] = answer_tokens[k][:args.max_length - args.max_prompt_length] def _build_sequence_tokens(batch: Dict[str, List[int]], tokens: List[Dict[str, List[int]]], args: DPOConfig, prefix: str) -> None: for token in tokens: sequence_tokens = {f'{prefix}_{k}': token[f'prompt_{k}'] + token[k] for k in ['input_ids', 'attention_mask']} sequence_tokens[f'{prefix}_labels'] = sequence_tokens[f'{prefix}_input_ids'][:] sequence_tokens[f'{prefix}_labels'][:len(token['prompt_input_ids'])] = [args.label_pad_token_id] * len(token['prompt_input_ids']) for (k, v) in sequence_tokens.items(): batch[k].append(v) def _append_prompt_tokens_to_batch(batch: Dict[str, List[int]], prompt_tokens: List[Dict[str, List[int]]]) -> None: for p_tokens in prompt_tokens: for (k, v) in p_tokens.items(): batch[k].append(v) def _tokenize_encoder_decoder(batch: Dict[str, List[int]], tokenizer: PreTrainedTokenizerBase, prompt: List[str], chosen: List[str], rejected: List[str], args: DPOConfig, model: 
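# --- Toy illustration (assumed values) of the truncation policy in _truncate_tokens
# above: the prompt is cut to max_prompt_length first (keep_start or keep_end), and only
# if the truncated prompt plus the longest answer still exceeds max_length are the
# answers themselves cut.
max_length, max_prompt_length = 8, 3
prompt_ids = list(range(6))                    # 6 prompt tokens
chosen_ids = [10, 11, 12, 13, 14, 15, 16]      # 7 answer tokens; 6 + 7 > 8
prompt_ids = prompt_ids[-max_prompt_length:]   # keep_end -> [3, 4, 5]
if len(prompt_ids) + len(chosen_ids) > max_length:
    chosen_ids = chosen_ids[: max_length - max_prompt_length]   # 5 answer tokens kept
assert len(prompt_ids) + len(chosen_ids) == max_length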
Optional[PreTrainedModel]) -> None: chosen_tokens = tokenizer(chosen, truncation=True, max_length=args.max_completion_length, add_special_tokens=True) rejected_tokens = tokenizer(rejected, truncation=True, max_length=args.max_completion_length, add_special_tokens=True) prompt_tokens = tokenizer(prompt, truncation=True, max_length=args.max_prompt_length, add_special_tokens=True) batch['chosen_labels'] = chosen_tokens['input_ids'] batch['rejected_labels'] = rejected_tokens['input_ids'] batch['prompt_input_ids'] = prompt_tokens['input_ids'] batch['prompt_attention_mask'] = prompt_tokens['attention_mask'] if model is not None and hasattr(model, 'prepare_decoder_input_ids_from_labels'): max_length = max((len(seq) for seq in batch['chosen_labels'] + batch['rejected_labels'])) batch['chosen_labels'] = [seq + [tokenizer.pad_token_id] * (max_length - len(seq)) for seq in batch['chosen_labels']] batch['rejected_labels'] = [seq + [tokenizer.pad_token_id] * (max_length - len(seq)) for seq in batch['rejected_labels']] batch['rejected_decoder_input_ids'] = model.prepare_decoder_input_ids_from_labels(labels=torch.tensor(batch['rejected_labels'])) batch['chosen_decoder_input_ids'] = model.prepare_decoder_input_ids_from_labels(labels=torch.tensor(batch['chosen_labels'])) def _build_tokenized_answer(prompt: str, answer: str, images: Optional[List[Any]]=None, processor: Optional[Callable]=None, tokenizer: Optional[PreTrainedTokenizerBase]=None) -> Dict[str, Any]: def tokenize(text, images=None): if processor: processor_kwargs = {'add_special_tokens': False} if 'add_special_tokens' in inspect.signature(processor).parameters else {} tokenized = processor(text, images=images, **processor_kwargs) tokenized = {k: v[0] for (k, v) in tokenized.items()} if not isinstance(tokenized['input_ids'], list): tokenized['input_ids'] = tokenized['input_ids'].tolist() tokenized['attention_mask'] = tokenized['attention_mask'].tolist() else: tokenized = tokenizer(text, add_special_tokens=False) return tokenized full_tokenized = tokenize(prompt + answer, images) prompt_tokenized = tokenize(prompt, images) prompt_input_ids = prompt_tokenized['input_ids'] answer_input_ids = full_tokenized['input_ids'][len(prompt_input_ids):] answer_attention_mask = full_tokenized['attention_mask'][len(prompt_input_ids):] if len(full_tokenized['input_ids']) != len(prompt_input_ids + answer_input_ids): raise ValueError('Prompt input ids and answer input ids should have the same length.') response_token_ids_start_idx = len(prompt_input_ids) if prompt_input_ids != full_tokenized['input_ids'][:response_token_ids_start_idx]: response_token_ids_start_idx -= 1 prompt_input_ids = full_tokenized['input_ids'][:response_token_ids_start_idx] prompt_attention_mask = full_tokenized['attention_mask'][:response_token_ids_start_idx] if len(prompt_input_ids) != len(prompt_attention_mask): raise ValueError('Prompt input ids and attention mask should have the same length.') return_dict = {'prompt_input_ids': prompt_input_ids, 'prompt_attention_mask': prompt_attention_mask, 'input_ids': answer_input_ids, 'attention_mask': answer_attention_mask} if 'pixel_values' in full_tokenized: return_dict['prompt_pixel_values'] = full_tokenized['pixel_values'] if 'pixel_attention_mask' in full_tokenized: return_dict['prompt_pixel_attention_mask'] = full_tokenized['pixel_attention_mask'] return return_dict class DPOTrainer(Trainer): _tag_names = ['trl', 'dpo'] @_deprecate_arguments(version='1.0.0', deprecated_args=['beta', 'label_smoothing', 'loss_type', 'label_pad_token_id', 
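# --- Illustrative sketch of the prompt/answer split performed by _build_tokenized_answer
# above. Standalone toy example; "gpt2" is an arbitrary small tokenizer choice.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
prompt, answer = "Question: what is 2 + 2?\nAnswer:", " 4"

full = tok(prompt + answer, add_special_tokens=False)
prompt_only = tok(prompt, add_special_tokens=False)

# Answer tokens are whatever follows the prompt tokens in the full encoding; if the
# tokenizer merged tokens at the boundary, the code above moves the split point back
# by one so that prompt_input_ids stays a strict prefix of the full input_ids.
answer_input_ids = full["input_ids"][len(prompt_only["input_ids"]):]
answer_attention_mask = full["attention_mask"][len(prompt_only["input_ids"]):]
print(prompt_only["input_ids"], answer_input_ids)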
'padding_value', 'truncation_mode', 'max_length', 'max_prompt_length', 'max_target_length', 'is_encoder_decoder', 'disable_dropout', 'generate_during_eval', 'precompute_ref_log_probs', 'dataset_num_proc', 'model_init_kwargs', 'ref_model_init_kwargs', 'model_adapter_name', 'ref_adapter_name', 'reference_free', 'force_use_ref_model'], custom_message='Deprecated positional argument(s) used in DPOTrainer, please use the DPOConfig to set these arguments instead.') def __init__(self, model: Optional[Union[PreTrainedModel, nn.Module, str]]=None, ref_model: Optional[Union[PreTrainedModel, nn.Module, str]]=None, beta: float=0.1, label_smoothing: float=0, loss_type: Optional[str]=None, args: Optional[DPOConfig]=None, data_collator: Optional[DataCollator]=None, label_pad_token_id: int=-100, padding_value: Optional[int]=None, truncation_mode: str='keep_end', train_dataset: Optional[Dataset]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]]=None, tokenizer: Optional[PreTrainedTokenizerBase]=None, model_init: Optional[Callable[[], PreTrainedModel]]=None, callbacks: Optional[List[TrainerCallback]]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None, max_length: Optional[int]=None, max_prompt_length: Optional[int]=None, max_target_length: Optional[int]=None, peft_config: Optional[Dict]=None, is_encoder_decoder: Optional[bool]=None, disable_dropout: bool=True, generate_during_eval: bool=False, compute_metrics: Optional[Callable[[EvalLoopOutput], Dict]]=None, precompute_ref_log_probs: bool=False, dataset_num_proc: Optional[int]=None, model_init_kwargs: Optional[Dict]=None, ref_model_init_kwargs: Optional[Dict]=None, model_adapter_name: Optional[str]=None, ref_adapter_name: Optional[str]=None, reference_free: bool=False, force_use_ref_model: bool=False): if model_init_kwargs is not None: warnings.warn('You passed `model_init_kwargs` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.model_init_kwargs = model_init_kwargs if args.model_init_kwargs is None: model_init_kwargs = {} elif not isinstance(model, str): raise ValueError('You passed model_init_kwargs to the DPOTrainer/DPOConfig, but your model is already instantiated.') else: model_init_kwargs = args.model_init_kwargs torch_dtype = model_init_kwargs.get('torch_dtype') if torch_dtype is not None: if isinstance(torch_dtype, str) and torch_dtype != 'auto': torch_dtype = getattr(torch, torch_dtype) if torch_dtype != 'auto' and (not isinstance(torch_dtype, torch.dtype)): raise ValueError(f"Invalid `torch_dtype` passed to the DPOConfig. 
Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}.") model_init_kwargs['torch_dtype'] = torch_dtype if ref_model_init_kwargs is not None: warnings.warn('You passed `ref_model_init_kwargs` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.ref_model_init_kwargs = ref_model_init_kwargs if args.ref_model_init_kwargs is None: ref_model_init_kwargs = {} elif not isinstance(ref_model, str): raise ValueError('You passed ref_model_init_kwargs to the DPOTrainer/DPOConfig, but your ref_model is already instantiated.') else: ref_model_init_kwargs = args.ref_model_init_kwargs torch_dtype = ref_model_init_kwargs.get('torch_dtype') if torch_dtype is not None: if isinstance(torch_dtype, str) and torch_dtype != 'auto': torch_dtype = getattr(torch, torch_dtype) if torch_dtype != 'auto' and (not isinstance(torch_dtype, torch.dtype)): raise ValueError(f"Invalid `torch_dtype` passed to the DPOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}.") ref_model_init_kwargs['torch_dtype'] = torch_dtype if isinstance(model, str): warnings.warn('You passed a model_id to the DPOTrainer. This will automatically create an `AutoModelForCausalLM` or a `PeftModel` (if you passed a `peft_config`) for you.') model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs) if isinstance(ref_model, str): warnings.warn('You passed a ref model_id to the DPOTrainer. This will automatically create an `AutoModelForCausalLM`') ref_model = AutoModelForCausalLM.from_pretrained(ref_model, **ref_model_init_kwargs) self._peft_has_been_casted_to_bf16 = False if force_use_ref_model: warnings.warn('You passed `force_use_ref_model` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.force_use_ref_model = force_use_ref_model if not is_peft_available() and peft_config is not None: raise ValueError("PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models") elif is_peft_available() and peft_config is not None: if isinstance(model, PeftModel): model = model.merge_and_unload() if ref_model is not None and (not args.force_use_ref_model): raise ValueError("You passed both a ref_model and a peft_config. For training PEFT adapters with DPO there is no need to pass a reference model. Please pass `ref_model=None` in case you want to train PEFT adapters, or pass a ref_model with `force_use_ref_model=True` in DPOTrainer's init. 
if you want to use a different ref_model.") if getattr(model, 'is_loaded_in_8bit', False) or getattr(model, 'is_loaded_in_4bit', False): _support_gc_kwargs = hasattr(args, 'gradient_checkpointing_kwargs') and 'gradient_checkpointing_kwargs' in list(inspect.signature(prepare_model_for_kbit_training).parameters) prepare_model_kwargs = {'use_gradient_checkpointing': args.gradient_checkpointing} if _support_gc_kwargs: prepare_model_kwargs['gradient_checkpointing_kwargs'] = args.gradient_checkpointing_kwargs model = prepare_model_for_kbit_training(model, **prepare_model_kwargs) elif getattr(args, 'gradient_checkpointing', False): if hasattr(model, 'enable_input_require_grads'): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) model = get_peft_model(model, peft_config) if args.bf16 and getattr(model, 'is_loaded_in_4bit', False): peft_module_casting_to_bf16(model) self._peft_has_been_casted_to_bf16 = True elif getattr(args, 'gradient_checkpointing', False): if hasattr(model, 'enable_input_require_grads'): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) if generate_during_eval: warnings.warn('You passed `generate_during_eval` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.generate_during_eval = generate_during_eval if args.generate_during_eval and (not is_wandb_available()): raise ValueError('`generate_during_eval=True` requires Weights and Biases to be installed. Please install `wandb` to resolve.') if is_encoder_decoder is not None: warnings.warn('You passed `is_encoder_decoder` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.is_encoder_decoder = is_encoder_decoder if model is not None: self.is_encoder_decoder = model.config.is_encoder_decoder elif args.is_encoder_decoder is None: raise ValueError('When no model is provided, you need to pass the parameter is_encoder_decoder to the DPOTrainer/DPOConfig.') else: self.is_encoder_decoder = args.is_encoder_decoder if model is not None: self.is_vision_model = model.config.model_type in MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES.keys() else: warnings.warn('No model provided, cannot determine if it is a vision model. 
Setting is_vision_model to False.') self.is_vision_model = False if self.is_vision_model: self.processor = tokenizer self.tokenizer = tokenizer.tokenizer else: self.tokenizer = tokenizer self.is_peft_model = is_peft_available() and isinstance(model, PeftModel) if model_adapter_name is not None: warnings.warn('You passed `model_adapter_name` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.model_adapter_name = model_adapter_name self.model_adapter_name = args.model_adapter_name if ref_adapter_name is not None: warnings.warn('You passed `ref_adapter_name` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.ref_adapter_name = ref_adapter_name self.ref_adapter_name = args.ref_adapter_name if reference_free: warnings.warn('You passed `reference_free` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.reference_free = reference_free self.reference_free = args.reference_free if precompute_ref_log_probs: warnings.warn('You passed `precompute_ref_log_probs` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.precompute_ref_log_probs = precompute_ref_log_probs if ref_model: self.ref_model = ref_model elif self.is_peft_model or args.precompute_ref_log_probs: self.ref_model = None else: self.ref_model = create_reference_model(model) if tokenizer is None: raise ValueError('tokenizer must be specified to tokenize a DPO dataset.') if max_length is not None: warnings.warn('You passed `max_length` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.max_length = max_length if args.max_length is None: warnings.warn("`max_length` is not set in the DPOConfig's init it will default to `512` by default, but you should do it yourself in the future.", UserWarning) args.max_length = 512 if max_prompt_length is not None: warnings.warn('You passed `max_prompt_length` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.max_prompt_length = max_prompt_length if args.max_prompt_length is None: warnings.warn("`max_prompt_length` is not set in the DPOConfig's init it will default to `128` by default, but you should do it yourself in the future.", UserWarning) args.max_prompt_length = 128 if max_target_length is not None: warnings.warn('You passed `max_target_length` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.max_completion_length = max_target_length if args.max_completion_length is None and self.is_encoder_decoder: warnings.warn("When using an encoder decoder architecture, you should set `max_completion_length` in the DPOConfig's init it will default to `128` by default, but you should do it yourself in the future.", UserWarning) args.max_completion_length = 128 if label_pad_token_id != -100: warnings.warn('You passed `label_pad_token_id` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.label_pad_token_id = label_pad_token_id if data_collator is None: data_collator = DPODataCollatorWithPadding(pad_token_id=self.tokenizer.pad_token_id, label_pad_token_id=args.label_pad_token_id, is_encoder_decoder=self.is_encoder_decoder) if args.remove_unused_columns: args.remove_unused_columns = False warnings.warn('When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your TrainingArguments we have set it for you, but you should do it yourself in the future.', UserWarning) self.use_dpo_data_collator 
= True else: self.use_dpo_data_collator = False if not disable_dropout: warnings.warn('You passed `disable_dropout` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.disable_dropout = disable_dropout if args.disable_dropout: disable_dropout_in_model(model) if self.ref_model is not None: disable_dropout_in_model(self.ref_model) self.max_length = args.max_length self.generate_during_eval = args.generate_during_eval self.label_pad_token_id = args.label_pad_token_id if padding_value is not None: warnings.warn('You passed `padding_value` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.padding_value = padding_value self.padding_value = args.padding_value if padding_value is not None else self.tokenizer.pad_token_id self.max_prompt_length = args.max_prompt_length if truncation_mode != 'keep_end': warnings.warn('You passed `truncation_mode` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.truncation_mode = truncation_mode self.truncation_mode = args.truncation_mode self.max_completion_length = args.max_completion_length self.precompute_ref_log_probs = args.precompute_ref_log_probs self._precomputed_train_ref_log_probs = False self._precomputed_eval_ref_log_probs = False if loss_type is not None: warnings.warn('You passed `loss_type` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.loss_type = loss_type if label_smoothing != 0: warnings.warn('You passed `label_smoothing` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.label_smoothing = label_smoothing if args.loss_type in ['hinge', 'ipo', 'bco_pair', 'sppo_hard', 'nca_pair', 'apo_zero', 'apo_down'] and args.label_smoothing > 0: warnings.warn('You are using a loss type that does not support label smoothing. Ignoring label_smoothing parameter.') if args.loss_type == 'kto_pair': raise ValueError('Support for kto_pair has been removed in DPOTrainer. 
Please use KTOTrainer.') if beta != 0.1: warnings.warn('You passed `beta` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.beta = beta self.beta = args.beta self.label_smoothing = args.label_smoothing self.loss_type = args.loss_type self.aux_loss_enabled = getattr(model.config, 'output_router_logits', False) self._stored_metrics = defaultdict(lambda : defaultdict(list)) self.f_divergence_type = args.f_divergence_type self.f_divergence_params = {FDivergenceConstants.ALPHA_DIVERGENCE_COEF_KEY: args.f_alpha_divergence_coef} if dataset_num_proc is not None: warnings.warn('You passed `dataset_num_proc` to the DPOTrainer, the value you passed will override the one in the `DPOConfig`.') args.dataset_num_proc = dataset_num_proc self.dataset_num_proc = args.dataset_num_proc with PartialState().local_main_process_first(): fn_kwargs = {'tokenizer': self.tokenizer, 'args': args, 'processor': self.processor if self.is_vision_model else None, 'model': model if self.is_encoder_decoder else None} train_dataset = train_dataset.map(_tokenize, fn_kwargs=fn_kwargs, batched=True, num_proc=self.dataset_num_proc, writer_batch_size=10, desc='Tokenizing train dataset') if eval_dataset is not None: eval_dataset = eval_dataset.map(_tokenize, fn_kwargs=fn_kwargs, batched=True, num_proc=self.dataset_num_proc, writer_batch_size=10, desc='Tokenizing eval dataset') super().__init__(model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, model_init=model_init, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics) if hasattr(self.model, 'add_model_tags'): self.model.add_model_tags(self._tag_names) if not hasattr(self, 'accelerator'): raise AttributeError('Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`.') if self.is_deepspeed_enabled: if self.accelerator.state.deepspeed_plugin.zero_stage == 3 and self.precompute_ref_log_probs: raise ValueError('You cannot use `precompute_ref_log_probs=True` with Deepspeed ZeRO-3. Please set `precompute_ref_log_probs=False`.') if self.ref_model is None: if not (self.is_peft_model or self.precompute_ref_log_probs): raise ValueError('No reference model and model is not a Peft model. Try setting `precompute_ref_log_probs=True`') if args.sync_ref_model: raise ValueError('You currently cannot use `ref_model=None` with TR-DPO method. Please provide `ref_model`.') elif self.is_deepspeed_enabled: self.ref_model = self._prepare_deepspeed(self.ref_model) else: self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True) if args.sync_ref_model: if precompute_ref_log_probs: raise ValueError('You cannot use `precompute_ref_log_probs=True` with TR-DPO method. 
Please set `precompute_ref_log_probs=False`.') self.add_callback(SyncRefModelCallback(ref_model=self.ref_model, accelerator=self.accelerator)) if self.loss_type == 'bco_pair': self.running = RunningMoments(self.accelerator) def _prepare_deepspeed(self, model: PreTrainedModelWrapper): deepspeed_plugin = self.accelerator.state.deepspeed_plugin config_kwargs = deepcopy(deepspeed_plugin.deepspeed_config) if model is not None: if hasattr(model, 'config'): hidden_size = max(model.config.hidden_sizes) if getattr(model.config, 'hidden_sizes', None) else getattr(model.config, 'hidden_size', None) if hidden_size is not None and config_kwargs['zero_optimization']['stage'] == 3: config_kwargs.update({'zero_optimization.reduce_bucket_size': hidden_size * hidden_size, 'zero_optimization.stage3_param_persistence_threshold': 10 * hidden_size, 'zero_optimization.stage3_prefetch_bucket_size': 0.9 * hidden_size * hidden_size}) if config_kwargs['zero_optimization']['stage'] != 3: config_kwargs['zero_optimization']['stage'] = 0 (model, *_) = deepspeed.initialize(model=model, config=config_kwargs) model.eval() return model def get_train_dataloader(self) -> DataLoader: if self.precompute_ref_log_probs and (not self._precomputed_train_ref_log_probs): dataloader_params = {'batch_size': self.args.per_device_train_batch_size, 'collate_fn': self.data_collator, 'num_workers': self.args.dataloader_num_workers, 'pin_memory': self.args.dataloader_pin_memory, 'shuffle': False} data_loader = self.accelerator.prepare(DataLoader(self.train_dataset, **dataloader_params)) reference_chosen_logps = [] reference_rejected_logps = [] for padded_batch in tqdm(iterable=data_loader, desc='Train dataset reference log probs'): (reference_chosen_logp, reference_rejected_logp) = self.compute_reference_log_probs(padded_batch) (reference_chosen_logp, reference_rejected_logp) = self.accelerator.gather_for_metrics((reference_chosen_logp, reference_rejected_logp)) reference_chosen_logps.append(reference_chosen_logp.cpu()) reference_rejected_logps.append(reference_rejected_logp.cpu()) torch.cuda.empty_cache() self.accelerator.free_memory() all_reference_chosen_logps = torch.cat(reference_chosen_logps).float().numpy() all_reference_rejected_logps = torch.cat(reference_rejected_logps).float().numpy() self.train_dataset = self.train_dataset.add_column(name='reference_chosen_logps', column=all_reference_chosen_logps) self.train_dataset = self.train_dataset.add_column(name='reference_rejected_logps', column=all_reference_rejected_logps) self._precomputed_train_ref_log_probs = True return super().get_train_dataloader() def get_eval_dataloader(self, eval_dataset: Optional[Dataset]=None) -> DataLoader: if eval_dataset is None and self.eval_dataset is None: raise ValueError('Trainer: evaluation requires an eval_dataset.') eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset if self.precompute_ref_log_probs and (not self._precomputed_eval_ref_log_probs): dataloader_params = {'batch_size': self.args.per_device_eval_batch_size, 'collate_fn': self.data_collator, 'num_workers': self.args.dataloader_num_workers, 'pin_memory': self.args.dataloader_pin_memory, 'shuffle': False} data_loader = self.accelerator.prepare(DataLoader(eval_dataset, **dataloader_params)) reference_chosen_logps = [] reference_rejected_logps = [] for padded_batch in tqdm(iterable=data_loader, desc='Eval dataset reference log probs'): (reference_chosen_logp, reference_rejected_logp) = self.compute_reference_log_probs(padded_batch) (reference_chosen_logp, 
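# --- Minimal end-to-end usage sketch for the DPOTrainer defined above. Model name,
# dataset rows and output_dir are hypothetical; any preference dataset with "prompt",
# "chosen" and "rejected" columns follows the same pattern. Assumes the usual
# top-level TRL exports.
from datasets import Dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token       # DPODataCollatorWithPadding needs a pad id

train_dataset = Dataset.from_dict({
    "prompt": ["The capital of France is"],
    "chosen": [" Paris."],
    "rejected": [" Berlin."],
})

dpo_args = DPOConfig(output_dir="./dpo-sketch", per_device_train_batch_size=1,
                     max_length=128, max_prompt_length=64, report_to=[])
trainer = DPOTrainer(model=model, ref_model=None, args=dpo_args,
                     train_dataset=train_dataset, tokenizer=tokenizer)
# With ref_model=None and no PEFT adapter, a frozen copy of the policy is created
# via create_reference_model (see __init__ above).
# trainer.train()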
reference_rejected_logp) = self.accelerator.gather_for_metrics((reference_chosen_logp, reference_rejected_logp)) reference_chosen_logps.append(reference_chosen_logp.cpu()) reference_rejected_logps.append(reference_rejected_logp.cpu()) all_reference_chosen_logps = torch.cat(reference_chosen_logps).float().numpy() all_reference_rejected_logps = torch.cat(reference_rejected_logps).float().numpy() eval_dataset = eval_dataset.add_column(name='reference_chosen_logps', column=all_reference_chosen_logps) eval_dataset = eval_dataset.add_column(name='reference_rejected_logps', column=all_reference_rejected_logps) if self.eval_dataset is not None: self.eval_dataset = eval_dataset self._precomputed_eval_ref_log_probs = True return super().get_eval_dataloader(eval_dataset=eval_dataset) @contextmanager def null_ref_context(self): with self.accelerator.unwrap_model(self.model).disable_adapter() if self.is_peft_model and (not self.ref_adapter_name) else nullcontext(): if self.ref_adapter_name: self.model.set_adapter(self.ref_adapter_name) yield if self.ref_adapter_name: self.model.set_adapter(self.model_adapter_name or 'default') def compute_reference_log_probs(self, padded_batch: Dict) -> Dict: compte_ref_context_manager = amp.autocast('cuda') if self._peft_has_been_casted_to_bf16 else nullcontext() with torch.no_grad(), compte_ref_context_manager: if self.ref_model is None: with self.null_ref_context(): (reference_chosen_logps, reference_rejected_logps) = self.concatenated_forward(self.model, padded_batch)[:2] else: (reference_chosen_logps, reference_rejected_logps) = self.concatenated_forward(self.ref_model, padded_batch)[:2] return (reference_chosen_logps, reference_rejected_logps) @staticmethod def concatenated_inputs(batch: Dict[str, Union[List, torch.LongTensor]], is_encoder_decoder: bool=False, is_vision_model: bool=False, label_pad_token_id: int=-100, padding_value: int=0, device: Optional[torch.device]=None) -> Dict[str, torch.LongTensor]: concatenated_batch = {} if is_encoder_decoder: max_length = max(batch['chosen_labels'].shape[1], batch['rejected_labels'].shape[1]) else: max_length = max(batch['chosen_input_ids'].shape[1], batch['rejected_input_ids'].shape[1]) for k in batch: if k.startswith('chosen') and isinstance(batch[k], torch.Tensor): if 'labels' in k or is_encoder_decoder: pad_value = label_pad_token_id elif k.endswith('_input_ids'): pad_value = padding_value elif k.endswith('_attention_mask'): pad_value = 0 concatenated_key = k.replace('chosen', 'concatenated') concatenated_batch[concatenated_key] = pad_to_length(batch[k], max_length, pad_value=pad_value) for k in batch: if k.startswith('rejected') and isinstance(batch[k], torch.Tensor): if 'labels' in k or is_encoder_decoder: pad_value = label_pad_token_id elif k.endswith('_input_ids'): pad_value = padding_value elif k.endswith('_attention_mask'): pad_value = 0 concatenated_key = k.replace('rejected', 'concatenated') concatenated_batch[concatenated_key] = torch.cat((concatenated_batch[concatenated_key], pad_to_length(batch[k], max_length, pad_value=pad_value)), dim=0).to(device=device) if is_encoder_decoder: concatenated_batch['concatenated_input_ids'] = batch['prompt_input_ids'].repeat(2, 1).to(device=device) concatenated_batch['concatenated_attention_mask'] = batch['prompt_attention_mask'].repeat(2, 1).to(device=device) concatenated_batch['concatenated_decoder_input_ids'] = torch.cat([batch['chosen_decoder_input_ids'], batch['rejected_decoder_input_ids']], dim=0).to(device=device) if is_vision_model: 
concatenated_batch['pixel_values'] = torch.cat([batch['prompt_pixel_values'], batch['prompt_pixel_values']], dim=0) if 'prompt_pixel_attention_mask' in batch: concatenated_batch['pixel_attention_mask'] = torch.cat([batch['prompt_pixel_attention_mask'], batch['prompt_pixel_attention_mask']], dim=0) return concatenated_batch def dpo_loss(self, policy_chosen_logps: torch.FloatTensor, policy_rejected_logps: torch.FloatTensor, reference_chosen_logps: torch.FloatTensor, reference_rejected_logps: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: chosen_logratios = policy_chosen_logps.to(self.accelerator.device) - (not self.reference_free) * reference_chosen_logps.to(self.accelerator.device) rejected_logratios = policy_rejected_logps.to(self.accelerator.device) - (not self.reference_free) * reference_rejected_logps.to(self.accelerator.device) if self.f_divergence_type == FDivergenceType.ALPHA_DIVERGENCE.value: alpha_coef = FDivergenceConstants.ALPHA_DIVERGENCE_COEF_DEFAULT if self.f_divergence_params and FDivergenceConstants.ALPHA_DIVERGENCE_COEF_KEY in self.f_divergence_params: alpha_coef = float(self.f_divergence_params[FDivergenceConstants.ALPHA_DIVERGENCE_COEF_KEY]) logits = (cap_exp(rejected_logratios * -alpha_coef) - cap_exp(chosen_logratios * -alpha_coef)) / alpha_coef else: pi_logratios = policy_chosen_logps - policy_rejected_logps if self.reference_free: ref_logratios = torch.tensor([0], dtype=pi_logratios.dtype, device=pi_logratios.device) else: ref_logratios = reference_chosen_logps - reference_rejected_logps pi_logratios = pi_logratios.to(self.accelerator.device) ref_logratios = ref_logratios.to(self.accelerator.device) logits = pi_logratios - ref_logratios if self.f_divergence_type == FDivergenceType.JS_DIVERGENCE.value: logits -= F.softplus(chosen_logratios) - F.softplus(rejected_logratios) if self.loss_type == 'sigmoid': losses = -F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing) - F.logsigmoid(-self.beta * logits) * self.label_smoothing elif self.loss_type == 'robust': losses = (-F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing) + F.logsigmoid(-self.beta * logits) * self.label_smoothing) / (1 - 2 * self.label_smoothing) elif self.loss_type == 'exo_pair': import math if self.label_smoothing == 0: self.label_smoothing = 0.001 losses = (self.beta * logits).sigmoid() * (F.logsigmoid(self.beta * logits) - math.log(1 - self.label_smoothing)) + (-self.beta * logits).sigmoid() * (F.logsigmoid(-self.beta * logits) - math.log(self.label_smoothing)) elif self.loss_type == 'hinge': losses = torch.relu(1 - self.beta * logits) elif self.loss_type == 'ipo': losses = (logits - 1 / (2 * self.beta)) ** 2 elif self.loss_type == 'bco_pair': chosen_logratios = policy_chosen_logps - reference_chosen_logps rejected_logratios = policy_rejected_logps - reference_rejected_logps chosen_rewards = self.beta * chosen_logratios rejected_rewards = self.beta * rejected_logratios rewards = torch.cat((chosen_rewards, rejected_rewards), 0).mean().detach() self.running.update(rewards) delta = self.running.mean losses = -F.logsigmoid(self.beta * chosen_logratios - delta) - F.logsigmoid(-(self.beta * rejected_logratios - delta)) elif self.loss_type == 'sppo_hard': a = policy_chosen_logps - reference_chosen_logps b = policy_rejected_logps - reference_rejected_logps losses = (a - 0.5 / self.beta) ** 2 + (b + 0.5 / self.beta) ** 2 elif self.loss_type == 'nca_pair': chosen_rewards = (policy_chosen_logps - reference_chosen_logps) * self.beta rejected_rewards = 
(policy_rejected_logps - reference_rejected_logps) * self.beta losses = -F.logsigmoid(chosen_rewards) - 0.5 * F.logsigmoid(-chosen_rewards) - 0.5 * F.logsigmoid(-rejected_rewards) elif self.loss_type == 'aot_pair': chosen_logratios = policy_chosen_logps - reference_chosen_logps rejected_logratios = policy_rejected_logps - reference_rejected_logps (chosen_logratios_sorted, _) = torch.sort(chosen_logratios, dim=0) (rejected_logratios_sorted, _) = torch.sort(rejected_logratios, dim=0) delta = chosen_logratios_sorted - rejected_logratios_sorted losses = -F.logsigmoid(self.beta * delta) * (1 - self.label_smoothing) - F.logsigmoid(-self.beta * delta) * self.label_smoothing elif self.loss_type == 'aot': pi_logratios = policy_chosen_logps - policy_rejected_logps ref_logratios = reference_chosen_logps - reference_rejected_logps (pi_logratios_sorted, _) = torch.sort(pi_logratios, dim=0) (ref_logratios_sorted, _) = torch.sort(ref_logratios, dim=0) delta = pi_logratios_sorted - ref_logratios_sorted losses = -F.logsigmoid(self.beta * delta) * (1 - self.label_smoothing) - F.logsigmoid(-self.beta * delta) * self.label_smoothing elif self.loss_type == 'apo_zero': losses_chosen = 1 - F.sigmoid(self.beta * chosen_logratios) losses_rejected = F.sigmoid(self.beta * rejected_logratios) losses = losses_chosen + losses_rejected elif self.loss_type == 'apo_down': losses_chosen = F.sigmoid(self.beta * chosen_logratios) losses_rejected = 1 - F.sigmoid(self.beta * (chosen_logratios - rejected_logratios)) losses = losses_chosen + losses_rejected else: raise ValueError(f"Unknown loss type: {self.loss_type}. Should be one of ['sigmoid', 'hinge', 'ipo', 'exo_pair', 'nca_pair', 'robust', 'bco_pair', 'sppo_hard', 'aot', 'aot_pair', 'apo_zero', 'apo_down']") chosen_rewards = self.beta * (policy_chosen_logps.to(self.accelerator.device) - reference_chosen_logps.to(self.accelerator.device)).detach() rejected_rewards = self.beta * (policy_rejected_logps.to(self.accelerator.device) - reference_rejected_logps.to(self.accelerator.device)).detach() return (losses, chosen_rewards, rejected_rewards) @staticmethod def get_batch_logps(logits: torch.FloatTensor, labels: torch.LongTensor, label_pad_token_id: int=-100, is_encoder_decoder: bool=False) -> Tuple[torch.FloatTensor, torch.LongTensor]: if logits.shape[:-1] != labels.shape: raise ValueError(f'Logits (batch and sequence length dim) {logits.shape[:-1]} and labels must have the same shape {labels.shape}.') if not is_encoder_decoder: labels = labels[:, 1:].clone() logits = logits[:, :-1, :] loss_mask = labels != label_pad_token_id labels[labels == label_pad_token_id] = 0 per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2) return ((per_token_logps * loss_mask).sum(-1), loss_mask.sum(-1)) def concatenated_forward(self, model: nn.Module, batch: Dict[str, Union[List, torch.LongTensor]]) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: concatenated_batch = self.concatenated_inputs(batch, is_encoder_decoder=self.is_encoder_decoder, is_vision_model=self.is_vision_model, label_pad_token_id=self.label_pad_token_id, padding_value=self.padding_value, device=self.accelerator.device) len_chosen = batch['chosen_labels'].shape[0] model_kwargs = {} if self.is_encoder_decoder: model_kwargs['labels'] = concatenated_batch['concatenated_labels'] model_kwargs['decoder_input_ids'] = concatenated_batch.get('concatenated_decoder_input_ids') if self.is_vision_model: model_kwargs['pixel_values'] = 
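# --- Worked numeric sketch of the default "sigmoid" DPO loss above, using the default
# label_smoothing=0. The four log-probabilities are made-up values.
import torch
import torch.nn.functional as F

beta = 0.1
policy_chosen_logps = torch.tensor([-12.0])
policy_rejected_logps = torch.tensor([-15.0])
reference_chosen_logps = torch.tensor([-13.0])
reference_rejected_logps = torch.tensor([-14.0])

pi_logratios = policy_chosen_logps - policy_rejected_logps          # 3.0
ref_logratios = reference_chosen_logps - reference_rejected_logps   # 1.0
logits = pi_logratios - ref_logratios                                # 2.0
loss = -F.logsigmoid(beta * logits)                                  # -log(sigmoid(0.2)) ~= 0.598
chosen_rewards = beta * (policy_chosen_logps - reference_chosen_logps)        #  0.1
rejected_rewards = beta * (policy_rejected_logps - reference_rejected_logps)  # -0.1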
concatenated_batch['pixel_values'] if 'pixel_attention_mask' in concatenated_batch: model_kwargs['pixel_attention_mask'] = concatenated_batch['pixel_attention_mask'] if self.aux_loss_enabled: model_kwargs['output_router_logits'] = True outputs = model(concatenated_batch['concatenated_input_ids'], attention_mask=concatenated_batch['concatenated_attention_mask'], use_cache=False, **model_kwargs) all_logits = outputs.logits if all_logits.shape[:2] != concatenated_batch['concatenated_labels'].shape[:2]: seq_len = concatenated_batch['concatenated_labels'].shape[1] all_logits = all_logits[:, -seq_len:] (all_logps, size_completion) = self.get_batch_logps(all_logits, concatenated_batch['concatenated_labels'], is_encoder_decoder=self.is_encoder_decoder, label_pad_token_id=self.label_pad_token_id) def cross_entropy_loss(logits, labels): if not self.is_encoder_decoder: logits = logits[..., :-1, :].contiguous() labels = labels[..., 1:].contiguous() loss_fct = nn.CrossEntropyLoss(ignore_index=self.label_pad_token_id) logits = logits.view(-1, logits.shape[-1]) labels = labels.view(-1) labels = labels.to(logits.device) loss = loss_fct(logits, labels) return loss labels = concatenated_batch['concatenated_labels'].clone() nll_loss = cross_entropy_loss(all_logits[:len_chosen], labels[:len_chosen]) if self.loss_type == 'ipo': all_logps = all_logps / size_completion chosen_logps = all_logps[:len_chosen] rejected_logps = all_logps[len_chosen:] chosen_logits = all_logits[:len_chosen] rejected_logits = all_logits[len_chosen:] if self.aux_loss_enabled: return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, nll_loss, outputs.aux_loss) return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, nll_loss) def get_batch_loss_metrics(self, model, batch: Dict[str, Union[List, torch.LongTensor]], train_eval: Literal['train', 'eval']='train'): metrics = {} forward_output = self.concatenated_forward(model, batch) (policy_chosen_logps, policy_rejected_logps, policy_chosen_logits, policy_rejected_logits, policy_nll_loss) = forward_output[:5] if self.aux_loss_enabled: aux_loss = forward_output[5] if 'reference_chosen_logps' in batch and 'reference_rejected_logps' in batch and (self.precompute_ref_log_probs or self.args.rpo_alpha is not None): reference_chosen_logps = batch['reference_chosen_logps'] reference_rejected_logps = batch['reference_rejected_logps'] else: with torch.no_grad(): if self.ref_model is None: with self.null_ref_context(): (reference_chosen_logps, reference_rejected_logps) = self.concatenated_forward(self.model, batch)[:2] else: (reference_chosen_logps, reference_rejected_logps) = self.concatenated_forward(self.ref_model, batch)[:2] (losses, chosen_rewards, rejected_rewards) = self.dpo_loss(policy_chosen_logps, policy_rejected_logps, reference_chosen_logps, reference_rejected_logps) reward_accuracies = (chosen_rewards > rejected_rewards).float() if self.args.rpo_alpha is not None: losses = losses + policy_nll_loss * self.args.rpo_alpha prefix = 'eval_' if train_eval == 'eval' else '' metrics[f'{prefix}rewards/chosen'] = chosen_rewards.mean().cpu() metrics[f'{prefix}rewards/rejected'] = rejected_rewards.mean().cpu() metrics[f'{prefix}rewards/accuracies'] = reward_accuracies.mean().cpu() metrics[f'{prefix}rewards/margins'] = (chosen_rewards - rejected_rewards).mean().cpu() metrics[f'{prefix}logps/rejected'] = policy_rejected_logps.detach().mean().cpu() metrics[f'{prefix}logps/chosen'] = policy_chosen_logps.detach().mean().cpu() metrics[f'{prefix}logits/rejected'] = 
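# --- Illustrative sketch of get_batch_logps and the concatenation trick used by
# concatenated_forward above: chosen and rejected sequences are stacked into one
# batch, the model runs once, and the summed per-token log-probs are split back in
# half. Shapes and values below are toy stand-ins.
import torch

logits = torch.randn(4, 6, 32)              # (2 * batch, seq_len, vocab); first half = chosen
labels = torch.randint(0, 32, (4, 6))
labels[:, :3] = -100                        # prompt positions carry label_pad_token_id

shift_labels = labels[:, 1:].clone()        # decoder-only: token t is predicted from t-1
shift_logits = logits[:, :-1, :]
loss_mask = shift_labels != -100
shift_labels[shift_labels == -100] = 0      # any valid index; masked out below
per_token_logps = torch.gather(
    shift_logits.log_softmax(-1), dim=2, index=shift_labels.unsqueeze(2)
).squeeze(2)
all_logps = (per_token_logps * loss_mask).sum(-1)

len_chosen = 2
chosen_logps, rejected_logps = all_logps[:len_chosen], all_logps[len_chosen:]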
policy_rejected_logits.detach().mean().cpu() metrics[f'{prefix}logits/chosen'] = policy_chosen_logits.detach().mean().cpu() if self.args.rpo_alpha is not None: metrics[f'{prefix}nll_loss'] = policy_nll_loss.detach().mean().cpu() if self.aux_loss_enabled: return (losses.mean() + getattr(model.config, 'router_aux_loss_coef', 0.0) * aux_loss, metrics) return (losses.mean(), metrics) def compute_loss(self, model: Union[PreTrainedModel, nn.Module], inputs: Dict[str, Union[torch.Tensor, Any]], return_outputs=False) -> Union[torch.Tensor, Tuple[torch.Tensor, Dict[str, torch.Tensor]]]: if not self.use_dpo_data_collator: warnings.warn('compute_loss is only implemented for DPODataCollatorWithPadding, and you passed a datacollator that is different than DPODataCollatorWithPadding - you might see unexpected behavior. Alternatively, you can implement your own prediction_step method if you are using a custom data collator') compute_loss_context_manager = amp.autocast('cuda') if self._peft_has_been_casted_to_bf16 else nullcontext() with compute_loss_context_manager: (loss, metrics) = self.get_batch_loss_metrics(model, inputs, train_eval='train') loss = loss.to(self.args.device) self.store_metrics(metrics, train_eval='train') if return_outputs: return (loss, metrics) return loss def get_batch_samples(self, model, batch: Dict[str, torch.LongTensor]) -> Tuple[str, str]: generate_context_manager = amp.autocast('cuda') if self._peft_has_been_casted_to_bf16 else nullcontext() with generate_context_manager: policy_output = model.generate(input_ids=batch['prompt_input_ids'], attention_mask=batch['prompt_attention_mask'], max_length=self.max_length, do_sample=True, pad_token_id=self.tokenizer.pad_token_id) if 'reference_output' in batch: reference_output = batch['reference_output'] elif self.ref_model is None: with self.null_ref_context(): reference_output = self.model.generate(input_ids=batch['prompt_input_ids'], attention_mask=batch['prompt_attention_mask'], max_length=self.max_length, do_sample=True, pad_token_id=self.tokenizer.pad_token_id) else: reference_output = self.ref_model.generate(input_ids=batch['prompt_input_ids'], attention_mask=batch['prompt_attention_mask'], max_length=self.max_length, do_sample=True, pad_token_id=self.tokenizer.pad_token_id) policy_output = pad_to_length(policy_output, self.max_length, self.tokenizer.pad_token_id) policy_output_decoded = self.tokenizer.batch_decode(policy_output, skip_special_tokens=True) reference_output = pad_to_length(reference_output, self.max_length, self.tokenizer.pad_token_id) reference_output_decoded = self.tokenizer.batch_decode(reference_output, skip_special_tokens=True) return (policy_output_decoded, reference_output_decoded) def prediction_step(self, model: Union[PreTrainedModel, nn.Module], inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]]=None): if not self.use_dpo_data_collator: warnings.warn('prediction_step is only implemented for DPODataCollatorWithPadding, and you passed a datacollator that is different than DPODataCollatorWithPadding - you might see unexpected behavior. 
Alternatively, you can implement your own prediction_step method if you are using a custom data collator') if ignore_keys is None: if hasattr(model, 'config'): ignore_keys = getattr(model.config, 'keys_to_ignore_at_inference', []) else: ignore_keys = [] prediction_context_manager = amp.autocast('cuda') if self._peft_has_been_casted_to_bf16 else nullcontext() with torch.no_grad(), prediction_context_manager: (loss, metrics) = self.get_batch_loss_metrics(model, inputs, train_eval='eval') self.store_metrics(metrics, train_eval='eval') if prediction_loss_only: return (loss.detach(), None, None) logits_dict = {'eval_logits/chosen': metrics['eval_logits/chosen'], 'eval_logits/rejected': metrics['eval_logits/rejected']} logits = tuple((v.unsqueeze(dim=0) for (k, v) in logits_dict.items() if k not in ignore_keys)) logits = torch.stack(logits).mean(axis=1).to(self.accelerator.device) labels = torch.zeros(logits.shape[0], device=self.accelerator.device) return (loss.detach(), logits, labels) def store_metrics(self, metrics: Dict[str, float], train_eval: Literal['train', 'eval']='train') -> None: for (key, value) in metrics.items(): self._stored_metrics[train_eval][key].append(value) def evaluation_loop(self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool]=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str='eval') -> EvalLoopOutput: if self.generate_during_eval: num_samples = len(dataloader.dataset) random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size) random_batch_dataset = dataloader.dataset.select(random_indices) random_batch = self.data_collator(random_batch_dataset) random_batch = self._prepare_inputs(random_batch) (policy_output_decoded, ref_output_decoded) = self.get_batch_samples(self.model, random_batch) self.log({'game_log': wandb.Table(columns=['Prompt', 'Policy', 'Ref Model'], rows=[[prompt, pol[len(prompt):], ref[len(prompt):]] for (prompt, pol, ref) in zip(random_batch['prompt'], policy_output_decoded, ref_output_decoded)])}) self.state.log_history.pop() initial_output = super().evaluation_loop(dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix) return initial_output def log(self, logs: Dict[str, float]) -> None: train_eval = 'train' if 'loss' in logs else 'eval' for (key, metrics) in self._stored_metrics[train_eval].items(): logs[key] = torch.tensor(metrics).mean().item() del self._stored_metrics[train_eval] return super().log(logs) @wraps(Trainer.push_to_hub) def push_to_hub(self, commit_message: Optional[str]='End of training', blocking: bool=True, **kwargs) -> str: kwargs = trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs) return super().push_to_hub(commit_message=commit_message, blocking=blocking, **kwargs) # File: trl-main/trl/trainer/gkd_config.py from dataclasses import dataclass from typing import Any, Dict, Optional from .sft_config import SFTConfig @dataclass class GKDConfig(SFTConfig): temperature: float = 0.9 lmbda: float = 0.5 beta: float = 0.5 max_new_tokens: int = 128 teacher_model_name_or_path: Optional[str] = None teacher_model_init_kwargs: Optional[Dict[str, Any]] = None disable_dropout: bool = True def __post_init__(self): super().__post_init__() if self.lmbda < 0.0 or self.lmbda > 1.0: raise ValueError('lmbda must be in the range [0.0, 1.0].') if self.beta < 0.0 or self.beta > 1.0: raise ValueError('beta must be in the range [0.0, 1.0].') # File: trl-main/trl/trainer/gkd_trainer.py import random import warnings from copy 
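# --- Illustrative sketch (hypothetical values) of the GKDConfig defined above.
from trl.trainer.gkd_config import GKDConfig

gkd_args = GKDConfig(
    output_dir="./gkd-sketch",                    # hypothetical path
    lmbda=0.5,        # fraction of steps trained on student-generated (on-policy) data
    beta=0.5,         # interpolation coefficient of the generalized JSD
    temperature=0.9,
    max_new_tokens=128,
    teacher_model_name_or_path="gpt2-large",      # hypothetical teacher
)
# lmbda or beta outside [0.0, 1.0] raises a ValueError in __post_init__ above.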
import deepcopy from typing import Any, Dict, Optional, Union import torch import torch.nn as nn import torch.nn.functional as F from accelerate.utils import is_deepspeed_available from transformers import AutoModelForCausalLM, GenerationConfig, PreTrainedModel from ..import_utils import is_liger_available from ..models import PreTrainedModelWrapper from ..models.utils import unwrap_model_for_generation from .gkd_config import GKDConfig from .sft_trainer import SFTTrainer from .utils import DataCollatorForChatML, disable_dropout_in_model, empty_cache if is_deepspeed_available(): import deepspeed if is_liger_available(): from liger_kernel.transformers import AutoLigerKernelForCausalLM class GKDTrainer(SFTTrainer): _tag_names = ['trl', 'gkd'] def __init__(self, teacher_model: Union[PreTrainedModel, nn.Module, str], args: Optional[GKDConfig]=None, *sft_args, **kwargs): args.remove_unused_columns = False kwargs['data_collator'] = DataCollatorForChatML(tokenizer=kwargs['tokenizer'], max_length=args.max_seq_length) super().__init__(*sft_args, args=args, **kwargs) if args.teacher_model_init_kwargs is None: teacher_model_init_kwargs = {} elif not isinstance(teacher_model, str): raise ValueError('You passed teacher_model_init_kwargs to the GKDConfig, but your teacher_model is already instantiated.') else: teacher_model_init_kwargs = args.teacher_model_init_kwargs teacher_model_init_kwargs['torch_dtype'] = teacher_model_init_kwargs['torch_dtype'] if teacher_model_init_kwargs['torch_dtype'] in ['auto', None] else getattr(torch, teacher_model_init_kwargs['torch_dtype']) if isinstance(teacher_model, str): warnings.warn('You passed a teacher model_id to the GKDTrainer. This will automatically create an `AutoModelForCausalLM`') if args.use_liger: teacher_model = AutoLigerKernelForCausalLM.from_pretrained(teacher_model, **teacher_model_init_kwargs) else: teacher_model = AutoModelForCausalLM.from_pretrained(teacher_model, **teacher_model_init_kwargs) if args.disable_dropout: disable_dropout_in_model(self.model) if self.is_deepspeed_enabled: self.teacher_model = self._prepare_deepspeed(teacher_model) else: self.teacher_model = self.accelerator.prepare_model(teacher_model, evaluation_mode=True) self.lmbda = args.lmbda self.beta = args.beta self.temperature = args.temperature self.generation_config = GenerationConfig(max_new_tokens=args.max_new_tokens, temperature=args.temperature, do_sample=True, top_k=0, use_cache=False if args.gradient_checkpointing else True) @staticmethod def generalized_jsd_loss(student_logits, teacher_logits, labels=None, beta=0.5, temperature=1.0, reduction='batchmean'): student_logits = student_logits / temperature teacher_logits = teacher_logits / temperature student_log_probs = F.log_softmax(student_logits, dim=-1) teacher_log_probs = F.log_softmax(teacher_logits, dim=-1) interpolated_log_probs = beta * student_log_probs + (1 - beta) * teacher_log_probs kl_teacher = F.kl_div(interpolated_log_probs, teacher_log_probs, reduction='none', log_target=True) kl_student = F.kl_div(interpolated_log_probs, student_log_probs, reduction='none', log_target=True) jsd = beta * kl_teacher + (1 - beta) * kl_student if labels is not None: mask = labels != -100 jsd = jsd[mask] if reduction == 'batchmean': return jsd.sum() / mask.sum() if labels is not None else jsd.sum() / (jsd.size(0) * jsd.size(1)) elif reduction == 'sum': return jsd.sum() elif reduction == 'mean': return jsd.mean() else: return jsd def compute_loss(self, model, inputs, return_outputs=False): outputs_student = 
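# --- Illustrative sketch of the generalized JSD computed by generalized_jsd_loss above:
# with mixture log-probabilities m = beta * log p_student + (1 - beta) * log p_teacher
# (an interpolation in log space), the loss is
#     beta * KL(teacher || m) + (1 - beta) * KL(student || m),
# averaged over positions ("batchmean" without labels). Toy tensors below.
import torch
import torch.nn.functional as F

student_logits = torch.randn(2, 5, 10)
teacher_logits = torch.randn(2, 5, 10)
beta, temperature = 0.5, 1.0

s = F.log_softmax(student_logits / temperature, dim=-1)
t = F.log_softmax(teacher_logits / temperature, dim=-1)
m = beta * s + (1 - beta) * t
# F.kl_div(input, target, log_target=True) computes KL(target || input) element-wise
jsd = beta * F.kl_div(m, t, reduction="none", log_target=True) \
    + (1 - beta) * F.kl_div(m, s, reduction="none", log_target=True)
loss = jsd.sum() / (jsd.size(0) * jsd.size(1))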
model(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask']) self.teacher_model.eval() with torch.no_grad(): outputs_teacher = self.teacher_model(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask']) prompt_lengths = inputs['prompts'].shape[1] shifted_student_logits = outputs_student.logits[:, prompt_lengths - 1:-1, :] shifted_teacher_logits = outputs_teacher.logits[:, prompt_lengths - 1:-1, :] shifted_labels = inputs['labels'][:, prompt_lengths:] loss = self.generalized_jsd_loss(student_logits=shifted_student_logits, teacher_logits=shifted_teacher_logits, labels=shifted_labels, beta=self.beta) empty_cache() return (loss, outputs_student) if return_outputs else loss @staticmethod def generate_on_policy_outputs(model, inputs, generation_config, pad_token_id=None): generated_outputs = model.generate(input_ids=inputs['prompts'], attention_mask=inputs.get('prompt_attention_mask', None), generation_config=generation_config, return_dict_in_generate=True) generated_tokens = generated_outputs.sequences new_attention_mask = torch.ones_like(generated_tokens) new_labels = generated_tokens.clone() if pad_token_id is not None: new_labels[new_labels == pad_token_id] = -100 new_attention_mask[generated_tokens == pad_token_id] = 0 return (generated_tokens, new_attention_mask, new_labels) def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: if random.random() <= self.lmbda: with unwrap_model_for_generation(model, self.accelerator) as unwrapped_model: (new_input_ids, new_attention_mask, new_labels) = self.generate_on_policy_outputs(unwrapped_model, inputs, self.generation_config, self.tokenizer.pad_token_id) inputs['input_ids'] = new_input_ids inputs['attention_mask'] = new_attention_mask inputs['labels'] = new_labels loss = super().training_step(model, inputs) return loss def _prepare_deepspeed(self, model: PreTrainedModelWrapper): deepspeed_plugin = self.accelerator.state.deepspeed_plugin config_kwargs = deepcopy(deepspeed_plugin.deepspeed_config) if model is not None: if hasattr(model, 'config'): hidden_size = max(model.config.hidden_sizes) if getattr(model.config, 'hidden_sizes', None) else getattr(model.config, 'hidden_size', None) if hidden_size is not None and config_kwargs['zero_optimization']['stage'] == 3: config_kwargs.update({'zero_optimization.reduce_bucket_size': hidden_size * hidden_size, 'zero_optimization.stage3_param_persistence_threshold': 10 * hidden_size, 'zero_optimization.stage3_prefetch_bucket_size': 0.9 * hidden_size * hidden_size}) if config_kwargs['zero_optimization']['stage'] != 3: config_kwargs['zero_optimization']['stage'] = 0 (model, *_) = deepspeed.initialize(model=model, config=config_kwargs) model.eval() return model # File: trl-main/trl/trainer/iterative_sft_trainer.py import warnings from functools import wraps from typing import Callable, Dict, List, Optional, Tuple, Union import torch from datasets import Dataset from torch.utils.data import DataLoader from transformers import DataCollator, DataCollatorForLanguageModeling, DataCollatorForSeq2Seq, PreTrainedModel, PreTrainedTokenizerBase, Trainer, TrainingArguments from transformers.trainer_utils import EvalLoopOutput from ..core import PPODecorators from ..import_utils import is_peft_available from .utils import trl_sanitze_kwargs_for_tagging if is_peft_available(): from peft import PeftModel class IterativeSFTTrainer(Trainer): _tag_names = ['trl', 'iterative-sft'] def __init__(self, model: Optional[PreTrainedModel]=None, args: 
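# --- Sketch of the post-processing done by generate_on_policy_outputs above: generated
# pad tokens are excluded from both the attention mask and the loss. Toy tensors below.
import torch

pad_token_id = 0                                     # hypothetical pad id
generated_tokens = torch.tensor([[5, 7, 9, 0, 0]])   # toy generation with trailing padding
new_attention_mask = torch.ones_like(generated_tokens)
new_labels = generated_tokens.clone()
new_labels[new_labels == pad_token_id] = -100        # ignored by the distillation loss
new_attention_mask[generated_tokens == pad_token_id] = 0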
Optional[TrainingArguments]=None, tokenizer: Optional[PreTrainedTokenizerBase]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), data_collator: Optional[DataCollator]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]]=None, max_length: Optional[int]=None, truncation_mode: Optional[str]='keep_end', preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None, compute_metrics: Optional[Callable[[EvalLoopOutput], Dict]]=None, optimize_device_cache: Optional[bool]=False): if not isinstance(tokenizer, PreTrainedTokenizerBase): raise ValueError(f'tokenizer must be a PreTrainedTokenizerBase like a PreTrainedTokenizer or a PreTrainedTokenizerFast, got {type(tokenizer)}') if not isinstance(model, PreTrainedModel): raise ValueError(f'model must be a PreTrainedModel, got {type(model)}') if not model.can_generate(): warnings.warn(f'The current model class {type(model)} is not compatible with `.generate()`Please make sure that this is intended.') if optimizers[1] is None and args.max_steps == -1: raise ValueError('When no scheduler is provided, you need to set the total number of training steps to perform `max_steps`') self.is_encoder_decoder = getattr(model.config, 'is_encoder_decoder', False) self.is_peft_model = is_peft_available() and isinstance(model, PeftModel) self.tokenizer = tokenizer if data_collator is None: if self.is_encoder_decoder: warnings.warn("No data collator is provided. Using 'DataCollatorForSeq2Seq' with'labels_pad_token_id' set to '-100' and 'pad_to_multiple_of' set to 8.") self.data_collator = DataCollatorForSeq2Seq(tokenizer, label_pad_token_id=-100, pad_to_multiple_of=8) else: warnings.warn("No data collator is provided. Using 'DataCollatorForLanguageModeling'") self.data_collator = DataCollatorForLanguageModeling(self.tokenizer, mlm=False) else: self.data_collator = data_collator self.max_length = max_length self.truncation_mode = truncation_mode self.optimize_device_cache = optimize_device_cache super().__init__(model=model, args=args, data_collator=self.data_collator, eval_dataset=eval_dataset, tokenizer=tokenizer, compute_metrics=compute_metrics, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics) self.create_optimizer_and_scheduler(self.args.max_steps) (self.model, self.optimizer, self.lr_scheduler) = self.accelerator.prepare(self.model, self.optimizer, self.lr_scheduler) self.tokenizer.truncation_side = 'left' if self.truncation_mode == 'keep_end' else 'right' if not hasattr(self, 'accelerator'): raise AttributeError('Your `Trainer` does not have an `accelerator` object. 
Consider upgrading `transformers`.') PPODecorators.optimize_device_cache = self.optimize_device_cache def prepare_model_inputs(self, input_ids: torch.Tensor, attention_mask: torch.Tensor, labels: torch.Tensor): if attention_mask is None: attention_mask = [torch.ones_like(ids) for ids in input_ids] if self.is_encoder_decoder: input_data = self.data_collator([{'input_ids': ids, 'attention_mask': att, 'labels': lab} for (ids, att, lab) in zip(input_ids, attention_mask, labels)]).to(self.model.device) input_data.pop('decoder_input_ids', None) input_data['labels'][input_data['labels'] == self.tokenizer.pad_token_id] = -100 else: input_data = self.data_collator([{'input_ids': ids, 'attention_mask': att} for (ids, att) in zip(input_ids, attention_mask)]).to(self.model.device) if self.max_length is not None: if self.truncation_mode == 'keep_start': input_data = {k: v[:self.max_length] for (k, v) in input_data.items()} elif self.truncation_mode == 'keep_end': input_data = {k: v[-self.max_length:] for (k, v) in input_data.items()} else: raise ValueError(f'Unknown truncation mode: {self.truncation_mode}') return input_data @staticmethod def _step_safety_checker(input_ids: List[torch.LongTensor], attention_mask: List[torch.LongTensor], labels: List[torch.LongTensor], texts: List[str], texts_labels: List[str]): if texts is None: if attention_mask is None: for (name, tensor_list) in zip(['input_ids', 'labels'], [input_ids, labels]): if not isinstance(tensor_list, list): raise ValueError(f'{name} must be a list of tensors - got {type(tensor_list)}') if not isinstance(tensor_list[0], torch.Tensor): raise ValueError(f'Elements in {name} must be tensors - got {type(tensor_list[0])}') else: for (name, tensor_list) in zip(['input_ids', 'attention_mask', 'labels'], [input_ids, attention_mask, labels]): if not isinstance(tensor_list, list): raise ValueError(f'{name} must be a list of tensors - got {type(tensor_list)}') if not isinstance(tensor_list[0], torch.Tensor): raise ValueError(f'Elements in {name} must be tensors - got {type(tensor_list[0])}') else: if not isinstance(texts, list): raise ValueError(f"'text' must be a list of strings - got {type(texts)}") if not isinstance(texts[0], str): raise ValueError(f"Elements in 'text' must be strings - got {type(texts[0])}") if texts_labels is not None: if not isinstance(texts_labels, list): raise ValueError(f"'text_labels' must be a list of strings - got {type(texts_labels)}") if not isinstance(texts_labels[0], str): raise ValueError(f"Elements in 'text_labels' must be strings - got {type(texts_labels[0])}") return (input_ids, attention_mask, labels, texts, texts_labels) @PPODecorators.empty_device_cache() def step(self, input_ids: Optional[List[torch.LongTensor]]=None, attention_mask: Optional[List[torch.LongTensor]]=None, labels: Optional[List[torch.LongTensor]]=None, texts: Optional[List[str]]=None, texts_labels: Optional[List[str]]=None): self.model.train() if self.state.global_step == 0: self.tr_loss = torch.tensor(0.0).to(self.args.device) self._globalstep_last_logged = self.state.global_step if input_ids is None and texts is None: raise ValueError('Step should include `input_ids` or `texts` as keyword arguments.') elif input_ids is not None and texts is not None: warnings.warn("Both 'input_ids' and 'texts' are provided. 'input_ids' will be overwritten using inputs provided by the 'texts' keyword argument.") if labels is None and texts_labels is None and self.is_encoder_decoder: raise ValueError("No 'labels' or 'text_labels' are provided. 
When using an encoder-decoder architecture, 'labels' or 'text_labels' must be passed.") (input_ids, attention_mask, labels, texts, texts_labels) = self._step_safety_checker(input_ids, attention_mask, labels, texts, texts_labels) if texts is not None: model_inputs = self.tokenizer(texts, max_length=self.max_length, truncation=True, padding=True, return_tensors='pt') (input_ids, attention_mask) = (model_inputs['input_ids'], model_inputs['attention_mask']) if texts_labels is not None: labels = self.tokenizer(texts_labels, max_length=self.max_length, truncation=True, padding=True, return_tensors='pt')['input_ids'] if labels is None: warnings.warn('No labels are provided. Setting labels to input_ids') labels = input_ids model_inputs = self.prepare_model_inputs(input_ids, attention_mask, labels) model_inputs_names = list(model_inputs.keys()) batch_dict = {} batch_dict.update(model_inputs) def collator(data): return_dict = dict() for key in data[0]: if key in ['input_ids', 'attention_mask', 'labels']: return_dict[key] = torch.stack([d[key] for d in data]).to(self.model.device) return return_dict batch_data = Dataset.from_dict(batch_dict) batch_data.set_format('torch') step_dataloader = DataLoader(batch_data, batch_size=self.args.per_device_train_batch_size, shuffle=True, collate_fn=collator) for (_, batch) in enumerate(step_dataloader): with self.accelerator.accumulate(self.model): model_inputs = {k: batch[k] for k in model_inputs_names} loss = self.compute_loss(self.model, model_inputs) if self.args.n_gpu > 1: loss = loss.mean() tr_loss_step = loss.detach() self.accelerator.backward(loss) if self.accelerator.sync_gradients and self.args.max_grad_norm is not None: self.accelerator.clip_grad_norm_(self.model.parameters(), self.args.max_grad_norm) self.optimizer.step() self.optimizer.zero_grad() if self.lr_scheduler is not None: self.lr_scheduler.step() self.state.global_step += 1 self.tr_loss += tr_loss_step self._maybe_log_save_evaluate() def _maybe_log_save_evaluate(self): if self.args.eval_steps is not None: if self.state.global_step % self.args.eval_steps == 0 and self.state.global_step != 0: self.evaluate(self.eval_dataset) if self.args.logging_steps is not None: if self.state.global_step % self.args.logging_steps == 0 and self.state.global_step != 0: logs: Dict[str, float] = {} tr_loss_scalar = self._nested_gather(self.tr_loss).mean().item() self.tr_loss -= self.tr_loss logs['loss'] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4) logs['learning_rate'] = self._get_learning_rate() self._globalstep_last_logged = self.state.global_step self.log(logs) @wraps(Trainer.push_to_hub) def push_to_hub(self, commit_message: Optional[str]='End of training', blocking: bool=True, **kwargs) -> str: kwargs = trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs) return super().push_to_hub(commit_message=commit_message, blocking=blocking, **kwargs) # File: trl-main/trl/trainer/judges.py import concurrent.futures import logging import random from abc import ABC, abstractmethod from typing import List, Optional, Union import numpy as np from accelerate import Accelerator from huggingface_hub import InferenceClient from ..import_utils import is_llmblender_available, is_openai_available if is_llmblender_available(): import llm_blender if is_openai_available(): from openai import OpenAI DEFAULT_PAIRWISE_SYSTEM_PROMPT = 'I require a leaderboard for various large language models. 
I\'ll provide you with prompts given to these models and their corresponding outputs. Your task is to assess these responses, and select the model that produces the best output from a human perspective.\n\n## Instruction\n\n{{\n "instruction": """{prompt}""",\n}}\n\n## Model Outputs\n\nHere are the unordered outputs from the models. Each output is associated with a specific model, identified by a unique model identifier.\n\n{{\n {{\n "model_identifier": "0",\n "output": """{response0}"""\n }},\n {{\n "model_identifier": "1",\n "output": """{response1}"""\n }}\n}}\n\n## Task\n\nEvaluate the models on the basis of the quality and relevance of their results, and select the model that generated the best result. Reply with the identifier of the best model. Our evaluation will only take into account the first character of your answer, so make sure it contains only one of the identifiers and nothing else (no quotation marks, no spaces, no new lines, ...).\n' class BaseJudge(ABC): @abstractmethod def judge(self, prompts: List[str], completions: List[str], shuffle_order: bool=True) -> List: raise NotImplementedError('Judge subclasses must implement the `judge` method.') class BaseRankJudge(ABC): @abstractmethod def judge(self, prompts: List[str], completions: List[List[str]], shuffle_order: bool=True) -> List[List[int]]: raise NotImplementedError('Judge subclasses must implement the `judge` method.') class BasePairwiseJudge(BaseJudge): @abstractmethod def judge(self, prompts: List[str], completions: List[List[str]], shuffle_order: bool=True) -> List[int]: raise NotImplementedError('Judge subclasses must implement the `judge` method.') class RandomRankJudge(BaseRankJudge): def judge(self, prompts, completions, shuffle_order=True): num_completions = [len(completions[i]) for i in range(len(prompts))] return [random.sample(range(n), n) for n in num_completions] class RandomPairwiseJudge(BasePairwiseJudge): def judge(self, prompts, completions, shuffle_order=True): return [random.randint(0, len(completion) - 1) for completion in completions] class PairRMJudge(BasePairwiseJudge): def __init__(self): if not is_llmblender_available(): raise ValueError("llm-blender is not installed. 
Please install it with 'pip install llm-blender'.") self.blender = llm_blender.Blender() self.blender.loadranker('llm-blender/PairRM', device=Accelerator().device) def judge(self, prompts: List[str], completions: List[List[str]], shuffle_order: bool=True) -> List[int]: if shuffle_order: flip_mask = np.random.choice([True, False], size=len(prompts)) completions = [pair[::-1] if flip else pair for (flip, pair) in zip(flip_mask, completions)] ranks = self.blender.rank(prompts, completions) ranks -= 1 if shuffle_order: ranks[flip_mask] = ranks[flip_mask][:, ::-1] return ranks[:, 0].tolist() class HfPairwiseJudge(BasePairwiseJudge): def __init__(self, model='meta-llama/Meta-Llama-3-70B-Instruct', token: Optional[str]=None, system_prompt: Optional[str]=None): self.client = InferenceClient(model=model, token=token) self.system_prompt = system_prompt or DEFAULT_PAIRWISE_SYSTEM_PROMPT def judge(self, prompts: List[str], completions: List[List[str]], shuffle_order: bool=True) -> List[int]: if shuffle_order: flip_mask = np.random.choice([True, False], size=len(prompts)) completions = [pair[::-1] if flip else pair for (flip, pair) in zip(flip_mask, completions)] def get_rank(prompt, candidates): content = self.system_prompt.format(prompt=prompt, response0=candidates[0], response1=candidates[1]) completion = self.client.chat_completion(messages=[{'role': 'user', 'content': content}], max_tokens=1) response = completion.choices[0].message.content if response in ['0', '1']: return int(response) else: logging.debug(f"Invalid response from the judge model: '{response}'. Returning -1.") return -1 with concurrent.futures.ThreadPoolExecutor() as executor: ranks = list(executor.map(get_rank, prompts, completions)) if shuffle_order: ranks = [ranks[i] if not flip else 1 - ranks[i] for (i, flip) in enumerate(flip_mask)] return ranks class OpenAIPairwiseJudge(BasePairwiseJudge): def __init__(self, model='gpt-4-turbo-preview', system_prompt: Optional[str]=None, max_requests: Union[int, None]=1000): if not is_openai_available(): raise ValueError("OpenAI client is not installed. Please install it with 'pip install openai'.") self.client = OpenAI() self.model = model self.system_prompt = system_prompt or DEFAULT_PAIRWISE_SYSTEM_PROMPT self.max_requests = max_requests self.num_requests = 0 self._warned = False def judge(self, prompts: List[str], completions: List[List[str]], shuffle_order: bool=True) -> List[int]: if self.max_requests is not None and self.num_requests >= self.max_requests: if not self._warned: logging.warning(f'Reached the maximum number of requests ({self.max_requests}). From now on, returning -1 instead. To increase the limit, set `max_requests` to a higher value, or to `None` for no limit.') self._warned = True return [-1] * len(prompts) if shuffle_order: flip_mask = np.random.choice([True, False], size=len(prompts)) completions = [pair[::-1] if flip else pair for (flip, pair) in zip(flip_mask, completions)] def get_rank(prompt, candidates): content = self.system_prompt.format(prompt=prompt, response0=candidates[0], response1=candidates[1]) messages = [{'role': 'user', 'content': content}] completion = self.client.chat.completions.create(model=self.model, messages=messages, max_tokens=1) response = completion.choices[0].message.content if response in ['0', '1']: return int(response) else: logging.debug(f"Invalid response from the judge model: '{response}'. 
Returning -1.") return -1 with concurrent.futures.ThreadPoolExecutor() as executor: ranks = list(executor.map(get_rank, prompts, completions)) if shuffle_order: ranks = [ranks[i] if not flip else 1 - ranks[i] for (i, flip) in enumerate(flip_mask)] self.num_requests += len(prompts) return ranks # File: trl-main/trl/trainer/kto_config.py from dataclasses import dataclass from typing import Any, Dict, Literal, Optional from transformers import TrainingArguments @dataclass class KTOConfig(TrainingArguments): max_length: Optional[int] = None max_prompt_length: Optional[int] = None max_completion_length: Optional[int] = None beta: float = 0.1 loss_type: Literal['kto', 'apo_zero_unpaired'] = 'kto' desirable_weight: float = 1.0 undesirable_weight: float = 1.0 label_pad_token_id: int = -100 padding_value: Optional[int] = None truncation_mode: str = 'keep_end' generate_during_eval: bool = False is_encoder_decoder: Optional[bool] = None precompute_ref_log_probs: bool = False model_init_kwargs: Optional[Dict[str, Any]] = None ref_model_init_kwargs: Optional[Dict[str, Any]] = None dataset_num_proc: Optional[int] = None # File: trl-main/trl/trainer/kto_trainer.py import inspect import random import warnings from collections import defaultdict from contextlib import contextmanager, nullcontext from copy import deepcopy from functools import wraps from operator import itemgetter from typing import TYPE_CHECKING, Any, Callable, Dict, List, Literal, Optional, Tuple, Union import numpy as np import torch import torch.amp as amp import torch.nn as nn import torch.nn.functional as F from accelerate import PartialState from accelerate.utils import is_deepspeed_available, tqdm from datasets import Dataset, concatenate_datasets from torch.utils.data import DataLoader, SequentialSampler from transformers import AutoModelForCausalLM, DataCollator, PreTrainedModel, PreTrainedTokenizerBase, Trainer, TrainingArguments from transformers.trainer_callback import TrainerCallback from transformers.trainer_utils import EvalLoopOutput, has_length from ..import_utils import is_peft_available, is_wandb_available from ..models import PreTrainedModelWrapper, create_reference_model from .kto_config import KTOConfig from .utils import DPODataCollatorWithPadding, disable_dropout_in_model, pad_to_length, peft_module_casting_to_bf16, trl_sanitze_kwargs_for_tagging if is_peft_available(): from peft import PeftModel, get_peft_model, prepare_model_for_kbit_training if is_wandb_available(): import wandb if is_deepspeed_available(): import deepspeed if TYPE_CHECKING: from transformers import PreTrainedModel, PreTrainedTokenizer RUNNING_NAME = 'running.pt' def _get_kl_dataset(batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]: batch['answer_input_ids'] = [batch['answer_input_ids'][-1]] + batch['answer_input_ids'][:-1] batch['answer_attention_mask'] = [batch['answer_attention_mask'][-1]] + batch['answer_attention_mask'][:-1] return batch def _tokenize(batch: Dict[str, List[Any]], tokenizer: 'PreTrainedTokenizer') -> Dict[str, List[Any]]: prompt_tokenized = tokenizer(batch['prompt'], add_special_tokens=False) prompt_input_ids = prompt_tokenized['input_ids'] prompt_attention_mask = prompt_tokenized['attention_mask'] prompt_and_completion = [prompt + completion for (prompt, completion) in zip(batch['prompt'], batch['completion'])] full_tokenized = tokenizer(prompt_and_completion, add_special_tokens=False) full_input_ids = full_tokenized['input_ids'] full_attention_mask = full_tokenized['attention_mask'] answer_input_ids = [f[len(p):] for (f, 
p) in zip(full_input_ids, prompt_input_ids)] answer_attention_mask = [f[len(p):] for (f, p) in zip(full_attention_mask, prompt_attention_mask)] full_concat_input_ids = [np.concatenate([p, a]) for (p, a) in zip(prompt_input_ids, answer_input_ids)] full_input_ids = [np.array(f) for f in full_input_ids] for (full, concat) in zip(full_input_ids, full_concat_input_ids): if len(full) != len(concat): raise ValueError('Prompt input ids and answer input ids should have the same length.') response_token_ids_start_idx = [len(p) for p in prompt_input_ids] for (idx, (p, f, r)) in enumerate(zip(prompt_input_ids, full_input_ids, response_token_ids_start_idx)): if not np.array_equal(p, f[:r]): response_token_ids_start_idx[idx] -= 1 prompt_input_ids = [f[:r] for (f, r) in zip(full_input_ids, response_token_ids_start_idx)] prompt_attention_mask = [f[:r] for (f, r) in zip(full_attention_mask, response_token_ids_start_idx)] for (p, m) in zip(prompt_input_ids, prompt_attention_mask): if len(p) != len(m): raise ValueError('Prompt input ids and attention mask should have the same length.') answer_input_ids = [f[r:] for (f, r) in zip(full_input_ids, response_token_ids_start_idx)] answer_attention_mask = [f[r:] for (f, r) in zip(full_attention_mask, response_token_ids_start_idx)] output = dict(prompt_input_ids=prompt_input_ids, prompt_attention_mask=prompt_attention_mask, answer_input_ids=answer_input_ids, answer_attention_mask=answer_attention_mask) return output def _process_tokens(example: Dict[str, Any], model: 'PreTrainedModel'=None, **kwargs) -> Dict: prompt = example['prompt'] completion = example['completion'] batch = {f"{kwargs['prefix']}prompt": prompt, f"{kwargs['prefix']}completion": completion, f"{kwargs['prefix']}label": example['label']} if not kwargs['is_encoder_decoder']: if not isinstance(prompt, str): raise ValueError(f'prompt should be an str but got {type(prompt)}') if not isinstance(completion, str): raise ValueError(f'completion should be an str but got {type(completion)}') all_tokens = {'prompt_input_ids': example['prompt_input_ids'], 'prompt_attention_mask': example['prompt_attention_mask'], 'answer_input_ids': example['answer_input_ids'], 'answer_attention_mask': example['answer_attention_mask']} max_length = kwargs['max_length'] bos_token_id = kwargs['tokenizer'].bos_token_id eos_token_id = kwargs['tokenizer'].eos_token_id if bos_token_id != all_tokens['prompt_input_ids'][0]: max_length -= 1 if eos_token_id != all_tokens['answer_input_ids'][-1]: max_length -= 1 if len(all_tokens['prompt_input_ids']) + len(all_tokens['answer_input_ids']) > max_length: for k in ['prompt_input_ids', 'prompt_attention_mask']: if kwargs['truncation_mode'] == 'keep_start': all_tokens[k] = all_tokens[k][:kwargs['max_prompt_length']] elif kwargs['truncation_mode'] == 'keep_end': all_tokens[k] = all_tokens[k][-kwargs['max_prompt_length']:] else: raise ValueError(f"Unknown truncation mode: {kwargs['truncation_mode']}") if len(all_tokens['prompt_input_ids']) + len(all_tokens['answer_input_ids']) > max_length: for k in ['answer_input_ids', 'answer_attention_mask']: all_tokens[k] = all_tokens[k][:max_length - kwargs['max_prompt_length']] batch[f"{kwargs['prefix']}prompt_input_ids"] = all_tokens['prompt_input_ids'] batch[f"{kwargs['prefix']}prompt_attention_mask"] = all_tokens['prompt_attention_mask'] batch[f"{kwargs['prefix']}completion_input_ids"] = all_tokens['prompt_input_ids'] + all_tokens['answer_input_ids'] batch[f"{kwargs['prefix']}completion_attention_mask"] = all_tokens['prompt_attention_mask'] + 
all_tokens['answer_attention_mask'] if len(all_tokens['prompt_input_ids']) == 0 or bos_token_id != all_tokens['prompt_input_ids'][0]: batch[f"{kwargs['prefix']}prompt_input_ids"] = [bos_token_id] + batch[f"{kwargs['prefix']}prompt_input_ids"] batch[f"{kwargs['prefix']}prompt_attention_mask"] = [1] + batch[f"{kwargs['prefix']}prompt_attention_mask"] batch[f"{kwargs['prefix']}completion_input_ids"] = [bos_token_id] + batch[f"{kwargs['prefix']}completion_input_ids"] batch[f"{kwargs['prefix']}completion_attention_mask"] = [1] + batch[f"{kwargs['prefix']}completion_attention_mask"] if len(all_tokens['answer_input_ids']) == 0 or eos_token_id != all_tokens['answer_input_ids'][-1]: batch[f"{kwargs['prefix']}completion_input_ids"] = batch[f"{kwargs['prefix']}completion_input_ids"] + [eos_token_id] batch[f"{kwargs['prefix']}completion_attention_mask"] = batch[f"{kwargs['prefix']}completion_attention_mask"] + [1] batch[f"{kwargs['prefix']}completion_labels"] = batch[f"{kwargs['prefix']}completion_input_ids"][:] batch[f"{kwargs['prefix']}completion_labels"][:len(batch[f"{kwargs['prefix']}prompt_input_ids"])] = [kwargs['label_pad_token_id']] * len(batch[f"{kwargs['prefix']}prompt_input_ids"]) else: completion_tokens = kwargs['tokenizer'](completion, truncation=True, max_length=kwargs['max_completion_length'], add_special_tokens=True) prompt_tokens = kwargs['tokenizer'](prompt, truncation=True, max_length=kwargs['max_prompt_length'], add_special_tokens=True) batch[f"{kwargs['prefix']}prompt_input_ids"] = prompt_tokens['input_ids'] batch[f"{kwargs['prefix']}prompt_attention_mask"] = prompt_tokens['attention_mask'] batch[f"{kwargs['prefix']}completion_labels"] = completion_tokens['input_ids'] batch[f"{kwargs['prefix']}completion_attention_mask"] = completion_tokens['attention_mask'] if model is not None and hasattr(model, 'prepare_decoder_input_ids_from_labels'): batch[f"{kwargs['prefix']}completion_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(labels=torch.tensor(batch['completion_labels'])) return batch class KTOTrainer(Trainer): _tag_names = ['trl', 'kto'] def __init__(self, model: Union[PreTrainedModel, nn.Module, str]=None, ref_model: Optional[Union[PreTrainedModel, nn.Module, str]]=None, args: KTOConfig=None, train_dataset: Optional[Dataset]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]]=None, tokenizer: Optional[PreTrainedTokenizerBase]=None, data_collator: Optional[DataCollator]=None, model_init: Optional[Callable[[], PreTrainedModel]]=None, callbacks: Optional[List[TrainerCallback]]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None, peft_config: Optional[Dict]=None, compute_metrics: Optional[Callable[[EvalLoopOutput], Dict]]=None, model_adapter_name: Optional[str]=None, ref_adapter_name: Optional[str]=None): if type(args) is TrainingArguments: raise ValueError('Please use `KTOConfig` instead TrainingArguments.') if args.model_init_kwargs is None: model_init_kwargs = {} elif not isinstance(model, str): raise ValueError('You passed model_kwargs to the KTOTrainer. 
But your model is already instantiated.') else: model_init_kwargs = args.model_init_kwargs torch_dtype = model_init_kwargs.get('torch_dtype') if torch_dtype is not None: if isinstance(torch_dtype, str) and torch_dtype != 'auto': torch_dtype = getattr(torch, torch_dtype) if torch_dtype != 'auto' and (not isinstance(torch_dtype, torch.dtype)): raise ValueError(f"Invalid `torch_dtype` passed to the KTOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}.") model_init_kwargs['torch_dtype'] = torch_dtype if args.ref_model_init_kwargs is None: ref_model_init_kwargs = {} elif not isinstance(ref_model, str): raise ValueError('You passed ref_model_kwargs to the KTOTrainer. But your ref_model is already instantiated.') else: ref_model_init_kwargs = args.ref_model_init_kwargs torch_dtype = ref_model_init_kwargs.get('torch_dtype') if torch_dtype is not None: if isinstance(torch_dtype, str) and torch_dtype != 'auto': torch_dtype = getattr(torch, torch_dtype) if torch_dtype != 'auto' and (not isinstance(torch_dtype, torch.dtype)): raise ValueError(f"Invalid `torch_dtype` passed to the KTOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}.") ref_model_init_kwargs['torch_dtype'] = torch_dtype if isinstance(model, str): warnings.warn('You passed a model_id to the KTOTrainer. This will automatically create an `AutoModelForCausalLM` or a `PeftModel` (if you passed a `peft_config`) for you.') model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs) if isinstance(ref_model, str): warnings.warn('You passed a ref model_id to the KTOTrainer. This will automatically create an `AutoModelForCausalLM`') ref_model = AutoModelForCausalLM.from_pretrained(ref_model, **ref_model_init_kwargs) self._peft_has_been_casted_to_bf16 = False if not is_peft_available() and peft_config is not None: raise ValueError("PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it with `pip install peft` to use the PEFT models") elif is_peft_available() and peft_config is not None: if isinstance(model, PeftModel): model = model.merge_and_unload() if getattr(model, 'is_loaded_in_8bit', False) or getattr(model, 'is_loaded_in_4bit', False): _support_gc_kwargs = hasattr(args, 'gradient_checkpointing_kwargs') and 'gradient_checkpointing_kwargs' in list(inspect.signature(prepare_model_for_kbit_training).parameters) prepare_model_kwargs = {'use_gradient_checkpointing': args.gradient_checkpointing} if _support_gc_kwargs: prepare_model_kwargs['gradient_checkpointing_kwargs'] = args.gradient_checkpointing_kwargs model = prepare_model_for_kbit_training(model, **prepare_model_kwargs) elif getattr(args, 'gradient_checkpointing', False): if hasattr(model, 'enable_input_require_grads'): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) model = get_peft_model(model, peft_config) if args.bf16 and getattr(model, 'is_loaded_in_4bit', False): peft_module_casting_to_bf16(model) self._peft_has_been_casted_to_bf16 = True elif getattr(args, 'gradient_checkpointing', False): if hasattr(model, 'enable_input_require_grads'): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) if args.generate_during_eval and (not is_wandb_available()): raise 
ValueError('`generate_during_eval=True` requires Weights and Biases to be installed. Please install with `pip install wandb` to resolve.') if model is not None: self.is_encoder_decoder = model.config.is_encoder_decoder elif args.is_encoder_decoder is None: raise ValueError('When no model is provided, you need to pass the parameter is_encoder_decoder.') else: self.is_encoder_decoder = args.is_encoder_decoder self.is_peft_model = is_peft_available() and isinstance(model, PeftModel) self.model_adapter_name = model_adapter_name self.ref_adapter_name = ref_adapter_name if ref_model: self.ref_model = ref_model elif self.is_peft_model or args.precompute_ref_log_probs: self.ref_model = None else: self.ref_model = create_reference_model(model) if tokenizer is None: raise ValueError('max_length or a tokenizer must be specified when using the default DPODataCollatorWithPadding') if args.max_length is None: warnings.warn("When using DPODataCollatorWithPadding, you should set `max_length` in the KTOTrainer's init it will be set to `512` by default, but you should do it yourself in the future.", UserWarning) max_length = 512 if args.max_length is not None: max_length = args.max_length if args.max_prompt_length is None: warnings.warn("When using DPODataCollatorWithPadding, you should set `max_prompt_length` in the KTOTrainer's init it will be set to `128` by default, but you should do it yourself in the future.", UserWarning) max_prompt_length = 128 if args.max_prompt_length is not None: max_prompt_length = args.max_prompt_length max_completion_length = None if args.max_completion_length is None and self.is_encoder_decoder: warnings.warn("When using DPODataCollatorWithPadding with an encoder decoder architecture, you should set `max_completion_length` in the KTOTrainer's init it will be set to `128` by default, but you should do it yourself in the future.", UserWarning) max_completion_length = 128 if args.max_completion_length is not None and self.is_encoder_decoder: max_completion_length = args.max_completion_length if data_collator is None: data_collator = DPODataCollatorWithPadding(pad_token_id=tokenizer.pad_token_id, label_pad_token_id=args.label_pad_token_id, is_encoder_decoder=self.is_encoder_decoder) if args.remove_unused_columns: args.remove_unused_columns = False warnings.warn('When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your KTOConfig we have set it for you, but you should do it yourself in the future.', UserWarning) self.use_dpo_data_collator = True else: self.use_dpo_data_collator = False disable_dropout_in_model(model) if self.ref_model is not None: disable_dropout_in_model(self.ref_model) self.loss_type = args.loss_type self.max_length = max_length self.generate_during_eval = args.generate_during_eval self.label_pad_token_id = args.label_pad_token_id self.padding_value = args.padding_value if args.padding_value is not None else tokenizer.pad_token_id self.max_prompt_length = max_prompt_length self.truncation_mode = args.truncation_mode self.max_completion_length = max_completion_length self.tokenizer = tokenizer self.precompute_ref_log_probs = args.precompute_ref_log_probs self.calculate_KL = True if self.loss_type in ['apo_zero_unpaired']: self.calculate_KL = False self._precomputed_train_ref_log_probs = False self._precomputed_eval_ref_log_probs = False self._stored_metrics = defaultdict(lambda : defaultdict(list)) self.beta = args.beta self.desirable_weight = args.desirable_weight self.undesirable_weight = args.undesirable_weight 
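# --- Editor's note (not part of the original TRL source) ---------------------
# The initialization steps below pre-process the KTO datasets on the local main
# process first. When `calculate_KL` is enabled (the default 'kto' loss), a
# mismatched "KL dataset" is also built by rotating the completions within each
# batch, so every prompt is paired with an unrelated completion; the KL term of
# the loss is estimated from these mismatched pairs. A rough sketch of the
# pairing for a hypothetical batch of three examples:
#
#     prompts     = [p0, p1, p2]
#     completions = [c0, c1, c2]
#     kl_pairs    = [(p0, c2), (p1, c0), (p2, c1)]   # completions rotated by one
#
# Finally, the ratio of desirable to undesirable labels is compared against
# `desirable_weight` / `undesirable_weight`, and a warning is emitted when the
# weights fall outside the recommended range.
# -----------------------------------------------------------------------------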
self.aux_loss_enabled = getattr(model.config, 'output_router_logits', False) with PartialState().local_main_process_first(): train_dataset = train_dataset.shuffle(seed=args.data_seed) if eval_dataset is not None: eval_dataset = eval_dataset.shuffle(seed=args.data_seed) train_dataset = train_dataset.map(_tokenize, batched=True, fn_kwargs={'tokenizer': self.tokenizer}, num_proc=args.dataset_num_proc, desc='Tokenizing train dataset') fn_kwargs = {'prefix': '', 'is_encoder_decoder': self.is_encoder_decoder, 'tokenizer': self.tokenizer, 'max_length': self.max_length, 'truncation_mode': self.truncation_mode, 'label_pad_token_id': self.label_pad_token_id, 'max_prompt_length': self.max_prompt_length, 'max_completion_length': self.max_completion_length} train_dataset = train_dataset.map(_process_tokens, fn_kwargs=fn_kwargs, num_proc=args.dataset_num_proc, desc='Processing tokenized train dataset') if eval_dataset is not None: eval_dataset = eval_dataset.map(_tokenize, fn_kwargs={'tokenizer': self.tokenizer}, batched=True, num_proc=args.dataset_num_proc, desc='Tokenizing eval dataset') eval_dataset = eval_dataset.map(_process_tokens, fn_kwargs=fn_kwargs, num_proc=args.dataset_num_proc, desc='Processing tokenized eval dataset') if self.calculate_KL: total_batch_size = max(torch.cuda.device_count(), 1) * args.per_device_train_batch_size * args.gradient_accumulation_steps if total_batch_size <= 1: raise ValueError('Batch size is 1 (too small). KTO will not work properly because the KL term will be equivalent to the implied reward.') train_kl_dataset = train_dataset.map(_get_kl_dataset, batched=True, batch_size=total_batch_size, num_proc=args.dataset_num_proc, desc='Extracting KL train dataset') fn_kwargs['prefix'] = 'KL_' train_kl_dataset = train_kl_dataset.map(_process_tokens, fn_kwargs=fn_kwargs, num_proc=args.dataset_num_proc, remove_columns=[c for c in train_kl_dataset.column_names if c in train_dataset.column_names], desc='Processing tokenized train KL dataset') train_dataset = concatenate_datasets([train_dataset, train_kl_dataset], axis=1) if eval_dataset is not None: eval_kl_dataset = eval_dataset.map(_get_kl_dataset, batched=True, batch_size=total_batch_size, num_proc=args.dataset_num_proc, desc='Extracting eval KL dataset') eval_kl_dataset = eval_kl_dataset.map(_process_tokens, fn_kwargs=fn_kwargs, num_proc=args.dataset_num_proc, remove_columns=[c for c in eval_kl_dataset.column_names if c in eval_dataset.column_names], desc='Processing tokenized eval KL dataset') eval_dataset = concatenate_datasets([eval_dataset, eval_kl_dataset], axis=1) num_desirable = max(sum(train_dataset['label']), 1) num_undesirable = max(len(train_dataset['label']) - num_desirable, 1) if num_desirable != num_undesirable: des_weight_lower_bound = round(num_undesirable * self.undesirable_weight / num_desirable * 1, 2) des_weight_upper_bound = round(num_undesirable * self.undesirable_weight / num_desirable * 1.33, 2) und_weight_lower_bound = round(num_desirable * self.desirable_weight / num_undesirable / 1.33, 2) und_weight_upper_bound = round(num_desirable * self.desirable_weight / num_undesirable / 1, 2) des_weight_in_range = des_weight_lower_bound <= self.desirable_weight <= des_weight_upper_bound und_weight_in_range = und_weight_lower_bound <= self.undesirable_weight <= und_weight_upper_bound if not (des_weight_in_range or und_weight_in_range): warnings.warn(f"\n You have different amounts of desirable/positive and undesirable/negative examples but the\n weights on the desirable and undesirable losses don't seem to be 
in an ideal range. Based\n on your data, we recommend EITHER desirable_weight in [{des_weight_lower_bound}, {des_weight_upper_bound}]\n or undesirable_weight in [{und_weight_lower_bound}, {und_weight_upper_bound}] (but NOT BOTH).\n See the documentation on how to optimally set these weights.", UserWarning) super().__init__(model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, model_init=model_init, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics) if hasattr(self.model, 'add_model_tags'): self.model.add_model_tags(self._tag_names) if not hasattr(self, 'accelerator'): raise AttributeError('Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`.') if self.is_deepspeed_enabled: if self.accelerator.state.deepspeed_plugin.zero_stage == 3 and self.precompute_ref_log_probs: raise ValueError('You cannot use `precompute_ref_log_probs=True` with Deepspeed ZeRO-3. Please set `precompute_ref_log_probs=False`.') if self.ref_model is None: if not (self.is_peft_model or self.precompute_ref_log_probs): raise ValueError('No reference model and model is not a Peft model. Try setting `precompute_ref_log_probs=True`') elif self.is_deepspeed_enabled: self.ref_model = self._prepare_deepspeed(self.ref_model) else: self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True) def _prepare_deepspeed(self, model: PreTrainedModelWrapper): deepspeed_plugin = self.accelerator.state.deepspeed_plugin config_kwargs = deepcopy(deepspeed_plugin.deepspeed_config) if model is not None: if hasattr(model, 'config'): hidden_size = max(model.config.hidden_sizes) if getattr(model.config, 'hidden_sizes', None) else getattr(model.config, 'hidden_size', None) if hidden_size is not None and config_kwargs['zero_optimization']['stage'] == 3: config_kwargs.update({'zero_optimization.reduce_bucket_size': hidden_size * hidden_size, 'zero_optimization.stage3_param_persistence_threshold': 10 * hidden_size, 'zero_optimization.stage3_prefetch_bucket_size': 0.9 * hidden_size * hidden_size}) if config_kwargs['zero_optimization']['stage'] != 3: config_kwargs['zero_optimization']['stage'] = 0 (model, *_) = deepspeed.initialize(model=model, config=config_kwargs) model.eval() return model @contextmanager def null_ref_context(self): with self.accelerator.unwrap_model(self.model).disable_adapter() if self.is_peft_model and (not self.ref_adapter_name) else nullcontext(): if self.ref_adapter_name: self.model.set_adapter(self.ref_adapter_name) yield if self.ref_adapter_name: self.model.set_adapter(self.model_adapter_name or 'default') def get_train_dataloader(self) -> DataLoader: if self.precompute_ref_log_probs and (not self._precomputed_train_ref_log_probs): dataloader_params = {'batch_size': self.args.per_device_train_batch_size, 'collate_fn': self.data_collator, 'num_workers': self.args.dataloader_num_workers, 'pin_memory': self.args.dataloader_pin_memory, 'shuffle': False} data_loader = self.accelerator.prepare(DataLoader(self.train_dataset, **dataloader_params)) reference_completion_logps = [] reference_KL_logps = [] for padded_batch in tqdm(iterable=data_loader, desc='Train dataset reference log probs'): (reference_completion_logp, reference_KL_logp) = self.compute_reference_log_probs(padded_batch) reference_completion_logp = self.accelerator.gather_for_metrics(reference_completion_logp) 
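# --- Editor's note (not part of the original TRL source) ---------------------
# When `precompute_ref_log_probs=True`, this loop runs the frozen reference
# model once over the whole training set, gathers the per-example completion
# (and KL) log-probs across processes, and caches them as extra dataset columns
# ('reference_logps' and 'reference_KL_logps'). Later training steps then read
# these columns instead of keeping the reference model in memory.
# -----------------------------------------------------------------------------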
reference_completion_logps.append(reference_completion_logp.cpu()) if self.calculate_KL: reference_KL_logp = self.accelerator.gather_for_metrics(reference_KL_logp) reference_KL_logps.append(reference_KL_logp.cpu()) self.train_dataset = self.train_dataset.add_column(name='reference_logps', column=torch.cat(reference_completion_logps).float().numpy()) if self.calculate_KL: self.train_dataset = self.train_dataset.add_column(name='reference_KL_logps', column=torch.cat(reference_KL_logps).float().numpy()) self._precomputed_train_ref_log_probs = True return super().get_train_dataloader() def get_eval_dataloader(self, eval_dataset: Optional[Dataset]=None) -> DataLoader: if eval_dataset is None and self.eval_dataset is None: raise ValueError('Trainer: evaluation requires an eval_dataset.') eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset if self.precompute_ref_log_probs and (not self._precomputed_eval_ref_log_probs): dataloader_params = {'batch_size': self.args.per_device_eval_batch_size, 'collate_fn': self.data_collator, 'num_workers': self.args.dataloader_num_workers, 'pin_memory': self.args.dataloader_pin_memory, 'shuffle': False} data_loader = self.accelerator.prepare(DataLoader(eval_dataset, **dataloader_params)) reference_completion_logps = [] reference_KL_logps = [] for padded_batch in tqdm(iterable=data_loader, desc='Eval dataset reference log probs'): (reference_completion_logp, reference_KL_logp) = self.compute_reference_log_probs(padded_batch) reference_completion_logp = self.accelerator.gather_for_metrics(reference_completion_logp) reference_completion_logps.append(reference_completion_logp.cpu()) if self.calculate_KL: reference_KL_logp = self.accelerator.gather_for_metrics(reference_KL_logp) reference_KL_logps.append(reference_KL_logp.cpu()) eval_dataset = eval_dataset.add_column(name='reference_logps', column=torch.cat(reference_completion_logps).float().numpy()) if self.calculate_KL: eval_dataset = eval_dataset.add_column(name='reference_KL_logps', column=torch.cat(reference_KL_logps).float().numpy()) if self.eval_dataset is not None: self.eval_dataset = eval_dataset self._precomputed_eval_ref_log_probs = True return super().get_eval_dataloader(eval_dataset=eval_dataset) def compute_reference_log_probs(self, padded_batch: Dict) -> Dict: with torch.no_grad(): if self.ref_model is None: with self.null_ref_context(): if self.is_encoder_decoder: completion_logits = self.model(padded_batch['prompt_input_ids'], attention_mask=padded_batch['prompt_attention_mask'], decoder_input_ids=padded_batch.get('completion_decoder_input_ids'), labels=padded_batch['completion_labels']).logits if self.calculate_KL: KL_logits = self.model(padded_batch['KL_prompt_input_ids'], attention_mask=padded_batch['KL_prompt_attention_mask'], decoder_input_ids=padded_batch.get('KL_completion_decoder_input_ids'), labels=padded_batch['KL_completion_labels']).logits else: completion_logits = self.model(padded_batch['completion_input_ids'], attention_mask=padded_batch['completion_attention_mask']).logits if self.calculate_KL: KL_logits = self.model(padded_batch['KL_completion_input_ids'], attention_mask=padded_batch['KL_completion_attention_mask']).logits elif self.is_encoder_decoder: completion_logits = self.ref_model(padded_batch['prompt_input_ids'], attention_mask=padded_batch['prompt_attention_mask'], decoder_input_ids=padded_batch.get('completion_decoder_input_ids'), labels=padded_batch['completion_labels']).logits if self.calculate_KL: KL_logits = 
self.ref_model(padded_batch['KL_prompt_input_ids'], attention_mask=padded_batch['KL_prompt_attention_mask'], decoder_input_ids=padded_batch.get('KL_completion_decoder_input_ids'), labels=padded_batch['KL_completion_labels']).logits else: completion_logits = self.ref_model(padded_batch['completion_input_ids'], attention_mask=padded_batch['completion_attention_mask']).logits if self.calculate_KL: KL_logits = self.ref_model(padded_batch['KL_completion_input_ids'], attention_mask=padded_batch['KL_completion_attention_mask']).logits completion_logps = self.get_batch_logps(completion_logits, padded_batch['completion_labels'], average_log_prob=False, is_encoder_decoder=self.is_encoder_decoder, label_pad_token_id=self.label_pad_token_id) if self.calculate_KL: KL_logps = self.get_batch_logps(KL_logits, padded_batch['KL_completion_labels'], average_log_prob=False, is_encoder_decoder=self.is_encoder_decoder, label_pad_token_id=self.label_pad_token_id) else: KL_logps = None return (completion_logps, KL_logps) @staticmethod def get_batch_logps(logits: torch.FloatTensor, labels: torch.LongTensor, average_log_prob: bool=False, label_pad_token_id: int=-100, is_encoder_decoder: bool=False) -> torch.FloatTensor: if logits.shape[:-1] != labels.shape: raise ValueError('Logits (batch and sequence length dim) and labels must have the same shape.') if not is_encoder_decoder: labels = labels[:, 1:].clone() logits = logits[:, :-1, :] else: labels = labels.clone() loss_mask = labels != label_pad_token_id labels[labels == label_pad_token_id] = 0 per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2) if average_log_prob: return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1) else: return (per_token_logps * loss_mask).sum(-1) def forward(self, model: nn.Module, batch: Dict[str, Union[List, torch.LongTensor]]) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: if self.calculate_KL: KL_logps = None KL_model_kwargs = {'input_ids': batch['KL_prompt_input_ids'], 'attention_mask': batch['KL_prompt_attention_mask'], 'labels': batch['KL_completion_labels'], 'decoder_input_ids': batch.get('KL_completion_decoder_input_ids')} if self.is_encoder_decoder else {'input_ids': batch['KL_completion_input_ids'], 'attention_mask': batch['KL_completion_attention_mask']} with torch.no_grad(): KL_logits = model(**KL_model_kwargs).logits KL_logps = self.get_batch_logps(KL_logits, batch['KL_completion_labels'], average_log_prob=False, is_encoder_decoder=self.is_encoder_decoder, label_pad_token_id=self.label_pad_token_id) else: KL_logps = None model_kwargs = {'labels': batch['completion_labels'], 'decoder_input_ids': batch.get('completion_decoder_input_ids')} if self.is_encoder_decoder else {} if self.aux_loss_enabled: model_kwargs['output_router_logits'] = True outputs = model(batch['completion_input_ids'], attention_mask=batch['completion_attention_mask'], **model_kwargs) completion_logits = outputs.logits completion_logps = self.get_batch_logps(completion_logits, batch['completion_labels'], average_log_prob=False, is_encoder_decoder=self.is_encoder_decoder, label_pad_token_id=self.label_pad_token_id) if completion_logps.shape[0] != len(batch['label']): raise ValueError('There is a mismatch between the number of examples in this batch and the number of examples for which an output sequence was predicted.') chosen_idx = [i for i in range(completion_logps.shape[0]) if batch['label'][i] is True] rejected_idx = [i for i in range(completion_logps.shape[0]) 
if batch['label'][i] is False] chosen_logps = completion_logps[chosen_idx, ...] rejected_logps = completion_logps[rejected_idx, ...] chosen_logits = completion_logits[chosen_idx, ...] rejected_logits = completion_logits[rejected_idx, ...] if self.aux_loss_enabled: return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, KL_logps, outputs.aux_loss) else: return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, KL_logps) def kto_loss(self, policy_chosen_logps: torch.FloatTensor, policy_rejected_logps: torch.FloatTensor, policy_KL_logps: torch.FloatTensor, reference_chosen_logps: torch.FloatTensor, reference_rejected_logps: torch.FloatTensor, reference_KL_logps: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: if self.calculate_KL: kl = (policy_KL_logps - reference_KL_logps).mean().detach() kl = self.accelerator.gather(kl).mean().clamp(min=0) else: kl = torch.zeros(1).to(policy_chosen_logps.device) if policy_chosen_logps.shape[0] != 0 or reference_chosen_logps.shape[0] != 0: chosen_logratios = policy_chosen_logps - reference_chosen_logps if self.loss_type == 'kto': chosen_losses = 1 - F.sigmoid(self.beta * (chosen_logratios - kl)) elif self.loss_type == 'apo_zero_unpaired': chosen_losses = 1 - F.sigmoid(self.beta * chosen_logratios) chosen_rewards = self.beta * chosen_logratios.detach() else: chosen_losses = torch.Tensor([]).to(self.accelerator.device) chosen_rewards = torch.Tensor([]).to(self.accelerator.device) if policy_rejected_logps.shape[0] != 0 or reference_rejected_logps.shape[0] != 0: rejected_logratios = policy_rejected_logps - reference_rejected_logps if self.loss_type == 'kto': rejected_losses = 1 - F.sigmoid(self.beta * (kl - rejected_logratios)) elif self.loss_type == 'apo_zero_unpaired': rejected_losses = F.sigmoid(self.beta * rejected_logratios) rejected_rewards = self.beta * rejected_logratios.detach() else: rejected_losses = torch.Tensor([]).to(self.accelerator.device) rejected_rewards = torch.Tensor([]).to(self.accelerator.device) losses = torch.cat((self.desirable_weight * chosen_losses, self.undesirable_weight * rejected_losses), 0) return (losses, chosen_rewards, rejected_rewards, kl) def get_batch_loss_metrics(self, model, batch: Dict[str, Union[List, torch.LongTensor]]): metrics = {} batch = {k: v.to(self.accelerator.device) if isinstance(v, torch.Tensor) else v for (k, v) in batch.items()} forward_output = self.forward(model, batch) (policy_chosen_logps, policy_rejected_logps, policy_chosen_logits, policy_rejected_logits, policy_KL_logps) = forward_output[:5] if self.aux_loss_enabled: aux_loss = forward_output[5] if 'reference_logps' in batch: chosen_idx = [i for i in range(batch['reference_logps'].shape[0]) if batch['label'][i] is True] rejected_idx = [i for i in range(batch['reference_logps'].shape[0]) if batch['label'][i] is False] reference_chosen_logps = batch['reference_logps'][chosen_idx, ...] reference_rejected_logps = batch['reference_logps'][rejected_idx, ...] 
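# --- Editor's note (not part of the original TRL source) ---------------------
# Reference log-probs come either from the precomputed 'reference_logps' column
# (the branch above) or, in the `else` branch below, from a fresh forward pass
# of the reference model (or of the policy with its PEFT adapters disabled via
# `null_ref_context`). They feed `kto_loss`, which for the default 'kto' loss
# type computes, per example,
#
#     chosen:   1 - sigmoid(beta * (log_ratio_chosen - kl))
#     rejected: 1 - sigmoid(beta * (kl - log_ratio_rejected))
#
# where `kl` is the batch-level KL estimate gathered across processes and
# clamped at zero, and the two terms are weighted by `desirable_weight` and
# `undesirable_weight` respectively.
# -----------------------------------------------------------------------------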
if self.calculate_KL: reference_KL_logps = batch['reference_KL_logps'] else: reference_KL_logps = None else: with torch.no_grad(): if self.ref_model is None: with self.null_ref_context(): (reference_chosen_logps, reference_rejected_logps, _, _, reference_KL_logps) = self.forward(self.model, batch)[:5] else: (reference_chosen_logps, reference_rejected_logps, _, _, reference_KL_logps) = self.forward(self.ref_model, batch)[:5] (losses, chosen_rewards, rejected_rewards, kl) = self.kto_loss(policy_chosen_logps, policy_rejected_logps, policy_KL_logps, reference_chosen_logps, reference_rejected_logps, reference_KL_logps) metrics['kl'] = kl.item() num_chosen = torch.Tensor([len(chosen_rewards)]).to(self.accelerator.device) num_rejected = torch.Tensor([len(rejected_rewards)]).to(self.accelerator.device) all_num_chosen = self.accelerator.gather(num_chosen).sum().item() all_num_rejected = self.accelerator.gather(num_rejected).sum().item() if all_num_chosen > 0: metrics['rewards/chosen_sum'] = self.accelerator.gather(chosen_rewards.nansum()).nansum().item() metrics['logps/chosen_sum'] = self.accelerator.gather(policy_chosen_logps.nansum()).nansum().item() metrics['logits/chosen'] = self.accelerator.gather(policy_chosen_logits.nansum()).nanmean().item() metrics['count/chosen'] = all_num_chosen if all_num_rejected > 0: metrics['rewards/rejected_sum'] = self.accelerator.gather(rejected_rewards.nansum()).nansum().item() metrics['logps/rejected_sum'] = self.accelerator.gather(policy_rejected_logps.nansum()).nansum().item() metrics['logits/rejected'] = self.accelerator.gather(policy_rejected_logits.nansum()).nanmean().item() metrics['count/rejected'] = all_num_rejected loss = losses.nanmean() if self.aux_loss_enabled: loss += getattr(model.config, 'router_aux_loss_coef', 0.0) * aux_loss return (loss, metrics) def compute_loss(self, model: Union[PreTrainedModel, nn.Module], inputs: Dict[str, Union[torch.Tensor, Any]], return_outputs=False) -> Union[torch.Tensor, Tuple[torch.Tensor, Dict[str, torch.Tensor]]]: if not self.use_dpo_data_collator: warnings.warn('compute_loss is only implemented for DPODataCollatorWithPadding, and you passed a datacollator that is different than DPODataCollatorWithPadding - you might see unexpected behavior. 
Alternatively, you can implement your own prediction_step method if you are using a custom data collator') compute_loss_context_manager = amp.autocast('cuda') if self._peft_has_been_casted_to_bf16 else nullcontext() with compute_loss_context_manager: (loss, metrics) = self.get_batch_loss_metrics(model, inputs) loss = loss.to(self.args.device) if self.accelerator.is_main_process: self.store_metrics(metrics, train_eval='train') if return_outputs: return (loss, metrics) return loss def store_metrics(self, metrics: Dict[str, float], train_eval: Literal['train', 'eval']='train') -> None: for (key, value) in metrics.items(): self._stored_metrics[train_eval][key].append(value) def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]: if self.train_dataset is None or not has_length(self.train_dataset): return None return SequentialSampler(self.train_dataset) def get_batch_samples(self, model, batch: Dict[str, torch.LongTensor]) -> Tuple[str, str]: generate_context_manager = amp.autocast('cuda') if self._peft_has_been_casted_to_bf16 else nullcontext() with generate_context_manager: policy_output = model.generate(input_ids=batch['prompt_input_ids'], attention_mask=batch['prompt_attention_mask'], max_length=self.max_length, do_sample=True, pad_token_id=self.tokenizer.pad_token_id) if 'reference_output' in batch: reference_output = batch['reference_output'] elif self.ref_model is None: with self.null_ref_context(): reference_output = self.model.generate(input_ids=batch['prompt_input_ids'], attention_mask=batch['prompt_attention_mask'], max_length=self.max_length, do_sample=True, pad_token_id=self.tokenizer.pad_token_id) else: reference_output = self.ref_model.generate(input_ids=batch['prompt_input_ids'], attention_mask=batch['prompt_attention_mask'], max_length=self.max_length, do_sample=True, pad_token_id=self.tokenizer.pad_token_id) policy_output = pad_to_length(policy_output, self.max_length, self.tokenizer.pad_token_id) policy_output_decoded = self.tokenizer.batch_decode(policy_output, skip_special_tokens=True) reference_output = pad_to_length(reference_output, self.max_length, self.tokenizer.pad_token_id) reference_output_decoded = self.tokenizer.batch_decode(reference_output, skip_special_tokens=True) return (policy_output_decoded, reference_output_decoded) def prediction_step(self, model: Union[PreTrainedModel, nn.Module], inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]]=None): if not self.use_dpo_data_collator: warnings.warn('prediction_step is only implemented for DPODataCollatorWithPadding, and you passed a datacollator that is different than DPODataCollatorWithPadding - you might see unexpected behavior. 
Alternatively, you can implement your own prediction_step method if you are using a custom data collator') if ignore_keys is None: if hasattr(model, 'config'): ignore_keys = getattr(model.config, 'keys_to_ignore_at_inference', []) else: ignore_keys = [] prediction_context_manager = amp.autocast('cuda') if self._peft_has_been_casted_to_bf16 else nullcontext() with torch.no_grad(), prediction_context_manager: (loss, metrics) = self.get_batch_loss_metrics(model, inputs) if self.accelerator.is_main_process: self.store_metrics(metrics, train_eval='eval') if prediction_loss_only: return (loss.detach(), None, None) logits_dict = {'eval_logits/chosen': metrics['logits/chosen'], 'eval_logits/rejected': metrics['logits/rejected']} logits = torch.tensor([v for (k, v) in logits_dict.items() if k not in ignore_keys], device=self.accelerator.device) labels = torch.zeros(logits.shape[0], device=self.accelerator.device) return (loss.detach(), logits, labels) def evaluation_loop(self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool]=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str='eval') -> EvalLoopOutput: if self.generate_during_eval: num_samples = len(dataloader.dataset) random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size) random_batch_dataset = dataloader.dataset.select(random_indices) random_batch = self.data_collator(random_batch_dataset) random_batch = self._prepare_inputs(random_batch) target_indicies = [i for i in range(len(random_batch['label'])) if random_batch['label'][i] is False] target_batch = {'prompt_input_ids': random_batch['prompt_input_ids'][target_indicies], 'prompt_attention_mask': random_batch['prompt_attention_mask'][target_indicies], 'prompt': itemgetter(*target_indicies)(random_batch['prompt'])} (policy_output_decoded, ref_output_decoded) = self.get_batch_samples(self.model, target_batch) self.log({'game_log': wandb.Table(columns=['Prompt', 'Policy', 'Ref Model'], rows=[[prompt, pol[len(prompt):], ref[len(prompt):]] for (prompt, pol, ref) in zip(target_batch['prompt'], policy_output_decoded, ref_output_decoded)])}) self.state.log_history.pop() initial_output = super().evaluation_loop(dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix) return initial_output def log(self, logs: Dict[str, float]) -> None: train_eval = 'train' if 'loss' in logs else 'eval' prefix = 'eval_' if train_eval == 'eval' else '' for split in ['chosen', 'rejected']: if f'count/{split}' in self._stored_metrics[train_eval]: count_sum = torch.Tensor(self._stored_metrics[train_eval][f'count/{split}']).sum().item() logs[f'{prefix}rewards/{split}'] = torch.Tensor(self._stored_metrics[train_eval][f'rewards/{split}_sum']).sum().item() / count_sum logs[f'{prefix}logps/{split}'] = torch.Tensor(self._stored_metrics[train_eval][f'logps/{split}_sum']).sum().item() / count_sum for key in [f'count/{split}', f'rewards/{split}_sum', f'logps/{split}_sum']: del self._stored_metrics[train_eval][key] if f'{prefix}rewards/chosen' in logs and f'{prefix}rewards/rejected' in logs: logs[f'{prefix}rewards/margins'] = logs[f'{prefix}rewards/chosen'] - logs[f'{prefix}rewards/rejected'] for (key, metrics) in self._stored_metrics[train_eval].items(): logs[f'{prefix}{key}'] = torch.Tensor(metrics).mean().item() del self._stored_metrics[train_eval] return super().log(logs) @wraps(Trainer.push_to_hub) def push_to_hub(self, commit_message: Optional[str]='End of training', blocking: bool=True, **kwargs) -> str: kwargs = 
trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs) return super().push_to_hub(commit_message=commit_message, blocking=blocking, **kwargs) # File: trl-main/trl/trainer/model_config.py from dataclasses import dataclass from typing import List, Literal, Optional @dataclass class ModelConfig: model_name_or_path: Optional[str] = None model_revision: str = 'main' torch_dtype: Optional[Literal['auto', 'bfloat16', 'float16', 'float32']] = None trust_remote_code: bool = False attn_implementation: Optional[str] = None use_peft: bool = False lora_r: int = 16 lora_alpha: int = 32 lora_dropout: float = 0.05 lora_target_modules: Optional[List[str]] = None lora_modules_to_save: Optional[List[str]] = None lora_task_type: str = 'CAUSAL_LM' use_rslora: bool = False load_in_8bit: bool = False load_in_4bit: bool = False bnb_4bit_quant_type: Literal['fp4', 'nf4'] = 'nf4' use_bnb_nested_quant: bool = False def __post_init__(self): if self.load_in_8bit and self.load_in_4bit: raise ValueError("You can't use 8 bit and 4 bit precision at the same time") if isinstance(self.lora_target_modules, list) and len(self.lora_target_modules) == 1: self.lora_target_modules = self.lora_target_modules[0] # File: trl-main/trl/trainer/online_dpo_config.py from dataclasses import dataclass from typing import Literal, Optional from transformers import TrainingArguments @dataclass class OnlineDPOConfig(TrainingArguments): reward_model_path: Optional[str] = None max_new_tokens: int = 64 temperature: float = 0.9 missing_eos_penalty: Optional[float] = None beta: float = 0.1 loss_type: Literal['sigmoid', 'ipo'] = 'sigmoid' dataset_num_proc: Optional[int] = None disable_dropout: bool = True # File: trl-main/trl/trainer/online_dpo_trainer.py import warnings from functools import wraps from typing import Any, Callable, Dict, List, Optional, Tuple, Union import datasets import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data from accelerate import PartialState from datasets import Dataset from packaging import version from torch.utils.data import DataLoader, IterableDataset from transformers import DataCollator, GenerationConfig, PreTrainedTokenizerBase, Trainer, TrainerCallback from transformers.modeling_utils import PreTrainedModel from transformers.trainer_utils import EvalPrediction, seed_worker from transformers.training_args import OptimizerNames from transformers.utils import is_apex_available, is_sagemaker_mp_enabled, logging from ..import_utils import is_peft_available from ..models import create_reference_model from ..models.utils import unwrap_model_for_generation from .judges import BasePairwiseJudge from .online_dpo_config import OnlineDPOConfig from .utils import DPODataCollatorWithPadding, disable_dropout_in_model, empty_cache, get_reward, prepare_deepspeed, trl_sanitze_kwargs_for_tagging, truncate_right if is_peft_available(): from peft import PeftModel, get_peft_model if is_apex_available(): from apex import amp if is_sagemaker_mp_enabled(): from smdistributed.modelparallel import __version__ as SMP_VERSION IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse('1.10') else: IS_SAGEMAKER_MP_POST_1_10 = False logger = logging.get_logger(__name__) class OnlineDPOTrainer(Trainer): _tag_names = ['trl', 'online-dpo'] def __init__(self, model: Union[PreTrainedModel, nn.Module], ref_model: Union[PreTrainedModel, nn.Module, None]=None, reward_model: Union[PreTrainedModel, nn.Module, None]=None, judge: Optional[BasePairwiseJudge]=None, args: 
Optional[OnlineDPOConfig]=None, data_collator: Optional[DataCollator]=None, train_dataset: Optional[Union[Dataset, IterableDataset, 'datasets.Dataset']]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset], 'datasets.Dataset']]=None, tokenizer: Optional[PreTrainedTokenizerBase]=None, peft_config: Optional[Dict]=None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]]=None, callbacks: Optional[List[TrainerCallback]]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None) -> None: if reward_model is not None and judge is not None: warnings.warn('Both `reward_model` and `judge` are provided. Please provide only one of them. Ignoring `judge` and using `reward_model`.') elif reward_model is None and judge is None: raise ValueError('Either `reward_model` or `judge` must be provided.') elif reward_model is None and judge is not None: raise NotImplementedError('Using `judge` is not yet supported.') self.reward_model = reward_model self.judge = judge if args is None: raise ValueError('`args` must be provided.') if tokenizer is None: raise ValueError('`tokenizer` must be provided.') if peft_config is not None: if not is_peft_available(): raise ImportError('PEFT is not available and you passed `peft_config`. Please install PEFT with `pip install peft` to use it.') if isinstance(model, PeftModel): model = model.merge_and_unload() model = get_peft_model(model, peft_config) if args.disable_dropout: disable_dropout_in_model(model) if ref_model is None: if peft_config is None: self.ref_model = create_reference_model(model) else: self.ref_model = None else: self.ref_model = ref_model self.ref_model.eval() if self.reward_model is not None: self.reward_model.eval() if data_collator is None: data_collator = DPODataCollatorWithPadding(pad_token_id=tokenizer.pad_token_id) with PartialState().local_main_process_first(): fn_kwargs = {'is_encoder_decoder': model.config.is_encoder_decoder, 'tokenizer': tokenizer} train_dataset = train_dataset.map(self.tokenize_row, fn_kwargs=fn_kwargs, num_proc=args.dataset_num_proc) if eval_dataset is not None: eval_dataset = eval_dataset.map(self.tokenize_row, fn_kwargs=fn_kwargs, num_proc=args.dataset_num_proc) self.stats = {'objective/kl': [], 'objective/entropy': [], 'objective/non_score_reward': [], 'objective/rlhf_reward': [], 'objective/scores': [], 'objective/scores_margin': [], 'rewards/chosen': [], 'rewards/rejected': [], 'rewards/accuracies': [], 'rewards/margins': [], 'logps/chosen': [], 'logps/rejected': [], 'val/contain_eos_token': []} self.generation_config = GenerationConfig(max_new_tokens=args.max_new_tokens, temperature=args.temperature, top_k=0, top_p=1.0, do_sample=True, use_cache=False if args.gradient_checkpointing else True) super().__init__(model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics) if self.is_deepspeed_enabled: if self.reward_model is not None: self.reward_model = prepare_deepspeed(self.reward_model, args.per_device_train_batch_size, args.fp16, args.bf16) self.ref_model = prepare_deepspeed(self.ref_model, args.per_device_train_batch_size, args.fp16, args.bf16) else: if self.ref_model is not None: self.ref_model = self.ref_model.to(self.accelerator.device) if self.reward_model 
is not None: self.reward_model = self.reward_model.to(self.accelerator.device) @staticmethod def tokenize_row(feature, is_encoder_decoder: bool, tokenizer: PreTrainedTokenizerBase) -> Dict[str, Any]: if not is_encoder_decoder: batch = tokenizer(feature['prompt'], add_special_tokens=False) if tokenizer.bos_token_id is not None: prompt_len_input_ids = len(batch['input_ids']) if prompt_len_input_ids == 0 or tokenizer.bos_token_id != batch['input_ids'][0]: batch['input_ids'] = [tokenizer.bos_token_id] + batch['input_ids'] batch['attention_mask'] = [1] + batch['attention_mask'] else: batch = tokenizer(feature['prompt'], add_special_tokens=True) batch = {f'prompt_{key}': value for (key, value) in batch.items()} return batch @wraps(Trainer.get_train_dataloader) def get_train_dataloader(self) -> DataLoader: if self.train_dataset is None: raise ValueError('Trainer: training requires a train_dataset.') train_dataset = self.train_dataset data_collator = self.data_collator dataloader_params = {'batch_size': self._train_batch_size, 'collate_fn': data_collator, 'num_workers': self.args.dataloader_num_workers, 'pin_memory': self.args.dataloader_pin_memory, 'persistent_workers': self.args.dataloader_persistent_workers} if not isinstance(train_dataset, torch.utils.data.IterableDataset): dataloader_params['sampler'] = self._get_train_sampler() dataloader_params['drop_last'] = self.args.dataloader_drop_last dataloader_params['worker_init_fn'] = seed_worker dataloader_params['prefetch_factor'] = self.args.dataloader_prefetch_factor return self.accelerator.prepare(DataLoader(train_dataset, **dataloader_params)) @wraps(Trainer.get_eval_dataloader) def get_eval_dataloader(self, eval_dataset: Optional[Union[str, Dataset]]=None) -> DataLoader: if eval_dataset is None and self.eval_dataset is None: raise ValueError('Trainer: evaluation requires an eval_dataset.') dataloader_key = eval_dataset if isinstance(eval_dataset, str) else 'eval' if hasattr(self, '_eval_dataloaders') and dataloader_key in self._eval_dataloaders and self.args.dataloader_persistent_workers: return self.accelerator.prepare(self._eval_dataloaders[dataloader_key]) eval_dataset = self.eval_dataset[eval_dataset] if isinstance(eval_dataset, str) else eval_dataset if eval_dataset is not None else self.eval_dataset data_collator = self.data_collator dataloader_params = {'batch_size': self.args.eval_batch_size, 'collate_fn': data_collator, 'num_workers': self.args.dataloader_num_workers, 'pin_memory': self.args.dataloader_pin_memory, 'persistent_workers': self.args.dataloader_persistent_workers} if not isinstance(eval_dataset, torch.utils.data.IterableDataset): dataloader_params['sampler'] = self._get_eval_sampler(eval_dataset) dataloader_params['drop_last'] = self.args.dataloader_drop_last dataloader_params['prefetch_factor'] = self.args.dataloader_prefetch_factor eval_dataloader = DataLoader(eval_dataset, **dataloader_params) if self.args.dataloader_persistent_workers: if hasattr(self, '_eval_dataloaders'): self._eval_dataloaders[dataloader_key] = eval_dataloader else: self._eval_dataloaders = {dataloader_key: eval_dataloader} return self.accelerator.prepare(eval_dataloader) def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: model.train() inputs = self._prepare_inputs(inputs) (num_examples, context_length) = inputs['prompt_input_ids'].shape prompt_ids = inputs['prompt_input_ids'].repeat(2, 1) prompt_mask = inputs['prompt_attention_mask'].repeat(2, 1) with unwrap_model_for_generation(model, 
self.accelerator) as unwrapped_model: output = unwrapped_model.generate(input_ids=prompt_ids, attention_mask=prompt_mask, generation_config=self.generation_config) del inputs completion_ids = output[:, context_length:] (completion_ids, completion_mask) = truncate_right(completion_ids, self.tokenizer.eos_token_id, self.tokenizer.pad_token_id) prompt_completion_ids = torch.cat((prompt_ids, completion_ids), dim=1) prompt_completion_mask = torch.cat((prompt_mask, completion_mask), dim=1) output = model(prompt_completion_ids, attention_mask=prompt_completion_mask) logits = output.logits[:, context_length - 1:-1] all_logprobs = F.log_softmax(logits, dim=-1) logprobs = torch.take_along_dim(all_logprobs, completion_ids.unsqueeze(-1), dim=2).squeeze(-1) del output, logits, all_logprobs with torch.no_grad(): if self.ref_model is not None: ref_output = self.ref_model(prompt_completion_ids, attention_mask=prompt_completion_mask) else: with self.model.disable_adapter(): ref_output = self.model(prompt_completion_ids, attention_mask=prompt_completion_mask) ref_logits = ref_output.logits[:, context_length - 1:-1] ref_all_logprobs = F.log_softmax(ref_logits, dim=-1) ref_logprobs = torch.take_along_dim(ref_all_logprobs, completion_ids.unsqueeze(-1), dim=2).squeeze(-1) del ref_output, ref_logits, ref_all_logprobs (_, scores, _) = get_reward(self.reward_model, prompt_completion_ids, self.tokenizer.pad_token_id, context_length) contain_eos_token = torch.any(completion_ids == self.tokenizer.eos_token_id, dim=-1) if self.args.missing_eos_penalty is not None: scores[~contain_eos_token] -= self.args.missing_eos_penalty (first_half, second_half) = scores.split(num_examples) num_examples_range = torch.arange(num_examples, device=scores.device) mask = first_half >= second_half chosen_indices = num_examples_range + ~mask * num_examples rejected_indices = num_examples_range + mask * num_examples cr_indices = torch.cat((chosen_indices, rejected_indices), dim=0) cr_logprobs = logprobs[cr_indices] cr_ref_logprobs = ref_logprobs[cr_indices] padding_mask = ~completion_mask.bool() cr_padding_mask = padding_mask[cr_indices] cr_logprobs_sum = (cr_logprobs * ~cr_padding_mask).sum(1) cr_ref_logprobs_sum = (cr_ref_logprobs * ~cr_padding_mask).sum(1) (chosen_logprobs_sum, rejected_logprobs_sum) = torch.split(cr_logprobs_sum, num_examples) (chosen_ref_logprobs_sum, rejected_ref_logprobs_sum) = torch.split(cr_ref_logprobs_sum, num_examples) pi_logratios = chosen_logprobs_sum - rejected_logprobs_sum ref_logratios = chosen_ref_logprobs_sum - rejected_ref_logprobs_sum logits = pi_logratios - ref_logratios if self.args.loss_type == 'sigmoid': losses = -F.logsigmoid(self.args.beta * logits) elif self.args.loss_type == 'ipo': losses = (logits - 1 / (2 * self.args.beta)) ** 2 else: raise NotImplementedError(f'invalid loss type {self.args.loss_type}') loss = losses.mean() self.stats['val/contain_eos_token'].append(contain_eos_token.float().mean().item()) self.stats['logps/chosen'].append(self.accelerator.gather(chosen_logprobs_sum).mean().item()) self.stats['logps/rejected'].append(self.accelerator.gather(rejected_logprobs_sum).mean().item()) self.stats['objective/scores'].append(self.accelerator.gather(scores.mean()).mean().item()) kl = logprobs - ref_logprobs mean_kl = kl.sum(1).mean() self.stats['objective/kl'].append(self.accelerator.gather(mean_kl).mean().item()) non_score_reward = (-self.args.beta * kl).sum(1) mean_non_score_reward = non_score_reward.mean() 
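# Descriptive note on the logging that follows (restating the quantities computed above, no behavior change):
# `non_score_reward` is the per-sequence KL penalty, -beta * sum over completion tokens of (logprobs - ref_logprobs);
# the RLHF reward logged next is `scores + non_score_reward`; and the implicit DPO rewards are
# beta * (logprob_sum - ref_logprob_sum) for the chosen and rejected halves of the generated pairs.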
self.stats['objective/non_score_reward'].append(self.accelerator.gather(mean_non_score_reward).mean().item()) rlhf_reward = scores + non_score_reward self.stats['objective/rlhf_reward'].append(self.accelerator.gather(rlhf_reward).mean().item()) mean_entropy = -logprobs.sum(1).mean() self.stats['objective/entropy'].append(self.accelerator.gather(mean_entropy).mean().item()) scores_margin = scores[chosen_indices] - scores[rejected_indices] self.stats['objective/scores_margin'].append(self.accelerator.gather(scores_margin.mean()).mean().item()) chosen_rewards = self.args.beta * (chosen_logprobs_sum - chosen_ref_logprobs_sum) gathered_chosen_rewards = self.accelerator.gather(chosen_rewards) self.stats['rewards/chosen'].append(gathered_chosen_rewards.mean().item()) rejected_rewards = self.args.beta * (rejected_logprobs_sum - rejected_ref_logprobs_sum) gathered_rejected_rewards = self.accelerator.gather(rejected_rewards) self.stats['rewards/rejected'].append(gathered_rejected_rewards.mean().item()) margin = gathered_chosen_rewards - gathered_rejected_rewards self.stats['rewards/margins'].append(margin.mean().item()) accuracy = margin > 0 self.stats['rewards/accuracies'].append(accuracy.float().mean().item()) if self.args.torch_empty_cache_steps is not None and self.state.global_step % self.args.torch_empty_cache_steps == 0: empty_cache() kwargs = {} if self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]: kwargs['learning_rate'] = self._get_learning_rate() if self.args.n_gpu > 1: loss = loss.mean() if self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: self.accelerator.backward(loss, **kwargs) return loss.detach() / self.args.gradient_accumulation_steps def _maybe_log_save_evaluate(self, tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval): if self.control.should_log and self.state.global_step > self._globalstep_last_logged: logs: Dict[str, float] = {} tr_loss_scalar = self._nested_gather(tr_loss).mean().item() tr_loss -= tr_loss logs['loss'] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4) if grad_norm is not None: logs['grad_norm'] = grad_norm.detach().item() if isinstance(grad_norm, torch.Tensor) else grad_norm logs['learning_rate'] = self._get_learning_rate() for (key, val) in self.stats.items(): logs[key] = sum(val) / len(val) self.stats = {key: [] for key in self.stats} self._total_loss_scalar += tr_loss_scalar self._globalstep_last_logged = self.state.global_step self.store_flos() self.log(logs) metrics = None if self.control.should_evaluate: metrics = self._evaluate(trial, ignore_keys_for_eval) if self.control.should_save: self._save_checkpoint(model, trial, metrics=metrics) self.control = self.callback_handler.on_save(self.args, self.state, self.control) @wraps(Trainer.push_to_hub) def push_to_hub(self, commit_message: Optional[str]='End of training', blocking: bool=True, **kwargs) -> str: kwargs = trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs) return super().push_to_hub(commit_message=commit_message, blocking=blocking, **kwargs) # File: trl-main/trl/trainer/orpo_config.py from dataclasses import dataclass from typing import Any, Dict, Optional from transformers import TrainingArguments @dataclass class ORPOConfig(TrainingArguments): max_length: Optional[int] = None max_prompt_length: Optional[int] = None max_completion_length: Optional[int] = None beta: float = 0.1 disable_dropout: bool = True label_pad_token_id: int = -100 
padding_value: Optional[int] = None truncation_mode: str = 'keep_end' generate_during_eval: bool = False is_encoder_decoder: Optional[bool] = None model_init_kwargs: Optional[Dict[str, Any]] = None dataset_num_proc: Optional[int] = None # File: trl-main/trl/trainer/orpo_trainer.py import inspect import random import warnings from collections import defaultdict from contextlib import nullcontext from copy import deepcopy from functools import wraps from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union import numpy as np import torch import torch.amp as amp import torch.nn as nn import torch.nn.functional as F from accelerate import PartialState from accelerate.utils import is_deepspeed_available from datasets import Dataset from torch.utils.data import DataLoader from transformers import AutoModelForCausalLM, DataCollator, PreTrainedModel, PreTrainedTokenizerBase, Trainer from transformers.trainer_callback import TrainerCallback from transformers.trainer_utils import EvalLoopOutput from transformers.utils import is_torch_fx_proxy, is_torch_xla_available from ..import_utils import is_peft_available, is_wandb_available from ..models import PreTrainedModelWrapper from .orpo_config import ORPOConfig from .utils import DPODataCollatorWithPadding, add_bos_token_if_needed, add_eos_token_if_needed, disable_dropout_in_model, pad_to_length, peft_module_casting_to_bf16, trl_sanitze_kwargs_for_tagging if is_peft_available(): from peft import PeftModel, get_peft_model, prepare_model_for_kbit_training if is_wandb_available(): import wandb if is_deepspeed_available(): import deepspeed if is_torch_xla_available(): import torch_xla.core.xla_model as xm class ORPOTrainer(Trainer): _tag_names = ['trl', 'orpo'] def __init__(self, model: Optional[Union[PreTrainedModel, nn.Module, str]]=None, args: Optional[ORPOConfig]=None, data_collator: Optional[DataCollator]=None, train_dataset: Optional[Dataset]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]]=None, tokenizer: Optional[PreTrainedTokenizerBase]=None, model_init: Optional[Callable[[], PreTrainedModel]]=None, callbacks: Optional[List[TrainerCallback]]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None, peft_config: Optional[Dict]=None, compute_metrics: Optional[Callable[[EvalLoopOutput], Dict]]=None): if args.model_init_kwargs is None: model_init_kwargs = {} elif not isinstance(model, str): raise ValueError('You passed model_kwargs to the ORPOTrainer. But your model is already instantiated.') else: model_init_kwargs = args.model_init_kwargs torch_dtype = model_init_kwargs.get('torch_dtype') if torch_dtype is not None: if isinstance(torch_dtype, str) and torch_dtype != 'auto': torch_dtype = getattr(torch, torch_dtype) if torch_dtype != 'auto' and (not isinstance(torch_dtype, torch.dtype)): raise ValueError(f"Invalid `torch_dtype` passed to the ORPOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}.") model_init_kwargs['torch_dtype'] = torch_dtype if isinstance(model, str): warnings.warn('You passed a model_id to the ORPOTrainer. 
This will automatically create an `AutoModelForCausalLM` or a `PeftModel` (if you passed a `peft_config`) for you.') model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs) self._peft_has_been_casted_to_bf16 = False if not is_peft_available() and peft_config is not None: raise ValueError("PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models") elif is_peft_available() and peft_config is not None: if isinstance(model, PeftModel): model = model.merge_and_unload() if getattr(model, 'is_loaded_in_8bit', False) or getattr(model, 'is_loaded_in_4bit', False): _support_gc_kwargs = hasattr(args, 'gradient_checkpointing_kwargs') and 'gradient_checkpointing_kwargs' in list(inspect.signature(prepare_model_for_kbit_training).parameters) prepare_model_kwargs = {'use_gradient_checkpointing': args.gradient_checkpointing} if _support_gc_kwargs: prepare_model_kwargs['gradient_checkpointing_kwargs'] = args.gradient_checkpointing_kwargs model = prepare_model_for_kbit_training(model, **prepare_model_kwargs) elif getattr(args, 'gradient_checkpointing', False): if hasattr(model, 'enable_input_require_grads'): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) model = get_peft_model(model, peft_config) if args.bf16 and getattr(model, 'is_loaded_in_4bit', False): peft_module_casting_to_bf16(model) self._peft_has_been_casted_to_bf16 = True elif getattr(args, 'gradient_checkpointing', False): if hasattr(model, 'enable_input_require_grads'): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) if args.generate_during_eval and (not is_wandb_available()): raise ValueError('`generate_during_eval=True` requires Weights and Biases to be installed. 
Please install `wandb` to resolve.') if model is not None: self.is_encoder_decoder = model.config.is_encoder_decoder elif args.is_encoder_decoder is None: raise ValueError('When no model is provided, you need to pass the parameter is_encoder_decoder.') else: self.is_encoder_decoder = args.is_encoder_decoder if self.is_encoder_decoder: self.decoder_start_token_id = model.config.decoder_start_token_id self.pad_token_id = model.config.pad_token_id if tokenizer is None: raise ValueError('tokenizer must be specified to tokenize an ORPO dataset.') if args.max_length is None: warnings.warn("`max_length` is not set in the ORPOConfig's init; it will default to `512`, but you should set it yourself in the future.", UserWarning) max_length = 512 else: max_length = args.max_length if args.max_prompt_length is None: warnings.warn("`max_prompt_length` is not set in the ORPOConfig's init; it will default to `128`, but you should set it yourself in the future.", UserWarning) max_prompt_length = 128 else: max_prompt_length = args.max_prompt_length if args.max_completion_length is None and self.is_encoder_decoder: warnings.warn("When using an encoder-decoder architecture, you should set `max_completion_length` in the ORPOConfig's init; it will default to `128`, but you should set it yourself in the future.", UserWarning) self.max_completion_length = 128 else: self.max_completion_length = args.max_completion_length if data_collator is None: data_collator = DPODataCollatorWithPadding(pad_token_id=tokenizer.pad_token_id, label_pad_token_id=args.label_pad_token_id, is_encoder_decoder=self.is_encoder_decoder) if args.remove_unused_columns: args.remove_unused_columns = False warnings.warn('When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your TrainingArguments; we have set it for you, but you should do it yourself in the future.', UserWarning) self.use_dpo_data_collator = True else: self.use_dpo_data_collator = False if args.disable_dropout: disable_dropout_in_model(model) self.max_length = max_length self.generate_during_eval = args.generate_during_eval self.label_pad_token_id = args.label_pad_token_id self.padding_value = args.padding_value if args.padding_value is not None else tokenizer.pad_token_id self.max_prompt_length = max_prompt_length self.truncation_mode = args.truncation_mode self.tokenizer = tokenizer self.beta = args.beta self.aux_loss_enabled = getattr(model.config, 'output_router_logits', False) self._stored_metrics = defaultdict(lambda : defaultdict(list)) with PartialState().local_main_process_first(): train_dataset = train_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc) if eval_dataset is not None: eval_dataset = eval_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc) super().__init__(model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, model_init=model_init, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics) if hasattr(self.model, 'add_model_tags'): self.model.add_model_tags(self._tag_names) if not hasattr(self, 'accelerator'): raise AttributeError('Your `Trainer` does not have an `accelerator` object. 
Consider upgrading `transformers`.') def _prepare_deepspeed(self, model: PreTrainedModelWrapper): deepspeed_plugin = self.accelerator.state.deepspeed_plugin config_kwargs = deepcopy(deepspeed_plugin.deepspeed_config) if model is not None: if hasattr(model, 'config'): hidden_size = max(model.config.hidden_sizes) if getattr(model.config, 'hidden_sizes', None) else getattr(model.config, 'hidden_size', None) if hidden_size is not None and config_kwargs['zero_optimization']['stage'] == 3: config_kwargs.update({'zero_optimization.reduce_bucket_size': hidden_size * hidden_size, 'zero_optimization.stage3_param_persistence_threshold': 10 * hidden_size, 'zero_optimization.stage3_prefetch_bucket_size': 0.9 * hidden_size * hidden_size}) if config_kwargs['zero_optimization']['stage'] != 3: config_kwargs['zero_optimization']['stage'] = 0 (model, *_) = deepspeed.initialize(model=model, config=config_kwargs) model.eval() return model def build_tokenized_answer(self, prompt, answer): full_tokenized = self.tokenizer(prompt + answer, add_special_tokens=False) prompt_input_ids = self.tokenizer(prompt, add_special_tokens=False)['input_ids'] answer_input_ids = full_tokenized['input_ids'][len(prompt_input_ids):] answer_attention_mask = full_tokenized['attention_mask'][len(prompt_input_ids):] full_concat_input_ids = np.concatenate([prompt_input_ids, answer_input_ids]) full_input_ids = np.array(full_tokenized['input_ids']) if len(full_input_ids) != len(full_concat_input_ids): raise ValueError('Prompt input ids and answer input ids should have the same length.') response_token_ids_start_idx = len(prompt_input_ids) if prompt_input_ids != full_tokenized['input_ids'][:response_token_ids_start_idx]: response_token_ids_start_idx -= 1 prompt_input_ids = full_tokenized['input_ids'][:response_token_ids_start_idx] prompt_attention_mask = full_tokenized['attention_mask'][:response_token_ids_start_idx] if len(prompt_input_ids) != len(prompt_attention_mask): raise ValueError('Prompt input ids and attention mask should have the same length.') answer_input_ids = full_tokenized['input_ids'][response_token_ids_start_idx:] answer_attention_mask = full_tokenized['attention_mask'][response_token_ids_start_idx:] return dict(prompt_input_ids=prompt_input_ids, prompt_attention_mask=prompt_attention_mask, input_ids=answer_input_ids, attention_mask=answer_attention_mask) def tokenize_row(self, feature, model: Optional[Union[PreTrainedModel, nn.Module]]=None) -> Dict: batch = {} prompt = feature['prompt'] chosen = feature['chosen'] rejected = feature['rejected'] if not self.is_encoder_decoder: if not isinstance(prompt, str): raise ValueError(f'prompt should be an str but got {type(prompt)}') prompt_tokens = self.tokenizer(prompt, add_special_tokens=False) prompt_tokens = {f'prompt_{k}': v for (k, v) in prompt_tokens.items()} if not isinstance(chosen, str): raise ValueError(f'chosen should be an str but got {type(chosen)}') chosen_tokens = self.build_tokenized_answer(prompt, chosen) if not isinstance(rejected, str): raise ValueError(f'rejected should be an str but got {type(rejected)}') rejected_tokens = self.build_tokenized_answer(prompt, rejected) prompt_len_input_ids = len(prompt_tokens['prompt_input_ids']) chosen_prompt_len_input_ids = len(chosen_tokens['prompt_input_ids']) rejected_prompt_len_input_ids = len(rejected_tokens['prompt_input_ids']) prompt_len_input_ids = min(chosen_prompt_len_input_ids, rejected_prompt_len_input_ids) for (k, v) in prompt_tokens.items(): prompt_tokens[k] = v[:prompt_len_input_ids] num_diff_tokens = sum([a 
!= b for (a, b) in zip(chosen_tokens['prompt_input_ids'], rejected_tokens['prompt_input_ids'])]) num_diff_len = abs(chosen_prompt_len_input_ids - rejected_prompt_len_input_ids) if num_diff_tokens > 1 or num_diff_len > 1: raise ValueError('Chosen and rejected prompt_input_ids might only differ on the last token due to tokenizer merge ops.') (prompt_tokens, chosen_tokens, rejected_tokens) = add_bos_token_if_needed(self.tokenizer.bos_token_id, prompt_len_input_ids, prompt_tokens, chosen_prompt_len_input_ids, chosen_tokens, rejected_prompt_len_input_ids, rejected_tokens) (chosen_tokens, rejected_tokens) = add_eos_token_if_needed(self.tokenizer.eos_token_id, chosen_tokens, rejected_tokens) longer_response_length = max(len(chosen_tokens['input_ids']), len(rejected_tokens['input_ids'])) for answer_tokens in [chosen_tokens, rejected_tokens, prompt_tokens]: if len(answer_tokens['prompt_input_ids']) + longer_response_length > self.max_length: if self.truncation_mode == 'keep_start': for k in ['prompt_input_ids', 'prompt_attention_mask']: answer_tokens[k] = answer_tokens[k][:self.max_prompt_length] elif self.truncation_mode == 'keep_end': for k in ['prompt_input_ids', 'prompt_attention_mask']: answer_tokens[k] = answer_tokens[k][-self.max_prompt_length:] else: raise ValueError(f'Unknown truncation mode: {self.truncation_mode}') for answer_tokens in [chosen_tokens, rejected_tokens]: if len(answer_tokens['prompt_input_ids']) + longer_response_length > self.max_length: for k in ['input_ids', 'attention_mask']: answer_tokens[k] = answer_tokens[k][:self.max_length - self.max_prompt_length] chosen_sequence_tokens = {k: chosen_tokens[f'prompt_{k}'] + chosen_tokens[k] for k in ['input_ids', 'attention_mask']} rejected_sequence_tokens = {k: rejected_tokens[f'prompt_{k}'] + rejected_tokens[k] for k in ['input_ids', 'attention_mask']} chosen_sequence_tokens['labels'] = chosen_sequence_tokens['input_ids'][:] chosen_sequence_tokens['labels'][:len(chosen_tokens['prompt_input_ids'])] = [self.label_pad_token_id] * len(chosen_tokens['prompt_input_ids']) rejected_sequence_tokens['labels'] = rejected_sequence_tokens['input_ids'][:] rejected_sequence_tokens['labels'][:len(rejected_tokens['prompt_input_ids'])] = [self.label_pad_token_id] * len(rejected_tokens['prompt_input_ids']) for (k, toks) in {'chosen_': chosen_sequence_tokens, 'rejected_': rejected_sequence_tokens, '': prompt_tokens}.items(): for (type_key, tokens) in toks.items(): if type_key == 'token_type_ids': continue batch[f'{k}{type_key}'] = tokens else: chosen_tokens = self.tokenizer(chosen, truncation=True, max_length=self.max_completion_length, add_special_tokens=True) rejected_tokens = self.tokenizer(rejected, truncation=True, max_length=self.max_completion_length, add_special_tokens=True) prompt_tokens = self.tokenizer(prompt, truncation=True, max_length=self.max_prompt_length, add_special_tokens=True) batch['chosen_labels'] = chosen_tokens['input_ids'] batch['rejected_labels'] = rejected_tokens['input_ids'] batch['prompt_input_ids'] = prompt_tokens['input_ids'] batch['prompt_attention_mask'] = prompt_tokens['attention_mask'] if model is not None and hasattr(model, 'prepare_decoder_input_ids_from_labels'): batch['rejected_decoder_input_ids'] = model.prepare_decoder_input_ids_from_labels(labels=torch.tensor(batch['rejected_labels'])) batch['chosen_decoder_input_ids'] = model.prepare_decoder_input_ids_from_labels(labels=torch.tensor(batch['chosen_labels'])) if is_torch_xla_available(): for k in batch: if 'labels' in k or self.is_encoder_decoder: pad_value 
= self.label_pad_token_id elif k.endswith('_input_ids'): pad_value = self.padding_value elif k.endswith('_attention_mask'): pad_value = 0 batch[k] = batch[k] + [pad_value] * (self.max_length - len(batch[k])) return batch @staticmethod def concatenated_inputs(batch: Dict[str, Union[List, torch.LongTensor]], is_encoder_decoder: bool=False, label_pad_token_id: int=-100, padding_value: int=0, device: Optional[torch.device]=None) -> Dict[str, torch.LongTensor]: concatenated_batch = {} if is_encoder_decoder: max_length = max(batch['chosen_labels'].shape[1], batch['rejected_labels'].shape[1]) else: max_length = max(batch['chosen_input_ids'].shape[1], batch['rejected_input_ids'].shape[1]) for k in batch: if k.startswith('chosen') and isinstance(batch[k], torch.Tensor): if 'labels' in k or is_encoder_decoder: pad_value = label_pad_token_id elif k.endswith('_input_ids'): pad_value = padding_value elif k.endswith('_attention_mask'): pad_value = 0 concatenated_key = k.replace('chosen', 'concatenated') concatenated_batch[concatenated_key] = pad_to_length(batch[k], max_length, pad_value=pad_value) for k in batch: if k.startswith('rejected') and isinstance(batch[k], torch.Tensor): if 'labels' in k or is_encoder_decoder: pad_value = label_pad_token_id elif k.endswith('_input_ids'): pad_value = padding_value elif k.endswith('_attention_mask'): pad_value = 0 concatenated_key = k.replace('rejected', 'concatenated') concatenated_batch[concatenated_key] = torch.cat((concatenated_batch[concatenated_key], pad_to_length(batch[k], max_length, pad_value=pad_value)), dim=0).to(device=device) if is_encoder_decoder: concatenated_batch['concatenated_input_ids'] = batch['prompt_input_ids'].repeat(2, 1).to(device=device) concatenated_batch['concatenated_attention_mask'] = batch['prompt_attention_mask'].repeat(2, 1).to(device=device) return concatenated_batch def odds_ratio_loss(self, policy_chosen_logps: torch.FloatTensor, policy_rejected_logps: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: log_odds = policy_chosen_logps - policy_rejected_logps - (torch.log1p(-torch.exp(policy_chosen_logps)) - torch.log1p(-torch.exp(policy_rejected_logps))) sig_ratio = F.sigmoid(log_odds) ratio = torch.log(sig_ratio) losses = self.beta * ratio chosen_rewards = self.beta * policy_chosen_logps.to(self.accelerator.device).detach() rejected_rewards = self.beta * policy_rejected_logps.to(self.accelerator.device).detach() return (losses, chosen_rewards, rejected_rewards, torch.mean(ratio), torch.mean(log_odds)) @staticmethod def get_batch_logps(logits: torch.FloatTensor, labels: torch.LongTensor, average_log_prob: bool=False, label_pad_token_id: int=-100, is_encoder_decoder: bool=False) -> torch.FloatTensor: if logits.shape[:-1] != labels.shape: raise ValueError('Logits (batch and sequence length dim) and labels must have the same shape.') if not is_encoder_decoder: labels = labels[:, 1:].clone() logits = logits[:, :-1, :] loss_mask = labels != label_pad_token_id labels = torch.where(labels == label_pad_token_id, 0, labels) per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2) if average_log_prob: return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1) else: return (per_token_logps * loss_mask).sum(-1) def concatenated_forward(self, model: nn.Module, batch: Dict[str, Union[List, torch.LongTensor]]) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: concatenated_batch = 
self.concatenated_inputs(batch, is_encoder_decoder=self.is_encoder_decoder, label_pad_token_id=self.label_pad_token_id, padding_value=self.padding_value, device=self.accelerator.device) len_chosen = batch['chosen_labels'].shape[0] model_kwargs = {'decoder_input_ids': self._shift_right(concatenated_batch['concatenated_labels'])} if self.is_encoder_decoder else {} if self.aux_loss_enabled: model_kwargs['output_router_logits'] = True outputs = model(concatenated_batch['concatenated_input_ids'], attention_mask=concatenated_batch['concatenated_attention_mask'], use_cache=False, **model_kwargs) all_logits = outputs.logits def cross_entropy_loss(logits, labels): if not self.is_encoder_decoder: logits = logits[..., :-1, :].contiguous() labels = labels[..., 1:].contiguous() loss_fct = nn.CrossEntropyLoss() logits = logits.view(-1, logits.shape[-1]) labels = labels.view(-1) labels = labels.to(logits.device) loss = loss_fct(logits, labels) return loss if self.is_encoder_decoder: labels = concatenated_batch['concatenated_labels'].clone() else: labels = concatenated_batch['concatenated_input_ids'].clone() attention_mask = concatenated_batch['concatenated_attention_mask'] labels = torch.where(attention_mask == 1, labels, self.label_pad_token_id) chosen_nll_loss = cross_entropy_loss(all_logits[:len_chosen], labels[:len_chosen]) all_logps = self.get_batch_logps(all_logits, concatenated_batch['concatenated_labels'], average_log_prob=True, is_encoder_decoder=self.is_encoder_decoder, label_pad_token_id=self.label_pad_token_id) chosen_logps = all_logps[:len_chosen] rejected_logps = all_logps[len_chosen:] chosen_logits = all_logits[:len_chosen] rejected_logits = all_logits[len_chosen:] if self.aux_loss_enabled: return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, chosen_nll_loss, outputs.aux_loss) return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, chosen_nll_loss) def get_batch_loss_metrics(self, model, batch: Dict[str, Union[List, torch.LongTensor]], train_eval: Literal['train', 'eval']='train'): metrics = {} forward_output = self.concatenated_forward(model, batch) (policy_chosen_logps, policy_rejected_logps, policy_chosen_logits, policy_rejected_logits, policy_nll_loss) = forward_output[:5] if self.aux_loss_enabled: aux_loss = forward_output[5] (losses, chosen_rewards, rejected_rewards, log_odds_ratio, log_odds_chosen) = self.odds_ratio_loss(policy_chosen_logps, policy_rejected_logps) loss = policy_nll_loss - losses.mean() reward_accuracies = (chosen_rewards > rejected_rewards).float() prefix = 'eval_' if train_eval == 'eval' else '' metrics[f'{prefix}rewards/chosen'] = chosen_rewards.mean() metrics[f'{prefix}rewards/rejected'] = rejected_rewards.mean() metrics[f'{prefix}rewards/accuracies'] = reward_accuracies.mean() metrics[f'{prefix}rewards/margins'] = (chosen_rewards - rejected_rewards).mean() metrics[f'{prefix}logps/rejected'] = policy_rejected_logps.detach().mean() metrics[f'{prefix}logps/chosen'] = policy_chosen_logps.detach().mean() metrics[f'{prefix}logits/rejected'] = policy_rejected_logits.detach().mean() metrics[f'{prefix}logits/chosen'] = policy_chosen_logits.detach().mean() metrics[f'{prefix}nll_loss'] = policy_nll_loss.detach().mean() metrics[f'{prefix}log_odds_ratio'] = log_odds_ratio metrics[f'{prefix}log_odds_chosen'] = log_odds_chosen if is_torch_xla_available(): xm.mark_step() for (k, v) in metrics.items(): metrics[k] = v.item() if self.aux_loss_enabled: loss += getattr(model.config, 'router_aux_loss_coef', 0.0) * aux_loss return (loss, metrics) def 
compute_loss(self, model: Union[PreTrainedModel, nn.Module], inputs: Dict[str, Union[torch.Tensor, Any]], return_outputs=False) -> Union[torch.Tensor, Tuple[torch.Tensor, Dict[str, torch.Tensor]]]: if not self.use_dpo_data_collator: warnings.warn('compute_loss is only implemented for DPODataCollatorWithPadding, and you passed a datacollator that is different than DPODataCollatorWithPadding - you might see unexpected behavior. Alternatively, you can implement your own prediction_step method if you are using a custom data collator') compute_loss_context_manager = amp.autocast('cuda') if self._peft_has_been_casted_to_bf16 else nullcontext() with compute_loss_context_manager: (loss, metrics) = self.get_batch_loss_metrics(model, inputs, train_eval='train') loss = loss.to(self.args.device) self.store_metrics(metrics, train_eval='train') if return_outputs: return (loss, metrics) return loss def get_batch_samples(self, model, batch: Dict[str, torch.LongTensor]) -> Tuple[str, str]: generate_context_manager = amp.autocast('cuda') if self._peft_has_been_casted_to_bf16 else nullcontext() with generate_context_manager: policy_output = model.generate(input_ids=batch['prompt_input_ids'], attention_mask=batch['prompt_attention_mask'], max_length=self.max_length, do_sample=True, pad_token_id=self.tokenizer.pad_token_id) policy_output = pad_to_length(policy_output, self.max_length, self.tokenizer.pad_token_id) policy_output_decoded = self.tokenizer.batch_decode(policy_output, skip_special_tokens=True) return policy_output_decoded def prediction_step(self, model: Union[PreTrainedModel, nn.Module], inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]]=None): if not self.use_dpo_data_collator: warnings.warn('prediction_step is only implemented for DPODataCollatorWithPadding, and you passed a datacollator that is different than DPODataCollatorWithPadding - you might see unexpected behavior. 
Alternatively, you can implement your own prediction_step method if you are using a custom data collator') if ignore_keys is None: if hasattr(model, 'config'): ignore_keys = getattr(model.config, 'keys_to_ignore_at_inference', []) else: ignore_keys = [] prediction_context_manager = amp.autocast('cuda') if self._peft_has_been_casted_to_bf16 else nullcontext() with torch.no_grad(), prediction_context_manager: (loss, metrics) = self.get_batch_loss_metrics(model, inputs, train_eval='eval') self.store_metrics(metrics, train_eval='eval') if prediction_loss_only: return (loss.detach(), None, None) logits_dict = {'eval_logits/chosen': metrics['eval_logits/chosen'], 'eval_logits/rejected': metrics['eval_logits/rejected']} logits = tuple((v.unsqueeze(dim=0) for (k, v) in logits_dict.items() if k not in ignore_keys)) logits = torch.stack(logits).mean(axis=1).to(self.accelerator.device) labels = torch.zeros(logits.shape[0], device=self.accelerator.device) return (loss.detach(), logits, labels) def store_metrics(self, metrics: Dict[str, float], train_eval: Literal['train', 'eval']='train') -> None: for (key, value) in metrics.items(): self._stored_metrics[train_eval][key].append(value) def evaluation_loop(self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool]=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str='eval') -> EvalLoopOutput: if self.generate_during_eval: num_samples = len(dataloader.dataset) random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size) random_batch_dataset = dataloader.dataset.select(random_indices) random_batch = self.data_collator(random_batch_dataset) random_batch = self._prepare_inputs(random_batch) policy_output_decoded = self.get_batch_samples(self.model, random_batch) self.log({'game_log': wandb.Table(columns=['Prompt', 'Policy'], rows=[[prompt, pol[len(prompt):]] for (prompt, pol) in zip(random_batch['prompt'], policy_output_decoded)])}) self.state.log_history.pop() initial_output = super().evaluation_loop(dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix) return initial_output def log(self, logs: Dict[str, float]) -> None: train_eval = 'train' if 'loss' in logs else 'eval' for (key, metrics) in self._stored_metrics[train_eval].items(): logs[key] = torch.tensor(metrics).mean().item() del self._stored_metrics[train_eval] return super().log(logs) def _shift_right(self, input_ids): if self.decoder_start_token_id is None: raise ValueError('model.config.decoder_start_token_id has to be defined. 
It is usually set to the pad_token_id.') if is_torch_fx_proxy(input_ids): shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), self.decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = self.decoder_start_token_id if self.pad_token_id is None: raise ValueError('model.config.pad_token_id has to be defined.') shifted_input_ids.masked_fill_(shifted_input_ids == -100, self.pad_token_id) return shifted_input_ids @wraps(Trainer.push_to_hub) def push_to_hub(self, commit_message: Optional[str]='End of training', blocking: bool=True, **kwargs) -> str: kwargs = trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs) return super().push_to_hub(commit_message=commit_message, blocking=blocking, **kwargs) # File: trl-main/trl/trainer/ppo_config.py import json import os import sys import warnings from dataclasses import dataclass, field from typing import Literal, Optional import numpy as np import tyro from typing_extensions import Annotated from trl.trainer.utils import exact_div from ..core import flatten_dict from ..import_utils import is_wandb_available JSONDict = Annotated[Optional[dict], tyro.conf.arg(metavar='JSON', constructor=json.loads)] @dataclass class PPOConfig: exp_name: str = os.path.basename(sys.argv[0])[:-len('.py')] seed: int = 0 log_with: Optional[Literal['wandb', 'tensorboard']] = None task_name: Optional[str] = None model_name: str = 'gpt2' query_dataset: str = 'stanfordnlp/imdb' reward_model: str = 'sentiment-analysis:lvwerra/distilbert-imdb' remove_unused_columns: bool = True tracker_kwargs: JSONDict = field(default_factory=dict) accelerator_kwargs: JSONDict = field(default_factory=dict) project_kwargs: JSONDict = field(default_factory=dict) tracker_project_name: str = 'trl' push_to_hub_if_best_kwargs: JSONDict = field(default_factory=dict) steps: int = 20000 learning_rate: float = 1.41e-05 adap_kl_ctrl: bool = True init_kl_coef: float = 0.2 kl_penalty: Literal['kl', 'abs', 'mse', 'full'] = 'kl' target: float = 6.0 horizon: float = 10000.0 gamma: float = 1.0 lam: float = 0.95 cliprange: float = 0.2 cliprange_value: float = 0.2 vf_coef: float = 0.1 batch_size: int = 128 forward_batch_size: Optional[int] = None mini_batch_size: int = 128 gradient_accumulation_steps: int = 1 world_size: tyro.conf.Suppress[int] = None ppo_epochs: int = 4 max_grad_norm: Optional[float] = None optimize_cuda_cache: Optional[bool] = None optimize_device_cache: bool = False early_stopping: bool = False target_kl: float = 1.0 compare_steps: int = 1 ratio_threshold: float = 10.0 use_score_scaling: bool = False use_score_norm: bool = False score_clip: Optional[float] = None whiten_rewards: bool = False gradient_checkpointing: bool = False is_encoder_decoder: Optional[tyro.conf.Suppress[bool]] = None is_peft_model: Optional[tyro.conf.Suppress[bool]] = None backward_batch_size: tyro.conf.Suppress[int] = None global_backward_batch_size: Optional[tyro.conf.Suppress[int]] = None global_batch_size: tyro.conf.Suppress[int] = None dataset_num_proc: Optional[int] = None if optimize_cuda_cache is not None: warnings.warn('The `optimize_cuda_cache` argument will be deprecated soon, please use `optimize_device_cache` instead.') if optimize_device_cache is True: raise ValueError('Both `optimize_device_cache` and `optimize_cuda_cache` were provided') optimize_device_cache = 
optimize_cuda_cache def __post_init__(self): warnings.warn('`PPOConfig` is deprecated and will be removed in the future. Please use `PPOv2Config` with `PPOv2Trainer` instead.', FutureWarning) if self.forward_batch_size is not None: warnings.warn('Note that using `forward_batch_size` is deprecated, use `mini_batch_size` instead. By setting it you overwrite `mini_batch_size` which affects both the batch size during forward passes and also the mini batch size for PPO optimization.') self.mini_batch_size = self.forward_batch_size self.backward_batch_size = self.mini_batch_size * self.gradient_accumulation_steps exact_div(self.batch_size, self.backward_batch_size, '`batch_size` must be a multiple of `mini_batch_size * gradient_accumulation_steps`') if self.log_with == 'wandb': if not is_wandb_available(): raise ImportError('Please install wandb to use wandb logging. You can do this by running `pip install wandb`.') self.total_ppo_epochs = int(np.ceil(self.steps / self.batch_size)) assert self.kl_penalty in ['kl', 'abs', 'mse', 'full'] def to_dict(self): output_dict = {} for (key, value) in self.__dict__.items(): output_dict[key] = value return flatten_dict(output_dict) # File: trl-main/trl/trainer/ppo_trainer.py import inspect import math import os import time import typing import warnings from contextlib import nullcontext from typing import Callable, List, Optional, Union import datasets import numpy as np import torch import torch.nn.functional as F from accelerate import Accelerator from accelerate.utils import ProjectConfiguration, gather_object, is_deepspeed_available from datasets import Dataset from huggingface_hub import whoami from packaging import version from torch.optim import Adam from transformers import DataCollatorForLanguageModeling, PreTrainedTokenizer, PreTrainedTokenizerBase, PreTrainedTokenizerFast from ..core import WANDB_PADDING, PPODecorators, clip_by_value, convert_to_scalar, entropy_from_logits, flatten_dict, logprobs_from_logits, masked_mean, masked_var, masked_whiten, set_seed, stack_dicts, stats_to_np from ..import_utils import is_npu_available, is_torch_greater_2_0, is_xpu_available from ..models import SUPPORTED_ARCHITECTURES, PreTrainedModelWrapper, create_reference_model, unwrap_model_for_generation from . import AdaptiveKLController, BaseTrainer, FixedKLController, PPOConfig, RunningMoments if is_deepspeed_available(): import deepspeed MODEL_CARD_TEMPLATE = '---\nlicense: apache-2.0\nlibrary_name: transformers\ntags:\n- trl\n- ppo\n- transformers\n- reinforcement-learning\n---\n\n# {model_name}\n\nThis is a [TRL language model](https://github.com/huggingface/trl) that has been fine-tuned with reinforcement learning to\n guide the model outputs according to a value, function, or human feedback. 
The model can be used for text generation.\n\n## Usage\n\nTo use this model for inference, first install the TRL library:\n\n```bash\npython -m pip install trl\n```\n\nYou can then generate text as follows:\n\n```python\nfrom transformers import pipeline\n\ngenerator = pipeline("text-generation", model="{model_id}")\noutputs = generator("Hello, my llama is cute")\n```\n\nIf you want to use the model for training or to obtain the outputs from the value head, load the model as follows:\n\n```python\nfrom transformers import AutoTokenizer\nfrom trl import AutoModelForCausalLMWithValueHead\n\ntokenizer = AutoTokenizer.from_pretrained("{model_id}")\nmodel = AutoModelForCausalLMWithValueHead.from_pretrained("{model_id}")\n\ninputs = tokenizer("Hello, my llama is cute", return_tensors="pt")\noutputs = model(**inputs, labels=inputs["input_ids"])\n```\n' class PPOTrainer(BaseTrainer): _tag_names = ['trl', 'ppo'] def __init__(self, config: Optional[PPOConfig]=None, model: Optional[PreTrainedModelWrapper]=None, ref_model: Optional[PreTrainedModelWrapper]=None, tokenizer: Optional[PreTrainedTokenizerBase]=None, dataset: Optional[Union[torch.utils.data.Dataset, Dataset]]=None, optimizer: Optional[torch.optim.Optimizer]=None, data_collator: Optional[typing.Callable]=None, num_shared_layers: Optional[int]=None, lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler]=None, training_data_collator: Optional[typing.Callable]=None): warnings.warn('`PPOTrainer` is deprecated and will be removed in trl v0.12. Please use `PPOv2Trainer` instead.', FutureWarning) super().__init__(config) set_seed(config.seed) if not isinstance(config, PPOConfig): raise ValueError(f'config must be a PPOConfig, got {type(config)}') if not isinstance(tokenizer, PreTrainedTokenizerBase): raise ValueError(f'tokenizer must be a PreTrainedTokenizerBase like a PreTrainedTokenizer or a PreTrainedTokenizerFast, got {type(tokenizer)}') if not isinstance(model, SUPPORTED_ARCHITECTURES): raise ValueError(f'model must be a PreTrainedModelWrapper, got {type(model)} - supported architectures are: {SUPPORTED_ARCHITECTURES}') self.accelerator = Accelerator(log_with=config.log_with, gradient_accumulation_steps=config.gradient_accumulation_steps, project_config=ProjectConfiguration(**config.project_kwargs), **config.accelerator_kwargs) config.world_size = self.accelerator.num_processes config.global_backward_batch_size = config.backward_batch_size * config.world_size config.global_batch_size = config.batch_size * config.world_size self.model = model self.model_params = filter(lambda p: p.requires_grad, self.model.parameters()) self.is_encoder_decoder = hasattr(self.model, 'is_encoder_decoder') self.is_peft_model = getattr(self.model, 'is_peft_model', False) config.is_encoder_decoder = self.is_encoder_decoder config.is_peft_model = self.is_peft_model is_using_tensorboard = config.log_with is not None and config.log_with == 'tensorboard' self.accelerator.init_trackers(config.tracker_project_name, config=dict(trl_ppo_trainer_config=config.to_dict()) if not is_using_tensorboard else config.to_dict(), init_kwargs=config.tracker_kwargs) self.is_using_text_environment = getattr(config, 'use_text_environment', False) if isinstance(ref_model, SUPPORTED_ARCHITECTURES): self.ref_model = ref_model if num_shared_layers is not None: warnings.warn('num_shared_layers is ignored when ref_model is provided. 
Two different models are used for the model and the reference model and no layers are shared.', UserWarning) elif ref_model is None and (not self.is_peft_model): self.ref_model = create_reference_model(self.model, num_shared_layers=num_shared_layers) elif self.is_peft_model: self.ref_model = None else: raise ValueError(f'ref_model must be a PreTrainedModelWrapper or `None`, got {type(ref_model)} - supported architectures are: {SUPPORTED_ARCHITECTURES} ') self.optional_peft_ctx = self.accelerator.unwrap_model(self.model).pretrained_model.disable_adapter if self.is_peft_model else nullcontext if not (isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast)): raise ValueError('tokenizer must be a transformers.PreTrainedTokenizer or transformers.PreTrainedTokenizerFast') self.tokenizer = tokenizer if dataset is not None and (not (isinstance(dataset, torch.utils.data.Dataset) or isinstance(dataset, Dataset))): raise ValueError('dataset must be a torch.utils.data.Dataset or datasets.Dataset') elif dataset is None: warnings.warn('No dataset is provided. Make sure to set config.batch_size to the correct value before training.', UserWarning) self.dataset = dataset self._signature_columns = None if self.dataset is not None: self.dataloader = self.prepare_dataloader(self.dataset, data_collator) elif self.dataset is None and self.accelerator.num_processes > 1: warnings.warn('No dataset is provided. In a multi-GPU setting, this will lead to an error. You should prepare your dataloader yourself with `dataloader = ppo_trainer.accelerator.prepare(dataloader)` and using `torch.utils.data.DataLoader`, or pass a dataset to the `PPOTrainer`. Please refer to the documentation for more details.', UserWarning) self.dataloader = None else: self.dataloader = None if training_data_collator is None: self.data_collator = DataCollatorForLanguageModeling(self.tokenizer, mlm=False) else: self.data_collator = training_data_collator if optimizer is None: self.optimizer = Adam(filter(lambda p: p.requires_grad, self.model.parameters()), lr=self.config.learning_rate) else: self.optimizer = optimizer self.lr_scheduler = lr_scheduler if self.lr_scheduler is not None: lr_scheduler_class = torch.optim.lr_scheduler._LRScheduler if not is_torch_greater_2_0() else torch.optim.lr_scheduler.LRScheduler if not isinstance(self.lr_scheduler, lr_scheduler_class): raise ValueError('lr_scheduler must be a torch.optim.lr_scheduler._LRScheduler or torch.optim.lr_scheduler.LRScheduler (for torch >= 2.0)') if self.config.adap_kl_ctrl: self.kl_ctl = AdaptiveKLController(self.config.init_kl_coef, self.config.target, self.config.horizon) else: self.kl_ctl = FixedKLController(self.config.init_kl_coef) is_deepspeed_used = self.accelerator.distributed_type == 'DEEPSPEED' and hasattr(self.accelerator.state, 'deepspeed_plugin') if config.gradient_checkpointing: self.model.gradient_checkpointing_enable() if hasattr(self.model, 'enable_input_require_grads'): self.model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) self.model.pretrained_model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) (self.model, self.optimizer, self.data_collator, self.dataloader, self.lr_scheduler) = self.accelerator.prepare(self.model, self.optimizer, self.data_collator, self.dataloader, self.lr_scheduler) if is_deepspeed_used: if not self.is_peft_model and (not (getattr(self.ref_model.pretrained_model, 'is_loaded_in_8bit', False) or 
getattr(self.ref_model.pretrained_model, 'is_loaded_in_4bit', False))): self.ref_model = self._prepare_deepspeed(self.ref_model) else: self.ref_model = self.accelerator.prepare(self.ref_model) self.is_distributed = self.accelerator.num_processes > 1 self.current_step = 0 if config.push_to_hub_if_best_kwargs: if 'repo_id' not in config.push_to_hub_if_best_kwargs: raise ValueError('You have to specify repo_id in order to push the model to the hub!') self.push_to_hub_kwargs = config.push_to_hub_if_best_kwargs self.compare_step = 0 self.highest_reward = torch.tensor(-float('inf')) if not getattr(self.model, 'is_sequential_parallel', False): self.current_device = self.accelerator.device elif is_xpu_available(): self.current_device = torch.device('xpu:0') elif is_npu_available(): self.current_device = torch.device('npu:0') else: self.current_device = torch.device('cuda:0') PPODecorators.optimize_device_cache = self.config.optimize_device_cache self.running = RunningMoments(self.accelerator) def _filter_kwargs(self, kwargs, target_func): return {k: v for (k, v) in kwargs.items() if k in inspect.signature(target_func).parameters.keys()} def prepare_dataloader(self, dataset: Union[torch.utils.data.Dataset, Dataset], data_collator=None): if isinstance(dataset, Dataset): dataset = self._remove_unused_columns(dataset) dataloader = torch.utils.data.DataLoader(dataset, batch_size=self.config.batch_size, collate_fn=data_collator, shuffle=True, drop_last=True) return dataloader def _set_signature_columns_if_needed(self): if self._signature_columns is None: signature = inspect.signature(self.model.forward) self._signature_columns = list(signature.parameters.keys()) self._signature_columns += ['label', 'query', 'response'] def _remove_unused_columns(self, dataset: 'Dataset'): if not self.config.remove_unused_columns: return dataset self._set_signature_columns_if_needed() signature_columns = self._signature_columns ignored_columns = list(set(dataset.column_names) - set(signature_columns)) columns = [k for k in signature_columns if k in dataset.column_names] if version.parse(datasets.__version__) < version.parse('1.4.0'): dataset.set_format(type=dataset.format['type'], columns=columns, format_kwargs=dataset.format['format_kwargs']) return dataset else: return dataset.remove_columns(ignored_columns) def generate(self, query_tensor: Union[torch.Tensor, List[torch.Tensor]], length_sampler: Optional[Callable]=None, batch_size: int=4, return_prompt: bool=True, generate_ref_response: bool=False, **generation_kwargs): if generate_ref_response: ref_model = self.model if self.is_peft_model else self.ref_model if isinstance(query_tensor, List): response = self._generate_batched(self.model, query_tensor, length_sampler=length_sampler, batch_size=batch_size, return_prompt=return_prompt, **generation_kwargs) if generate_ref_response: ref_response = self._generate_batched(ref_model, query_tensor, length_sampler=length_sampler, batch_size=batch_size, return_prompt=return_prompt, **generation_kwargs) else: if len(query_tensor.shape) == 2: raise ValueError('query_tensor must be a tensor of shape (`seq_len`) or a list of tensors of shape (`seq_len`)') if length_sampler is not None: generation_kwargs['max_new_tokens'] = length_sampler() with unwrap_model_for_generation(self.model, self.accelerator) as unwrapped_model: response = unwrapped_model.generate(input_ids=query_tensor.unsqueeze(dim=0), **generation_kwargs) if generate_ref_response: with unwrap_model_for_generation(ref_model, self.accelerator, 
is_peft_model=self.is_peft_model) as unwrapped_model: ref_response = unwrapped_model.generate(input_ids=query_tensor.unsqueeze(dim=0), **generation_kwargs) if not return_prompt and (not self.is_encoder_decoder): response = response[:, query_tensor.shape[0]:] if generate_ref_response: ref_response = ref_response[:, query_tensor.shape[0]:] if generate_ref_response: return (response, ref_response) return response def _generate_batched(self, model: PreTrainedModelWrapper, query_tensors: List[torch.Tensor], length_sampler: Optional[Callable]=None, batch_size: int=4, return_prompt: bool=True, pad_to_multiple_of: Optional[int]=None, remove_padding: bool=True, **generation_kwargs): outputs = [] padding_side_default = self.tokenizer.padding_side if not self.is_encoder_decoder: self.tokenizer.padding_side = 'left' batch_size = min(len(query_tensors), batch_size) for i in range(0, len(query_tensors), batch_size): if length_sampler is not None: generation_kwargs['max_new_tokens'] = length_sampler() end_index = min(len(query_tensors), i + batch_size) batch = query_tensors[i:end_index] batch_mask = [torch.ones_like(element) for element in batch] inputs = {'input_ids': batch, 'attention_mask': batch_mask} padded_inputs = self.tokenizer.pad(inputs, padding=True, max_length=None, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt').to(self.current_device) with unwrap_model_for_generation(model, self.accelerator) as unwrapped_model: generations = unwrapped_model.generate(**padded_inputs, **generation_kwargs) for (generation, mask) in zip(generations, padded_inputs['attention_mask']): if not self.is_encoder_decoder: output = generation[(1 - mask).sum():] else: output = generation if not return_prompt and (not self.is_encoder_decoder): output = output[mask.sum():] if remove_padding and self.tokenizer.eos_token_id in output: pad_mask = output == self.tokenizer.eos_token_id pad_start = torch.nonzero(pad_mask, as_tuple=False)[0, 0].item() output = output[:pad_start + 1] outputs.append(output) self.tokenizer.padding_side = padding_side_default return outputs def _step_safety_checker(self, batch_size: int, queries: List[torch.LongTensor], responses: List[torch.LongTensor], scores: List[torch.FloatTensor], masks: Optional[List[torch.LongTensor]]=None): for (name, tensor_list) in zip(['queries', 'responses', 'scores'], [queries, responses, scores]): if not isinstance(tensor_list, list): raise ValueError(f'{name} must be a list of tensors - got {type(tensor_list)}') if not isinstance(tensor_list[0], torch.Tensor): raise ValueError(f'Elements in {name} must be tensors - got {type(tensor_list[0])}') if batch_size is not None and len(tensor_list) != batch_size: raise ValueError(f'Batch size ({batch_size}) does not match number of examples - but got {len(tensor_list)} for: {name}') queries = [tensor.to(self.current_device) for tensor in queries] responses = [tensor.to(self.current_device) for tensor in responses] scores = [tensor.to(self.current_device) for tensor in scores] masks = [tensor.to(self.current_device) for tensor in masks] if masks is not None else None for (i, score) in enumerate(scores): if score.dim() > 1: raise ValueError(f'Scores must be 1-dimensional - got {score.dim()} for {score}') elif score.dim() == 1: scores[i] = score.squeeze() return (queries, responses, scores, masks) @PPODecorators.empty_device_cache() def step(self, queries: List[torch.LongTensor], responses: List[torch.LongTensor], scores: List[torch.FloatTensor], response_masks: Optional[List[torch.LongTensor]]=None): bs = 
self.config.batch_size (queries, responses, scores, response_masks) = self._step_safety_checker(bs, queries, responses, scores, response_masks) scores = torch.tensor(scores, device=self.current_device) if self.config.use_score_scaling: (scores_mean, scores_std) = self.running.update(scores) tensor_to_kwargs = dict(dtype=scores.dtype, device=scores.device) score_scaling_factor = self.running.std.to(**tensor_to_kwargs) + torch.finfo(scores.dtype).eps if self.config.use_score_norm: scores = (scores - self.running.mean.to(**tensor_to_kwargs)) / score_scaling_factor else: scores /= score_scaling_factor if self.config.score_clip is not None: scores_dtype = scores.dtype scores = torch.clip(scores.float(), -self.config.score_clip, self.config.score_clip).to(dtype=scores_dtype) if hasattr(self, 'highest_reward'): if self.compare_step % self.config.compare_steps == 0: curr_mean_reward = scores.mean() if curr_mean_reward > self.highest_reward: self.highest_reward = curr_mean_reward self.push_to_hub(**self.push_to_hub_kwargs) self.compare_step += 1 timing = dict() t0 = time.time() t = time.time() model_inputs = self.prepare_model_inputs(queries, responses) if self.is_distributed: pad_first = self.tokenizer.padding_side == 'left' model_inputs['input_ids'] = self.accelerator.pad_across_processes(model_inputs['input_ids'], dim=1, pad_index=self.tokenizer.pad_token_id, pad_first=pad_first) model_inputs['attention_mask'] = self.accelerator.pad_across_processes(model_inputs['attention_mask'], dim=1, pad_index=0, pad_first=pad_first) if self.is_encoder_decoder: model_inputs['decoder_input_ids'] = self.accelerator.pad_across_processes(model_inputs['decoder_input_ids'], dim=1, pad_index=self.tokenizer.pad_token_id, pad_first=pad_first) model_inputs['decoder_attention_mask'] = self.accelerator.pad_across_processes(model_inputs['decoder_attention_mask'], dim=1, pad_index=0, pad_first=pad_first) model_inputs_names = list(model_inputs.keys()) full_kl_penalty = self.config.kl_penalty == 'full' with torch.no_grad(): (all_logprobs, logits_or_none, values, masks) = self.batched_forward_pass(self.model, queries, responses, model_inputs, response_masks=response_masks, return_logits=full_kl_penalty) with self.optional_peft_ctx(): (ref_logprobs, ref_logits_or_none, _, _) = self.batched_forward_pass(self.model if self.is_peft_model else self.ref_model, queries, responses, model_inputs, return_logits=full_kl_penalty) timing['time/ppo/forward_pass'] = time.time() - t with torch.no_grad(): t = time.time() if full_kl_penalty: active_full_logprobs = logprobs_from_logits(logits_or_none, None, gather=False) ref_full_logprobs = logprobs_from_logits(ref_logits_or_none, None, gather=False) (rewards, non_score_reward, kls) = self.compute_rewards(scores, active_full_logprobs, ref_full_logprobs, masks) else: (rewards, non_score_reward, kls) = self.compute_rewards(scores, all_logprobs, ref_logprobs, masks) timing['time/ppo/compute_rewards'] = time.time() - t t = time.time() (values, advantages, returns) = self.compute_advantages(values, rewards, masks) timing['time/ppo/compute_advantages'] = time.time() - t batch_dict = {'queries': queries, 'responses': responses, 'logprobs': all_logprobs.to(torch.float32), 'values': values.to(torch.float32), 'masks': masks, 'advantages': advantages, 'returns': returns} batch_dict.update(model_inputs) t = time.time() all_stats = [] early_stop = False for _ in range(self.config.ppo_epochs): if early_stop: break b_inds = np.random.permutation(bs) for backward_batch_start in range(0, bs, 
self.config.backward_batch_size): backward_batch_end = backward_batch_start + self.config.backward_batch_size backward_batch_inds = b_inds[backward_batch_start:backward_batch_end] for mini_batch_start in range(0, self.config.backward_batch_size, self.config.mini_batch_size): mini_batch_end = mini_batch_start + self.config.mini_batch_size mini_batch_inds = backward_batch_inds[mini_batch_start:mini_batch_end] mini_batch_dict = {'logprobs': batch_dict['logprobs'][mini_batch_inds], 'values': batch_dict['values'][mini_batch_inds], 'masks': batch_dict['masks'][mini_batch_inds], 'queries': [batch_dict['queries'][i] for i in mini_batch_inds], 'responses': [batch_dict['responses'][i] for i in mini_batch_inds], 'advantages': batch_dict['advantages'][mini_batch_inds], 'returns': batch_dict['returns'][mini_batch_inds]} for k in model_inputs_names: mini_batch_dict[k] = batch_dict[k][mini_batch_inds] with self.accelerator.accumulate(self.model): model_inputs = {k: mini_batch_dict[k] for k in model_inputs_names} (logprobs, logits, vpreds, _) = self.batched_forward_pass(self.model, mini_batch_dict['queries'], mini_batch_dict['responses'], model_inputs, return_logits=True) train_stats = self.train_minibatch(mini_batch_dict['logprobs'], mini_batch_dict['values'], logprobs, logits, vpreds, mini_batch_dict['masks'], mini_batch_dict['advantages'], mini_batch_dict['returns']) all_stats.append(train_stats) if self.config.early_stopping: policykl = train_stats['policy/policykl'] early_stop = self._early_stop(policykl) if early_stop: break timing['time/ppo/optimize_step'] = time.time() - t t = time.time() train_stats = stack_dicts(all_stats) train_stats['policy/advantages'] = torch.flatten(train_stats['policy/advantages']).unsqueeze(0) train_stats['policy/advantages'] = torch.nan_to_num(train_stats['policy/advantages'], WANDB_PADDING) train_stats['policy/ratio'] = torch.flatten(train_stats['policy/ratio']).unsqueeze(0) stats = self.record_step_stats(scores=scores, logprobs=all_logprobs, ref_logprobs=ref_logprobs, non_score_reward=non_score_reward, train_stats=train_stats, kl_coef=self.kl_ctl.value, masks=masks, queries=queries, responses=responses, kls=kls) if self.is_distributed: stats = self.gather_stats(stats) stats = stats_to_np(stats) timing['time/ppo/calc_stats'] = time.time() - t stats['ppo/learning_rate'] = self.optimizer.param_groups[0]['lr'] self.kl_ctl.update(stats['objective/kl'], self.config.batch_size * self.accelerator.num_processes) timing['time/ppo/total'] = time.time() - t0 stats.update(timing) if self.config.log_with != 'wandb': stats = convert_to_scalar(stats) if self.lr_scheduler is not None: self.lr_scheduler.step() return stats def _early_stop(self, policykl): early_stop = False if not self.config.early_stopping: return early_stop if not self.is_distributed and policykl > 1.5 * self.config.target_kl: self.optimizer.zero_grad() early_stop = True elif self.is_distributed: import torch.distributed as dist dist.barrier() dist.all_reduce(policykl, dist.ReduceOp.SUM) policykl /= self.accelerator.num_processes if policykl > 1.5 * self.config.target_kl: self.optimizer.zero_grad() early_stop = True return early_stop def gather_stats(self, stats): import torch.distributed as dist dist.barrier() for (k, v) in stats.items(): if isinstance(v, torch.Tensor): dist.all_reduce(v.to(self.accelerator.device), dist.ReduceOp.SUM) v /= self.accelerator.num_processes stats[k] = v return stats def prepare_model_inputs(self, queries: torch.Tensor, responses: torch.Tensor): if self.is_encoder_decoder: input_data = 
self.data_collator([{'input_ids': q, 'attention_mask': torch.ones_like(q)} for q in queries]).to(self.current_device) decoder_inputs = self.data_collator([{'input_ids': r, 'attention_mask': torch.ones_like(r)} for r in responses]).to(self.current_device) input_data['decoder_input_ids'] = decoder_inputs['input_ids'] input_data['decoder_attention_mask'] = decoder_inputs['attention_mask'] else: input_ids = [torch.cat([q, r]) for (q, r) in zip(queries, responses)] input_data = self.data_collator([{'input_ids': ids, 'attention_mask': torch.ones_like(ids)} for ids in input_ids]).to(self.current_device) input_data.pop('labels', None) return input_data @PPODecorators.empty_device_cache() def batched_forward_pass(self, model: PreTrainedModelWrapper, queries: torch.Tensor, responses: torch.Tensor, model_inputs: dict, return_logits: bool=False, response_masks: Optional[torch.Tensor]=None): bs = len(queries) fbs = self.config.mini_batch_size all_logprobs = [] all_logits = [] all_masks = [] all_values = [] model.eval() for i in range(math.ceil(bs / fbs)): input_kwargs = {key: value[i * fbs:(i + 1) * fbs] for (key, value) in model_inputs.items()} query_batch = queries[i * fbs:(i + 1) * fbs] response_batch = responses[i * fbs:(i + 1) * fbs] if response_masks is not None: response_masks_batch = response_masks[i * fbs:(i + 1) * fbs] (logits, _, values) = model(**input_kwargs) if self.is_encoder_decoder: input_ids = input_kwargs['decoder_input_ids'] attention_mask = input_kwargs['decoder_attention_mask'] else: input_ids = input_kwargs['input_ids'] attention_mask = input_kwargs['attention_mask'] logprobs = logprobs_from_logits(logits[:, :-1, :], input_ids[:, 1:]) masks = torch.zeros_like(attention_mask) masks[:, :-1] = attention_mask[:, 1:] for j in range(len(query_batch)): if self.is_encoder_decoder: start = 1 end = attention_mask[j, :].sum() - 1 else: start = len(query_batch[j]) - 1 if attention_mask[j, 0] == 0: start += attention_mask[j, :].nonzero()[0] end = start + len(response_batch[j]) masks[j, :start] = 0 masks[j, end:] = 0 if response_masks is not None: masks[j, start:end] = masks[j, start:end] * response_masks_batch[j] if return_logits: all_logits.append(logits) else: del logits all_values.append(values) all_logprobs.append(logprobs) all_masks.append(masks) return (torch.cat(all_logprobs), torch.cat(all_logits)[:, :-1] if return_logits else None, torch.cat(all_values)[:, :-1], torch.cat(all_masks)[:, :-1]) @PPODecorators.empty_device_cache() def train_minibatch(self, old_logprobs: torch.FloatTensor, values: torch.FloatTensor, logprobs: torch.FloatTensor, logits: torch.FloatTensor, vpreds: torch.FloatTensor, mask: torch.LongTensor, advantages: torch.FloatTensor, returns: torch.FloatTensor): self.model.train() (loss_p, loss_v, train_stats) = self.loss(old_logprobs, values, logits, vpreds, logprobs, mask, advantages, returns) loss = loss_p + loss_v self.accelerator.backward(loss) if self.config.max_grad_norm is not None: if self.accelerator.sync_gradients: self.accelerator.clip_grad_norm_(self.model_params, self.config.max_grad_norm) self.optimizer.step() self.optimizer.zero_grad() return train_stats def compute_rewards(self, scores: torch.FloatTensor, logprobs: torch.FloatTensor, ref_logprobs: torch.FloatTensor, masks: torch.LongTensor): (rewards, non_score_rewards, kls) = ([], [], []) for (score, logprob, ref_logprob, mask) in zip(scores, logprobs, ref_logprobs, masks): kl = self._kl_penalty(logprob, ref_logprob) kls.append(kl) non_score_reward = -self.kl_ctl.value * kl 
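# Sketch of the per-token reward shaping at this point (hypothetical values): the dense
# part of the reward is only the KL penalty, and the scalar reward-model score is added
# at the last non-masked position a few statements below. E.g. with kl_ctl.value = 0.2:
#   kl               = [0.10, 0.20, 0.30]
#   non_score_reward = [-0.02, -0.04, -0.06]
#   reward           = [-0.02, -0.04,  0.94]   # score = 1.0 added at the last token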
non_score_rewards.append(non_score_reward) reward = non_score_reward.clone() last_non_masked_index = mask.nonzero()[-1] reward[last_non_masked_index] += score rewards.append(reward) return (torch.stack(rewards), torch.stack(non_score_rewards), torch.stack(kls)) def _kl_penalty(self, logprob: torch.FloatTensor, ref_logprob: torch.FloatTensor) -> torch.FloatTensor: if self.config.kl_penalty == 'kl': return logprob - ref_logprob if self.config.kl_penalty == 'abs': return (logprob - ref_logprob).abs() if self.config.kl_penalty == 'mse': return 0.5 * (logprob - ref_logprob).square() if self.config.kl_penalty == 'full': return F.kl_div(ref_logprob, logprob, log_target=True, reduction='none').sum(-1) raise NotImplementedError def compute_advantages(self, values: torch.FloatTensor, rewards: torch.FloatTensor, mask: torch.FloatTensor): lastgaelam = 0 advantages_reversed = [] gen_len = rewards.shape[-1] values = values * mask rewards = rewards * mask if self.config.whiten_rewards: rewards = masked_whiten(rewards, mask, shift_mean=False) for t in reversed(range(gen_len)): nextvalues = values[:, t + 1] if t < gen_len - 1 else 0.0 delta = rewards[:, t] + self.config.gamma * nextvalues - values[:, t] lastgaelam = delta + self.config.gamma * self.config.lam * lastgaelam advantages_reversed.append(lastgaelam) advantages = torch.stack(advantages_reversed[::-1]).transpose(0, 1) returns = advantages + values advantages = masked_whiten(advantages, mask) advantages = advantages.detach() return (values, advantages, returns) def loss(self, old_logprobs: torch.FloatTensor, values: torch.FloatTensor, logits: torch.FloatTensor, vpreds: torch.FloatTensor, logprobs: torch.FloatTensor, mask: torch.LongTensor, advantages: torch.FloatTensor, returns: torch.FloatTensor): vpredclipped = clip_by_value(vpreds, values - self.config.cliprange_value, values + self.config.cliprange_value) vf_losses1 = (vpreds - returns) ** 2 vf_losses2 = (vpredclipped - returns) ** 2 vf_loss = 0.5 * masked_mean(torch.max(vf_losses1, vf_losses2), mask) vf_clipfrac = masked_mean(torch.gt(vf_losses2, vf_losses1).float(), mask) ratio = torch.exp(logprobs - old_logprobs) pg_losses = -advantages * ratio pg_losses2 = -advantages * torch.clamp(ratio, 1.0 - self.config.cliprange, 1.0 + self.config.cliprange) pg_loss = masked_mean(torch.max(pg_losses, pg_losses2), mask) pg_clipfrac = masked_mean(torch.gt(pg_losses2, pg_losses).float(), mask) loss = pg_loss + self.config.vf_coef * vf_loss avg_ratio = masked_mean(ratio, mask).item() if avg_ratio > self.config.ratio_threshold: warnings.warn(f'The average ratio of batch ({avg_ratio:.2f}) exceeds threshold {self.config.ratio_threshold:.2f}. 
Skipping batch.') pg_loss = pg_loss * 0.0 vf_loss = vf_loss * 0.0 loss = loss * 0.0 entropy = masked_mean(entropy_from_logits(logits), mask) approxkl = 0.5 * masked_mean((logprobs - old_logprobs) ** 2, mask) policykl = masked_mean(old_logprobs - logprobs, mask) (return_mean, return_var) = (masked_mean(returns, mask), masked_var(returns, mask)) (value_mean, value_var) = (masked_mean(values, mask), masked_var(values, mask)) stats = dict(loss=dict(policy=pg_loss.detach(), value=vf_loss.detach(), total=loss.detach()), policy=dict(entropy=entropy.detach(), approxkl=approxkl.detach(), policykl=policykl.detach(), clipfrac=pg_clipfrac.detach(), advantages=advantages.detach(), advantages_mean=masked_mean(advantages, mask).detach(), ratio=ratio.detach()), returns=dict(mean=return_mean.detach(), var=return_var.detach()), val=dict(vpred=masked_mean(vpreds, mask).detach(), error=masked_mean((vpreds - returns) ** 2, mask).detach(), clipfrac=vf_clipfrac.detach(), mean=value_mean.detach(), var=value_var.detach())) return (pg_loss, self.config.vf_coef * vf_loss, flatten_dict(stats)) def record_step_stats(self, kl_coef: float, **data): mask = data.pop('masks') kls = data.pop('kls') kl_list = (kls * mask).sum(axis=-1) mean_kl = kl_list.mean() mean_entropy = (-data['logprobs'] * mask).sum(axis=-1).mean() mean_non_score_reward = masked_mean(data['non_score_reward'], mask) mean_scores = data['scores'].mean() std_scores = data['scores'].std() if mean_kl.item() < -1.0: warnings.warn(f'KL divergence is starting to become negative: {mean_kl.item():.2f} - this might be a precursor for failed training. sometimes this happens because the generation kwargs are not correctly set. Please make sure that the generation kwargs are set correctly, or review your training hyperparameters.') stats = {'objective/kl': mean_kl, 'objective/kl_dist': kl_list, 'objective/logprobs': data['logprobs'], 'objective/ref_logprobs': data['ref_logprobs'], 'objective/kl_coef': kl_coef, 'objective/entropy': mean_entropy, 'ppo/mean_non_score_reward': mean_non_score_reward, 'ppo/mean_scores': mean_scores, 'ppo/std_scores': std_scores} query_lens = torch.tensor([len(query) for query in data['queries']], dtype=torch.float) response_lens = torch.tensor([len(response) for response in data['responses']], dtype=torch.float) stats['tokens/queries_len_mean'] = torch.mean(query_lens).cpu().numpy().item() stats['tokens/queries_len_std'] = torch.std(query_lens).cpu().numpy().item() stats['tokens/queries_dist'] = query_lens.cpu().numpy() stats['tokens/responses_len_mean'] = torch.mean(response_lens).cpu().numpy().item() stats['tokens/responses_len_std'] = torch.std(response_lens).cpu().numpy().item() stats['tokens/responses_dist'] = response_lens.cpu().numpy() for (k, v) in data['train_stats'].items(): stats[f'ppo/{k}'] = torch.mean(v, axis=0) stats['ppo/val/var_explained'] = 1 - stats['ppo/val/error'] / stats['ppo/returns/var'] return stats def log_stats(self, stats: dict, batch: dict, rewards: List[torch.FloatTensor], columns_to_log: typing.Iterable[str]=('query', 'response')): if not isinstance(rewards, torch.Tensor): rewards = torch.tensor(rewards).to(self.current_device) rewards = self.accelerator.gather(rewards).flatten() if self.config.log_with == 'wandb': import wandb if any((column_to_log not in batch.keys() for column_to_log in columns_to_log)): raise ValueError(f'Columns to log {columns_to_log} are not present in the batch {batch.keys()}.') batch_list = [batch[column_to_log] for column_to_log in columns_to_log] if self.is_distributed: 
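# In distributed runs each process holds only its local slice of the logged columns,
# so they are gathered with accelerate's gather_object below before the main process
# builds the logging table; the reward tensor was already gathered with
# self.accelerator.gather(rewards) at the top of this method.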
gathered_batch_list = [] for b in batch_list: flattened = gather_object(b) gathered_batch_list.append(flattened) batch_list = gathered_batch_list if self.accelerator.is_main_process: logs = {} if 'query' not in batch.keys() and 'response' not in batch.keys(): warnings.warn("The game logs will not be logged because the batch does not contain the keys 'query' and 'response'. ") elif self.config.log_with == 'wandb': table_rows = [list(r) for r in zip(*batch_list, rewards.cpu().tolist())] logs.update({'game_log': wandb.Table(columns=[*columns_to_log, 'reward'], rows=table_rows)}) logs.update(stats) for (k, v) in logs.items(): if isinstance(v, torch.Tensor) and v.dtype == torch.bfloat16: logs[k] = v.float() logs['env/reward_mean'] = torch.mean(rewards).cpu().numpy().item() logs['env/reward_std'] = torch.std(rewards).cpu().numpy().item() logs['env/reward_dist'] = rewards.cpu().numpy() if self.config.log_with == 'tensorboard': self.current_step += 1 self.accelerator.log(logs, step=self.current_step if self.config.log_with == 'tensorboard' else None) def create_model_card(self, path: str, model_name: Optional[str]='TRL Model') -> None: try: user = whoami()['name'] except Exception: warnings.warn('Cannot retrieve user information assuming you are running in offline mode.') return if not os.path.exists(path): os.makedirs(path) model_card_content = MODEL_CARD_TEMPLATE.format(model_name=model_name, model_id=f'{user}/{path}') with open(os.path.join(path, 'README.md'), 'w', encoding='utf-8') as f: f.write(model_card_content) def _save_pretrained(self, save_directory: str) -> None: self.accelerator.unwrap_model(self.model).save_pretrained(save_directory) self.tokenizer.save_pretrained(save_directory) self.create_model_card(save_directory) def _show_tokens(self, tokens, masks): from rich import print from rich.text import Text text = Text() for (_i, (token, mask)) in enumerate(zip(tokens, masks)): if mask == 1: text.append(self.tokenizer.decode(token.item()), style='black on deep_sky_blue1') text.append(' ') else: text.append(self.tokenizer.decode(token.item()), style='black on cyan3') text.append(' ') print(text) def _prepare_deepspeed(self, model: PreTrainedModelWrapper): deepspeed_plugin = self.accelerator.state.deepspeed_plugin config_kwargs = deepspeed_plugin.deepspeed_config if model is not None: if hasattr(model, 'config'): hidden_size = max(model.config.hidden_sizes) if getattr(model.config, 'hidden_sizes', None) else getattr(model.config, 'hidden_size', None) if hidden_size is not None and config_kwargs['zero_optimization']['stage'] == 3: config_kwargs.update({'zero_optimization.reduce_bucket_size': hidden_size * hidden_size, 'zero_optimization.stage3_param_persistence_threshold': 10 * hidden_size, 'zero_optimization.stage3_prefetch_bucket_size': 0.9 * hidden_size * hidden_size}) if config_kwargs['zero_optimization']['stage'] != 3: config_kwargs['zero_optimization']['stage'] = 0 (model, *_) = deepspeed.initialize(model=model, config=config_kwargs) model.eval() return model # File: trl-main/trl/trainer/ppov2_config.py import os from dataclasses import dataclass from ..trainer.utils import OnPolicyConfig @dataclass class PPOv2Config(OnPolicyConfig): exp_name: str = os.path.basename(__file__)[:-len('.py')] reward_model_path: str = 'EleutherAI/pythia-160m' num_ppo_epochs: int = 4 whiten_rewards: bool = False kl_coef: float = 0.05 cliprange: float = 0.2 vf_coef: float = 0.1 cliprange_value: float = 0.2 gamma: float = 1.0 lam: float = 0.95 # File: trl-main/trl/trainer/ppov2_trainer.py import gc import 
math import os import time from collections import defaultdict from functools import wraps from typing import Dict, List, Optional, Tuple, Union import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F from accelerate import Accelerator from accelerate.utils import broadcast, gather_object from datasets import Dataset from torch.utils.data import DataLoader from transformers import DataCollatorWithPadding, GenerationConfig, PreTrainedTokenizer, Trainer, TrainerCallback, TrainerControl from transformers.integrations import get_reporting_integration_callbacks from transformers.trainer import DEFAULT_CALLBACKS, DEFAULT_PROGRESS_CALLBACK from transformers.trainer_callback import CallbackHandler, PrinterCallback from ..core import masked_mean, masked_whiten from ..models.utils import unwrap_model_for_generation from ..trainer.utils import OnlineTrainerState, batch_generation, disable_dropout_in_model, exact_div, first_true_indices, forward, get_reward, prepare_deepspeed, print_rich_table, truncate_response from .ppov2_config import PPOv2Config from .utils import trl_sanitze_kwargs_for_tagging INVALID_LOGPROB = 1.0 class PolicyAndValueWrapper(nn.Module): def __init__(self, policy, value_model) -> None: super().__init__() self.policy = policy self.value_model = value_model self.critic_backbone = getattr(value_model, value_model.base_model_prefix) def forward(self, **kwargs): output = self.critic_backbone(**kwargs) logits = self.value_model.score(output.hidden_states[-1]) return (self.policy(**kwargs), logits) class PPOv2Trainer(Trainer): _tag_names = ['trl', 'ppo'] def __init__(self, config: PPOv2Config, tokenizer: PreTrainedTokenizer, policy: nn.Module, ref_policy: nn.Module, reward_model: nn.Module, train_dataset: Dataset, value_model: Optional[nn.Module]=None, data_collator: Optional[DataCollatorWithPadding]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), callbacks: Optional[List[TrainerCallback]]=None) -> None: self.args = config args = config self.tokenizer = tokenizer self.policy = policy self.policy.generation_config.eos_token_id = None self.policy.generation_config.pad_token_id = None self.ref_policy = ref_policy self.reward_model = reward_model self.train_dataset = train_dataset self.train_dataset_len = len(train_dataset) self.value_model = value_model self.data_collator = data_collator self.eval_dataset = eval_dataset (self.optimizer, self.lr_scheduler) = optimizers if args.total_episodes is None: args.total_episodes = int(args.num_train_epochs * self.train_dataset_len) accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps) self.accelerator = accelerator args.world_size = accelerator.num_processes args.local_batch_size = args.per_device_train_batch_size * args.gradient_accumulation_steps * args.num_mini_batches args.micro_batch_size = int(args.per_device_train_batch_size * args.world_size) args.batch_size = int(args.local_batch_size * args.world_size) args.mini_batch_size = exact_div(args.batch_size, args.num_mini_batches, '`batch_size` must be a multiple of `num_mini_batches`') args.local_mini_batch_size = exact_div(args.local_batch_size, args.num_mini_batches, '`local_batch_size` must be a multiple of `num_mini_batches`') if args.whiten_rewards: assert args.local_mini_batch_size >= 8, f'Per-rank minibatch size {args.local_mini_batch_size} is insufficient for whitening' args.num_total_batches = 
math.ceil(args.total_episodes / args.batch_size) time_tensor = torch.tensor(int(time.time()), device=accelerator.device) time_int = broadcast(time_tensor, 0).item() args.run_name = f'{args.exp_name}__{args.seed}__{time_int}' self.local_seed = args.seed + accelerator.process_index * 100003 if args.num_sample_generations > 0: self.sample_generations_freq = max(1, args.num_total_batches // args.num_sample_generations) self.local_dataloader_batch_size = args.local_batch_size for module in [policy, ref_policy, value_model, reward_model]: disable_dropout_in_model(module) if args.stop_token and args.stop_token == 'eos': args.stop_token_id = tokenizer.eos_token_id self.model = PolicyAndValueWrapper(policy, value_model) self.model.config = policy.config self.create_optimizer_and_scheduler(num_training_steps=args.num_total_batches) self.state = OnlineTrainerState(is_local_process_zero=self.is_local_process_zero(), is_world_process_zero=self.is_world_process_zero()) default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to) self.callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks self.callback_handler = CallbackHandler(self.callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler) self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK) self.control = TrainerControl() self.current_flos = 0 self.hp_search_backend = None self.is_deepspeed_enabled = getattr(self.accelerator.state, 'deepspeed_plugin', None) is not None self.is_fsdp_enabled = getattr(self.accelerator.state, 'fsdp_plugin', None) is not None self.hub_model_id = None if self.args.push_to_hub: self.init_hf_repo() if self.args.should_save: os.makedirs(self.args.output_dir, exist_ok=True) self.dataloader = DataLoader(self.train_dataset, batch_size=self.local_dataloader_batch_size, shuffle=True, collate_fn=DataCollatorWithPadding(tokenizer), drop_last=True) torch.manual_seed(args.seed) (self.model, self.optimizer, self.dataloader) = accelerator.prepare(self.model, self.optimizer, self.dataloader) torch.manual_seed(self.local_seed) self.eval_dataloader = DataLoader(self.eval_dataset, batch_size=args.per_device_eval_batch_size, collate_fn=DataCollatorWithPadding(self.tokenizer), drop_last=True) self.eval_dataloader = accelerator.prepare(self.eval_dataloader) if self.is_deepspeed_enabled: self.reward_model = prepare_deepspeed(self.reward_model, args.per_device_train_batch_size, args.fp16, args.bf16) self.ref_policy = prepare_deepspeed(self.ref_policy, args.per_device_train_batch_size, args.fp16, args.bf16) else: self.ref_policy = self.ref_policy.to(self.accelerator.device) self.reward_model = self.reward_model.to(self.accelerator.device) def get_train_dataloader(self) -> DataLoader: return self.dataloader def get_eval_dataloader(self) -> DataLoader: return self.eval_dataloader def save_model(self, output_dir: Optional[str]=None, _internal_call: bool=False): backup_model = self.model self.model = self.model.policy if self.is_deepspeed_enabled: backup_deepspeed = self.deepspeed self.deepspeed = self.model super().save_model(output_dir, _internal_call) self.model = backup_model if self.is_deepspeed_enabled: self.deepspeed = backup_deepspeed def train(self): args = self.args accelerator = self.accelerator optimizer = self.optimizer model = self.model ref_policy = self.ref_policy reward_model = self.reward_model tokenizer = self.tokenizer dataloader = self.dataloader device = accelerator.device def repeat_generator(): while True: 
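# Training runs for a fixed number of update batches (args.num_total_batches) rather
# than dataset epochs, so the dataloader is wrapped in an endless generator and
# next(iter_dataloader) below always yields a fresh rollout batch, restarting the
# dataloader whenever it is exhausted.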
yield from dataloader iter_dataloader = iter(repeat_generator()) generation_config = GenerationConfig(max_new_tokens=args.response_length, temperature=args.temperature + 1e-07, top_k=0.0, top_p=1.0, do_sample=True) accelerator.print('===training policy===') start_time = time.time() stats_shape = (args.num_ppo_epochs, args.num_mini_batches, args.gradient_accumulation_steps) approxkl_stats = torch.zeros(stats_shape, device=device) pg_clipfrac_stats = torch.zeros(stats_shape, device=device) pg_loss_stats = torch.zeros(stats_shape, device=device) vf_loss_stats = torch.zeros(stats_shape, device=device) vf_clipfrac_stats = torch.zeros(stats_shape, device=device) entropy_stats = torch.zeros(stats_shape, device=device) ratio_stats = torch.zeros(stats_shape, device=device) model.train() self.state.global_step = 0 self.state.episode = 0 self.state.max_steps = args.num_total_batches * args.num_mini_batches self.state.num_train_epochs = args.total_episodes / self.train_dataset_len if args.logging_steps is not None: if args.logging_steps < 1: self.state.logging_steps = math.ceil(self.state.max_steps * args.logging_steps) else: self.state.logging_steps = args.logging_steps if args.eval_steps is not None: if args.eval_steps < 1: self.state.eval_steps = math.ceil(self.state.max_steps * args.eval_steps) else: self.state.eval_steps = args.eval_steps if args.save_steps is not None: if args.save_steps < 1: self.state.save_steps = math.ceil(self.state.max_steps * args.save_steps) else: self.state.save_steps = args.save_steps self.control = self.callback_handler.on_train_begin(args, self.state, self.control) for update in range(1, args.num_total_batches + 1): self.state.episode += 1 * args.batch_size data = next(iter_dataloader) with torch.no_grad(): queries = data['input_ids'].to(device) context_length = queries.shape[1] responses = [] postprocessed_responses = [] logprobs = [] ref_logprobs = [] scores = [] sequence_lengths = [] values = [] with unwrap_model_for_generation(model, self.accelerator) as unwrapped_model: (query_responses, logitss) = batch_generation(unwrapped_model.policy, queries, args.local_rollout_forward_batch_size, tokenizer.pad_token_id, generation_config) for i in range(0, queries.shape[0], args.local_rollout_forward_batch_size): query = queries[i:i + args.local_rollout_forward_batch_size] query_response = query_responses[i:i + args.local_rollout_forward_batch_size] response = query_response[:, context_length:] logits = logitss[i:i + args.local_rollout_forward_batch_size] all_logprob = F.log_softmax(logits, dim=-1) logprob = torch.gather(all_logprob, 2, response.unsqueeze(-1)).squeeze(-1) del logits, all_logprob torch.cuda.empty_cache() ref_output = forward(ref_policy, query_response, tokenizer.pad_token_id) ref_logits = ref_output.logits[:, context_length - 1:-1] ref_logits /= args.temperature + 1e-07 ref_all_logprob = F.log_softmax(ref_logits, dim=-1) ref_logprob = torch.gather(ref_all_logprob, 2, response.unsqueeze(-1)).squeeze(-1) del ref_output, ref_logits, ref_all_logprob torch.cuda.empty_cache() postprocessed_response = response if args.stop_token_id is not None: postprocessed_response = truncate_response(args.stop_token_id, tokenizer.pad_token_id, response) postprocessed_query_response = torch.cat((query, postprocessed_response), 1) sequence_length = first_true_indices(postprocessed_response == tokenizer.pad_token_id) - 1 unwrapped_value_model = accelerator.unwrap_model(model).value_model (full_value, _, _) = get_reward(unwrapped_value_model, query_response, tokenizer.pad_token_id, 
context_length) value = full_value[:, context_length - 1:-1].squeeze(-1) (_, score, _) = get_reward(reward_model, postprocessed_query_response, tokenizer.pad_token_id, context_length) responses.append(response) postprocessed_responses.append(postprocessed_response) logprobs.append(logprob) ref_logprobs.append(ref_logprob) sequence_lengths.append(sequence_length) scores.append(score) values.append(value) responses = torch.cat(responses, 0) postprocessed_responses = torch.cat(postprocessed_responses, 0) logprobs = torch.cat(logprobs, 0) ref_logprobs = torch.cat(ref_logprobs, 0) sequence_lengths = torch.cat(sequence_lengths, 0) scores = torch.cat(scores, 0) values = torch.cat(values, 0) del (logprob, ref_logprob, full_value, value, score, unwrapped_model) torch.cuda.empty_cache() gc.collect() contain_eos_token = torch.any(postprocessed_responses == self.tokenizer.eos_token_id, dim=-1) if self.args.missing_eos_penalty is not None: scores[~contain_eos_token] -= self.args.missing_eos_penalty response_idxs = torch.arange(responses.shape[1], device=responses.device).repeat(responses.shape[0], 1) padding_mask = response_idxs > sequence_lengths.unsqueeze(1) logprobs = torch.masked_fill(logprobs, padding_mask, INVALID_LOGPROB) ref_logprobs = torch.masked_fill(ref_logprobs, padding_mask, INVALID_LOGPROB) sequence_lengths_p1 = sequence_lengths + 1 padding_mask_p1 = response_idxs > sequence_lengths_p1.unsqueeze(1) values = torch.masked_fill(values, padding_mask_p1, 0) kl = logprobs - ref_logprobs non_score_reward = -args.kl_coef * kl rewards = non_score_reward.clone() actual_start = torch.arange(rewards.size(0), device=rewards.device) actual_end = torch.where(sequence_lengths_p1 < rewards.size(1), sequence_lengths_p1, sequence_lengths) rewards[[actual_start, actual_end]] += scores if args.whiten_rewards: rewards = masked_whiten(rewards, mask=~padding_mask_p1, shift_mean=False) rewards = torch.masked_fill(rewards, padding_mask_p1, 0) lastgaelam = 0 advantages_reversed = [] gen_length = responses.shape[1] for t in reversed(range(gen_length)): nextvalues = values[:, t + 1] if t < gen_length - 1 else 0.0 delta = rewards[:, t] + args.gamma * nextvalues - values[:, t] lastgaelam = delta + args.gamma * args.lam * lastgaelam advantages_reversed.append(lastgaelam) advantages = torch.stack(advantages_reversed[::-1], axis=1) returns = advantages + values advantages = masked_whiten(advantages, ~padding_mask) advantages = torch.masked_fill(advantages, padding_mask, 0) torch.cuda.empty_cache() for ppo_epoch_idx in range(args.num_ppo_epochs): b_inds = np.random.permutation(args.local_batch_size) minibatch_idx = 0 for mini_batch_start in range(0, args.local_batch_size, args.local_mini_batch_size): mini_batch_end = mini_batch_start + args.local_mini_batch_size mini_batch_inds = b_inds[mini_batch_start:mini_batch_end] gradient_accumulation_idx = 0 for micro_batch_start in range(0, args.local_mini_batch_size, args.per_device_train_batch_size): with accelerator.accumulate(model): micro_batch_end = micro_batch_start + args.per_device_train_batch_size micro_batch_inds = mini_batch_inds[micro_batch_start:micro_batch_end] mb_advantage = advantages[micro_batch_inds] mb_responses = responses[micro_batch_inds] mb_query_responses = query_responses[micro_batch_inds] mb_logprobs = logprobs[micro_batch_inds] mb_return = returns[micro_batch_inds] mb_values = values[micro_batch_inds] (output, vpred_temp) = forward(model, mb_query_responses, tokenizer.pad_token_id) logits = output.logits[:, context_length - 1:-1] logits /= 
args.temperature + 1e-07 new_all_logprobs = F.log_softmax(logits, dim=-1) new_logprobs = torch.gather(new_all_logprobs, 2, mb_responses.unsqueeze(-1)).squeeze(-1) new_logprobs = torch.masked_fill(new_logprobs, padding_mask[micro_batch_inds], INVALID_LOGPROB) vpred = vpred_temp[:, context_length - 1:-1].squeeze(-1) vpred = torch.masked_fill(vpred, padding_mask_p1[micro_batch_inds], 0) vpredclipped = torch.clamp(vpred, mb_values - args.cliprange_value, mb_values + args.cliprange_value) vf_losses1 = torch.square(vpred - mb_return) vf_losses2 = torch.square(vpredclipped - mb_return) vf_loss_max = torch.max(vf_losses1, vf_losses2) vf_loss = 0.5 * masked_mean(vf_loss_max, ~padding_mask_p1[micro_batch_inds]) vf_clipfrac = masked_mean((vf_losses2 > vf_losses1).float(), ~padding_mask_p1[micro_batch_inds]) logprobs_diff = new_logprobs - mb_logprobs ratio = torch.exp(logprobs_diff) pg_losses = -mb_advantage * ratio pg_losses2 = -mb_advantage * torch.clamp(ratio, 1.0 - args.cliprange, 1.0 + args.cliprange) pg_loss_max = torch.max(pg_losses, pg_losses2) pg_loss = masked_mean(pg_loss_max, ~padding_mask[micro_batch_inds]) loss = pg_loss + args.vf_coef * vf_loss accelerator.backward(loss) optimizer.step() optimizer.zero_grad() with torch.no_grad(): pg_clipfrac = masked_mean((pg_losses2 > pg_losses).float(), ~padding_mask[micro_batch_inds]) prob_dist = torch.nn.functional.softmax(logits, dim=-1) entropy = torch.logsumexp(logits, dim=-1) - torch.sum(prob_dist * logits, dim=-1) approxkl = 0.5 * (logprobs_diff ** 2).mean() approxkl_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = approxkl pg_clipfrac_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = pg_clipfrac pg_loss_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = pg_loss vf_loss_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = vf_loss vf_clipfrac_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = vf_clipfrac entropy_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = entropy.mean() ratio_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = ratio.mean() gradient_accumulation_idx += 1 minibatch_idx += 1 del (output, vpred_temp, logits, new_all_logprobs, new_logprobs, vpred, vpredclipped, vf_losses1, vf_losses2, vf_loss, vf_clipfrac, logprobs_diff, ratio, pg_losses, pg_losses2, pg_loss_max, pg_loss, loss, pg_clipfrac, prob_dist, entropy, approxkl, mb_return, mb_advantage, mb_values, mb_responses, mb_query_responses, mb_logprobs) torch.cuda.empty_cache() with torch.no_grad(): mean_kl = kl.sum(1).mean() mean_entropy = (-logprobs).sum(1).mean() mean_non_score_reward = non_score_reward.sum(1).mean() rlhf_reward = mean_non_score_reward + scores.mean() eps = int(self.state.episode / (time.time() - start_time)) metrics = {} metrics['eps'] = eps metrics['objective/kl'] = self.accelerator.gather(mean_kl).mean().item() metrics['objective/entropy'] = self.accelerator.gather(mean_entropy).mean().item() metrics['objective/non_score_reward'] = self.accelerator.gather(mean_non_score_reward).mean().item() metrics['objective/rlhf_reward'] = self.accelerator.gather(rlhf_reward).mean().item() metrics['objective/scores'] = self.accelerator.gather(scores.mean()).mean().item() metrics['policy/approxkl_avg'] = self.accelerator.gather(approxkl_stats).mean().item() metrics['policy/clipfrac_avg'] = self.accelerator.gather(pg_clipfrac_stats).mean().item() metrics['loss/policy_avg'] = self.accelerator.gather(pg_loss_stats).mean().item() metrics['loss/value_avg'] = 
self.accelerator.gather(vf_loss_stats).mean().item() metrics['val/clipfrac_avg'] = self.accelerator.gather(vf_clipfrac_stats).mean().item() metrics['policy/entropy_avg'] = self.accelerator.gather(entropy_stats).mean().item() metrics['val/ratio'] = self.accelerator.gather(ratio_stats).mean().item() metrics['val/ratio_var'] = self.accelerator.gather(ratio_stats).var().item() metrics['val/num_eos_tokens'] = (responses == tokenizer.eos_token_id).sum().item() metrics['lr'] = self.lr_scheduler.get_last_lr()[0] metrics['episode'] = self.state.episode self.state.epoch = self.state.episode / self.train_dataset_len self.state.global_step += 1 self.log(metrics) self.lr_scheduler.step() self.control = self.callback_handler.on_step_end(args, self.state, self.control) if self.control.should_save: self._save_checkpoint(model, trial=None, metrics=metrics) self.control = self.callback_handler.on_save(self.args, self.state, self.control) del kl, mean_kl, mean_entropy, mean_non_score_reward, scores, metrics, non_score_reward torch.cuda.empty_cache() gc.collect() if args.num_sample_generations > 0 and (update - 1) % self.sample_generations_freq == 0: self.generate_completions(sampling=True) torch.cuda.empty_cache() del (query_responses, responses, postprocessed_responses, logprobs, ref_logprobs, values, sequence_lengths, contain_eos_token, sequence_lengths_p1, response_idxs, padding_mask, padding_mask_p1, rewards, actual_start, actual_end, advantages, returns) torch.cuda.empty_cache() self.control = self.callback_handler.on_train_end(args, self.state, self.control) if self.control.should_save: self._save_checkpoint(model, trial=None, metrics=None) self.control = self.callback_handler.on_save(self.args, self.state, self.control) def generate_completions(self, sampling: bool=False): args = self.args tokenizer = self.tokenizer generation_config = GenerationConfig(max_new_tokens=self.args.response_length, temperature=0.01 + 1e-07, top_k=0.0, top_p=1.0, do_sample=True) table = defaultdict(list) with unwrap_model_for_generation(self.model, self.accelerator) as unwrapped_model: for batch in self.eval_dataloader: query = batch['input_ids'] with torch.no_grad(): context_length = query.shape[1] (query_response, _) = batch_generation(unwrapped_model.policy, query, query.shape[0], tokenizer.pad_token_id, generation_config) response = query_response[:, context_length:] postprocessed_response = response if args.stop_token_id is not None: postprocessed_response = truncate_response(args.stop_token_id, tokenizer.pad_token_id, response) table['query'].extend(gather_object(tokenizer.batch_decode(query, skip_special_tokens=True))) table['model response'].extend(gather_object(tokenizer.batch_decode(postprocessed_response))) postprocessed_query_response = torch.cat((query, postprocessed_response), 1) (_, score, _) = get_reward(self.reward_model, postprocessed_query_response, tokenizer.pad_token_id, context_length) table['score'].extend(self.accelerator.gather(score).float().cpu().numpy()) if sampling: break df = pd.DataFrame(table) if self.accelerator.is_main_process: print_rich_table(df.iloc[0:0 + 5]) if 'wandb' in args.report_to: import wandb if wandb.run is not None: wandb.log({'completions': wandb.Table(dataframe=df)}) @wraps(Trainer.push_to_hub) def push_to_hub(self, commit_message: Optional[str]='End of training', blocking: bool=True, **kwargs) -> str: kwargs = trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs) return super().push_to_hub(commit_message=commit_message, 
blocking=blocking, **kwargs) # File: trl-main/trl/trainer/reward_config.py from dataclasses import dataclass from typing import Optional from transformers import TrainingArguments @dataclass class RewardConfig(TrainingArguments): max_length: Optional[int] = None dataset_num_proc: Optional[int] = None center_rewards_coefficient: Optional[float] = None # File: trl-main/trl/trainer/reward_trainer.py import inspect import warnings from collections import defaultdict from dataclasses import FrozenInstanceError, replace from functools import wraps from typing import Any, Callable, Dict, List, Optional, Tuple, Union import pandas as pd import torch import torch.nn as nn from accelerate.utils import gather_object from datasets import Dataset from transformers import DataCollator, PreTrainedModel, PreTrainedTokenizerBase, Trainer, TrainingArguments from transformers.trainer_callback import TrainerCallback from transformers.trainer_pt_utils import nested_detach from transformers.trainer_utils import EvalPrediction from ..import_utils import is_peft_available from .reward_config import RewardConfig from .utils import RewardDataCollatorWithPadding, compute_accuracy, decode_and_strip_padding, print_rich_table, trl_sanitze_kwargs_for_tagging if is_peft_available(): from peft import PeftModel, get_peft_model, prepare_model_for_kbit_training class RewardTrainer(Trainer): _tag_names = ['trl', 'reward-trainer'] def __init__(self, model: Optional[Union[PreTrainedModel, nn.Module]]=None, args: Optional[RewardConfig]=None, data_collator: Optional[DataCollator]=None, train_dataset: Optional[Dataset]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]]=None, tokenizer: Optional[PreTrainedTokenizerBase]=None, model_init: Optional[Callable[[], PreTrainedModel]]=None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]]=None, callbacks: Optional[List[TrainerCallback]]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None, max_length: Optional[int]=None, peft_config: Optional[Dict]=None): if type(args) is TrainingArguments: warnings.warn('Using `transformers.TrainingArguments` for `args` is deprecated and will be removed in a future version. Please use `RewardConfig` instead.', FutureWarning) if max_length is not None: warnings.warn('The `max_length` argument is deprecated and will be removed in a future version. Please use the `RewardConfig` to set `max_length` instead.', FutureWarning) else: if max_length is not None and args.max_length is not None: raise ValueError('You cannot specify both `max_length` and `args.max_length`. Please use the `RewardConfig` to set `max_length` once.') if max_length is not None and args.max_length is None: warnings.warn('The `max_length` argument is deprecated and will be removed in a future version. 
Please use the `RewardConfig` to set `max_length` instead.', FutureWarning) if not is_peft_available() and peft_config is not None: raise ValueError("PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models") elif is_peft_available() and peft_config is not None: if not isinstance(model, PeftModel): if getattr(model, 'is_loaded_in_8bit', False) or getattr(model, 'is_quantized', False): _supports_gc_kwargs = 'gradient_checkpointing_kwargs' in list(inspect.signature(prepare_model_for_kbit_training).parameters) prepare_model_kwargs = {'use_gradient_checkpointing': args.gradient_checkpointing} if not _supports_gc_kwargs and args.gradient_checkpointing_kwargs is not None: warnings.warn("You passed `gradient_checkpointing_kwargs` in the trainer's kwargs, but your peft version does not support it. please update to the latest version of peft to use `gradient_checkpointing_kwargs`.") elif _supports_gc_kwargs and args.gradient_checkpointing_kwargs is not None: prepare_model_kwargs['gradient_checkpointing_kwargs'] = args.gradient_checkpointing_kwargs model = prepare_model_for_kbit_training(model, **prepare_model_kwargs) model = get_peft_model(model, peft_config) if compute_metrics is None: compute_metrics = compute_accuracy if data_collator is None: if tokenizer is None: raise ValueError('max_length or a tokenizer must be specified when using the default RewardDataCollatorWithPadding') if type(args) is TrainingArguments: if max_length is None: warnings.warn('When using RewardDataCollatorWithPadding, you should set `max_length` in RewardConfig. It will be set to `512` by default, but you should do it yourself in the future.', UserWarning) max_length = 512 else: if max_length is None and args.max_length is None: warnings.warn('When using RewardDataCollatorWithPadding, you should set `max_length` in RewardConfig. 
It will be set to `512` by default, but you should do it yourself in the future.', UserWarning) max_length = 512 if max_length is None and args.max_length is not None: max_length = args.max_length data_collator = RewardDataCollatorWithPadding(tokenizer, max_length=max_length) if args.remove_unused_columns: try: args.remove_unused_columns = False except FrozenInstanceError: args = replace(args, remove_unused_columns=False) warnings.warn('When using RewardDataCollatorWithPadding, you should set `remove_unused_columns=False` in your RewardConfig we have set it for you, but you should do it yourself in the future.', UserWarning) self.use_reward_data_collator = True else: self.use_reward_data_collator = False super().__init__(model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, model_init=model_init, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics) if hasattr(self.model, 'add_model_tags'): self.model.add_model_tags(self._tag_names) def compute_loss(self, model: Union[PreTrainedModel, nn.Module], inputs: Dict[str, Union[torch.Tensor, Any]], return_outputs=False) -> Union[torch.Tensor, Tuple[torch.Tensor, Dict[str, torch.Tensor]]]: if not self.use_reward_data_collator: warnings.warn('The current compute_loss is implemented for RewardDataCollatorWithPadding, if you are using a custom data collator make sure you know what you are doing or implement your own compute_loss method.') rewards_chosen = model(input_ids=inputs['input_ids_chosen'], attention_mask=inputs['attention_mask_chosen'], return_dict=True)['logits'] rewards_rejected = model(input_ids=inputs['input_ids_rejected'], attention_mask=inputs['attention_mask_rejected'], return_dict=True)['logits'] if 'margin' in inputs: loss = -nn.functional.logsigmoid(rewards_chosen - rewards_rejected - inputs['margin']).mean() else: loss = -nn.functional.logsigmoid(rewards_chosen - rewards_rejected).mean() if self.args.center_rewards_coefficient is not None: loss += self.args.center_rewards_coefficient * torch.mean((rewards_chosen + rewards_rejected) ** 2) if return_outputs: return (loss, {'rewards_chosen': rewards_chosen, 'rewards_rejected': rewards_rejected}) return loss def prediction_step(self, model: Union[PreTrainedModel, nn.Module], inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]]=None) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: inputs = self._prepare_inputs(inputs) if ignore_keys is None: if hasattr(self.model, 'config'): ignore_keys = getattr(self.model.config, 'keys_to_ignore_at_inference', []) else: ignore_keys = [] with torch.no_grad(): (loss, logits_dict) = self.compute_loss(model, inputs, return_outputs=True) if prediction_loss_only: return (loss, None, None) loss = loss.detach() logits = tuple((v for (k, v) in logits_dict.items() if k not in ignore_keys)) logits = nested_detach(logits) logits = torch.stack(logits).mean(dim=2).softmax(dim=0).T labels = torch.zeros(logits.shape[0]) labels = self._prepare_inputs(labels) return (loss, logits, labels) def evaluate(self, *args, **kwargs): num_print_samples = kwargs.pop('num_print_samples', 4) self.visualize_samples(num_print_samples) return super().evaluate(*args, **kwargs) def visualize_samples(self, num_print_samples: int): eval_dataloader = self.get_eval_dataloader() table = defaultdict(list) for (_, inputs) in 
enumerate(eval_dataloader): (_, logits, _) = self.prediction_step(self.model, inputs, prediction_loss_only=False) chosen_text = decode_and_strip_padding(inputs['input_ids_chosen'], self.tokenizer) rejected_text = decode_and_strip_padding(inputs['input_ids_rejected'], self.tokenizer) table['chosen_text'].extend(gather_object(chosen_text)) table['rejected_text'].extend(gather_object(rejected_text)) table['logits'].extend(gather_object([[round(inner_item, 4) for inner_item in item] for item in logits.tolist()])) if num_print_samples >= 0 and len(table['chosen_text']) >= num_print_samples: break df = pd.DataFrame(table) if self.accelerator.process_index == 0: print_rich_table(df[:num_print_samples]) if 'wandb' in self.args.report_to: import wandb if wandb.run is not None: wandb.log({'completions': wandb.Table(dataframe=df)}) @wraps(Trainer.push_to_hub) def push_to_hub(self, commit_message: Optional[str]='End of training', blocking: bool=True, **kwargs) -> str: kwargs = trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs) return super().push_to_hub(commit_message=commit_message, blocking=blocking, **kwargs) # File: trl-main/trl/trainer/rloo_config.py import os from dataclasses import dataclass from ..trainer.utils import OnPolicyConfig @dataclass class RLOOConfig(OnPolicyConfig): exp_name: str = os.path.basename(__file__)[:-len('.py')] reward_model_path: str = 'EleutherAI/pythia-160m' num_ppo_epochs: int = 4 whiten_rewards: bool = False kl_coef: float = 0.05 cliprange: float = 0.2 rloo_k: int = 2 # File: trl-main/trl/trainer/rloo_trainer.py import gc import math import os import time from collections import defaultdict from functools import wraps from typing import Dict, List, Optional, Tuple, Union import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F from accelerate import Accelerator from accelerate.utils import broadcast, gather_object from datasets import Dataset from torch.utils.data import DataLoader from transformers import DataCollatorWithPadding, GenerationConfig, PreTrainedTokenizer, Trainer, TrainerCallback, TrainerControl from transformers.integrations import get_reporting_integration_callbacks from transformers.trainer import DEFAULT_CALLBACKS, DEFAULT_PROGRESS_CALLBACK from transformers.trainer_callback import CallbackHandler, PrinterCallback from ..models.utils import unwrap_model_for_generation from ..trainer.utils import OnlineTrainerState, batch_generation, disable_dropout_in_model, exact_div, first_true_indices, forward, get_reward, prepare_deepspeed, print_rich_table, truncate_response from .rloo_config import RLOOConfig from .utils import trl_sanitze_kwargs_for_tagging INVALID_LOGPROB = 1.0 class RLOOTrainer(Trainer): _tag_names = ['trl', 'rloo'] def __init__(self, config: RLOOConfig, tokenizer: PreTrainedTokenizer, policy: nn.Module, ref_policy: nn.Module, reward_model: nn.Module, train_dataset: Dataset, data_collator: Optional[DataCollatorWithPadding]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), callbacks: Optional[List[TrainerCallback]]=None) -> None: self.args = config args = config self.tokenizer = tokenizer self.policy = policy self.policy.generation_config.eos_token_id = None self.policy.generation_config.pad_token_id = None self.ref_policy = ref_policy self.reward_model = reward_model self.train_dataset = train_dataset self.train_dataset_len = len(train_dataset) 
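# RLOO samples rloo_k completions per prompt and uses the other rloo_k - 1 completions
# of the same prompt as a leave-one-out baseline, so the dataloader batch size below is
# local_batch_size // rloo_k and each prompt is repeated rloo_k times at rollout time.
# Rough arithmetic sketch (hypothetical values):
#   per_device_train_batch_size=4, gradient_accumulation_steps=2, num_mini_batches=1, rloo_k=2
#   -> local_batch_size = 4 * 2 * 1 = 8, local_dataloader_batch_size = 8 // 2 = 4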
self.data_collator = data_collator self.eval_dataset = eval_dataset (self.optimizer, self.lr_scheduler) = optimizers if args.total_episodes is None: args.total_episodes = int(args.num_train_epochs * self.train_dataset_len) accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps) self.accelerator = accelerator args.world_size = accelerator.num_processes args.local_batch_size = args.per_device_train_batch_size * args.gradient_accumulation_steps * args.num_mini_batches args.micro_batch_size = int(args.per_device_train_batch_size * args.world_size) args.batch_size = int(args.local_batch_size * args.world_size) args.mini_batch_size = exact_div(args.batch_size, args.num_mini_batches, '`batch_size` must be a multiple of `num_mini_batches`') args.local_mini_batch_size = exact_div(args.local_batch_size, args.num_mini_batches, '`local_batch_size` must be a multiple of `num_mini_batches`') args.num_total_batches = math.ceil(args.total_episodes / args.batch_size) time_tensor = torch.tensor(int(time.time()), device=accelerator.device) time_int = broadcast(time_tensor, 0).item() args.run_name = f'{args.exp_name}__{args.seed}__{time_int}' self.local_seed = args.seed + accelerator.process_index * 100003 if args.num_sample_generations > 0: self.sample_generations_freq = max(1, args.num_total_batches // args.num_sample_generations) self.local_dataloader_batch_size = exact_div(args.local_batch_size, args.rloo_k, '`local_batch_size` must be a multiple of rloo_k') for module in [policy, ref_policy, reward_model]: disable_dropout_in_model(module) if args.stop_token and args.stop_token == 'eos': args.stop_token_id = tokenizer.eos_token_id self.model = policy self.create_optimizer_and_scheduler(num_training_steps=args.num_total_batches) self.state = OnlineTrainerState(is_local_process_zero=self.is_local_process_zero(), is_world_process_zero=self.is_world_process_zero()) default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to) self.callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks self.callback_handler = CallbackHandler(self.callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler) self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK) self.control = TrainerControl() self.current_flos = 0 self.hp_search_backend = None self.is_deepspeed_enabled = getattr(self.accelerator.state, 'deepspeed_plugin', None) is not None self.is_fsdp_enabled = getattr(self.accelerator.state, 'fsdp_plugin', None) is not None self.hub_model_id = None if self.args.push_to_hub: self.init_hf_repo() if self.args.should_save: os.makedirs(self.args.output_dir, exist_ok=True) self.backup_model = None self.dataloader = DataLoader(self.train_dataset, batch_size=self.local_dataloader_batch_size, shuffle=True, collate_fn=DataCollatorWithPadding(tokenizer), drop_last=True) torch.manual_seed(args.seed) (self.model, self.optimizer, self.dataloader) = accelerator.prepare(self.model, self.optimizer, self.dataloader) torch.manual_seed(self.local_seed) self.eval_dataloader = DataLoader(self.eval_dataset, batch_size=args.per_device_eval_batch_size, collate_fn=DataCollatorWithPadding(self.tokenizer), drop_last=True) self.eval_dataloader = accelerator.prepare(self.eval_dataloader) if self.is_deepspeed_enabled: self.reward_model = prepare_deepspeed(self.reward_model, args.per_device_train_batch_size, args.fp16, args.bf16) self.ref_policy = prepare_deepspeed(self.ref_policy, 
args.per_device_train_batch_size, args.fp16, args.bf16) self.deepspeed = self.model else: self.ref_policy = self.ref_policy.to(self.accelerator.device) self.reward_model = self.reward_model.to(self.accelerator.device) def get_train_dataloader(self) -> DataLoader: return self.dataloader def get_eval_dataloader(self) -> DataLoader: return self.eval_dataloader def train(self): args = self.args accelerator = self.accelerator optimizer = self.optimizer model = self.model self.model_wrapped = self.model ref_policy = self.ref_policy reward_model = self.reward_model tokenizer = self.tokenizer dataloader = self.dataloader device = accelerator.device def repeat_generator(): while True: yield from dataloader iter_dataloader = iter(repeat_generator()) generation_config = GenerationConfig(max_new_tokens=args.response_length, temperature=args.temperature + 1e-07, top_k=0.0, top_p=1.0, do_sample=True) accelerator.print('===training policy===') start_time = time.time() stats_shape = (args.num_ppo_epochs, args.num_mini_batches, args.gradient_accumulation_steps) approxkl_stats = torch.zeros(stats_shape, device=device) pg_clipfrac_stats = torch.zeros(stats_shape, device=device) pg_loss_stats = torch.zeros(stats_shape, device=device) vf_loss_stats = torch.zeros(stats_shape, device=device) vf_clipfrac_stats = torch.zeros(stats_shape, device=device) entropy_stats = torch.zeros(stats_shape, device=device) ratio_stats = torch.zeros(stats_shape, device=device) model.train() self.state.global_step = 0 self.state.episode = 0 self.state.max_steps = args.num_total_batches * args.num_mini_batches self.state.num_train_epochs = args.total_episodes / self.train_dataset_len if args.logging_steps is not None: if args.logging_steps < 1: self.state.logging_steps = math.ceil(self.state.max_steps * args.logging_steps) else: self.state.logging_steps = args.logging_steps if args.eval_steps is not None: if args.eval_steps < 1: self.state.eval_steps = math.ceil(self.state.max_steps * args.eval_steps) else: self.state.eval_steps = args.eval_steps if args.save_steps is not None: if args.save_steps < 1: self.state.save_steps = math.ceil(self.state.max_steps * args.save_steps) else: self.state.save_steps = args.save_steps self.control = self.callback_handler.on_train_begin(args, self.state, self.control) for update in range(1, args.num_total_batches + 1): self.state.episode += 1 * args.batch_size data = next(iter_dataloader) with torch.no_grad(): queries = data['input_ids'].to(device) queries = queries.repeat(args.rloo_k, 1) context_length = queries.shape[1] query_responses = [] responses = [] postprocessed_responses = [] logprobs = [] ref_logprobs = [] scores = [] sequence_lengths = [] with unwrap_model_for_generation(model, self.accelerator) as unwrapped_model: (query_responses, logitss) = batch_generation(unwrapped_model, queries, args.local_rollout_forward_batch_size, tokenizer.pad_token_id, generation_config) for i in range(0, queries.shape[0], args.local_rollout_forward_batch_size): query = queries[i:i + args.local_rollout_forward_batch_size] query_response = query_responses[i:i + args.local_rollout_forward_batch_size] response = query_response[:, context_length:] logits = logitss[i:i + args.local_rollout_forward_batch_size] all_logprob = F.log_softmax(logits, dim=-1) logprob = torch.gather(all_logprob, 2, response.unsqueeze(-1)).squeeze(-1) del logits, all_logprob torch.cuda.empty_cache() ref_output = forward(ref_policy, query_response, tokenizer.pad_token_id) ref_logits = ref_output.logits[:, context_length - 1:-1] ref_logits 
/= args.temperature + 1e-07 ref_all_logprob = F.log_softmax(ref_logits, dim=-1) ref_logprob = torch.gather(ref_all_logprob, 2, response.unsqueeze(-1)).squeeze(-1) del ref_output, ref_logits, ref_all_logprob torch.cuda.empty_cache() postprocessed_response = response if args.stop_token_id is not None: postprocessed_response = truncate_response(args.stop_token_id, tokenizer.pad_token_id, response) postprocessed_query_response = torch.cat((query, postprocessed_response), 1) sequence_length = first_true_indices(postprocessed_response == tokenizer.pad_token_id) - 1 (_, score, _) = get_reward(reward_model, postprocessed_query_response, tokenizer.pad_token_id, context_length) responses.append(response) postprocessed_responses.append(postprocessed_response) logprobs.append(logprob) ref_logprobs.append(ref_logprob) sequence_lengths.append(sequence_length) scores.append(score) responses = torch.cat(responses, 0) postprocessed_responses = torch.cat(postprocessed_responses, 0) logprobs = torch.cat(logprobs, 0) ref_logprobs = torch.cat(ref_logprobs, 0) sequence_lengths = torch.cat(sequence_lengths, 0) scores = torch.cat(scores, 0) del (logprob, ref_logprob, score) torch.cuda.empty_cache() gc.collect() contain_eos_token = torch.any(postprocessed_responses == tokenizer.eos_token_id, dim=-1) if args.missing_eos_penalty is not None: scores[~contain_eos_token] -= self.args.missing_eos_penalty response_idxs = torch.arange(responses.shape[1], device=responses.device).repeat(responses.shape[0], 1) padding_mask = response_idxs > sequence_lengths.unsqueeze(1) logprobs = torch.masked_fill(logprobs, padding_mask, INVALID_LOGPROB) ref_logprobs = torch.masked_fill(ref_logprobs, padding_mask, INVALID_LOGPROB) kl = logprobs - ref_logprobs non_score_reward = (-args.kl_coef * kl).sum(1) rlhf_reward = scores + non_score_reward rlhf_reward = rlhf_reward.reshape(args.rloo_k, -1) baseline = (rlhf_reward.sum(0) - rlhf_reward) / (args.rloo_k - 1) advantages = rlhf_reward - baseline advantages = advantages.flatten() torch.cuda.empty_cache() for ppo_epoch_idx in range(args.num_ppo_epochs): b_inds = np.random.permutation(args.local_batch_size) minibatch_idx = 0 for mini_batch_start in range(0, args.local_batch_size, args.local_mini_batch_size): mini_batch_end = mini_batch_start + args.local_mini_batch_size mini_batch_inds = b_inds[mini_batch_start:mini_batch_end] gradient_accumulation_idx = 0 for micro_batch_start in range(0, args.local_mini_batch_size, args.per_device_train_batch_size): with accelerator.accumulate(model): micro_batch_end = micro_batch_start + args.per_device_train_batch_size micro_batch_inds = mini_batch_inds[micro_batch_start:micro_batch_end] mb_advantage = advantages[micro_batch_inds] mb_responses = responses[micro_batch_inds] mb_query_responses = query_responses[micro_batch_inds] mb_logprobs = logprobs[micro_batch_inds] output = forward(model, mb_query_responses, tokenizer.pad_token_id) logits = output.logits[:, context_length - 1:-1] logits /= args.temperature + 1e-07 new_all_logprobs = F.log_softmax(logits, dim=-1) new_logprobs = torch.gather(new_all_logprobs, 2, mb_responses.unsqueeze(-1)).squeeze(-1) new_logprobs = torch.masked_fill(new_logprobs, padding_mask[micro_batch_inds], INVALID_LOGPROB) new_ratio = (new_logprobs - mb_logprobs).exp() new_logprobs = new_logprobs.sum(1) mb_logprobs = mb_logprobs.sum(1) logprobs_diff = new_logprobs - mb_logprobs ratio = torch.exp(logprobs_diff) pg_losses = -mb_advantage * ratio pg_losses2 = -mb_advantage * torch.clamp(ratio, 1.0 - args.cliprange, 1.0 + args.cliprange) 
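# The two surrogate terms above are combined on the next line into the PPO-style clipped
# objective, max(-A * ratio, -A * clamp(ratio, 1 - cliprange, 1 + cliprange)), where the
# advantage A comes from the REINFORCE leave-one-out baseline computed earlier: for each
# prompt, the baseline is the mean rlhf_reward of the other (rloo_k - 1) sampled completions.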
pg_loss_max = torch.max(pg_losses, pg_losses2) pg_loss = pg_loss_max.mean() loss = pg_loss accelerator.backward(loss) optimizer.step() optimizer.zero_grad() with torch.no_grad(): pg_clipfrac = (pg_losses2 > pg_losses).float().mean() prob_dist = torch.nn.functional.softmax(logits, dim=-1) entropy = torch.logsumexp(logits, dim=-1) - torch.sum(prob_dist * logits, dim=-1) approxkl = 0.5 * (logprobs_diff ** 2).mean() approxkl_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = approxkl pg_clipfrac_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = pg_clipfrac pg_loss_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = pg_loss entropy_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = entropy.mean() ratio_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = new_ratio.mean() gradient_accumulation_idx += 1 minibatch_idx += 1 self.state.global_step += 1 del (output, logits, new_all_logprobs, new_logprobs, logprobs_diff, ratio, pg_losses, pg_losses2, pg_loss, loss, pg_clipfrac, prob_dist, entropy, approxkl, mb_advantage, mb_responses, mb_query_responses, mb_logprobs) torch.cuda.empty_cache() with torch.no_grad(): mean_kl = kl.sum(1).mean() mean_entropy = (-logprobs).sum(1).mean() mean_non_score_reward = non_score_reward.mean() eps = int(self.state.episode / (time.time() - start_time)) metrics = {} metrics['eps'] = eps metrics['objective/kl'] = self.accelerator.gather(mean_kl).mean().item() metrics['objective/entropy'] = self.accelerator.gather(mean_entropy).mean().item() metrics['objective/non_score_reward'] = self.accelerator.gather(mean_non_score_reward).mean().item() metrics['objective/rlhf_reward'] = self.accelerator.gather(rlhf_reward).mean().item() metrics['objective/scores'] = self.accelerator.gather(scores.mean()).mean().item() metrics['policy/approxkl_avg'] = self.accelerator.gather(approxkl_stats).mean().item() metrics['policy/clipfrac_avg'] = self.accelerator.gather(pg_clipfrac_stats).mean().item() metrics['loss/policy_avg'] = self.accelerator.gather(pg_loss_stats).mean().item() metrics['loss/value_avg'] = self.accelerator.gather(vf_loss_stats).mean().item() metrics['val/clipfrac_avg'] = self.accelerator.gather(vf_clipfrac_stats).mean().item() metrics['policy/entropy_avg'] = self.accelerator.gather(entropy_stats).mean().item() metrics['val/ratio'] = self.accelerator.gather(ratio_stats).mean().item() metrics['val/ratio_var'] = self.accelerator.gather(ratio_stats).var().item() metrics['val/num_eos_tokens'] = (responses == tokenizer.eos_token_id).sum().item() metrics['lr'] = self.lr_scheduler.get_last_lr()[0] metrics['episode'] = self.state.episode self.state.epoch = self.state.episode / self.train_dataset_len self.state.global_step += 1 self.log(metrics) del kl, mean_kl, mean_entropy, scores self.lr_scheduler.step() self.control = self.callback_handler.on_step_end(args, self.state, self.control) if self.control.should_save: self._save_checkpoint(model, trial=None, metrics=metrics) self.control = self.callback_handler.on_save(self.args, self.state, self.control) torch.cuda.empty_cache() gc.collect() if args.num_sample_generations > 0 and (update - 1) % self.sample_generations_freq == 0: self.generate_completions(sampling=True) self.control = self.callback_handler.on_train_end(args, self.state, self.control) if self.control.should_save: self._save_checkpoint(model, trial=None, metrics=None) self.control = self.callback_handler.on_save(self.args, self.state, self.control) def generate_completions(self, sampling: bool=False): 
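# generate_completions samples near-greedy completions (temperature ~0.01) for the eval
# dataloader, scores them with the reward model, and logs a query/response/score table to
# the console and, if configured, to Weights & Biases. With sampling=True only the first
# eval batch is processed, which is how it is invoked periodically during training.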
args = self.args tokenizer = self.tokenizer generation_config = GenerationConfig(max_new_tokens=self.args.response_length, temperature=0.01 + 1e-07, top_k=0.0, top_p=1.0, do_sample=True) table = defaultdict(list) with unwrap_model_for_generation(self.model, self.accelerator) as unwrapped_model: for batch in self.eval_dataloader: query = batch['input_ids'] with torch.no_grad(): context_length = query.shape[1] (query_response, _) = batch_generation(unwrapped_model, query, query.shape[0], tokenizer.pad_token_id, generation_config) response = query_response[:, context_length:] postprocessed_response = response if args.stop_token_id is not None: postprocessed_response = truncate_response(args.stop_token_id, tokenizer.pad_token_id, response) table['query'].extend(gather_object(tokenizer.batch_decode(query, skip_special_tokens=True))) table['model response'].extend(gather_object(tokenizer.batch_decode(postprocessed_response))) postprocessed_query_response = torch.cat((query, postprocessed_response), 1) (_, score, _) = get_reward(self.reward_model, postprocessed_query_response, tokenizer.pad_token_id, context_length) table['score'].extend(self.accelerator.gather(score).float().cpu().numpy()) if sampling: break df = pd.DataFrame(table) if self.accelerator.is_main_process: print_rich_table(df.iloc[0:0 + 5]) if 'wandb' in args.report_to: import wandb if wandb.run is not None: wandb.log({'completions': wandb.Table(dataframe=df)}) @wraps(Trainer.push_to_hub) def push_to_hub(self, commit_message: Optional[str]='End of training', blocking: bool=True, **kwargs) -> str: kwargs = trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs) return super().push_to_hub(commit_message=commit_message, blocking=blocking, **kwargs) # File: trl-main/trl/trainer/sft_config.py from dataclasses import dataclass from typing import Any, Dict, Optional from transformers import TrainingArguments @dataclass class SFTConfig(TrainingArguments): dataset_text_field: Optional[str] = None packing: bool = False max_seq_length: Optional[int] = None dataset_num_proc: Optional[int] = None dataset_batch_size: int = 1000 neftune_noise_alpha: Optional[float] = None model_init_kwargs: Optional[Dict[str, Any]] = None dataset_kwargs: Optional[Dict[str, Any]] = None eval_packing: Optional[bool] = None num_of_sequences: int = 1024 chars_per_token: float = 3.6 use_liger: bool = False # File: trl-main/trl/trainer/sft_trainer.py import dataclasses import inspect import warnings from functools import wraps from typing import Callable, Dict, List, Optional, Tuple, Union import datasets import torch import torch.nn as nn from accelerate.state import PartialState from datasets import Dataset from datasets.arrow_writer import SchemaInferenceError from datasets.builder import DatasetGenerationError from huggingface_hub.utils._deprecation import _deprecate_arguments from transformers import AutoModelForCausalLM, AutoTokenizer, DataCollator, DataCollatorForLanguageModeling, PreTrainedModel, PreTrainedTokenizerBase, Trainer from transformers.modeling_utils import unwrap_model from transformers.trainer_callback import TrainerCallback from transformers.trainer_utils import EvalPrediction from ..extras.dataset_formatting import get_formatting_func_from_dataset from ..import_utils import is_liger_available, is_peft_available from .sft_config import SFTConfig from .utils import ConstantLengthDataset, DataCollatorForCompletionOnlyLM, neftune_post_forward_hook, peft_module_casting_to_bf16, trl_sanitze_kwargs_for_tagging if 
is_peft_available(): from peft import PeftConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training if is_liger_available(): from liger_kernel.transformers import AutoLigerKernelForCausalLM class SFTTrainer(Trainer): _tag_names = ['trl', 'sft'] @_deprecate_arguments(version='1.0.0', deprecated_args=['dataset_text_field', 'packing', 'max_seq_length', 'dataset_num_proc', 'dataset_batch_size', 'neftune_noise_alpha', 'model_init_kwargs', 'dataset_kwargs', 'eval_packing', 'num_of_sequences', 'chars_per_token'], custom_message='Deprecated positional argument(s) used in SFTTrainer, please use the SFTConfig to set these arguments instead.') def __init__(self, model: Optional[Union[PreTrainedModel, nn.Module, str]]=None, args: Optional[SFTConfig]=None, data_collator: Optional[DataCollator]=None, train_dataset: Optional[Dataset]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]]=None, tokenizer: Optional[PreTrainedTokenizerBase]=None, model_init: Optional[Callable[[], PreTrainedModel]]=None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]]=None, callbacks: Optional[List[TrainerCallback]]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None, peft_config: Optional['PeftConfig']=None, dataset_text_field: Optional[str]=None, packing: Optional[bool]=False, formatting_func: Optional[Callable]=None, max_seq_length: Optional[int]=None, infinite: Optional[bool]=None, num_of_sequences: Optional[int]=None, chars_per_token: Optional[float]=None, dataset_num_proc: Optional[int]=None, dataset_batch_size: Optional[int]=None, neftune_noise_alpha: Optional[float]=None, model_init_kwargs: Optional[Dict]=None, dataset_kwargs: Optional[Dict]=None, eval_packing: Optional[bool]=None): if args is None: output_dir = 'tmp_trainer' warnings.warn(f'No `SFTConfig` passed, using `output_dir={output_dir}`.') args = SFTConfig(output_dir=output_dir) elif args is not None and args.__class__.__name__ == 'TrainingArguments': args_as_dict = args.to_dict() args_as_dict.update({k: getattr(args, k) for k in args_as_dict.keys() if k.endswith('_token')}) args = SFTConfig(**args_as_dict) if model_init_kwargs is not None: warnings.warn('You passed `model_init_kwargs` to the SFTTrainer, the value you passed will override the one in the `SFTConfig`.') args.model_init_kwargs = model_init_kwargs if getattr(args, 'model_init_kwargs', None) is None: model_init_kwargs = {} elif not isinstance(model, str): raise ValueError('You passed model_init_kwargs to the SFTConfig, but your model is already instantiated.') else: model_init_kwargs = args.model_init_kwargs torch_dtype = model_init_kwargs.get('torch_dtype') if torch_dtype is not None: if isinstance(torch_dtype, str) and torch_dtype != 'auto': torch_dtype = getattr(torch, torch_dtype) if torch_dtype != 'auto' and (not isinstance(torch_dtype, torch.dtype)): raise ValueError(f"Invalid `torch_dtype` passed to the SFTConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}.") model_init_kwargs['torch_dtype'] = torch_dtype if infinite is not None: warnings.warn('The `infinite` argument is deprecated and will be removed in a future version of TRL. Use `TrainingArguments.max_steps` or `TrainingArguments.num_train_epochs` instead to control training length.') if isinstance(model, str): warnings.warn('You passed a model_id to the SFTTrainer. 
This will automatically create an `AutoModelForCausalLM` or a `PeftModel` (if you passed a `peft_config`) for you.') if args.use_liger: model = AutoLigerKernelForCausalLM.from_pretrained(model, **model_init_kwargs) else: model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs) if packing: warnings.warn('You passed a `packing` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`.') args.packing = packing if eval_packing is not None: warnings.warn('You passed a `eval_packing` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`.') args.eval_packing = eval_packing if args.packing and data_collator is not None and isinstance(data_collator, DataCollatorForCompletionOnlyLM): raise ValueError('You passed a `DataCollatorForCompletionOnlyLM` to the SFTTrainer. This is not compatible with the `packing` argument.') if is_peft_available() and peft_config is not None: if not isinstance(peft_config, PeftConfig): raise ValueError(f'If you want to use the PeftModel, you need to pass a PeftConfig object to the SFTTrainer. and you passed a {type(peft_config)}.') if not isinstance(model, PeftModel): _support_gc_kwargs = hasattr(args, 'gradient_checkpointing_kwargs') and 'gradient_checkpointing_kwargs' in list(inspect.signature(prepare_model_for_kbit_training).parameters) gradient_checkpointing_kwargs = getattr(args, 'gradient_checkpointing_kwargs', None) or {} is_sharded_qlora = False if getattr(model, 'is_loaded_in_4bit', False): for (_, param) in model.named_parameters(): if param.__class__.__name__ == 'Params4bit': is_sharded_qlora = param.data.device.type == 'cpu' break if getattr(model, 'is_loaded_in_8bit', False) or (getattr(model, 'is_loaded_in_4bit', False) and (not is_sharded_qlora)): prepare_model_kwargs = {'use_gradient_checkpointing': getattr(args, 'gradient_checkpointing', False)} if _support_gc_kwargs: prepare_model_kwargs['gradient_checkpointing_kwargs'] = gradient_checkpointing_kwargs model = prepare_model_for_kbit_training(model, **prepare_model_kwargs) if args is not None: args = dataclasses.replace(args, gradient_checkpointing=False) elif getattr(args, 'gradient_checkpointing', False) and ('use_reentrant' not in gradient_checkpointing_kwargs or gradient_checkpointing_kwargs['use_reentrant']): if hasattr(model, 'enable_input_require_grads'): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) if 'autocast_adapter_dtype' in list(inspect.signature(get_peft_model).parameters) and getattr(model, 'is_loaded_in_4bit', False) and is_sharded_qlora: model = get_peft_model(model, peft_config, autocast_adapter_dtype=False) else: model = get_peft_model(model, peft_config) if args is not None and args.bf16 and getattr(model, 'is_loaded_in_4bit', False) and (not is_sharded_qlora): peft_module_casting_to_bf16(model) if tokenizer is None: tokenizer = AutoTokenizer.from_pretrained(model.config._name_or_path) if getattr(tokenizer, 'pad_token', None) is None: tokenizer.pad_token = tokenizer.eos_token if max_seq_length is not None: warnings.warn('You passed a `max_seq_length` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`.') args.max_seq_length = max_seq_length if args.max_seq_length is None: args.max_seq_length = min(tokenizer.model_max_length, 1024) warnings.warn(f"You didn't pass a `max_seq_length` argument to the SFTTrainer, this 
will default to {args.max_seq_length}") if dataset_num_proc is not None: warnings.warn('You passed a `dataset_num_proc` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`.') args.dataset_num_proc = dataset_num_proc self.dataset_num_proc = args.dataset_num_proc if dataset_batch_size is not None: warnings.warn('You passed a `dataset_batch_size` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`.') args.dataset_batch_size = dataset_batch_size self.dataset_batch_size = args.dataset_batch_size self._trainer_supports_neftune = hasattr(args, 'neftune_noise_alpha') if neftune_noise_alpha is not None and self._trainer_supports_neftune: args.neftune_noise_alpha = neftune_noise_alpha warnings.warn('You passed a `neftune_noise_alpha` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`.') elif not self._trainer_supports_neftune: self.neftune_noise_alpha = neftune_noise_alpha if dataset_text_field is not None: warnings.warn('You passed a `dataset_text_field` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`.') args.dataset_text_field = dataset_text_field if dataset_kwargs is not None: warnings.warn('You passed a `dataset_kwargs` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`.') args.dataset_kwargs = dataset_kwargs if args.dataset_kwargs is None: args.dataset_kwargs = {} if formatting_func is None and args.dataset_text_field is None: formatting_func = get_formatting_func_from_dataset(train_dataset, tokenizer) if formatting_func is not None: args.dataset_kwargs['add_special_tokens'] = False if not args.packing: if data_collator is None: data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False) if num_of_sequences is not None: warnings.warn('You passed a `num_of_sequences` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`.') args.num_of_sequences = num_of_sequences if chars_per_token is not None: warnings.warn('You passed a `chars_per_token` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`.') args.chars_per_token = chars_per_token with PartialState().local_main_process_first(): if train_dataset is not None: train_dataset = self._prepare_dataset(train_dataset, tokenizer, args.packing, args.dataset_text_field, args.max_seq_length, formatting_func, args.num_of_sequences, args.chars_per_token, remove_unused_columns=args.remove_unused_columns if args is not None else True, **args.dataset_kwargs) if eval_dataset is not None: _multiple = isinstance(eval_dataset, dict) _eval_datasets = eval_dataset if _multiple else {'singleton': eval_dataset} eval_packing = args.packing if args.eval_packing is None else args.eval_packing for (_eval_dataset_name, _eval_dataset) in _eval_datasets.items(): _eval_datasets[_eval_dataset_name] = self._prepare_dataset(_eval_dataset, tokenizer, eval_packing, args.dataset_text_field, args.max_seq_length, formatting_func, args.num_of_sequences, args.chars_per_token, remove_unused_columns=args.remove_unused_columns if args is not None else True, **args.dataset_kwargs) if not _multiple: eval_dataset = _eval_datasets['singleton'] if tokenizer.padding_side is not None and tokenizer.padding_side != 'right': warnings.warn("You passed a tokenizer with `padding_side` not equal to `right` to the SFTTrainer. 
This might lead to some unexpected behaviour due to overflow issues when training a model in half-precision. You might consider adding `tokenizer.padding_side = 'right'` to your code.") super().__init__(model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, model_init=model_init, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics) if hasattr(self.model, 'add_model_tags'): self.model.add_model_tags(self._tag_names) if self.train_dataset is not None: if self.args.max_steps > 0 and args.packing: warnings.warn('You passed `packing=True` to the SFTTrainer/SFTConfig, and you are training your model with `max_steps` strategy. The dataset will be iterated until the `max_steps` are reached.') self.train_dataset.infinite = True elif self.args.max_steps == -1 and args.packing: self.train_dataset.infinite = False @wraps(Trainer.train) def train(self, *args, **kwargs): if self.neftune_noise_alpha is not None and (not self._trainer_supports_neftune): self.model = self._trl_activate_neftune(self.model) output = super().train(*args, **kwargs) if self.neftune_noise_alpha is not None and (not self._trainer_supports_neftune): unwrapped_model = unwrap_model(self.model) if is_peft_available() and isinstance(unwrapped_model, PeftModel): embeddings = unwrapped_model.base_model.model.get_input_embeddings() else: embeddings = unwrapped_model.get_input_embeddings() self.neftune_hook_handle.remove() del embeddings.neftune_noise_alpha return output @wraps(Trainer.push_to_hub) def push_to_hub(self, commit_message: Optional[str]='End of training', blocking: bool=True, **kwargs) -> str: kwargs = trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs) return super().push_to_hub(commit_message=commit_message, blocking=blocking, **kwargs) def _prepare_dataset(self, dataset, tokenizer, packing, dataset_text_field, max_seq_length, formatting_func, num_of_sequences, chars_per_token, remove_unused_columns=True, append_concat_token=True, add_special_tokens=True, skip_prepare_dataset=False): if dataset is None: raise ValueError('The dataset should not be None') if skip_prepare_dataset: return dataset column_names = dataset.column_names if isinstance(dataset, (datasets.Dataset, datasets.IterableDataset)) else None if column_names and 'input_ids' in column_names: if formatting_func is not None: warnings.warn('You passed a dataset that is already processed (contains an `input_ids` field) together with a valid formatting function. Therefore `formatting_func` will be ignored.') return dataset if isinstance(dataset, (torch.utils.data.IterableDataset, torch.utils.data.Dataset, ConstantLengthDataset)) and (not isinstance(dataset, datasets.IterableDataset)): return dataset if dataset_text_field is None and formatting_func is None: raise ValueError("You need to provide either `dataset_text_field` or `formatting_func` argument. 
Alternatively, you can skip the dataset preparation by using `SFTConfig(dataset_kwargs={'skip_prepare_dataset': True})`.") if not packing: return self._prepare_non_packed_dataloader(tokenizer, dataset, dataset_text_field, max_seq_length, formatting_func, add_special_tokens, remove_unused_columns) else: return self._prepare_packed_dataloader(tokenizer, dataset, dataset_text_field, max_seq_length, num_of_sequences, chars_per_token, formatting_func, append_concat_token, add_special_tokens) def _prepare_non_packed_dataloader(self, tokenizer, dataset, dataset_text_field, max_seq_length, formatting_func=None, add_special_tokens=True, remove_unused_columns=True): use_formatting_func = formatting_func is not None and dataset_text_field is None def tokenize(element): outputs = tokenizer(element[dataset_text_field] if not use_formatting_func else formatting_func(element), add_special_tokens=add_special_tokens, truncation=True, padding=False, max_length=max_seq_length, return_overflowing_tokens=False, return_length=False) if use_formatting_func and (not isinstance(formatting_func(element), list)): raise ValueError('The `formatting_func` should return a list of processed strings since it can lead to silent bugs.') return {'input_ids': outputs['input_ids'], 'attention_mask': outputs['attention_mask']} signature_columns = ['input_ids', 'labels', 'attention_mask'] if dataset.column_names is not None: extra_columns = list(set(dataset.column_names) - set(signature_columns)) else: extra_columns = [] if not remove_unused_columns and len(extra_columns) > 0: warnings.warn(f'You passed `remove_unused_columns=False` on a non-packed dataset. This might create some issues with the default collator and yield to errors. If you want to inspect dataset other columns (in this case {extra_columns}), you can subclass `DataCollatorForLanguageModeling` in case you used the default collator and create your own data collator in order to inspect the unused dataset columns.') map_kwargs = {'batched': True, 'remove_columns': dataset.column_names if remove_unused_columns else None, 'batch_size': self.dataset_batch_size} if isinstance(dataset, datasets.Dataset): map_kwargs['num_proc'] = self.dataset_num_proc tokenized_dataset = dataset.map(tokenize, **map_kwargs) return tokenized_dataset def _prepare_packed_dataloader(self, tokenizer, dataset, dataset_text_field, max_seq_length, num_of_sequences, chars_per_token, formatting_func=None, append_concat_token=True, add_special_tokens=True): if dataset_text_field is not None or formatting_func is not None: if tokenizer is None: raise ValueError('You need to pass a tokenizer when using `dataset_text_field` with `SFTTrainer`.') constant_length_iterator = ConstantLengthDataset(tokenizer, dataset, dataset_text_field=dataset_text_field, formatting_func=formatting_func, seq_length=max_seq_length, infinite=False, num_of_sequences=num_of_sequences, chars_per_token=chars_per_token, eos_token_id=tokenizer.eos_token_id, append_concat_token=append_concat_token, add_special_tokens=add_special_tokens) if isinstance(dataset, datasets.IterableDataset): return constant_length_iterator def data_generator(constant_length_iterator): yield from constant_length_iterator try: packed_dataset = Dataset.from_generator(data_generator, gen_kwargs={'constant_length_iterator': constant_length_iterator}) except (DatasetGenerationError, SchemaInferenceError) as exc: raise ValueError('Error occurred while packing the dataset. 
Make sure that your dataset has enough samples to at least yield one packed sequence.') from exc return packed_dataset else: raise ValueError('You need to pass a `dataset_text_field` or `formatting_func` argument to the SFTTrainer if you want to use the `ConstantLengthDataset`.') def _trl_activate_neftune(self, model): unwrapped_model = unwrap_model(model) if is_peft_available() and isinstance(unwrapped_model, PeftModel): embeddings = unwrapped_model.base_model.model.get_input_embeddings() else: embeddings = unwrapped_model.get_input_embeddings() embeddings.neftune_noise_alpha = self.neftune_noise_alpha hook_handle = embeddings.register_forward_hook(neftune_post_forward_hook) self.neftune_hook_handle = hook_handle return model # File: trl-main/trl/trainer/utils.py import dataclasses import json import random import warnings from collections import deque from dataclasses import dataclass from typing import Any, Dict, List, Literal, Optional, Tuple, Union import numpy as np import pandas as pd import torch from accelerate import Accelerator from accelerate.state import AcceleratorState, PartialState from rich.console import Console from rich.table import Table from torch.nn.utils.rnn import pad_sequence from torch.utils.data import IterableDataset from transformers import BitsAndBytesConfig, DataCollatorForLanguageModeling, GenerationConfig, PreTrainedTokenizerBase, TrainerState, TrainingArguments from transformers.utils import is_torch_mlu_available, is_torch_npu_available, is_torch_xpu_available from ..import_utils import is_peft_available, is_unsloth_available, is_xpu_available from ..trainer.model_config import ModelConfig if is_peft_available(): from peft import LoraConfig, PeftConfig class AdaptiveKLController: def __init__(self, init_kl_coef, target, horizon): self.value = init_kl_coef self.target = target self.horizon = horizon def update(self, current, n_steps): target = self.target proportional_error = np.clip(current / target - 1, -0.2, 0.2) mult = 1 + proportional_error * n_steps / self.horizon self.value *= mult class FixedKLController: def __init__(self, kl_coef): self.value = kl_coef def update(self, current, n_steps): pass class DataCollatorForCompletionOnlyLM(DataCollatorForLanguageModeling): def __init__(self, response_template: Union[str, List[int]], instruction_template: Optional[Union[str, List[int]]]=None, *args, mlm: bool=False, ignore_index: int=-100, padding_free: bool=False, **kwargs): super().__init__(*args, mlm=mlm, **kwargs) self.instruction_template = instruction_template if isinstance(instruction_template, str): self.instruction_token_ids = self.tokenizer.encode(self.instruction_template, add_special_tokens=False) else: self.instruction_token_ids = instruction_template self.response_template = response_template if isinstance(response_template, str): self.response_token_ids = self.tokenizer.encode(self.response_template, add_special_tokens=False) else: self.response_token_ids = response_template if not self.mlm and self.instruction_template and (self.tokenizer.pad_token_id == self.tokenizer.eos_token_id): warnings.warn('The pad_token_id and eos_token_id values of this tokenizer are identical. If you are planning for multi-turn training, it can result in the model continuously generating questions and answers without eos token. 
To avoid this, set the pad_token_id to a different value.') self.ignore_index = ignore_index self.padding_free = padding_free def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]: batch = super().torch_call(examples) if self.instruction_template is None: for i in range(len(examples)): response_token_ids_start_idx = None for idx in np.where(batch['labels'][i] == self.response_token_ids[0])[0]: if self.response_token_ids == batch['labels'][i][idx:idx + len(self.response_token_ids)].tolist(): response_token_ids_start_idx = idx if response_token_ids_start_idx is None: warnings.warn(f"Could not find response key `{self.response_template}` in the following instance: {self.tokenizer.decode(batch['input_ids'][i])} This instance will be ignored in loss calculation. Note, if this happens often, consider increasing the `max_seq_length`.") batch['labels'][i, :] = self.ignore_index else: response_token_ids_end_idx = response_token_ids_start_idx + len(self.response_token_ids) batch['labels'][i, :response_token_ids_end_idx] = self.ignore_index else: for i in range(len(examples)): response_token_ids_idxs = [] human_token_ids_idxs = [] for assistant_idx in np.where(batch['labels'][i] == self.response_token_ids[0])[0]: if self.response_token_ids == batch['labels'][i][assistant_idx:assistant_idx + len(self.response_token_ids)].tolist(): response_token_ids_idxs.append(assistant_idx + len(self.response_token_ids)) if len(response_token_ids_idxs) == 0: warnings.warn(f"Could not find response key `{self.response_template}` in the following instance: {self.tokenizer.decode(batch['input_ids'][i])} This instance will be ignored in loss calculation. Note, if this happens often, consider increasing the `max_seq_length`.") batch['labels'][i, :] = self.ignore_index human_token_ids = self.instruction_token_ids for human_idx in np.where(batch['labels'][i] == human_token_ids[0])[0]: if human_token_ids == batch['labels'][i][human_idx:human_idx + len(human_token_ids)].tolist(): human_token_ids_idxs.append(human_idx) if len(human_token_ids_idxs) == 0: warnings.warn(f"Could not find instruction key `{self.instruction_template}` in the following instance: {self.tokenizer.decode(batch['input_ids'][i])} This instance will be ignored in loss calculation. Note, if this happens often, consider increasing the `max_seq_length`.") batch['labels'][i, :] = self.ignore_index if len(human_token_ids_idxs) > 0 and len(response_token_ids_idxs) > 0 and (human_token_ids_idxs[0] > response_token_ids_idxs[0]): human_token_ids_idxs = [0] + human_token_ids_idxs for (idx, (start, end)) in enumerate(zip(human_token_ids_idxs, response_token_ids_idxs)): if idx != 0: batch['labels'][i, start:end] = self.ignore_index else: batch['labels'][i, :end] = self.ignore_index if len(response_token_ids_idxs) < len(human_token_ids_idxs): batch['labels'][i, human_token_ids_idxs[-1]:] = self.ignore_index if self.padding_free: attn_mask = batch.pop('attention_mask') batch['input_ids'] = batch['input_ids'][attn_mask.bool()].unsqueeze(0) batch['position_ids'] = attn_mask.cumsum(1)[attn_mask.bool()].unsqueeze(0) - 1 batch['labels'] = batch['labels'][attn_mask.bool()].unsqueeze(0) batch['labels'][batch['position_ids'] == 0] = self.ignore_index return batch @dataclass class DataCollatorForChatML: tokenizer: PreTrainedTokenizerBase ignore_index: int = -100 max_length: int = None messages_key: str = 'messages' def __post_init__(self): if self.tokenizer.pad_token_id is None: raise ValueError('The tokenizer does not have a pad token. 
Please set `pad_token_id` in the tokenizer.') if self.max_length is None: self.max_length = min(self.tokenizer.model_max_length, 1024) def __call__(self, examples: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]: prompts = [] completions = [] for example in examples: messages = example[self.messages_key] formatted_chat = self.tokenizer.apply_chat_template(messages, tokenize=False) assistant_messages = [msg for msg in messages if msg['role'] == 'assistant'] last_assistant_message = assistant_messages[-1]['content'] prompt = formatted_chat.rsplit(last_assistant_message, 1)[0] completion = last_assistant_message prompts.append(prompt) completions.append(completion) tokenized_prompts = self.tokenizer(prompts, truncation=True, max_length=self.max_length, padding=False, return_tensors=None) tokenized_completions = self.tokenizer(completions, truncation=True, max_length=self.max_length, padding=False, return_tensors=None) input_ids = [] attention_mask = [] labels = [] for (prompt, completion) in zip(tokenized_prompts['input_ids'], tokenized_completions['input_ids']): combined_input_ids = prompt + completion combined_attention_mask = [1] * len(combined_input_ids) combined_labels = [self.ignore_index] * len(prompt) + completion[:-1] combined_labels.append(self.tokenizer.eos_token_id) input_ids.append(combined_input_ids) attention_mask.append(combined_attention_mask) labels.append(combined_labels) input_ids = [torch.tensor(ids) for ids in input_ids] attention_mask = [torch.tensor(mask) for mask in attention_mask] labels = [torch.tensor(label) for label in labels] input_ids = pad(input_ids, padding_side='left', padding_value=self.tokenizer.pad_token_id) attention_mask = pad(attention_mask, padding_side='left', padding_value=0) labels = pad(labels, padding_side='left', padding_value=self.ignore_index) prompts_input_ids = [torch.tensor(ids) for ids in tokenized_prompts['input_ids']] prompts_input_ids = pad(prompts_input_ids, padding_side='left', padding_value=self.tokenizer.pad_token_id) prompt_attention_mask = pad([torch.tensor([1] * len(ids)) for ids in tokenized_prompts['input_ids']], padding_side='left', padding_value=0) return {'input_ids': input_ids, 'attention_mask': attention_mask, 'labels': labels, 'prompts': prompts_input_ids, 'prompt_attention_mask': prompt_attention_mask} @dataclass class RewardDataCollatorWithPadding: tokenizer: PreTrainedTokenizerBase padding: Union[bool, str] = True max_length: Optional[int] = None pad_to_multiple_of: Optional[int] = None return_tensors: str = 'pt' def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]: features_chosen = [] features_rejected = [] margin = [] has_margin = 'margin' in features[0] for feature in features: if 'input_ids_chosen' not in feature or 'input_ids_rejected' not in feature or 'attention_mask_chosen' not in feature or ('attention_mask_rejected' not in feature): raise ValueError('The features should include `input_ids_chosen`, `attention_mask_chosen`, `input_ids_rejected` and `attention_mask_rejected`') features_chosen.append({'input_ids': feature['input_ids_chosen'], 'attention_mask': feature['attention_mask_chosen']}) features_rejected.append({'input_ids': feature['input_ids_rejected'], 'attention_mask': feature['attention_mask_rejected']}) if has_margin: margin.append(feature['margin']) batch_chosen = self.tokenizer.pad(features_chosen, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=self.return_tensors) batch_rejected = self.tokenizer.pad(features_rejected, 
padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=self.return_tensors) batch = {'input_ids_chosen': batch_chosen['input_ids'], 'attention_mask_chosen': batch_chosen['attention_mask'], 'input_ids_rejected': batch_rejected['input_ids'], 'attention_mask_rejected': batch_rejected['attention_mask'], 'return_loss': True} if has_margin: margin = torch.tensor(margin, dtype=torch.float) batch['margin'] = margin return batch def pad(tensors: List[torch.Tensor], padding_value: int=0, padding_side: str='right') -> torch.Tensor: output_shape = np.max([t.shape for t in tensors], 0).tolist() output = torch.full((len(tensors), *output_shape), padding_value, dtype=tensors[0].dtype, device=tensors[0].device) for (i, t) in enumerate(tensors): if padding_side == 'left': seq_slice = slice(output_shape[0] - t.shape[0], output_shape[0]) elif padding_side == 'right': seq_slice = slice(0, t.shape[0]) else: raise ValueError("padding_side must be 'left' or 'right'") slices = (seq_slice,) + tuple((slice(0, s) for s in t.shape[1:])) output[i][slices] = t return output @dataclass class DPODataCollatorWithPadding: pad_token_id: int = 0 label_pad_token_id: int = -100 is_encoder_decoder: Optional[bool] = False def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]: padded_batch = {} for k in features[0].keys(): if k.endswith(('_input_ids', '_attention_mask', '_labels', '_pixel_values')): if self.is_encoder_decoder: to_pad = [torch.LongTensor(ex[k]) for ex in features] if k.startswith('prompt') and k.endswith('input_ids'): if self.pad_token_id is None: raise ValueError('Padding is enabled, but the tokenizer is not configured with a padding token. Explicitly set `tokenizer.pad_token` (e.g. `tokenizer.pad_token = tokenizer.eos_token`) before calling the trainer.') padding_value = self.pad_token_id elif k.endswith('_attention_mask'): padding_value = 0 elif k.startswith(('chosen', 'rejected', 'completion')) or 'decoder' in k: padding_value = self.label_pad_token_id else: raise ValueError(f"Unexpected key in batch '{k}'") padded_batch[k] = pad_sequence(to_pad, batch_first=True, padding_value=padding_value) else: if k.endswith('_input_ids'): if self.pad_token_id is None: raise ValueError('Padding is enabled, but the tokenizer is not configured with a padding token. Explicitly set `tokenizer.pad_token` (e.g. 
`tokenizer.pad_token = tokenizer.eos_token`) before calling the trainer.') padding_value = self.pad_token_id elif k.endswith('_labels'): padding_value = self.label_pad_token_id elif k.endswith('_attention_mask'): padding_value = 0 elif k.endswith('_pixel_values'): padding_value = 0 else: raise ValueError(f"Unexpected key in batch '{k}'") if k in ['prompt_input_ids', 'prompt_attention_mask']: padding_side = 'left' else: padding_side = 'right' if k.endswith('_pixel_values'): dtype = torch.float32 else: dtype = torch.int64 to_pad = [torch.tensor(ex[k], dtype=dtype) for ex in features] padded_batch[k] = pad(to_pad, padding_value=padding_value, padding_side=padding_side) elif k.endswith('_logps'): padded_batch[k] = torch.tensor([ex[k] for ex in features]) else: padded_batch[k] = [ex[k] for ex in features] return padded_batch class ConstantLengthDataset(IterableDataset): def __init__(self, tokenizer, dataset, dataset_text_field=None, formatting_func=None, infinite=False, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6, eos_token_id=0, shuffle=True, append_concat_token=True, add_special_tokens=True): self.tokenizer = tokenizer if tokenizer.eos_token_id is None: warnings.warn(f'The passed tokenizer does not have an EOS token. We will use the passed eos_token_id instead which corresponds to {eos_token_id}. If this is not the correct EOS token, make sure to pass the correct eos_token_id.') self.concat_token_id = tokenizer.eos_token_id if tokenizer.eos_token_id else eos_token_id self.dataset = dataset self.seq_length = seq_length self.infinite = infinite self.current_size = 0 self.max_buffer_size = seq_length * chars_per_token * num_of_sequences self.shuffle = shuffle self.append_concat_token = append_concat_token self.add_special_tokens = add_special_tokens if formatting_func is None: self.formatting_func = lambda x: x[dataset_text_field] else: self.formatting_func = formatting_func if formatting_func is not None: if formatting_func.__code__.co_argcount > 1: warnings.warn('The passed formatting_func has more than one argument. Usually that function should have a single argument `example` which corresponds to the dictionary returned by each element of the dataset. 
Make sure you know what you are doing.') def __len__(self): return len(self.dataset) def __iter__(self): iterator = iter(self.dataset) more_examples = True while more_examples: (buffer, buffer_len) = ([], 0) while True: if buffer_len >= self.max_buffer_size: break try: buffer.append(self.formatting_func(next(iterator))) buffer_len += len(buffer[-1]) except StopIteration: if self.infinite: iterator = iter(self.dataset) warnings.warn('The dataset reached end and the iterator is reset to the start.') else: more_examples = False break if self.shuffle: random.shuffle(buffer) tokenized_inputs = self.tokenizer(buffer, add_special_tokens=self.add_special_tokens, truncation=False)['input_ids'] all_token_ids = [] for tokenized_input in tokenized_inputs: if self.append_concat_token: tokenized_input = tokenized_input + [self.concat_token_id] all_token_ids.extend(tokenized_input) examples = [] for i in range(0, len(all_token_ids), self.seq_length): input_ids = all_token_ids[i:i + self.seq_length] if len(input_ids) == self.seq_length: examples.append(input_ids) if self.shuffle: random.shuffle(examples) for example in examples: self.current_size += 1 yield {'input_ids': torch.LongTensor(example), 'labels': torch.LongTensor(example)} @dataclass class RunningMoments: accelerator: Accelerator mean: float = 0 std: float = 1 var: float = 1 count: float = 1e-24 @torch.no_grad() def update(self, xs: torch.Tensor) -> Tuple[float, float]: if self.accelerator.use_distributed: (xs_mean, xs_var, xs_count) = get_global_statistics(self.accelerator, xs) else: xs_count = xs.numel() (xs_var, xs_mean) = torch.var_mean(xs, unbiased=False) (xs_mean, xs_var) = (xs_mean.float(), xs_var.float()) delta = xs_mean - self.mean tot_count = self.count + xs_count new_sum = xs_var * xs_count old_sum = self.var * self.count + delta ** 2 * self.count * xs_count / tot_count tot_sum = old_sum + new_sum self.mean += (delta * xs_count / tot_count).item() new_var = tot_sum / tot_count self.std = (new_var * tot_count / (tot_count - 1)).float().sqrt().item() self.var = new_var.item() self.count = tot_count return (xs_mean.item(), (xs_var * xs_count / (xs_count - 1)).float().sqrt().item()) def save_to_json(self, json_path: str): if self.accelerator.is_main_process: save_dict = dataclasses.asdict(self, dict_factory=lambda x: {k: v for (k, v) in x if k != 'accelerator'}) json_string = json.dumps(save_dict, indent=2, sort_keys=True) + '\n' with open(json_path, 'w', encoding='utf-8') as f: f.write(json_string) @classmethod def load_from_json(cls, accelerator: Accelerator, json_path: str): with open(json_path, encoding='utf-8') as f: text = f.read() return cls(accelerator=accelerator, **json.loads(text)) @torch.no_grad() def get_global_statistics(accelerator, xs: torch.Tensor, mask=None, device='cpu') -> Tuple[torch.Tensor, torch.Tensor, int]: xs = xs.to(accelerator.device) sum_and_count = torch.tensor([xs.sum(), xs.numel() if mask is None else mask.sum()], device=xs.device) sum_and_count = accelerator.reduce(sum_and_count) (global_sum, count) = sum_and_count global_mean = global_sum / count sum_var = torch.sum(((xs - global_mean) ** 2).mul(1 if mask is None else mask)) sum_var = accelerator.reduce(sum_var) global_var = sum_var / count return (global_mean.to(device), global_var.to(device), count.item()) def compute_accuracy(eval_pred) -> Dict[str, float]: (predictions, labels) = eval_pred if np.array(predictions[:, 0] == predictions[:, 1], dtype=float).sum() > 0: warnings.warn(f'There are {np.array(predictions[:, 0] == predictions[:, 1]).sum()} out 
of {len(predictions[:, 0])} instances where the predictions for both options are equal. As a consequence the accuracy can be misleading.') predictions = np.argmax(predictions, axis=1) accuracy = np.array(predictions == labels, dtype=float).mean().item() return {'accuracy': accuracy} def pad_to_length(tensor: torch.Tensor, length: int, pad_value: Union[int, float], dim: int=-1) -> torch.Tensor: if tensor.size(dim) >= length: return tensor else: pad_size = list(tensor.shape) pad_size[dim] = length - tensor.size(dim) return torch.cat([tensor, pad_value * torch.ones(*pad_size, dtype=tensor.dtype, device=tensor.device)], dim=dim) def disable_dropout_in_model(model: torch.nn.Module) -> None: for module in model.modules(): if isinstance(module, torch.nn.Dropout): module.p = 0 def exact_div(a, b, custom_error_message=''): q = a // b if a != q * b: raise ValueError(f'{custom_error_message}, inexact division: {a} / {b} = {a / b}') return q class PerPromptStatTracker: def __init__(self, buffer_size, min_count): self.buffer_size = buffer_size self.min_count = min_count self.stats = {} def update(self, prompts, rewards): prompts = np.array(prompts) rewards = np.array(rewards) unique = np.unique(prompts) advantages = np.empty_like(rewards) for prompt in unique: prompt_rewards = rewards[prompts == prompt] if prompt not in self.stats: self.stats[prompt] = deque(maxlen=self.buffer_size) self.stats[prompt].extend(prompt_rewards) if len(self.stats[prompt]) < self.min_count: mean = np.mean(rewards) std = np.std(rewards) + 1e-06 else: mean = np.mean(self.stats[prompt]) std = np.std(self.stats[prompt]) + 1e-06 advantages[prompts == prompt] = (prompt_rewards - mean) / std return advantages def get_stats(self): return {k: {'mean': np.mean(v), 'std': np.std(v), 'count': len(v)} for (k, v) in self.stats.items()} def neftune_post_forward_hook(module, input, output): if module.training: dims = torch.tensor(output.size(1) * output.size(2)) mag_norm = module.neftune_noise_alpha / torch.sqrt(dims) output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm) return output def peft_module_casting_to_bf16(model): for (name, module) in model.named_modules(): if isinstance(module, torch.nn.LayerNorm) or 'norm' in name: module = module.to(torch.float32) elif any((x in name for x in ['lm_head', 'embed_tokens', 'wte', 'wpe'])): if hasattr(module, 'weight'): if module.weight.dtype == torch.float32: module = module.to(torch.bfloat16) def trl_sanitze_kwargs_for_tagging(model, tag_names, kwargs=None): if is_unsloth_available(): if hasattr(model, 'config') and getattr(model.config, 'unsloth_version', None) is not None: tag_names.append('unsloth') if kwargs is not None: if 'tags' not in kwargs: kwargs['tags'] = tag_names elif 'tags' in kwargs and isinstance(kwargs['tags'], list): kwargs['tags'].extend(tag_names) elif 'tags' in kwargs and isinstance(kwargs['tags'], str): tag_names.append(kwargs['tags']) kwargs['tags'] = tag_names return kwargs def get_quantization_config(model_config: ModelConfig) -> Optional[BitsAndBytesConfig]: if model_config.load_in_4bit: quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=model_config.torch_dtype, bnb_4bit_quant_type=model_config.bnb_4bit_quant_type, bnb_4bit_use_double_quant=model_config.use_bnb_nested_quant, bnb_4bit_quant_storage=model_config.torch_dtype) elif model_config.load_in_8bit: quantization_config = BitsAndBytesConfig(load_in_8bit=True) else: quantization_config = None return quantization_config def get_kbit_device_map() -> Optional[Dict[str, 
int]]: if is_xpu_available(): return {'': f'xpu:{PartialState().local_process_index}'} elif torch.cuda.is_available(): return {'': PartialState().local_process_index} else: return None def get_peft_config(model_config: ModelConfig) -> 'Optional[PeftConfig]': if model_config.use_peft is False: return None if not is_peft_available(): raise ValueError('You need to have PEFT library installed in your environment, make sure to install `peft`. Make sure to run `pip install -U peft`.') peft_config = LoraConfig(task_type=model_config.lora_task_type, r=model_config.lora_r, target_modules=model_config.lora_target_modules, lora_alpha=model_config.lora_alpha, lora_dropout=model_config.lora_dropout, bias='none', use_rslora=model_config.use_rslora, modules_to_save=model_config.lora_modules_to_save) return peft_config def get_exp_cap(value, decimal=4): vdtype_max = torch.zeros([1]).to(value.dtype) + torch.finfo(value.dtype).max vdtype_log_max = torch.log(vdtype_max).to(value.device) return torch.floor(vdtype_log_max * 10 ** decimal) / 10 ** decimal if decimal > 0 else vdtype_log_max def cap_exp(value, cap=-1): cap = get_exp_cap(value) if cap < 0 else cap return torch.exp(torch.clamp(value, max=cap)) def print_rich_table(df: pd.DataFrame) -> Table: console = Console() table = Table(show_lines=True) for column in df.columns: table.add_column(column) for (_, row) in df.iterrows(): table.add_row(*row.astype(str).tolist()) console.print(table) SIMPLE_SFT_CHAT_TEMPLATE = "{% for message in messages %}{{' ' + message['content']}}{% endfor %}{{ eos_token }}" SIMPLE_CHAT_TEMPLATE = "{% for message in messages %}{{message['role'].capitalize() + ': ' + message['content'] + '\n\n'}}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}" @dataclass class OnlineTrainerState(TrainerState): episode: int = 0 @dataclass class OnPolicyConfig(TrainingArguments): run_name: Optional[str] = None dataset_num_proc: Optional[int] = None num_mini_batches: int = 1 total_episodes: Optional[int] = None local_rollout_forward_batch_size: int = 64 num_sample_generations: int = 10 response_length: int = 53 stop_token: Optional[Literal['eos']] = None stop_token_id: Optional[int] = None temperature: float = 0.7 missing_eos_penalty: Optional[float] = None sft_model_path: str = 'EleutherAI/pythia-160m' world_size: Optional[int] = None num_total_batches: Optional[int] = None micro_batch_size: Optional[int] = None local_batch_size: Optional[int] = None batch_size: Optional[int] = None local_mini_batch_size: Optional[int] = None mini_batch_size: Optional[int] = None def first_true_indices(bools: torch.Tensor, dtype=torch.long): row_len = bools.size(-1) zero_or_index = row_len * (~bools).type(dtype) + torch.arange(row_len, dtype=dtype, device=bools.device) return torch.min(zero_or_index, dim=-1).values def get_reward(model: torch.nn.Module, query_responses: torch.Tensor, pad_token_id: int, context_length: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: attention_mask = query_responses != pad_token_id position_ids = attention_mask.cumsum(1) - attention_mask.long() lm_backbone = getattr(model, model.base_model_prefix) input_ids = torch.masked_fill(query_responses, ~attention_mask, 0) output = lm_backbone(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, return_dict=True, output_hidden_states=True, use_cache=False) reward_logits = model.score(output.hidden_states[-1]) sequence_lengths = first_true_indices(query_responses[:, context_length:] == pad_token_id) - 1 + context_length return 
(reward_logits, reward_logits[torch.arange(reward_logits.size(0), device=reward_logits.device), sequence_lengths].squeeze(-1), sequence_lengths) def forward(model: torch.nn.Module, query_responses: torch.Tensor, pad_token_id: int) -> torch.nn.Module: attention_mask = query_responses != pad_token_id position_ids = attention_mask.cumsum(1) - attention_mask.long() input_ids = torch.masked_fill(query_responses, ~attention_mask, 0) return model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, return_dict=True, output_hidden_states=True) def prepare_deepspeed(model: torch.nn.Module, per_device_train_batch_size: int, fp16: bool=False, bf16: bool=False): import deepspeed deepspeed_plugin = AcceleratorState().deepspeed_plugin config_kwargs = deepspeed_plugin.deepspeed_config if config_kwargs['zero_optimization']['stage'] != 3: config_kwargs['train_micro_batch_size_per_gpu'] = per_device_train_batch_size config_kwargs = {'train_micro_batch_size_per_gpu': config_kwargs['train_micro_batch_size_per_gpu'], 'prescale_gradients': False, 'wall_clock_breakdown': False} if bf16: config_kwargs['bf16'] = {'enabled': True} elif fp16: config_kwargs['fp16'] = {'enabled': True} elif hasattr(model, 'config'): hidden_size = max(model.config.hidden_sizes) if getattr(model.config, 'hidden_sizes', None) else getattr(model.config, 'hidden_size', None) if hidden_size is not None and config_kwargs['zero_optimization']['stage'] == 3: config_kwargs.update({'zero_optimization.reduce_bucket_size': hidden_size * hidden_size, 'zero_optimization.stage3_param_persistence_threshold': 10 * hidden_size, 'zero_optimization.stage3_prefetch_bucket_size': 0}) (model, *_) = deepspeed.initialize(model=model, config=config_kwargs) model.eval() return model def truncate_response(stop_token_id: int, pad_token_id: int, responses: torch.Tensor): trunc_idxs = first_true_indices(responses == stop_token_id).unsqueeze(-1) new_size = [1] * (len(responses.size()) - 1) + [responses.shape[1]] idxs = torch.arange(responses.shape[1], device=responses.device).view(*new_size) postprocessed_responses = torch.masked_fill(responses, idxs > trunc_idxs, pad_token_id) return postprocessed_responses def generate(lm_backbone: torch.nn.Module, queries: torch.Tensor, pad_token_id: int, generation_config: GenerationConfig) -> Tuple[torch.Tensor, torch.Tensor]: context_length = queries.shape[1] attention_mask = queries != pad_token_id input_ids = torch.masked_fill(queries, ~attention_mask, 0) output = lm_backbone.generate(input_ids=input_ids, attention_mask=attention_mask, generation_config=generation_config, return_dict_in_generate=True, output_scores=True) logits = torch.stack(output.scores, 1) return (torch.cat((queries, output.sequences[:, context_length:]), dim=1), logits) @torch.no_grad() def batch_generation(model: torch.nn.Module, queries: torch.Tensor, local_rollout_forward_batch_size: int, pad_token_id: int, generation_config: GenerationConfig): query_responses = [] logitss = [] for i in range(0, queries.shape[0], local_rollout_forward_batch_size): query = queries[i:i + local_rollout_forward_batch_size] (query_response, logits) = generate(model, query, pad_token_id, generation_config) query_responses.append(query_response) logitss.append(logits) return (torch.cat(query_responses, 0), torch.cat(logitss, 0)) def add_bos_token_if_needed(bos_token_id: Optional[int], prompt_len_input_ids: int, prompt_tokens: Dict[str, List[int]], chosen_prompt_len_input_ids: int, chosen_tokens: Dict[str, List[int]], rejected_prompt_len_input_ids: 
int, rejected_tokens: Dict[str, List[int]]): if bos_token_id is not None: if prompt_len_input_ids == 0 or bos_token_id != prompt_tokens['prompt_input_ids'][0]: prompt_tokens['prompt_input_ids'] = [bos_token_id] + prompt_tokens['prompt_input_ids'] prompt_tokens['prompt_attention_mask'] = [1] + prompt_tokens['prompt_attention_mask'] if chosen_prompt_len_input_ids == 0 or bos_token_id != chosen_tokens['prompt_input_ids'][0]: chosen_tokens['prompt_input_ids'] = [bos_token_id] + chosen_tokens['prompt_input_ids'] chosen_tokens['prompt_attention_mask'] = [1] + chosen_tokens['prompt_attention_mask'] if rejected_prompt_len_input_ids == 0 or bos_token_id != rejected_tokens['prompt_input_ids'][0]: rejected_tokens['prompt_input_ids'] = [bos_token_id] + rejected_tokens['prompt_input_ids'] rejected_tokens['prompt_attention_mask'] = [1] + rejected_tokens['prompt_attention_mask'] return (prompt_tokens, chosen_tokens, rejected_tokens) def add_eos_token_if_needed(eos_token_id: int, chosen_tokens: Dict[str, List[int]], rejected_tokens: Dict[str, List[int]]): if len(chosen_tokens['input_ids']) == 0 or eos_token_id != chosen_tokens['input_ids'][-1]: chosen_tokens['input_ids'].append(eos_token_id) chosen_tokens['attention_mask'].append(1) if len(rejected_tokens['input_ids']) == 0 or eos_token_id != rejected_tokens['input_ids'][-1]: rejected_tokens['input_ids'].append(eos_token_id) rejected_tokens['attention_mask'].append(1) return (chosen_tokens, rejected_tokens) def truncate_right(input_ids: torch.Tensor, stop_token_id: int, pad_token_id: int) -> Tuple[torch.Tensor, torch.Tensor]: trunc_idxs = first_true_indices(input_ids == stop_token_id).unsqueeze(-1) new_size = [1] * (len(input_ids.size()) - 1) + [input_ids.shape[1]] idxs = torch.arange(input_ids.shape[1], device=input_ids.device).view(*new_size) output_ids = torch.masked_fill(input_ids, idxs > trunc_idxs, pad_token_id) mask = torch.masked_fill(torch.ones_like(input_ids), idxs > trunc_idxs, 0) return (output_ids, mask) def empty_cache() -> None: if is_torch_xpu_available(): torch.xpu.empty_cache() elif is_torch_mlu_available(): torch.mlu.empty_cache() elif is_torch_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() def decode_and_strip_padding(inputs: torch.Tensor, tokenizer: PreTrainedTokenizerBase) -> List[str]: decoded = tokenizer.batch_decode(inputs, skip_special_tokens=False) return [d.replace(tokenizer.pad_token, '') for d in decoded] # File: trl-main/trl/trainer/xpo_config.py from dataclasses import dataclass from trl.trainer.online_dpo_config import OnlineDPOConfig @dataclass class XPOConfig(OnlineDPOConfig): alpha: float = 1e-05 # File: trl-main/trl/trainer/xpo_trainer.py from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from datasets import Dataset, IterableDataset from transformers import PreTrainedTokenizerBase, TrainerCallback from transformers.modeling_utils import PreTrainedModel from transformers.trainer_utils import EvalPrediction from transformers.training_args import OptimizerNames from transformers.utils import is_apex_available from ..models.utils import unwrap_model_for_generation from .online_dpo_trainer import OnlineDPOTrainer from .utils import empty_cache, get_reward, truncate_right from .xpo_config import XPOConfig if is_apex_available(): from apex import amp class XPOTrainer(OnlineDPOTrainer): _tag_names = ['trl', 'xpo'] def __init__(self, model: Union[PreTrainedModel, nn.Module]=None, ref_model: Union[PreTrainedModel, 
nn.Module]=None, reward_model: Optional[nn.Module]=None, args: Optional[XPOConfig]=None, data_collator: Optional[Callable]=None, train_dataset: Optional[Union[Dataset, IterableDataset]]=None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]]=None, tokenizer: Optional[PreTrainedTokenizerBase]=None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]]=None, callbacks: Optional[List[TrainerCallback]]=None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]=(None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]]=None) -> None: super().__init__(model=model, ref_model=ref_model, reward_model=reward_model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics) self.stats = {'loss/dpo': [], 'loss/xpo': [], 'objective/kl': [], 'objective/entropy': [], 'objective/model_scores': [], 'objective/ref_scores': [], 'objective/scores_margin': [], 'rewards/chosen': [], 'rewards/rejected': [], 'rewards/accuracies': [], 'rewards/margins': [], 'logps/chosen': [], 'logps/rejected': [], 'val/model_contain_eos_token': [], 'val/ref_contain_eos_token': []} def _generate_completions(self, model, ref_model, prompts): with unwrap_model_for_generation(model, self.accelerator) as unwrapped_model: model_output = unwrapped_model.generate(input_ids=prompts['input_ids'], attention_mask=prompts['attention_mask'], generation_config=self.generation_config) with torch.no_grad(), unwrap_model_for_generation(ref_model, self.accelerator) as unwrapped_ref_model: ref_output = unwrapped_ref_model.generate(input_ids=prompts['input_ids'], attention_mask=prompts['attention_mask'], generation_config=self.generation_config) return (model_output, ref_output) def _process_completions(self, model_output, ref_output, prompts): context_length = prompts['input_ids'].shape[1] model_completion_ids = model_output[:, context_length:] (model_completion_ids, model_completion_mask) = truncate_right(model_completion_ids, self.tokenizer.eos_token_id, self.tokenizer.pad_token_id) model_data = {'input_ids': torch.cat((prompts['input_ids'], model_completion_ids), dim=1), 'attention_mask': torch.cat((prompts['attention_mask'], model_completion_mask), dim=1)} ref_completion_ids = ref_output[:, context_length:] (ref_completion_ids, ref_completion_mask) = truncate_right(ref_completion_ids, self.tokenizer.eos_token_id, self.tokenizer.pad_token_id) ref_data = {'input_ids': torch.cat((prompts['input_ids'], ref_completion_ids), dim=1), 'attention_mask': torch.cat((prompts['attention_mask'], ref_completion_mask), dim=1)} return (model_data, ref_data) def _compute_rewards(self, model_data, ref_data, context_length): all_input_ids = torch.cat([model_data['input_ids'], ref_data['input_ids']], dim=0) with torch.no_grad(): (_, all_scores, _) = get_reward(self.reward_model, all_input_ids, self.tokenizer.pad_token_id, context_length) (model_scores, ref_scores) = all_scores.chunk(2) if self.args.missing_eos_penalty is not None: model_contain_eos = torch.any(model_data['input_ids'] == self.tokenizer.eos_token_id, dim=-1) ref_contain_eos = torch.any(ref_data['input_ids'] == self.tokenizer.eos_token_id, dim=-1) model_scores[~model_contain_eos] -= self.args.missing_eos_penalty ref_scores[~ref_contain_eos] -= self.args.missing_eos_penalty return (model_scores, ref_scores) def 
_compute_logprobs(self, model, ref_model, model_data, ref_data, context_length): def compute_logprobs_for_data(m, data): output = m(data['input_ids'], attention_mask=data['attention_mask']) logits = output.logits[:, context_length - 1:-1] logprobs = F.log_softmax(logits, dim=-1) token_logprobs = torch.gather(logprobs, 2, data['input_ids'][:, context_length:].unsqueeze(-1)).squeeze(-1) return token_logprobs model_logprobs_model_data = compute_logprobs_for_data(model, model_data) model_logprobs_ref_data = compute_logprobs_for_data(model, ref_data) with torch.no_grad(): ref_logprobs_model_data = compute_logprobs_for_data(ref_model, model_data) ref_logprobs_ref_data = compute_logprobs_for_data(ref_model, ref_data) model_padding_mask = model_data['attention_mask'][:, context_length:] == 0 ref_padding_mask = ref_data['attention_mask'][:, context_length:] == 0 model_logprobs_model_data = model_logprobs_model_data.masked_fill(model_padding_mask, 0.0) model_logprobs_ref_data = model_logprobs_ref_data.masked_fill(ref_padding_mask, 0.0) ref_logprobs_ref_data = ref_logprobs_ref_data.masked_fill(ref_padding_mask, 0.0) ref_logprobs_model_data = ref_logprobs_model_data.masked_fill(model_padding_mask, 0.0) return (model_logprobs_model_data, model_logprobs_ref_data, ref_logprobs_ref_data, ref_logprobs_model_data) def _compute_losses(self, model_logprobs_model_data, model_logprobs_ref_data, ref_logprobs_ref_data, ref_logprobs_model_data, chosen_mask): model_logprobs_model_data_sum = model_logprobs_model_data.sum(1) model_logprobs_ref_data_sum = model_logprobs_ref_data.sum(1) ref_logprobs_ref_data_sum = ref_logprobs_ref_data.sum(1) ref_logprobs_model_data_sum = ref_logprobs_model_data.sum(1) chosen_model_logprobs = torch.where(chosen_mask, model_logprobs_model_data_sum, model_logprobs_ref_data_sum) chosen_ref_logprobs = torch.where(chosen_mask, ref_logprobs_model_data_sum, ref_logprobs_ref_data_sum) chosen_log_ratios = chosen_model_logprobs - chosen_ref_logprobs rejected_model_logprobs = torch.where(~chosen_mask, model_logprobs_model_data_sum, model_logprobs_ref_data_sum) rejected_ref_logprobs = torch.where(~chosen_mask, ref_logprobs_model_data_sum, ref_logprobs_ref_data_sum) rejected_log_ratios = rejected_model_logprobs - rejected_ref_logprobs logits = chosen_log_ratios - rejected_log_ratios if self.args.loss_type == 'sigmoid': dpo_losses = -F.logsigmoid(self.args.beta * logits) elif self.args.loss_type == 'ipo': dpo_losses = (logits - 1 / (2 * self.args.beta)) ** 2 else: raise NotImplementedError(f'invalid loss type {self.args.loss_type}') xpo_losses = self.args.alpha * model_logprobs_ref_data_sum loss = (dpo_losses + xpo_losses).mean() return (loss, dpo_losses, xpo_losses) def _log_statistics(self, model_data, ref_data, model_logprobs_model_data, model_logprobs_ref_data, ref_logprobs_ref_data, ref_logprobs_model_data, model_scores, ref_scores, dpo_losses, xpo_losses, context_length): def gather_mean(tensor): return self.accelerator.gather(tensor).mean().item() self.stats['loss/dpo'].append(gather_mean(dpo_losses)) self.stats['loss/xpo'].append(gather_mean(xpo_losses)) self.stats['objective/model_scores'].append(gather_mean(model_scores)) self.stats['objective/ref_scores'].append(gather_mean(ref_scores)) self.stats['objective/scores_margin'].append(gather_mean(model_scores - ref_scores)) chosen_mask = model_scores >= ref_scores model_logprobs_model_data_sum = model_logprobs_model_data.sum(1) model_logprobs_ref_data_sum = model_logprobs_ref_data.sum(1) ref_logprobs_ref_data_sum = 
ref_logprobs_ref_data.sum(1) ref_logprobs_model_data_sum = ref_logprobs_model_data.sum(1) chosen_model_logprobs = torch.where(chosen_mask, model_logprobs_model_data_sum, model_logprobs_ref_data_sum) chosen_ref_logprobs = torch.where(chosen_mask, ref_logprobs_model_data_sum, ref_logprobs_ref_data_sum) chosen_log_ratios = chosen_model_logprobs - chosen_ref_logprobs rejected_model_logprobs = torch.where(~chosen_mask, model_logprobs_model_data_sum, model_logprobs_ref_data_sum) rejected_ref_logprobs = torch.where(~chosen_mask, ref_logprobs_model_data_sum, ref_logprobs_ref_data_sum) rejected_log_ratios = rejected_model_logprobs - rejected_ref_logprobs self.stats['logps/chosen'].append(gather_mean(chosen_model_logprobs.mean() + chosen_ref_logprobs.mean())) self.stats['logps/rejected'].append(gather_mean(rejected_model_logprobs.mean() + rejected_ref_logprobs.mean())) chosen_rewards = chosen_log_ratios * self.args.beta rejected_rewards = rejected_log_ratios * self.args.beta self.stats['rewards/chosen'].append(gather_mean(chosen_rewards.mean())) self.stats['rewards/rejected'].append(gather_mean(rejected_rewards.mean())) kl_model_data = model_logprobs_model_data - ref_logprobs_model_data kl_ref_data = model_logprobs_ref_data - ref_logprobs_ref_data mean_kl = (kl_model_data.sum(1) + kl_ref_data.sum(1)).mean() / 2 self.stats['objective/kl'].append(gather_mean(mean_kl)) entropy_model_data = -model_logprobs_model_data.sum(1) entropy_ref_data = -model_logprobs_ref_data.sum(1) mean_entropy = (entropy_model_data.mean() + entropy_ref_data.mean()) / 2 self.stats['objective/entropy'].append(gather_mean(mean_entropy)) margin = chosen_rewards - rejected_rewards self.stats['rewards/margins'].append(gather_mean(margin.mean())) accuracy = (margin > 0).float() self.stats['rewards/accuracies'].append(gather_mean(accuracy.mean())) model_eos = (model_data['input_ids'][:, context_length:] == self.tokenizer.eos_token_id).any(dim=1) ref_eos = (ref_data['input_ids'][:, context_length:] == self.tokenizer.eos_token_id).any(dim=1) self.stats['val/model_contain_eos_token'].append(gather_mean(model_eos.float())) self.stats['val/ref_contain_eos_token'].append(gather_mean(ref_eos.float())) def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: model.train() ref_model = self.ref_model ref_model.eval() inputs = self._prepare_inputs(inputs) context_length = inputs['prompt_input_ids'].shape[1] prompts = {'input_ids': inputs['prompt_input_ids'], 'attention_mask': inputs['prompt_attention_mask']} del inputs (model_output, ref_output) = self._generate_completions(model, ref_model, prompts) (model_data, ref_data) = self._process_completions(model_output, ref_output, prompts) (model_data_scores, ref_data_scores) = self._compute_rewards(model_data, ref_data, context_length) (model_logprobs_model_data, model_logprobs_ref_data, ref_logprobs_ref_data, ref_logprobs_model_data) = self._compute_logprobs(model, ref_model, model_data, ref_data, context_length) (loss, dpo_losses, xpo_losses) = self._compute_losses(model_logprobs_model_data, model_logprobs_ref_data, ref_logprobs_ref_data, ref_logprobs_model_data, model_data_scores >= ref_data_scores) self._log_statistics(model_data, ref_data, model_logprobs_model_data.detach(), model_logprobs_ref_data.detach(), ref_logprobs_ref_data, ref_logprobs_model_data, model_data_scores, ref_data_scores, dpo_losses.detach(), xpo_losses.detach(), context_length) if self.args.torch_empty_cache_steps is not None and self.state.global_step % 
self.args.torch_empty_cache_steps == 0: empty_cache() kwargs = {} if self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]: kwargs['learning_rate'] = self._get_learning_rate() if self.args.n_gpu > 1: loss = loss.mean() if self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: self.accelerator.backward(loss, **kwargs) return loss.detach() / self.args.gradient_accumulation_steps
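# --- Illustrative usage sketch (not part of the library source) ---
# The completion post-processing in `_process_completions` above relies on
# `truncate_right` (built on `first_true_indices`): everything after the first
# stop token in each sampled completion is replaced with the pad token, and the
# attention mask is zeroed accordingly. The toy tensors below are assumptions
# chosen only to make that behaviour concrete; token ids 5/7/9 carry no meaning,
# id 2 plays the role of EOS and id 0 the role of padding.
import torch
from trl.trainer.utils import truncate_right  # same module as the utilities above

completions = torch.tensor(
    [
        [5, 7, 2, 9, 9],  # EOS (id 2) at position 2 -> positions 3-4 get padded
        [5, 7, 9, 9, 9],  # no EOS -> sequence and mask are left untouched
    ]
)
ids, mask = truncate_right(completions, stop_token_id=2, pad_token_id=0)
# ids  -> [[5, 7, 2, 0, 0], [5, 7, 9, 9, 9]]
# mask -> [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]
print(ids)
print(mask)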
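# --- Illustrative wiring sketch (not part of the library source) ---
# A minimal sketch of how the XPOConfig/XPOTrainer defined above might be wired
# together. The checkpoint name, dataset id, and hyperparameter values are
# placeholder assumptions rather than recommendations; data collation,
# generation settings, and the evaluation loop are inherited from
# OnlineDPOTrainer and are not shown here.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer
from trl.trainer.xpo_config import XPOConfig
from trl.trainer.xpo_trainer import XPOTrainer

model_id = "EleutherAI/pythia-160m"  # assumed policy checkpoint (matches the OnPolicyConfig default)

tokenizer = AutoTokenizer.from_pretrained(model_id)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token  # generation and truncation need a pad token

model = AutoModelForCausalLM.from_pretrained(model_id)
ref_model = AutoModelForCausalLM.from_pretrained(model_id)  # frozen reference copy
# Any sequence-classification head with a single score works as the reward model here.
reward_model = AutoModelForSequenceClassification.from_pretrained(model_id, num_labels=1)

# Hypothetical prompt-only dataset id; substitute any dataset in the format
# OnlineDPOTrainer expects.
train_dataset = load_dataset("my-org/my-prompt-dataset", split="train")

args = XPOConfig(output_dir="xpo-sketch", alpha=1e-5)
trainer = XPOTrainer(
    model=model,
    ref_model=ref_model,
    reward_model=reward_model,
    args=args,
    tokenizer=tokenizer,
    train_dataset=train_dataset,
)
trainer.train()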