"""
Fine-tuning the library models for multiple choice.
"""

import json
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from pathlib import Path
from typing import Optional, Union

import datasets
import tensorflow as tf
from datasets import load_dataset

import transformers
from transformers import (
    CONFIG_NAME,
    TF2_WEIGHTS_NAME,
    AutoConfig,
    AutoTokenizer,
    DefaultDataCollator,
    HfArgumentParser,
    PushToHubCallback,
    TFAutoModelForMultipleChoice,
    TFTrainingArguments,
    create_optimizer,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry

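# Will error if the minimal version of Transformers is not installed. Remove at your own risk.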
check_min_version("4.28.0.dev0")

logger = logging.getLogger(__name__)


@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads the multiple-choice inputs it receives.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            If set, will pad the sequence to a multiple of the provided value.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # Flatten: split each example's `num_choices` sequences into separate entries so the
        # tokenizer can pad them all together.
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="np",
        )

        # Un-flatten: regroup into (batch_size, num_choices, seq_len) and add the labels back.
        batch = {k: tf.reshape(v, (batch_size, num_choices, -1)) for k, v in batch.items()}
        batch["labels"] = tf.convert_to_tensor(labels, dtype=tf.int64)
        return batch
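

# Minimal usage sketch (illustrative, not executed): given features whose "input_ids" hold
# `num_choices` tokenized sequences per example, the collator pads them all and regroups the
# result into (batch_size, num_choices, seq_len) tensors, with the labels added back:
#
#   collator = DataCollatorForMultipleChoice(tokenizer, pad_to_multiple_of=8)
#   batch = collator(features)  # batch["input_ids"].shape == (len(features), num_choices, seq_len)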


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the model on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
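

# An illustrative invocation (model name and paths are placeholders):
#
#   python run_swag.py \
#     --model_name_or_path bert-base-cased \
#     --output_dir ./swag-output \
#     --do_train --do_eval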


def main():
    # Parse our arguments from the command line, or from a single JSON file if one is passed.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking example usage helps the maintainers better allocate resources;
    # the information sent is just the arguments passed to this script.
    send_example_telemetry("run_swag", model_args, data_args, framework="tensorflow")

    output_dir = Path(training_args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Detect a usable checkpoint in the output directory, or fail early if the directory
    # is non-empty and doesn't contain one.
    checkpoint = None
    if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir:
        if (output_dir / CONFIG_NAME).is_file() and (output_dir / TF2_WEIGHTS_NAME).is_file():
            checkpoint = output_dir
            logger.info(
                f"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this"
                " behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
        else:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to continue regardless."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Load the dataset: either the user's own CSV/JSON files, or the SWAG dataset from the Hub.
    # In distributed training, `load_dataset` guarantees that only one local process downloads the data.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        # Infer the loader type from the file extension; fall back to the validation file
        # when no training file is given.
        extension = (data_args.train_file or data_args.validation_file).split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Download and load the SWAG dataset from the Hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
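
    # Illustrative schema for a custom JSON lines file (field names follow the SWAG convention used below):
    #   {"sent1": "...", "sent2": "...", "ending0": "...", "ending1": "...",
    #    "ending2": "...", "ending3": "...", "label": 2}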

    # When using your own dataset or a dataset other than SWAG, you will probably need to change these names.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    # Load the model config and tokenizer. If resuming from a checkpoint, read the config from there.
    if checkpoint is not None:
        config_path = training_args.output_dir
    elif model_args.config_name:
        config_path = model_args.config_name
    else:
        config_path = model_args.model_name_or_path

    # In distributed training, the `from_pretrained` methods guarantee that only one local process
    # can concurrently download the model and vocab.
    config = AutoConfig.from_pretrained(
        config_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Work out the maximum sequence length, warning when the requested value is unusable.
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
                "Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_function(examples):
        # Repeat each context four times so it can be paired with each of the four candidate endings.
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten everything out so the tokenizer sees one long list of (context, ending) pairs.
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize, then un-flatten: regroup the sequences into lists of four choices per example.
        tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True, max_length=max_seq_length)
        data = {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
        return data
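
    # Worked example (illustrative): with two input examples, `first_sentences` and `second_sentences`
    # each flatten to 8 strings (4 endings per example); after tokenization, the slicing above regroups
    # indices [0:4] and [4:8] back into per-example lists of 4 choices.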

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if data_args.pad_to_max_length:
        data_collator = DefaultDataCollator(return_tensors="np")
    else:
        # Pad each batch dynamically to the longest sequence it contains.
        data_collator = DataCollatorForMultipleChoice(tokenizer)

    with training_args.strategy.scope():
        # Load the model, resuming from the detected checkpoint if there is one.
        if checkpoint is None:
            model_path = model_args.model_name_or_path
        else:
            model_path = checkpoint
        model = TFAutoModelForMultipleChoice.from_pretrained(
            model_path,
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )

        num_replicas = training_args.strategy.num_replicas_in_sync
        total_train_batch_size = training_args.per_device_train_batch_size * num_replicas
        total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas

        if training_args.do_train:
            num_train_steps = (len(train_dataset) // total_train_batch_size) * int(training_args.num_train_epochs)
            if training_args.warmup_steps > 0:
                num_warmup_steps = training_args.warmup_steps
            elif training_args.warmup_ratio > 0:
                num_warmup_steps = int(num_train_steps * training_args.warmup_ratio)
            else:
                num_warmup_steps = 0
            optimizer, lr_schedule = create_optimizer(
                init_lr=training_args.learning_rate,
                num_train_steps=num_train_steps,
                num_warmup_steps=num_warmup_steps,
                adam_beta1=training_args.adam_beta1,
                adam_beta2=training_args.adam_beta2,
                adam_epsilon=training_args.adam_epsilon,
                weight_decay_rate=training_args.weight_decay,
                adam_global_clipnorm=training_args.max_grad_norm,
            )
        else:
            optimizer = None
        model.compile(optimizer=optimizer, metrics=["accuracy"], jit_compile=training_args.xla)
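
        # Note (illustrative): TF multiple-choice models expect inputs shaped
        # (batch_size, num_choices, seq_len) and produce logits shaped (batch_size, num_choices),
        # which is why the data collator reshapes each padded tensor before batching.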

        # Prepare the Hub repo id and model card metadata for pushing, if requested.
        push_to_hub_model_id = training_args.push_to_hub_model_id
        model_name = model_args.model_name_or_path.split("/")[-1]
        if not push_to_hub_model_id:
            push_to_hub_model_id = f"{model_name}-finetuned-multiplechoice"

        model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice"}

        if training_args.push_to_hub:
            callbacks = [
                PushToHubCallback(
                    output_dir=training_args.output_dir,
                    hub_model_id=push_to_hub_model_id,
                    hub_token=training_args.push_to_hub_token,
                    tokenizer=tokenizer,
                    **model_card_kwargs,
                )
            ]
        else:
            callbacks = []

        eval_metrics = None
        if training_args.do_train:
            dataset_options = tf.data.Options()
            dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF

            # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset that is
            # ready to pass to Keras: it drops columns the model cannot accept and applies our
            # collate_fn to batch the examples.
            tf_train_dataset = model.prepare_tf_dataset(
                train_dataset,
                shuffle=True,
                batch_size=total_train_batch_size,
                collate_fn=data_collator,
            ).with_options(dataset_options)

            if training_args.do_eval:
                validation_data = model.prepare_tf_dataset(
                    eval_dataset,
                    shuffle=False,
                    batch_size=total_eval_batch_size,
                    collate_fn=data_collator,
                    drop_remainder=True,
                ).with_options(dataset_options)
            else:
                validation_data = None
            history = model.fit(
                tf_train_dataset,
                validation_data=validation_data,
                epochs=int(training_args.num_train_epochs),
                callbacks=callbacks,
            )
            eval_metrics = {key: val[-1] for key, val in history.history.items()}
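            # `history.history` maps each metric name to a per-epoch list (e.g. {"loss": [...],
            # "val_accuracy": [...]}), so taking `val[-1]` above keeps the final epoch's value.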

        # If we aren't training, evaluate the compiled model on the validation set instead.
        if training_args.do_eval and not training_args.do_train:
            dataset_options = tf.data.Options()
            dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF

            tf_eval_dataset = model.prepare_tf_dataset(
                eval_dataset,
                shuffle=False,
                batch_size=total_eval_batch_size,
                collate_fn=data_collator,
                drop_remainder=True,
            ).with_options(dataset_options)
            eval_results = model.evaluate(tf_eval_dataset)
            eval_metrics = {"val_loss": eval_results[0], "val_accuracy": eval_results[1]}

        # Write the final metrics to all_results.json in the output directory.
        if eval_metrics is not None and training_args.output_dir is not None:
            output_eval_file = os.path.join(training_args.output_dir, "all_results.json")
            with open(output_eval_file, "w") as writer:
                writer.write(json.dumps(eval_metrics))

        # If we're not pushing to the Hub (the callback saves as it trains), save a final copy locally.
        if training_args.output_dir is not None and not training_args.push_to_hub:
            model.save_pretrained(training_args.output_dir)


if __name__ == "__main__":
    main()