
Training notebook (Colab): https://colab.research.google.com/drive/1rT472vEOPjYCKdZ1CEg0IWm-h0dployN?usp=sharing


# Install dependencies
!pip install datasets transformers evaluate wandb py7zr sentencepiece huggingface_hub rouge_score accelerate

# Log in to Weights & Biases for experiment tracking
import wandb
wandb.login()


# Log in to the Hugging Face Hub so the fine-tuned model can be pushed later
from huggingface_hub import interpreter_login

interpreter_login()

from datasets import interleave_datasets, load_dataset

# Load the English and Russian dialogue-summarization datasets
samsum_dataset = load_dataset("bragovo/dsum_en", split="train")
samsum_ru_dataset = load_dataset("bragovo/dsum_ru", split="train")

# Alternate examples from both languages, then hold out 20% for evaluation
dataset = interleave_datasets([samsum_dataset, samsum_ru_dataset])
dataset = dataset.train_test_split(test_size=0.2)
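
Optional sanity check (a sketch, not part of the original script): the preprocessing below assumes each record has "dialogue" and "summary" fields, so it is worth confirming the interleaved split exposes them.

# Optional: inspect the mixed dataset; the "dialogue" and "summary" field names
# are assumed to match what preprocess_function uses below
print(dataset)
print(dataset["train"][0]["dialogue"][:200])
print(dataset["train"][0]["summary"])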

from transformers import AutoTokenizer

# Multilingual (Russian/English) T5 checkpoint; t5-small can be swapped in for quick experiments
checkpoint = "cointegrated/rut5-base-multitask"
# checkpoint = "t5-small"
tokenizer = AutoTokenizer.from_pretrained(checkpoint, legacy=False)

# T5-style models expect a task prefix on every input
prefix = "summarize: "

def preprocess_function(examples):
    # Prepend the task prefix to every dialogue
    inputs = [prefix + doc for doc in examples["dialogue"]]

    # Tokenize dialogues as model inputs and summaries as targets
    model_inputs = tokenizer(inputs)
    labels = tokenizer(text_target=examples["summary"])

    # The tokenized summaries become the labels the model learns to generate
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

# Tokenize the whole dataset in batches
tokenized_dataset = dataset.map(preprocess_function, batched=True)
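
As a quick check (again a sketch), one tokenized example can be decoded back to confirm the prefix and labels look right:

# Optional: decode one tokenized example to verify the "summarize: " prefix and labels
example = tokenized_dataset["train"][0]
print(tokenizer.decode(example["input_ids"], skip_special_tokens=True)[:200])
print(tokenizer.decode(example["labels"], skip_special_tokens=True))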

from transformers import DataCollatorForSeq2Seq

# Dynamically pad inputs and labels to the longest sequence in each batch
data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=checkpoint)

import evaluate

# ROUGE is the standard metric for summarization quality
rouge = evaluate.load("rouge")

import numpy as np

def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    # Decode the generated token ids to text
    decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)
    # Replace the -100 used for loss masking with the pad token before decoding references
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

    result = rouge.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)

    # Average length of the generated summaries, in tokens
    prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in predictions]
    result["gen_len"] = np.mean(prediction_lens)

    return {k: round(v, 4) for k, v in result.items()}


from transformers import AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer

model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

training_args = Seq2SeqTrainingArguments(
    output_dir="bragovo/flux-mt5-base-multitask-model",
    evaluation_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    weight_decay=0.01,
    save_total_limit=3,
    num_train_epochs=4,
    predict_with_generate=True,  # run generate() during evaluation so ROUGE can be computed
    fp16=True,                   # requires a CUDA GPU
    push_to_hub=True,
)

trainer = Seq2SeqTrainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset["train"],
    eval_dataset=tokenized_dataset["test"],
    tokenizer=tokenizer,
    data_collator=data_collator,
    compute_metrics=compute_metrics,
)

# Fine-tune, then upload the resulting checkpoint to the Hub
trainer.train()
trainer.push_to_hub()
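
After training, the pushed checkpoint can be tried out directly. A minimal inference sketch, assuming the Hub repo name matches the output_dir above and that inputs need the same "summarize: " prefix used during training:

# Minimal inference sketch (repo name assumed to match output_dir above)
from transformers import pipeline

summarizer = pipeline("summarization", model="bragovo/flux-mt5-base-multitask-model")
dialogue = "Anna: Are we still meeting at 6? Boris: Yes, see you at the cafe."
print(summarizer("summarize: " + dialogue)[0]["summary_text"])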