jeduardogruiz
committed on
Commit 92ee7ff • 1 Parent(s): c40c211
Create Peft.train
Peft.train ADDED (+20 -0)
@@ -0,0 +1,20 @@
# https://github.com/huggingface/peft
from transformers import AutoModelForSeq2SeqLM
from peft import get_peft_model, LoraConfig, TaskType

model_name_or_path = "bigscience/mt0-large"
tokenizer_name_or_path = "bigscience/mt0-large"

# LoRA config: rank-8 adapters with scaling alpha=32 and 10% dropout,
# targeting a sequence-to-sequence language model
peft_config = LoraConfig(
    task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
)

# Load the base model, then wrap it so only the LoRA adapters are trainable
model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
# trainable params: 2359296 || all params: 1231940608 || trainable%: 0.19151053100118282
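Beyond printing the parameter counts, the committed file contains no actual training step. A minimal sketch of one optimization step and an adapter save could look like the following; the example sentence, learning rate, and output directory are assumptions, not part of the commit:

import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path)
inputs = tokenizer("translate English to French: Hello", return_tensors="pt")
labels = tokenizer("Bonjour", return_tensors="pt").input_ids

# Optimize only the parameters PEFT left trainable (the LoRA matrices)
optimizer = torch.optim.AdamW(
    (p for p in model.parameters() if p.requires_grad), lr=3e-4  # lr is an assumption
)

model.train()
outputs = model(**inputs, labels=labels)  # seq2seq forward pass returns the loss
outputs.loss.backward()
optimizer.step()
optimizer.zero_grad()

# Saves only the small adapter weights, not the full 1.2B-parameter base model
model.save_pretrained("mt0-large-lora")  # output path is an assumption

In a real run this step would loop over batches from a DataLoader; the saved adapter can later be reattached to a freshly loaded base model with peft.PeftModel.from_pretrained.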
Citation for the PEFT library:

@Misc{peft,
  title = {PEFT: State-of-the-art Parameter-Efficient Fine-Tuning methods},
  author = {Sourab Mangrulkar and Sylvain Gugger and Lysandre Debut and Younes Belkada and Sayak Paul and Benjamin Bossan},
  howpublished = {\url{https://github.com/huggingface/peft}},
  year = {2022}
}