Commit f140b0f
Parent(s): b4f4f9d

Cleaning autotrain

Files changed:
- autotrain/autotrain.yml +0 -34
- autotrain/run_autotrain.py +0 -67
autotrain/autotrain.yml DELETED
@@ -1,34 +0,0 @@
-task: llm-sft
-base_model: mistralai/Mistral-7B-Instruct-v0.3
-project_name: autotrain-mistral-v03-prompt-experiment-mc-R-FA-sg
-log: tensorboard
-backend: spaces-l4x1
-
-data:
-  path: derek-thomas/labeled-multiple-choice-explained-mistral-tokenized
-  train_split: train
-  # valid_split: val
-  valid_split: null
-  chat_template: none
-  column_mapping:
-    text_column: conversation_R_FA_sg
-
-params:
-  block_size: 1024
-  model_max_length: 1024
-  epochs: 2
-  batch_size: 1
-  lr: 3e-5
-  peft: true
-  quantization: int4
-  target_modules: all-linear
-  padding: left
-  optimizer: adamw_torch
-  scheduler: linear
-  gradient_accumulation: 8
-  mixed_precision: bf16
-
-hub:
-  username: derek-thomas
-  token: ${HF_TOKEN}
-  push_to_hub: true
autotrain/run_autotrain.py DELETED
@@ -1,67 +0,0 @@
-import os
-import subprocess
-
-import yaml
-
-# Base config
-config_template = {
-    "task": "llm-sft",
-    "base_model": "mistralai/Mistral-7B-Instruct-v0.3",
-    "project_name": "",
-    "log": "tensorboard",
-    "backend": "spaces-l4x1",
-    "data": {
-        "path": "derek-thomas/labeled-multiple-choice-explained-mistral-tokenized",
-        "train_split": "train",
-        "valid_split": None,
-        "chat_template": "none",
-        "column_mapping": {
-            "text_column": ""
-        },
-    },
-    "params": {
-        "block_size": 1024,
-        "model_max_length": 1024,
-        "epochs": 2,
-        "batch_size": 1,
-        "lr": 3e-5,
-        "peft": True,
-        "quantization": "int4",
-        "target_modules": "all-linear",
-        "padding": "left",
-        "optimizer": "adamw_torch",
-        "scheduler": "linear",
-        "gradient_accumulation": 8,
-        "mixed_precision": "bf16",
-    },
-    "hub": {
-        "username": "derek-thomas",
-        "token": os.getenv('HF_TOKEN'),
-        "push_to_hub": True,
-    },
-}
-
-# Suffix options
-project_suffixes = ["RFA-gpt3-5", "RFA-mistral", "FAR-gpt3-5", "FAR-mistral", "FA"]
-text_columns = ["conversation_RFA_gpt3_5", "conversation_RFA_mistral", "conversation_FAR_gpt3_5",
-                "conversation_FAR_mistral", "conversation_FA"]
-
-# Directory to store generated configs
-output_dir = "./autotrain_configs"
-os.makedirs(output_dir, exist_ok=True)
-
-# Generate configs and run commands
-for project_suffix, text_column in zip(project_suffixes, text_columns):
-    # Modify the config
-    config = config_template.copy()
-    config["project_name"] = f"mistral-v03-poe-{project_suffix}"
-    config["data"]["column_mapping"]["text_column"] = text_column
-
-    # Save the config to a YAML file
-    config_path = os.path.join(output_dir, f"{text_column}.yml")
-    with open(config_path, "w") as f:
-        yaml.dump(config, f)
-
-    # Run the command
-    print(f"Running autotrain with config: {config_path}")
-    subprocess.run(["autotrain", "--config", config_path])