Spaces: wuhp / Running on Zero

wuhp committed: Update app.py
Commit 4cf237b · verified · Parent: 4e66e3d

Files changed (1):
  app.py  (+47, -56)
app.py CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 import spaces
-import torch
 from datasets import load_dataset
+import torch
 from transformers import (
     AutoConfig,
     AutoTokenizer,
@@ -13,28 +13,28 @@ from transformers import (
 )
 
 ##############################################################################
-# ZeroGPU constraints:
-# 1) No GPU calls in top-level code
-# 2) Decorate GPU-using functions with @spaces.GPU(...)
+# GLOBALS / ZERO-GPU APPROACH
 ##############################################################################
+# We store a global pipeline after finetuning (if any).
+TEXT_PIPELINE = None
 
-TEXT_PIPELINE = None  # We'll store an inference pipeline after training (if any).
+# We'll train on only 50 examples from WikiText-2 to keep it short.
+NUM_EXAMPLES = 50
 
-# We'll train on a subset of WikiText-2 to keep it short for ZeroGPU demonstration.
-NUM_EXAMPLES = 1000
 
-@spaces.GPU(duration=300)  # 5 minutes to do a quick demo train
+@spaces.GPU(duration=600)  # up to 600 seconds (10 minutes) for mini-finetraining
 def finetune_small_subset():
     """
-    Demonstration:
-    - Loads 'wuhp/myr1' (DeepSeek)
-    - Finetunes on a small subset of WikiText-2
-    - Disables fp16 to avoid "Attempting to unscale FP16 gradients" error
-    - Saves model to 'finetuned_myr1'
-    - Reloads as pipeline for inference
+    1) Loads 'wuhp/myr1' in 8-bit,
+    2) Takes 50 examples from WikiText-2,
+    3) Finetunes for 1 epoch,
+    4) Saves to 'finetuned_myr1/',
+    5) Reloads the new model into a pipeline for inference.
     """
+
     # 1) Load dataset
     ds = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")
+    # Keep only 50 to fit ephemeral GPU time
     ds = ds.select(range(min(NUM_EXAMPLES, len(ds))))
 
     # 2) Load config, tokenizer, model
@@ -48,27 +48,17 @@ def finetune_small_subset():
         subfolder="myr1",
         trust_remote_code=True
     )
-
-    # If your GPU supports BF16 (e.g. A100), you can try:
-    # bf16 = True, and fp16 = False
-    # Otherwise, just keep fp16=False
-    # We'll do bf16=False so we definitely skip half-precision
-    # (which avoids the "Attempting to unscale FP16 gradients" error).
-    bf16 = False
-    fp16 = False
-
+    # 8-bit loading via bitsandbytes
     model = AutoModelForCausalLM.from_pretrained(
         "wuhp/myr1",
         subfolder="myr1",
         config=config,
-        # Only auto-detect if we do normal float32 or bfloat16.
-        # (We do not want normal fp16 in training.)
-        torch_dtype=torch.bfloat16 if bf16 else torch.float32,
-        device_map="auto",
+        load_in_8bit=True,   # <--- 8-bit
+        device_map="auto",   # let HF manage device placement
         trust_remote_code=True
     )
 
-    # 3) Tokenize data
+    # 3) Tokenize
     def tokenize_fn(ex):
         return tokenizer(ex["text"], truncation=True, max_length=512)
 
@@ -77,72 +67,73 @@ def finetune_small_subset():
 
     collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
 
-    # 4) TrainingArguments
+    # 4) TrainingArguments: no fp16 to avoid half-precision gradient issues
     training_args = TrainingArguments(
         output_dir="finetuned_myr1",
         num_train_epochs=1,
         per_device_train_batch_size=1,
         gradient_accumulation_steps=2,
         logging_steps=10,
-        save_steps=999999,  # won't save mid-training
+        save_steps=999999,  # skip mid-training saves
         save_total_limit=1,
-        # Turn off half precision explicitly
-        fp16=fp16,
-        bf16=bf16,
-        # If the above doesn't fix it, remove advanced features that auto uses
-        # gradient scaling, or do more manual approach.
+        fp16=False,         # <--- disable FP16
     )
 
-    # 5) Build Trainer
+    # 5) Trainer
     trainer = Trainer(
         model=model,
         args=training_args,
         train_dataset=ds,
-        data_collator=collator
+        data_collator=collator,
     )
 
     # 6) Train
    trainer.train()
 
-    # 7) Save final
+    # 7) Save final model
     trainer.save_model("finetuned_myr1")
     tokenizer.save_pretrained("finetuned_myr1")
 
-    # 8) Reload the newly trained model as a pipeline
+    # 8) Reload the newly finetuned model as a pipeline (for inference)
     finetuned_model = AutoModelForCausalLM.from_pretrained(
         "finetuned_myr1",
-        torch_dtype=torch.bfloat16 if bf16 else torch.float32,
         device_map="auto",
         trust_remote_code=True
     )
+
     global TEXT_PIPELINE
     TEXT_PIPELINE = pipeline("text-generation", model=finetuned_model, tokenizer=tokenizer)
     return "Finetuning complete! Model reloaded for inference."
 
+
 def ensure_pipeline():
     """
-    If we haven't finetuned yet, or if TEXT_PIPELINE is None,
-    load the original model from 'wuhp/myr1' for inference.
+    If no pipeline yet, load the original model from wuhp/myr1 for inference.
+    (In 8-bit or normal float? We can do normal float here for a simpler approach.)
     """
     global TEXT_PIPELINE
     if TEXT_PIPELINE is None:
-        tokenizer = AutoTokenizer.from_pretrained("wuhp/myr1", subfolder="myr1", trust_remote_code=True)
-        # We'll do float32 for inference if no BF16 or fp16.
-        model = AutoModelForCausalLM.from_pretrained(
+        tokenizer = AutoTokenizer.from_pretrained(
             "wuhp/myr1",
             subfolder="myr1",
-            torch_dtype=torch.float32,
-            device_map="auto",
             trust_remote_code=True
         )
+        model = AutoModelForCausalLM.from_pretrained(
+            "wuhp/myr1",
+            subfolder="myr1",
+            trust_remote_code=True,
+            load_in_8bit=True,  # load in 8-bit also for inference
+            device_map="auto"
+        )
         TEXT_PIPELINE = pipeline("text-generation", model=model, tokenizer=tokenizer)
     return TEXT_PIPELINE
 
-@spaces.GPU(duration=120)  # up to 2 minutes for text generation
+
+@spaces.GPU(duration=120)  # up to 120s for text generation
 def predict(prompt, temperature, top_p, min_new_tokens, max_new_tokens):
     """
-    Generates text from the (finetuned) pipeline or the original model.
-    Allows user to adjust temperature, top_p, and token range [260..5000].
+    Generates text from either the finetuned pipeline (if it exists) or the base model.
+    Allows user to adjust temperature, top_p, min/max tokens.
     """
     pipe = ensure_pipeline()
     out = pipe(
@@ -155,20 +146,20 @@ def predict(prompt, temperature, top_p, min_new_tokens, max_new_tokens):
     )
     return out[0]["generated_text"]
 
+
 # Build Gradio UI
 with gr.Blocks() as demo:
-    gr.Markdown("## ZeroGPU Mini-Finetuning (No FP16) + Long Text Generation")
+    gr.Markdown("## ZeroGPU: Mini-Finetune with 8-bit + Extended Generation")
 
-    # 1) Button to run finetune_small_subset()
-    finetune_btn = gr.Button("Finetune WikiText-2 (Subset)")
+    finetune_btn = gr.Button("Finetune on 50 lines of WikiText-2 (up to 10 min)")
     status_box = gr.Textbox(label="Finetune Status")
     finetune_btn.click(fn=finetune_small_subset, outputs=status_box)
 
-    gr.Markdown("Use 'Generate' to produce text from either the newly finetuned or original model.")
+    gr.Markdown("After finetuning, or even without it, generate text below:")
 
     prompt_in = gr.Textbox(lines=3, label="Prompt")
-    temperature = gr.Slider(0.0, 1.5, value=0.7, step=0.1, label="Temperature")
-    top_p = gr.Slider(0.0, 1.0, value=0.9, step=0.05, label="Top-p")
+    temperature = gr.Slider(0.0, 1.5, step=0.1, value=0.7, label="Temperature")
+    top_p = gr.Slider(0.0, 1.0, step=0.05, value=0.9, label="Top-p")
     min_tokens = gr.Slider(260, 5000, value=260, step=10, label="Min New Tokens")
    max_tokens = gr.Slider(260, 5000, value=500, step=50, label="Max New Tokens")
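
Note on the 8-bit load introduced above: recent transformers releases deprecate the bare load_in_8bit=True argument in favour of an explicit quantization config object. The following is a minimal sketch (not part of this commit) of the equivalent call, assuming a transformers version that ships BitsAndBytesConfig and that bitsandbytes is installed in the Space:

    # Sketch only: same 8-bit load expressed with an explicit BitsAndBytesConfig,
    # which newer transformers versions prefer over the bare load_in_8bit flag.
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    bnb_config = BitsAndBytesConfig(load_in_8bit=True)

    model = AutoModelForCausalLM.from_pretrained(
        "wuhp/myr1",
        subfolder="myr1",
        quantization_config=bnb_config,  # replaces load_in_8bit=True
        device_map="auto",
        trust_remote_code=True,
    )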
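Separately, depending on the installed transformers version, Trainer may refuse to fine-tune a model loaded purely in 8-bit and ask for trainable adapters instead. A minimal sketch of the usual workaround with peft is shown below; it assumes a recent peft release is available in the Space, reuses the model variable from finetune_small_subset, and the target_modules names are placeholders that depend on the actual myr1 architecture:

    # Sketch only (not in this commit): attach LoRA adapters so the 8-bit model
    # becomes trainable before it is handed to Trainer.
    from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

    model = prepare_model_for_kbit_training(model)  # re-enables grads where needed

    lora_config = LoraConfig(
        r=8,
        lora_alpha=16,
        lora_dropout=0.05,
        target_modules=["q_proj", "v_proj"],  # placeholder module names
        task_type="CAUSAL_LM",
    )
    model = get_peft_model(model, lora_config)
    model.print_trainable_parameters()  # only the adapter weights are trained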