Spaces: wuhp · Running on Zero

wuhp committed · verified
Commit: 4e66e3d · 1 parent: f82c314

Update app.py

Files changed (1):
  app.py  +89 -117
app.py CHANGED
@@ -12,200 +12,172 @@ from transformers import (
    pipeline
  )

- #############################################################
- # ZeroGPU REQUIREMENT:
- # - No CUDA references at global scope.
- # - All GPU usage within @spaces.GPU(...) functions.
- #############################################################

- # We'll do a small subset of WikiText-2 for demonstration.
- # Real finetuning on the entire dataset likely exceeds typical ZeroGPU time.
- NUM_EXAMPLES = 1000  # or fewer to keep it quick

- # We'll store the "inference pipeline" after training
- TEXT_PIPELINE = None

- @spaces.GPU(duration=300)  # up to 5 minutes for a mini-finetraining
  def finetune_small_subset():
      """
-     1) Loads the model & tokenizer from 'wuhp/myr1'.
-     2) Loads a small subset of WikiText-2 for language modeling.
-     3) Runs a quick 1-epoch finetune.
-     4) Saves model + tokenizer to 'finetuned_myr1'.
-     5) Loads the newly trained model back into a text-generation pipeline.
-     Returns a success message.
      """
-
-     # -------------------------------
-     # A) Load a small dataset
-     # -------------------------------
      ds = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")
-     # Keep only a subset so we don't exceed time.
      ds = ds.select(range(min(NUM_EXAMPLES, len(ds))))

-     def format_and_tokenize(ex):
-         # For standard LM, we just treat each line as text
-         return tokenizer(ex["text"], truncation=True, max_length=512)
-
-     # We'll define them once we have the tokenizer below.
-
-     # -------------------------------
-     # B) Load config, tokenizer, model from HF
-     # (trust_remote_code = True for custom modeling_deepseek)
-     # -------------------------------
      config = AutoConfig.from_pretrained(
-         "wuhp/myr1",
          subfolder="myr1",
          trust_remote_code=True
      )
      tokenizer = AutoTokenizer.from_pretrained(
-         "wuhp/myr1",
          subfolder="myr1",
          trust_remote_code=True
      )
      model = AutoModelForCausalLM.from_pretrained(
          "wuhp/myr1",
          subfolder="myr1",
          config=config,
-         torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-         device_map="auto",
          trust_remote_code=True
      )

-     # -------------------------------
-     # C) Process dataset
-     # -------------------------------
-     ds = ds.map(format_and_tokenize, batched=True, remove_columns=["text"])
      ds.set_format("torch")

-     # -------------------------------
-     # D) Data Collator
-     # -------------------------------
-     collator = DataCollatorForLanguageModeling(
-         tokenizer=tokenizer,
-         mlm=False
-     )

-     # -------------------------------
-     # E) Training Arguments + Trainer
-     # -------------------------------
      training_args = TrainingArguments(
          output_dir="finetuned_myr1",
-         num_train_epochs=1,  # 1 epoch for demonstration
          per_device_train_batch_size=1,
          gradient_accumulation_steps=2,
          logging_steps=10,
-         save_steps=999999,  # effectively "don't save mid-training"
          save_total_limit=1,
-         fp16=torch.cuda.is_available(),
-         # ZeroGPU ephemeral environment => no real advantage to push_to_hub
      )

      trainer = Trainer(
          model=model,
          args=training_args,
          train_dataset=ds,
-         data_collator=collator,
      )

-     # -------------------------------
-     # F) Train
-     # -------------------------------
      trainer.train()

-     # -------------------------------
-     # G) Save local checkpoint
-     # -------------------------------
      trainer.save_model("finetuned_myr1")
      tokenizer.save_pretrained("finetuned_myr1")

-     # -------------------------------
-     # H) Reload the newly finetuned model as a pipeline
-     # -------------------------------
-     # (We do this so we can do inference in the same GPU session)
-     # However, if the pipeline is used *after* this function returns,
-     # we might need to re-load in a separate function call.
      finetuned_model = AutoModelForCausalLM.from_pretrained(
          "finetuned_myr1",
-         torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
          device_map="auto",
          trust_remote_code=True
      )
      global TEXT_PIPELINE
-     TEXT_PIPELINE = pipeline(
-         "text-generation",
-         model=finetuned_model,
-         tokenizer=tokenizer
-     )
-     return "Finetuning complete. Model reloaded for inference!"

  def ensure_pipeline():
      """
-     If TEXT_PIPELINE is None (e.g., we didn't finetune yet),
-     let's just load the *original* model from wuhp/myr1
-     so that 'predict' can still run.
      """
      global TEXT_PIPELINE
      if TEXT_PIPELINE is None:
-         # Load the original model for inference
-         TEXT_PIPELINE = pipeline(
-             "text-generation",
-             model="wuhp/myr1/myr1",  # subfolder syntax
              trust_remote_code=True
          )
      return TEXT_PIPELINE

- @spaces.GPU(duration=120)  # up to 2 minutes to generate text
- def predict(prompt, min_new_tokens=260, max_new_tokens=2600):
      """
-     Generate text from the (possibly finetuned) model.
-     We default max_new_tokens to 2,600, but allow up to 5,000 in the UI slider.
-     We'll also ensure a minimum of 260 tokens.
      """
-     pipe = ensure_pipeline()  # load model if not already
-     # Use pipeline generate params.
-     # The pipeline will handle do_sample by default.
-     # We set a large max_new_tokens, but be careful about timeouts.
-     outputs = pipe(
          prompt,
          min_new_tokens=int(min_new_tokens),
          max_new_tokens=int(max_new_tokens),
-         temperature=0.7,
-         top_p=0.9
      )
-     return outputs[0]["generated_text"]

- #############################################################
- # Build a Gradio UI
- #############################################################
  with gr.Blocks() as demo:
-     gr.Markdown("## ZeroGPU Finetuning & Long-Text Generation Demo")

-     finetune_btn = gr.Button("Finetune on a small WikiText-2 subset (5 min limit)")
-     finetune_status = gr.Textbox(label="Status")
-     # When user clicks, we run 'finetune_small_subset'
-     finetune_btn.click(fn=finetune_small_subset, outputs=finetune_status)

-     gr.Markdown(
-         "Once finetuning completes, or if you skip it, you can still do inference "
-         "with either the new or original model."
-     )

-     prompt_in = gr.Textbox(label="Prompt", lines=3)
-     min_tok_slider = gr.Slider(
-         minimum=260, maximum=5000, value=260, step=10,
-         label="Minimum New Tokens"
-     )
-     max_tok_slider = gr.Slider(
-         minimum=260, maximum=5000, value=2600, step=50,
-         label="Maximum New Tokens"
-     )
-     gen_btn = gr.Button("Generate")
      output_box = gr.Textbox(label="Generated Text", lines=12)

      gen_btn.click(
          fn=predict,
-         inputs=[prompt_in, min_tok_slider, max_tok_slider],
          outputs=output_box
      )

    pipeline
  )

+ ##############################################################################
+ # ZeroGPU constraints:
+ # 1) No GPU calls in top-level code
+ # 2) Decorate GPU-using functions with @spaces.GPU(...)
+ ##############################################################################

+ TEXT_PIPELINE = None  # We'll store an inference pipeline after training (if any).

+ # We'll train on a subset of WikiText-2 to keep it short for ZeroGPU demonstration.
+ NUM_EXAMPLES = 1000

+ @spaces.GPU(duration=300)  # 5 minutes to do a quick demo train
  def finetune_small_subset():
      """
+     Demonstration:
+     - Loads 'wuhp/myr1' (DeepSeek)
+     - Finetunes on a small subset of WikiText-2
+     - Disables fp16 to avoid "Attempting to unscale FP16 gradients" error
+     - Saves model to 'finetuned_myr1'
+     - Reloads as pipeline for inference
      """
+     # 1) Load dataset
      ds = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")
      ds = ds.select(range(min(NUM_EXAMPLES, len(ds))))

+     # 2) Load config, tokenizer, model
      config = AutoConfig.from_pretrained(
+         "wuhp/myr1",
          subfolder="myr1",
          trust_remote_code=True
      )
      tokenizer = AutoTokenizer.from_pretrained(
+         "wuhp/myr1",
          subfolder="myr1",
          trust_remote_code=True
      )
+
+     # If your GPU supports BF16 (e.g. A100), you can try:
+     #   bf16 = True, and fp16 = False
+     # Otherwise, just keep fp16=False.
+     # We'll do bf16=False so we definitely skip half-precision
+     # (which avoids the "Attempting to unscale FP16 gradients" error).
+     bf16 = False
+     fp16 = False
+
      model = AutoModelForCausalLM.from_pretrained(
          "wuhp/myr1",
          subfolder="myr1",
          config=config,
+         # Only auto-detect if we do normal float32 or bfloat16.
+         # (We do not want normal fp16 in training.)
+         torch_dtype=torch.bfloat16 if bf16 else torch.float32,
+         device_map="auto",
          trust_remote_code=True
      )

+     # 3) Tokenize data
+     def tokenize_fn(ex):
+         return tokenizer(ex["text"], truncation=True, max_length=512)
+
+     ds = ds.map(tokenize_fn, batched=True, remove_columns=["text"])
      ds.set_format("torch")

+     collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

+     # 4) TrainingArguments
      training_args = TrainingArguments(
          output_dir="finetuned_myr1",
+         num_train_epochs=1,
          per_device_train_batch_size=1,
          gradient_accumulation_steps=2,
          logging_steps=10,
+         save_steps=999999,  # won't save mid-training
          save_total_limit=1,
+         # Turn off half precision explicitly.
+         fp16=fp16,
+         bf16=bf16,
+         # If this still fails, drop any features that rely on automatic
+         # gradient scaling, or fall back to a more manual training loop.
      )

+     # 5) Build Trainer
      trainer = Trainer(
          model=model,
          args=training_args,
          train_dataset=ds,
+         data_collator=collator
      )

+     # 6) Train
      trainer.train()

+     # 7) Save final
      trainer.save_model("finetuned_myr1")
      tokenizer.save_pretrained("finetuned_myr1")

+     # 8) Reload the newly trained model as a pipeline
      finetuned_model = AutoModelForCausalLM.from_pretrained(
          "finetuned_myr1",
+         torch_dtype=torch.bfloat16 if bf16 else torch.float32,
          device_map="auto",
          trust_remote_code=True
      )
      global TEXT_PIPELINE
+     TEXT_PIPELINE = pipeline("text-generation", model=finetuned_model, tokenizer=tokenizer)
+     return "Finetuning complete! Model reloaded for inference."

  def ensure_pipeline():
      """
+     If we haven't finetuned yet, or if TEXT_PIPELINE is None,
+     load the original model from 'wuhp/myr1' for inference.
      """
      global TEXT_PIPELINE
      if TEXT_PIPELINE is None:
+         tokenizer = AutoTokenizer.from_pretrained("wuhp/myr1", subfolder="myr1", trust_remote_code=True)
+         # We'll do float32 for inference if no BF16 or fp16.
+         model = AutoModelForCausalLM.from_pretrained(
+             "wuhp/myr1",
+             subfolder="myr1",
+             torch_dtype=torch.float32,
+             device_map="auto",
              trust_remote_code=True
          )
+         TEXT_PIPELINE = pipeline("text-generation", model=model, tokenizer=tokenizer)
      return TEXT_PIPELINE

+ @spaces.GPU(duration=120)  # up to 2 minutes for text generation
+ def predict(prompt, temperature, top_p, min_new_tokens, max_new_tokens):
      """
+     Generates text from the (finetuned) pipeline or the original model.
+     Allows user to adjust temperature, top_p, and token range [260..5000].
      """
+     pipe = ensure_pipeline()
+     out = pipe(
          prompt,
+         temperature=float(temperature),
+         top_p=float(top_p),
          min_new_tokens=int(min_new_tokens),
          max_new_tokens=int(max_new_tokens),
+         do_sample=True
      )
+     return out[0]["generated_text"]

+ # Build Gradio UI
  with gr.Blocks() as demo:
+     gr.Markdown("## ZeroGPU Mini-Finetuning (No FP16) + Long Text Generation")

+     # 1) Button to run finetune_small_subset()
+     finetune_btn = gr.Button("Finetune WikiText-2 (Subset)")
+     status_box = gr.Textbox(label="Finetune Status")
+     finetune_btn.click(fn=finetune_small_subset, outputs=status_box)

+     gr.Markdown("Use 'Generate' to produce text from either the newly finetuned or original model.")
+
+     prompt_in = gr.Textbox(lines=3, label="Prompt")
+     temperature = gr.Slider(0.0, 1.5, value=0.7, step=0.1, label="Temperature")
+     top_p = gr.Slider(0.0, 1.0, value=0.9, step=0.05, label="Top-p")
+     min_tokens = gr.Slider(260, 5000, value=260, step=10, label="Min New Tokens")
+     max_tokens = gr.Slider(260, 5000, value=500, step=50, label="Max New Tokens")

      output_box = gr.Textbox(label="Generated Text", lines=12)
+     gen_btn = gr.Button("Generate")

      gen_btn.click(
          fn=predict,
+         inputs=[prompt_in, temperature, top_p, min_tokens, max_tokens],
          outputs=output_box
      )
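
The new code hard-codes `bf16 = False` and `fp16 = False`. As a minimal sketch, not part of this commit, the precision flags could instead be picked from the hardware at runtime; `torch.cuda.is_bf16_supported()` is assumed to exist in the installed PyTorch version:

```python
import torch

def pick_precision():
    """Sketch: choose TrainingArguments precision flags from the hardware.

    Mirrors the commit's intent: never train in fp16 (avoids the
    "Attempting to unscale FP16 gradients" error), and enable bf16 only
    when the GPU supports it (e.g. A100).
    """
    bf16 = torch.cuda.is_available() and torch.cuda.is_bf16_supported()
    fp16 = False  # never train this model in fp16 here
    return bf16, fp16

bf16, fp16 = pick_precision()
print(f"bf16={bf16}, fp16={fp16}")
```

If the ZeroGPU device reports bf16 support, this would enable it automatically; the commit simply keeps both flags off for safety.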
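
For completeness, a hypothetical local smoke test of the updated `predict()` signature (prompt, temperature, top_p, min_new_tokens, max_new_tokens). This is illustration only: it assumes the `spaces` decorator degrades to a no-op outside a Space, and that `demo.launch()` lives further down app.py, below the hunk shown above.

```python
# Hypothetical usage of the new predict() signature; values mirror the UI slider defaults.
if __name__ == "__main__":
    text = predict(
        "Explain what ZeroGPU is in one paragraph.",  # prompt
        0.7,   # temperature
        0.9,   # top_p
        260,   # min_new_tokens
        500,   # max_new_tokens
    )
    print(text[:500])
```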