awacke1 commited on
Commit
12c8e07
·
verified ·
1 Parent(s): feabd29

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -16
app.py CHANGED
@@ -1,45 +1,63 @@
1
  import os
2
  import gradio as gr
3
  from transformers import AutoModel, AutoTokenizer
4
- from fastapi import FastAPI
5
 
6
def process_models(model_name, save_dir, additional_models):
    """Download Hugging Face models and save them locally.

    Downloads the primary model plus any additional models, saves each under
    ``save_dir/<name with '/' replaced by '_'>``, and returns a newline-joined
    markdown log of what happened.

    Args:
        model_name: Hub id of the primary model (e.g. ``"openai-gpt"``).
        save_dir: Directory under which per-model subdirectories are created.
        additional_models: Optional iterable of extra hub ids; may be None/empty.

    Returns:
        str: One log line per step, joined with "\n". Errors are caught per
        model and reported in the log rather than raised.
    """
    log_lines = []

    # Primary model first, then any extras — identical handling for each,
    # so process them in one loop instead of two duplicated branches.
    for name in [model_name] + list(additional_models or []):
        log_lines.append(f"🚀 Loading model: **{name}**")
        try:
            model = AutoModel.from_pretrained(name)
            tokenizer = AutoTokenizer.from_pretrained(name)
            model_save_path = os.path.join(save_dir, name.replace("/", "_"))
            os.makedirs(model_save_path, exist_ok=True)
            model.save_pretrained(model_save_path)
            # Fix: the tokenizer was loaded but never persisted, leaving the
            # saved directory unusable on reload — save it alongside the model.
            tokenizer.save_pretrained(model_save_path)
            log_lines.append(f"✅ Saved **{name}** to `{model_save_path}`")
        except Exception as e:
            # Best-effort: report the failure in the log and keep going.
            log_lines.append(f"❌ Error with **{name}**: {e}")

    return "\n".join(log_lines)
36
 
37
# Mini-glossary of the app's data flow, expressed as a Mermaid diagram
# embedded in Markdown.
mermaid_glossary = """
```mermaid
graph LR
    A[🚀 Model Input] --> B[Load Model]
    B --> C[💾 Save Model]
    D[🧩 Additional Models] --> B
```
"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
  import gradio as gr
3
  from transformers import AutoModel, AutoTokenizer
 
4
 
5
def process_models(model_name, save_dir, additional_models):
    """Download Hugging Face models and save them locally.

    Downloads the primary model plus any additional models, saves each under
    ``save_dir/<name with '/' replaced by '_'>``, and returns a newline-joined
    log of what happened.

    Args:
        model_name: Hub id of the primary model (e.g. ``"openai-gpt"``).
        save_dir: Directory under which per-model subdirectories are created.
        additional_models: Optional iterable of extra hub ids; may be None/empty.

    Returns:
        str: One log line per step, joined with "\n". Errors are caught per
        model and reported in the log rather than raised.
    """
    log_lines = []

    # Primary model first, then any extras — identical handling for each,
    # so process them in one loop instead of two duplicated branches.
    for name in [model_name] + list(additional_models or []):
        log_lines.append(f"🚀 Loading model: {name}")
        try:
            model = AutoModel.from_pretrained(name)
            tokenizer = AutoTokenizer.from_pretrained(name)
            model_save_path = os.path.join(save_dir, name.replace("/", "_"))
            os.makedirs(model_save_path, exist_ok=True)
            model.save_pretrained(model_save_path)
            # Fix: the tokenizer was loaded but never persisted, leaving the
            # saved directory unusable on reload — save it alongside the model.
            tokenizer.save_pretrained(model_save_path)
            log_lines.append(f"✅ Saved {name} to {model_save_path}")
        except Exception as e:
            # Best-effort: report the failure in the log and keep going.
            log_lines.append(f"❌ Error with {name}: {e}")

    return "\n".join(log_lines)
35
 
36
# Build the Gradio UI: two text inputs, a multiselect of extra models,
# a run button, and a textbox that shows the processing log.
with gr.Blocks() as demo:
    gr.Markdown("# HuggingFace Model Loader & Saver")
    gr.Markdown("Load and save HuggingFace models locally using Transformers.")

    with gr.Row():
        name_box = gr.Textbox(
            label="🚀 Model",
            value="openai-gpt",
            placeholder="Enter model name",
        )
        dir_box = gr.Textbox(
            label="💾 Save Dir",
            value="./hugging",
            placeholder="Enter save directory",
        )

    extras_dropdown = gr.Dropdown(
        label="🧩 Additional Models",
        choices=["bert-base-uncased", "gpt2", "roberta-base"],
        value=[],
        multiselect=True,
        info="Select additional models",
    )

    run_button = gr.Button("Load & Save Model")
    log_box = gr.Textbox(label="Output Log", lines=10)

    # Wire the button: the handler receives the three inputs and its
    # returned log string is rendered into the output textbox.
    run_button.click(
        fn=process_models,
        inputs=[name_box, dir_box, extras_dropdown],
        outputs=log_box,
    )

if __name__ == "__main__":
    # Hugging Face Spaces runs this file directly with python; bind to all
    # interfaces on the port the platform expects.
    demo.launch(server_name="0.0.0.0", server_port=7860)