aekpic877 committed
Commit 674e436 · verified · 1 Parent(s): 17c5983

Update app.py

Files changed (1)
  1. app.py +20 -23
app.py CHANGED
@@ -1,29 +1,26 @@
- import torch
- from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig

- # Specify the model and tokenizer
  model_name = "deepseek-ai/deepseek-math-7b-instruct"
  tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")
- model.generation_config = GenerationConfig.from_pretrained(model_name)
- model.generation_config.pad_token_id = model.generation_config.eos_token_id
-
- # Function to read text from a file
- def read_input_text(file_path):
-     with open(file_path, 'r', encoding='utf-8') as file:
-         text = file.read()
-     return text.strip()
-
- # Example usage: Replace 'input.txt' with your file path
- input_text = read_input_text('input.txt')
-
- # Prepare input as a chat message
- messages = [{"role": "user", "content": input_text}]
- input_tensor = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
-
- # Generate outputs from the model
- outputs = model.generate(input_tensor.to(model.device), max_new_tokens=100)
-
- # Decode the generated output
- result = tokenizer.decode(outputs[0][input_tensor.shape[1]:], skip_special_tokens=True)
- print(result)
 
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM

+ # Load your model from Hugging Face Transformers
  model_name = "deepseek-ai/deepseek-math-7b-instruct"
  tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+
+ # Define a function to use the model
+ def math_inference(input_text):
+     inputs = tokenizer(input_text, return_tensors="pt")
+     output = model.generate(**inputs)
+     response = tokenizer.decode(output[0], skip_special_tokens=True)
+     return response
+
+ # Create a Gradio interface
+ iface = gr.Interface(
+     fn=math_inference,
+     inputs=gr.Textbox(label="Input math question"),
+     outputs=gr.Textbox(label="Math answer"),
+     title="Math Solver"
+ )
+
+ # Launch the Gradio interface
+ iface.launch()
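
Two things this rewrite drops from the previous app.py are worth flagging: the model is now loaded in full precision on the default device (no torch_dtype=torch.bfloat16, device_map="auto"), and the raw question goes straight into generate() without the chat template an instruct-tuned model expects, and with no max_new_tokens cap. Below is a minimal sketch of the handler with those pieces folded back in; it reuses only calls from the pre-commit version of the file, and the bfloat16/GPU assumption comes from that version, not from this commit.

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "deepseek-ai/deepseek-math-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# bfloat16 + device_map="auto", as in the pre-commit version of app.py
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.bfloat16, device_map="auto"
)

def math_inference(input_text):
    # Wrap the question in the chat template the instruct model was tuned on
    messages = [{"role": "user", "content": input_text}]
    input_tensor = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    )
    outputs = model.generate(input_tensor.to(model.device), max_new_tokens=100)
    # Decode only the newly generated tokens, skipping the echoed prompt
    return tokenizer.decode(outputs[0][input_tensor.shape[1]:], skip_special_tokens=True)

iface = gr.Interface(
    fn=math_inference,
    inputs=gr.Textbox(label="Input math question"),
    outputs=gr.Textbox(label="Math answer"),
    title="Math Solver",
)
iface.launch()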