rhshah committed on
Commit b1890b3
1 Parent(s): a453724

Update app.py

Files changed (1)
  1. app.py +29 -14
app.py CHANGED
@@ -1,14 +1,29 @@
- import gradio as gr
- # Load the model from Hugging Face
- model = gr.load("models/rhshah/MediumGEN_LLama2")
- import time
- # Define the function to use the model
- def predict(input):
-     time.sleep(10)
-     return "model(input)"
-
- # Create the Gradio interface
- iface = gr.Interface(fn=predict, inputs="text", outputs="text")
-
- # Launch the interface
- iface.launch()
+ # import gradio as gr
+ # # Load the model from Hugging Face
+ # model = gr.load("models/rhshah/MediumGEN_LLama2")
+ # import time
+ # # Define the function to use the model
+ # def predict(input):
+ #     time.sleep(10)
+ #     return "model(input)"
+
+ # # Create the Gradio interface
+ # iface = gr.Interface(fn=predict, inputs="text", outputs="text")
+
+ # # Launch the interface
+ # iface.launch()
+
+ from peft import AutoPeftModelForCausalLM
+ from transformers import AutoTokenizer
+ import torch
+ peft_model_dir = "models/rhshah/MediumGEN_LLama2"
+
+
+ # load base LLM model and tokenizer
+ trained_model = AutoPeftModelForCausalLM.from_pretrained(
+     peft_model_dir,
+     low_cpu_mem_usage=True,
+     # torch_dtype=torch.float16,
+     # load_in_4bit=True,
+ )
+ tokenizer = AutoTokenizer.from_pretrained(peft_model_dir)
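
Note: after this commit, app.py loads the PEFT model and tokenizer but leaves the old Gradio interface commented out and defines no generation step. Below is a minimal sketch of how the two halves could be tied back together, assuming a standard transformers `generate()` call and the same text-to-text Gradio interface from the previous version; the sampling parameters (`max_new_tokens`, `top_p`, `temperature`) are illustrative choices, not part of the commit.

```python
import gradio as gr
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Path copied from the commit; depending on the setup this may need to be
# the plain Hub repo id (e.g. "rhshah/MediumGEN_LLama2") or a local directory.
peft_model_dir = "models/rhshah/MediumGEN_LLama2"

# Load the PEFT-adapted model and its tokenizer, as in the commit above.
trained_model = AutoPeftModelForCausalLM.from_pretrained(
    peft_model_dir,
    low_cpu_mem_usage=True,
)
tokenizer = AutoTokenizer.from_pretrained(peft_model_dir)


def predict(prompt: str) -> str:
    # Tokenize the prompt and generate a continuation.
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        output_ids = trained_model.generate(
            **inputs,
            max_new_tokens=256,
            do_sample=True,
            top_p=0.9,
            temperature=0.7,
        )
    # Return only the newly generated tokens, decoded back to text.
    return tokenizer.decode(
        output_ids[0][inputs["input_ids"].shape[1]:],
        skip_special_tokens=True,
    )


# Re-create the Gradio interface that the commit commented out.
iface = gr.Interface(fn=predict, inputs="text", outputs="text")
iface.launch()
```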