Update app.py
app.py CHANGED
@@ -1,6 +1,6 @@
 import gradio as gr
 from peft import PeftModel, PeftConfig
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 import torch

 # Device configuration (prioritize GPU if available)
@@ -26,7 +26,8 @@ def greet(text):
     with torch.no_grad(): # Disable gradient calculation for inference
         batch = tokenizer(f'"{text}" ->:', return_tensors='pt') # Move tensors to device
         with torch.cuda.amp.autocast(): # Enable mixed-precision if available
-            output_tokens = model.generate(**batch)
+            output_tokens = model.generate(**batch
+            , max_new_tokens=15)
     return tokenizer.decode(output_tokens[0], skip_special_tokens=True)

 iface = gr.Interface(fn=greet, inputs="text", outputs="text", title="PEFT Model for Big Brain")
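
The first hunk imports BitsAndBytesConfig, but the lines where it gets used fall outside this diff. A minimal sketch, assuming the usual pattern of loading a quantized base model and then attaching the PEFT adapter; the checkpoint and adapter IDs below are placeholders, not names from this repo:

# Hypothetical usage of the newly imported BitsAndBytesConfig; the real
# call site is outside this diff, and both model IDs are placeholders.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # quantize base weights to 4-bit
    bnb_4bit_compute_dtype=torch.float16,  # run matmuls in fp16
)
base = AutoModelForCausalLM.from_pretrained(
    "base-model-id",                       # placeholder base checkpoint
    quantization_config=bnb_config,
    device_map="auto",                     # place layers on GPU when available
)
model = PeftModel.from_pretrained(base, "peft-adapter-id")  # placeholder adapter
tokenizer = AutoTokenizer.from_pretrained("base-model-id")

load_in_8bit=True is the drop-in alternative if 4-bit quantization is too aggressive for the model.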
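
The second hunk caps generation at 15 new tokens, which keeps the Gradio demo responsive. The tokenizer line's comment promises to move tensors to the device, but no .to(device) call appears in the hunk; a hedged sketch of how greet reads with the cap applied and the batch actually moved (model, tokenizer, and device are defined in parts of app.py outside this diff):

def greet(text):
    with torch.no_grad():  # inference only, no gradients tracked
        # Actually move the encoded batch to the model's device
        batch = tokenizer(f'"{text}" ->:', return_tensors='pt').to(device)
        with torch.cuda.amp.autocast():  # mixed precision on CUDA
            output_tokens = model.generate(**batch, max_new_tokens=15)  # stop after 15 new tokens
    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)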