import gradio as gr
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Run on the GPU when one is available, otherwise fall back to the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_id = "phearion/bigbrain-v0.0.1"

# Load the PEFT adapter config, then the base model and tokenizer it was fine-tuned from
config = PeftConfig.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)

# Attach the adapter weights and move the model to the same device the inputs will use
model = PeftModel.from_pretrained(model, model_id)
model.to(device)

def greet(text):
    with torch.no_grad():
        # Tokenize the prompt and move it to the model's device
        batch = tokenizer(text, return_tensors='pt').to(device)
        # Mixed precision only applies on CUDA; keep it disabled on CPU
        with torch.autocast(device_type=device.type, enabled=device.type == "cuda"):
            output_tokens = model.generate(**batch, max_new_tokens=15)
    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)

iface = gr.Interface(fn=greet, inputs="text", outputs="text", title="PEFT Model for Big Brain")
iface.launch(share=True)