import gradio as gr
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Device configuration (prioritize GPU if available)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_id = "phearion/bigbrain-v0.0.1"

# Load the PEFT config, tokenizer, and base model
config = PeftConfig.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
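
# Optionally (an assumption, not part of the original file): on GPU, the base
# model can be loaded in half precision to roughly halve memory use, e.g.
#     AutoModelForCausalLM.from_pretrained(
#         config.base_model_name_or_path, torch_dtype=torch.float16
#     )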

# Apply the LoRA adapter weights on top of the base model
model = PeftModel.from_pretrained(model, model_id)
model.to(device)

def greet(text):
    with torch.no_grad():  # Disable gradient calculation for inference
        batch = tokenizer(text, return_tensors='pt').to(device)  # Move tensors to device
        # Use mixed precision only when running on CUDA; this is a no-op on CPU
        with torch.autocast(device_type=device.type, enabled=device.type == "cuda"):
            output_tokens = model.generate(**batch, max_new_tokens=15)
    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)

iface = gr.Interface(fn=greet, inputs="text", outputs="text", title="PEFT Model for Big Brain", live=True)
iface.launch(share=True)  # share=True creates a temporary public link (not needed when hosted on a Space)
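
# Quick local sanity check (hypothetical prompt; run before or instead of launching the UI):
#     print(greet("How do I reverse a list in Python?"))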