import gradio as gr
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
import torch

# Device configuration (prioritize GPU if available)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_id = "phearion/bigbrain-v0.0.1"

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# Load the PEFT config, tokenizer, and 4-bit quantized base model
config = PeftConfig.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, quantization_config=bnb_config)

# Wrap the base model with the LoRA adapter weights
model = PeftModel.from_pretrained(model, model_id)

def greet(text):
    with torch.no_grad():  # Disable gradient calculation for inference
        # Tokenize the prompt and move the tensors to the same device as the model
        batch = tokenizer(f'"{text}" ->:', return_tensors='pt').to(device)
        with torch.autocast(device_type=device.type):  # Use mixed precision where supported
            output_tokens = model.generate(**batch, max_new_tokens=15)
    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)

iface = gr.Interface(fn=greet, inputs="text", outputs="text", title="PEFT Model for Big Brain")
iface.launch()  # Launch the app (hosted automatically when run on a Gradio Space)
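
# A minimal sketch of querying the running app from another process, assuming
# the `gradio_client` package is installed and that the Interface exposes the
# default "/predict" endpoint (both are assumptions, not part of this file):
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860")  # or the Space URL once deployed
#   print(client.predict("How do I reset my password?", api_name="/predict"))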