import gradio as gr
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_id = "Phearion/bigbrain-v0.0.1"
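
# Load the base weights in 4-bit NF4 with double quantization and bfloat16
# compute, so the model fits in modest GPU memory (4-bit loading via
# bitsandbytes requires a CUDA device).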
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
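
# Resolve the base checkpoint from the adapter's PEFT config, load it with
# the quantization settings above, then attach the LoRA adapter on top.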
config = PeftConfig.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    quantization_config=bnb_config,
)
model = PeftModel.from_pretrained(model, model_id)
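
# Gradio handler: wraps the user's question in the Alpaca-style prompt the
# adapter was trained on and returns the model's completion.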
def greet(text):
    with torch.no_grad():
        input_text = (
            "<s>### Instruction:\nYou are a data analyst tasked with helping "
            "students find resources; respond in JSON format.\n\n"
            f"### Input:\n{text}\n\n"
            "### Response:\n"
        )

        batch = tokenizer(input_text, return_tensors="pt", add_special_tokens=True).to(device)
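
        # Generate under CUDA mixed precision; sampling keeps answers varied.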
        with torch.cuda.amp.autocast():
            output_tokens = model.generate(
                **batch,
                max_new_tokens=25,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
            )
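
        # Decode only the newly generated tokens, slicing off the prompt.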
        response = tokenizer.decode(
            output_tokens[0][len(batch["input_ids"][0]):],
            skip_special_tokens=True,
        )

        # Keep only the text before any repeated response marker.
        response_parts = response.split("\n### Response:")
        return response_parts[0]
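
# Optional quick smoke test before launching the UI; the sample question below
# is a hypothetical example, not from the project:
# print(greet("Where can I find linear algebra practice problems?"))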

iface = gr.Interface(fn=greet, inputs="text", outputs="text", title="PEFT Model for Big Brain")
iface.launch()