import gradio as gr
import torch
from transformers import AutoTokenizer, GenerationConfig
from peft import AutoPeftModelForCausalLM
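
# Load the tokenizer and the PEFT fine-tuned Zephyr model in half precision on the GPU.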
tokenizer = AutoTokenizer.from_pretrained("izh97/zephyr-beta-climate-change-assistant")
model = AutoPeftModelForCausalLM.from_pretrained(
    "izh97/zephyr-beta-climate-change-assistant",
    low_cpu_mem_usage=True,
    return_dict=True,
    torch_dtype=torch.float16,
    device_map="cuda",
)
model = model.to('cuda:0')
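
# Conservative sampling: low temperature and a small top-k keep answers focused.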
generation_config = GenerationConfig(
    do_sample=True,
    top_k=10,
    temperature=0.2,
    max_new_tokens=256,
    pad_token_id=tokenizer.unk_token_id,
)

def ask(text):
    # Wrap the user question in the chat format the fine-tuned model expects.
    messages = [
        {
            "role": "user",
            "content": str(text),
        },
    ]
    inputs = tokenizer.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
    ).to("cuda")
    inputs_length = inputs.shape[1]
    outputs = model.generate(
        inputs,
        generation_config=generation_config,
        return_dict_in_generate=True,
    )
    # Drop the prompt tokens and decode only the newly generated answer.
    outputs = outputs.sequences[0, inputs_length:]
    return tokenizer.decode(outputs, skip_special_tokens=True)
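
# Expose the assistant as a simple text-in / text-out Gradio app.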
iface = gr.Interface(fn=ask, inputs="text", outputs="text")
iface.launch()