import gradio as gr

# Launch a Gradio demo backed by the hosted model. Note that launch() blocks,
# so the local transformers pipeline below is a standalone alternative; it
# will not run while the demo is up.
gr.Interface.load("models/codefuse-ai/CodeFuse-CodeLlama-34B").launch()
import torch
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
)

model_name_or_path = "codefuse-ai/CodeFuse-CodeLlama-34B"

# Left padding and an explicit pad token are required for generation,
# especially when batching prompts of different lengths.
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True, use_fast=False, legacy=False)
tokenizer.padding_side = "left"
tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids("<unk>")
tokenizer.eos_token_id = tokenizer.convert_tokens_to_ids("</s>")
# If GPU memory is insufficient, consider loading the model quantized
# (see the 4-bit sketch after this block).
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    trust_remote_code=True,
    load_in_4bit=False,
    device_map="auto",
    torch_dtype=torch.bfloat16,
)
model.eval()
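
# A minimal sketch of the quantized loading mentioned above, assuming the
# bitsandbytes package is installed; BitsAndBytesConfig and its fields are
# standard transformers arguments, not part of the original script.
# from transformers import BitsAndBytesConfig
# bnb_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_compute_dtype=torch.bfloat16,
# )
# model = AutoModelForCausalLM.from_pretrained(
#     model_name_or_path,
#     trust_remote_code=True,
#     quantization_config=bnb_config,
#     device_map="auto",
# )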
# CodeFuse chat format: wrap the user query in role tags and end with an
# open bot tag for the model to complete.
HUMAN_ROLE_START_TAG = "<|role_start|>human<|role_end|>"
BOT_ROLE_START_TAG = "<|role_start|>bot<|role_end|>"

text = f"{HUMAN_ROLE_START_TAG}Write a C++ function that computes the n-th Fibonacci number.{BOT_ROLE_START_TAG}"
inputs = tokenizer(text, return_tensors='pt', padding=True, add_special_tokens=False).to("cuda")
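
# A sketch of one plausible multi-turn layout under the same role tags,
# assuming each bot reply is closed with "</s>" before the next human turn;
# check the model card for the exact convention. The example turns here are
# illustrative, not from the original script.
# history = [("Write a C++ function that computes the n-th Fibonacci number.",
#             "int fib(int n) { /* ... */ }")]
# text = "".join(
#     f"{HUMAN_ROLE_START_TAG}{q}{BOT_ROLE_START_TAG}{a}</s>" for q, a in history
# ) + f"{HUMAN_ROLE_START_TAG}Now add memoization.{BOT_ROLE_START_TAG}"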
outputs = model.generate(
    inputs=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    max_new_tokens=512,
    top_p=0.95,
    temperature=0.1,
    do_sample=True,
    eos_token_id=tokenizer.eos_token_id,
    pad_token_id=tokenizer.pad_token_id,
)
# Decode only the newly generated tokens, skipping the echoed prompt.
gen_text = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True)
print(gen_text[0])
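
# The left padding and pad token configured on the tokenizer above are what
# make batched generation correct: a minimal sketch, with the second prompt
# purely illustrative.
# prompts = [
#     text,
#     f"{HUMAN_ROLE_START_TAG}Write a C++ binary search over a sorted vector.{BOT_ROLE_START_TAG}",
# ]
# batch = tokenizer(prompts, return_tensors="pt", padding=True,
#                   add_special_tokens=False).to("cuda")
# batch_out = model.generate(
#     inputs=batch["input_ids"],
#     attention_mask=batch["attention_mask"],
#     max_new_tokens=512,
#     top_p=0.95,
#     temperature=0.1,
#     do_sample=True,
#     eos_token_id=tokenizer.eos_token_id,
#     pad_token_id=tokenizer.pad_token_id,
# )
# print(tokenizer.batch_decode(batch_out[:, batch["input_ids"].shape[1]:],
#                              skip_special_tokens=True))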