import gc
import os

import gradio as gr
import sentencepiece  # noqa: F401  (runtime dependency of the Llama tokenizer)
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

title = "# Welcome to 🙋🏻‍♂️Tonic's🌷Tulu Chat!"
description = """[allenai/tulu-2-dpo-7b](https://huggingface.co/allenai/tulu-2-dpo-7b) and the larger Tulu-2 models are instruction-tuned Llama fine-tunes that use the [mistralai/Mistral-7B](https://huggingface.co/mistralai/Mistral-7B-v0.1) recipe. You can use [allenai/tulu-2-dpo-13b](https://huggingface.co/allenai/tulu-2-dpo-13b) here via the API by scrolling down and clicking "Use via API", or privately by [cloning this space on Hugging Face](https://huggingface.co/spaces/Tonic1/TuluDemo?duplicate=true). See also the large model here: [allenai/tulu-2-dpo-70b](https://huggingface.co/allenai/tulu-2-dpo-70b). [Join my active builders' server on Discord](https://discord.gg/VqTxc76K3u) and let's build together! [Add this Space as a Discord bot to your server by clicking this link](https://discord.com/oauth2/authorize?client_id=1176628808212828231&scope=bot+applications.commands&permissions=326417525824). Big thanks to the 🤗Hugging Face organisation for the 🫂Community Grant."""

# Cap the CUDA allocator's split size to reduce memory fragmentation.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:50"

model_name = "allenai/tulu-2-dpo-13b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.bfloat16, device_map="auto"
)
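
# If the bfloat16 13B weights don't fit on the available GPU, 8-bit loading is
# a possible fallback -- a sketch, assuming the optional bitsandbytes package
# is installed (not used by this Space as written):
#   model = AutoModelForCausalLM.from_pretrained(
#       model_name, load_in_8bit=True, device_map="auto"
#   )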

# Llama tokenizers ship without a pad token; fall back to EOS so generation
# with an attention mask behaves (the BOS/EOS ids are already correct).
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
    model.config.pad_token_id = tokenizer.pad_token_id

class TuluChatBot:
    def __init__(self, model, tokenizer, system_message="You are 🌷Tulu, an AI language model created by Tonic-AI. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."):
        self.model = model
        self.tokenizer = tokenizer
        self.system_message = system_message

    def set_system_message(self, new_system_message):
        self.system_message = new_system_message

    def format_prompt(self, user_message):
        # Tulu 2 prompt format: the system turn belongs under <|system|>, not
        # <|assistant|>, and every role tag is followed by a newline.
        return (
            f"<|system|>\n{self.system_message}\n"
            f"<|user|>\n{user_message}\n"
            "<|assistant|>\n"
        )
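
    # For illustration, format_prompt("Hello!") with the default system message
    # renders as:
    #   <|system|>
    #   You are 🌷Tulu, ... positive behavior.
    #   <|user|>
    #   Hello!
    #   <|assistant|>
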
    @torch.no_grad()
    def Tulu(self, user_message, temperature, max_new_tokens, top_p, repetition_penalty, do_sample):
        prompt = self.format_prompt(user_message)
        inputs = self.tokenizer(prompt, return_tensors="pt", add_special_tokens=True)
        input_ids = inputs["input_ids"].to(self.model.device)
        attention_mask = inputs["attention_mask"].to(self.model.device)
        try:
            output_ids = self.model.generate(
                input_ids,
                attention_mask=attention_mask,
                max_new_tokens=max_new_tokens,
                temperature=temperature,
                top_p=top_p,
                repetition_penalty=repetition_penalty,
                do_sample=do_sample,
                pad_token_id=self.tokenizer.pad_token_id,
            )
            # Decode only the newly generated tokens; splitting the full
            # transcript on "<|assistant|>\n" breaks if the model echoes
            # the role tag.
            new_tokens = output_ids[0][input_ids.shape[1]:]
            return self.tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
        finally:
            # Free this request's tensors so memory doesn't accumulate
            # between Gradio calls.
            del input_ids, attention_mask
            gc.collect()
            torch.cuda.empty_cache()
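
# Note: with do_sample=False, transformers' generate() falls back to greedy
# decoding and ignores temperature/top_p. A deterministic call (a sketch,
# using the Tulu_bot instance constructed below) would look like:
#   Tulu_bot.Tulu("Summarize DPO in one line.", temperature=1.0,
#                 max_new_tokens=64, top_p=1.0, repetition_penalty=1.1,
#                 do_sample=False)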

# Initialize TuluChatBot once at module load.
Tulu_bot = TuluChatBot(model, tokenizer)

# Gradio interface function
def gradio_Tulu(user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty, do_sample):
    Tulu_bot.set_system_message(system_message)
    return Tulu_bot.Tulu(user_message, temperature, max_new_tokens, top_p, repetition_penalty, do_sample)

with gr.Blocks(theme="ParityError/Anime") as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    with gr.Row():
        system_message = gr.Textbox(label="Optional 🌷Tulu Assistant Message", lines=2)
        user_message = gr.Textbox(label="Your Message", lines=3)
    with gr.Row():
        do_sample = gr.Checkbox(label="Enable sampling (uncheck for greedy decoding)", value=True)
    # Accordion's `open` argument takes a boolean, not a callable; start
    # expanded since sampling is on by default.
    with gr.Accordion("Advanced Settings", open=True):
        with gr.Row():
            max_new_tokens = gr.Slider(label="Max new tokens", value=250, minimum=20, maximum=450, step=1)
            temperature = gr.Slider(label="Temperature", value=0.3, minimum=0.1, maximum=1.0, step=0.1)
            top_p = gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.01, maximum=0.99, step=0.05)
            # Penalties below 1.0 *encourage* repetition, so the range starts at 1.0.
            repetition_penalty = gr.Slider(label="Repetition penalty", value=1.1, minimum=1.0, maximum=2.0, step=0.05)
    submit_button = gr.Button("Submit")
    output_text = gr.Textbox(label="🌷Tulu Response")

    submit_button.click(
        gradio_Tulu,
        inputs=[user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty, do_sample],
        outputs=output_text,
    )

demo.launch()
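
# Calling the deployed Space programmatically -- a sketch using gradio_client.
# The Space id comes from the description above; the api_name is an assumption
# (check the Space's "Use via API" page for the exact route):
#   from gradio_client import Client
#   client = Client("Tonic1/TuluDemo")
#   reply = client.predict(
#       "Hello 🌷Tulu!",    # user_message
#       "",                  # system_message
#       250, 0.3, 0.9, 1.1,  # max_new_tokens, temperature, top_p, repetition_penalty
#       True,                # do_sample
#       api_name="/predict",
#   )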