from huggingface_hub import InferenceClient
import gradio as gr
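
# Inference API client; text generation runs remotely on the hosted model.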
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
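

# Build Mistral's instruction prompt from the chat history:
#   <s>[INST] user [/INST] assistant</s>[INST] user [/INST]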
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        # Close each completed exchange with </s> so the model sees finished turns.
        prompt += f"[INST] {user_prompt} [/INST] {bot_response}</s>"
    # Leave the new turn open after [/INST]; ending the prompt with </s>
    # would signal end-of-sequence before the reply is generated.
    prompt += f"[INST] {message} [/INST]"
    return prompt
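

# Stream a completion from the Inference API, yielding the partial reply so
# Gradio's ChatInterface can render it incrementally.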
def generate(
    prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
):
    # The API rejects temperature=0 when sampling, so clamp to a small positive value.
    temperature = max(float(temperature), 1e-2)
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed for reproducible outputs
    )

    formatted_prompt = format_prompt(prompt, history)

    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""
    for response in stream:
        output += response.token.text
        # Yield the accumulated text so the UI updates token by token.
        yield output
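

# Extra controls shown under the chat box; ChatInterface passes their values
# to generate() after (message, history), in this order.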
additional_inputs = [
    gr.Slider(
        label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05,
        interactive=True, info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens", value=256, minimum=0, maximum=1024, step=64,
        interactive=True, info="The maximum number of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1.0, step=0.05,
        interactive=True, info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05,
        interactive=True, info="Penalize repeated tokens",
    ),
]
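

# Assemble the chat UI and start the Gradio server.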
gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    additional_inputs=additional_inputs,
    title="Mistral 7B v0.3 ChatGPT Clone",
    description="A ChatGPT clone using the Mistral 7B model. Adjust the parameters to fine-tune generation.",
).launch(show_api=False)