import gradio as gr
import os
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread
from typing import Generator
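
# The Hugging Face token is read from the environment (assumed to be set as a
# Space secret) and passed to from_pretrained below for gated model downloads.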
HF_TOKEN = os.environ.get("HF_TOKEN", None)

DESCRIPTION = """
<div>
<h1 style="text-align: center;">SPUM Table Extraction</h1>
<p>This Space demonstrates the instruction-tuned model <a href="https://huggingface.co/khulaifi95/Llama-3.1-8B-Reason-Blend-888k"><b>Llama-3.1-8B-Reason-Blend-888k</b></a>, a reasoning-focused fine-tune built on Meta's Llama 3.1 8B. Feel free to play with it, or duplicate the Space to run it privately!</p>
</div>
"""

PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
<img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/8e75e61cc9bab22b7ce3dec85ab0e6db1da5d107/Meta_lockup_positive%20primary_RGB.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55;">
<h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Materials GPT</h1>
<p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything...</p>
</div>
"""

css = """
h1 {
    text-align: center;
    display: block;
}
#duplicate-button {
    margin: auto;
    color: white;
    background: #1565c0;
    border-radius: 100vh;
}
"""
model_id = "khulaifi95/Llama-3.1-8B-Reason-Blend-888k"
tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", token=HF_TOKEN)
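# Llama-3-style chat models mark the end of each turn with <|eot_id|>, so stop
# on it in addition to the tokenizer's default EOS token.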
terminators = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|eot_id|>")]


@spaces.GPU
def chat_llama3_8b(
    message: str, history: list, temperature: float, max_new_tokens: int
) -> Generator[str, None, None]:
    """
    Generate a streaming response using the llama3-8b model.

    Args:
        message (str): The input message.
        history (list): The conversation history used by ChatInterface.
        temperature (float): The sampling temperature for generating the response.
        max_new_tokens (int): The maximum number of new tokens to generate.

    Yields:
        str: The response generated so far, growing as new tokens arrive.
    """
    conversation = []
    for user, assistant in history:
        conversation.extend(
            [
                {"role": "user", "content": user},
                {"role": "assistant", "content": assistant},
            ]
        )
    conversation.append({"role": "user", "content": message})
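
    # add_generation_prompt=True appends the assistant header after the last
    # user turn so the model starts a fresh reply rather than continuing it.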
    input_ids = tokenizer.apply_chat_template(
        conversation, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
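
    # TextIteratorStreamer hands back decoded text chunks as generate()
    # produces them in the background thread; skip_prompt=True drops the
    # echoed input from the stream.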
    streamer = TextIteratorStreamer(
        tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
    )

    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        eos_token_id=terminators,
    )
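    # Sampling with temperature 0 is undefined, so switch to greedy decoding.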
    if temperature == 0:
        generate_kwargs["do_sample"] = False
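
    # Run generation on a background thread so this generator can yield
    # partial output while tokens are still being produced.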
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)


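# PLACEHOLDER is rendered (as HTML) in the empty chatbot until the first
# message arrives.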
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label="Gradio ChatInterface")

with gr.Blocks(fill_height=True, css=css) as demo:
    gr.Markdown(DESCRIPTION)
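    # additional_inputs are appended to the chat function's arguments after
    # (message, history), so the sliders below supply temperature and
    # max_new_tokens, in that order.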
    gr.ChatInterface(
        fn=chat_llama3_8b,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(
            label="⚙️ Parameters", open=False, render=False
        ),
        additional_inputs=[
            gr.Slider(
                minimum=0,
                maximum=1,
                step=0.1,
                value=0.95,
                label="Temperature",
                render=False,
            ),
            gr.Slider(
                minimum=128,
                maximum=4096,
                step=1,
                value=512,
                label="Max new tokens",
                render=False,
            ),
        ],
        examples=[
            ["The detonative temperature of this polypropylene is 2000°F."],
            ["The preparation method according to claim 1, characterized in that the SO2 accounts for 30 wt% and the Fe2O3 accounts for 70 wt%."],
        ],
        cache_examples=False,
    )


if __name__ == "__main__":
    demo.launch()