llama test 2
app.py CHANGED
@@ -1,143 +1,59 @@
 import gradio as gr
-import os
 import spaces
[removed lines 4-36: content not preserved in this view]
-h1 {
-  text-align: center;
-  display: block;
-}
-#duplicate-button {
-  margin: auto;
-  color: white;
-  background: #1565c0;
-  border-radius: 100vh;
-}
-"""
-
-# Load the tokenizer and model
-tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
-model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", device_map="auto")  # to("cuda:0")
-terminators = [
-    tokenizer.eos_token_id,
-    tokenizer.convert_tokens_to_ids("<|eot_id|>")
-]
-
-@spaces.GPU(duration=120)
-def chat_llama3_8b(message: str,
-                   history: list,
-                   temperature: float,
-                   max_new_tokens: int
-                   ) -> str:
-    """
-    Generate a streaming response using the llama3-8b model.
-    Args:
-        message (str): The input message.
-        history (list): The conversation history used by ChatInterface.
-        temperature (float): The temperature for generating the response.
-        max_new_tokens (int): The maximum number of new tokens to generate.
-    Returns:
-        str: The generated response.
-    """
-    conversation = []
-    for user, assistant in history:
-        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
-    conversation.append({"role": "user", "content": message})
-
-    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
-
-    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
-
-    generate_kwargs = dict(
-        input_ids=input_ids,
-        streamer=streamer,
+import torch
+
+import transformers
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model_name = "meta-llama/Meta-Llama-3-8B-Instruct"
+
+pipeline = transformers.pipeline(
+    "text-generation",
+    model=model_name,
+    model_kwargs={"torch_dtype": torch.bfloat16},
+    device="cuda",
+)
+
+@spaces.GPU
+def chat_function(message, history, system_prompt, max_new_tokens, temperature):
+    messages = [
+        {"role": "system", "content": system_prompt},
+        {"role": "user", "content": message},
+    ]
+    prompt = pipeline.tokenizer.apply_chat_template(
+        messages,
+        tokenize=False,
+        add_generation_prompt=True
+    )
+    terminators = [
+        pipeline.tokenizer.eos_token_id,
+        pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
+    ]
+    temp = temperature + 0.1
+    outputs = pipeline(
+        prompt,
         max_new_tokens=max_new_tokens,
-        do_sample=True,
-        temperature=temperature,
         eos_token_id=terminators,
+        do_sample=True,
+        temperature=temp,
+        top_p=0.9,
     )
[removed lines 90-108: content not preserved in this view, apart from a truncated "for"]
-    gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
-    gr.ChatInterface(
-        fn=chat_llama3_8b,
-        chatbot=chatbot,
-        fill_height=True,
-        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
-        additional_inputs=[
-            gr.Slider(minimum=0,
-                      maximum=1,
-                      step=0.1,
-                      value=0.95,
-                      label="Temperature",
-                      render=False),
-            gr.Slider(minimum=128,
-                      maximum=4096,
-                      step=1,
-                      value=512,
-                      label="Max new tokens",
-                      render=False),
-        ],
-        examples=[
-            ['How to set up a human base on Mars? Give a short answer.'],
-            ['Explain the theory of relativity to me like I’m 8 years old.'],
-            ['What is 9,000 * 9,000?'],
-            ['Write a pun-filled happy birthday message to my friend Alex.'],
-            ['Justify why a penguin might make a good king of the jungle.']
-        ],
-        cache_examples=False,
-    )
-
-    gr.Markdown(LICENSE)
-
-if __name__ == "__main__":
-    demo.launch()
+    return outputs[0]["generated_text"][len(prompt):]
+
+gr.ChatInterface(
+    chat_function,
+    chatbot=gr.Chatbot(height=400),
+    textbox=gr.Textbox(placeholder="Enter message here", container=False, scale=7),
+    title="LLAMA 3 8B Chat",
+    description="""
+    This space is dedicated to chatting with Meta's latest LLM, Llama 3 8B Instruct. Find the model here: https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct
+    Feel free to play with the customization options in "Additional Inputs".
+    """,
+    theme="soft",
+    additional_inputs=[
+        gr.Textbox("You are a helpful AI.", label="System Prompt"),
+        gr.Slider(512, 4096, label="Max New Tokens"),
+        gr.Slider(0, 1, label="Temperature")
+    ]
+).launch()
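
A note on the new handler's temp = temperature + 0.1 line: the Temperature slider is declared as gr.Slider(0, 1, label="Temperature") with no explicit default, so it starts at its minimum of 0, and transformers rejects a non-positive temperature when do_sample=True. The offset keeps sampling valid, but it also silently shifts every user-chosen value up by 0.1. A clamp would preserve values already above the floor; the sketch below is a hypothetical alternative, not what the commit does:

def safe_temperature(temperature, floor=0.1):
    # Hypothetical guard (not in this commit): keep the sampling temperature
    # strictly positive without shifting user-chosen values above the floor.
    return max(temperature, floor)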
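
Note also that chat_function accepts a history argument but never reads it, so every turn is answered from the system prompt and the latest message alone, whereas the removed chat_llama3_8b folded the running conversation back into the prompt. A minimal sketch of how the ChatInterface history (a list of [user, assistant] pairs) could be threaded back in; build_messages is a hypothetical helper, not part of this commit:

def build_messages(message, history, system_prompt):
    # Hypothetical helper (not in this commit): rebuild the full conversation
    # in chat-template format, the way the removed chat_llama3_8b built its
    # "conversation" list, before calling apply_chat_template.
    messages = [{"role": "system", "content": system_prompt}]
    for user, assistant in history:
        messages.append({"role": "user", "content": user})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})
    return messages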