mhdaw committed
Commit b534a82 · verified · 1 Parent(s): c59ce0e

Update app.py

Files changed (1)
  1. app.py +135 -46
app.py CHANGED
@@ -1,64 +1,153 @@
  import gradio as gr
- from huggingface_hub import InferenceClient

  """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
  """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]

-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})

-     messages.append({"role": "user", "content": message})

-     response = ""

-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
          temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content

-         response += token
-         yield response


- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )


  if __name__ == "__main__":
      demo.launch()
 
  import gradio as gr
+ import os
+ import spaces
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+ from threading import Thread

+
+ TITLE = '''
+ <h1 style="text-align: center;">Google Gemma2 2B it <a href="https://huggingface.co/spaces/ysharma/Chat_with_Meta_llama3_1_8b?duplicate=true" id="duplicate-button"><button style="color:white">Duplicate this Space</button></a></h1>
+ '''
+
+ DESCRIPTION = '''
+ <div>
+ </div>
+ '''
+
+ LICENSE = """
+ <p/>
+ ---
+ Built with Gemma
  """
+
+ PLACEHOLDER = """
+ <div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
+    <img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/c21ff9c8e7ecb2f7d957a72f2ef03c610ac7bbc4/Meta_lockup_positive%20primary_RGB_small.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; ">
+    <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Meta llama3.1</h1>
+    <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything...</p>
+ </div>
  """

+ css = """
+ h1 {
+   text-align: center;
+   display: block;
+   display: flex;
+   align-items: center;
+   justify-content: center;
+ }

+ #duplicate-button {
+   margin-left: 10px;
+   color: white;
+   background: #1565c0;
+   border-radius: 100vh;
+   font-size: 1rem;
+   padding: 3px 5px;
+ }
+ """

+ model_id = "google/gemma-2-2b-it"

+ # Load the tokenizer and model
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
+ terminators = [
+     tokenizer.eos_token_id,
+     tokenizer.convert_tokens_to_ids("<|eot_id|>")
+ ]

+ MAX_INPUT_TOKEN_LENGTH = 1024

+ # Gradio inference function
+ @spaces.GPU(duration=120)
+ def chat_llama3_1_8b(message: str,
+                      history: list,
+                      temperature: float,
+                      max_new_tokens: int
+                      ) -> str:
+     """
+     Generate a streaming response using the llama3-8b model.
+     Args:
+         message (str): The input message.
+         history (list): The conversation history used by ChatInterface.
+         temperature (float): The temperature for generating the response.
+         max_new_tokens (int): The maximum number of new tokens to generate.
+     Returns:
+         str: The generated response.
+     """
+     conversation = []
+     for user, assistant in history:
+         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+     conversation.append({"role": "user", "content": message})
+
+     input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
+     if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+         input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
+         gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
+     input_ids = input_ids.to(model.device)
+
+     streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+
+     generate_kwargs = dict(
+         input_ids=input_ids,
+         streamer=streamer,
+         max_new_tokens=max_new_tokens,
+         do_sample=temperature != 0,  # This will enforce greedy generation (do_sample=False) when the temperature is passed 0, avoiding the crash.
          temperature=temperature,
+         eos_token_id=terminators,
+     )

+     t = Thread(target=model.generate, kwargs=generate_kwargs)
+     t.start()

+     outputs = []
+     for text in streamer:
+         outputs.append(text)
+         yield "".join(outputs)
+

+ # Gradio block
+ chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')

+ with gr.Blocks(fill_height=True, css=css) as demo:
+
+     gr.Markdown(TITLE)
+     gr.Markdown(DESCRIPTION)
+     #gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
+     gr.ChatInterface(
+         fn=chat_llama3_1_8b,
+         chatbot=chatbot,
+         fill_height=True,
+         examples_per_page=3,
+         additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
+         additional_inputs=[
+             gr.Slider(minimum=0,
+                       maximum=1,
+                       step=0.1,
+                       value=0.95,
+                       label="Temperature",
+                       render=False),
+             gr.Slider(minimum=128,
+                       maximum=4096,
+                       step=1,
+                       value=512,
+                       label="Max new tokens",
+                       render=False),
+         ],
+         examples=[
+             ["What is the best way to open a can of worms?"],
+             ["The odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1. "],
+             ['How to setup a human base on Mars? Give short answer.'],
+             ['Explain theory of relativity to me like I’m 8 years old.'],
+             ['What is 9,000 * 9,000?'],
+             ['Write a pun-filled happy birthday message to my friend Alex.'],
+             ['Justify why a penguin might make a good king of the jungle.']
+         ],
+         cache_examples=False,
+     )
+
+     gr.Markdown(LICENSE)
+
  if __name__ == "__main__":
      demo.launch()
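
For readers skimming the diff, here is a minimal, self-contained sketch (not part of the commit) of the streaming technique the new app.py relies on: model.generate() runs in a background Thread while a TextIteratorStreamer is consumed incrementally. The prompt text, max_new_tokens value, and the omission of the Gradio/ZeroGPU wiring are illustrative simplifications, not taken from the Space.

# Sketch only: threaded token streaming with transformers, assuming access to the gated Gemma repo.
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "google/gemma-2-2b-it"  # same model id as the Space
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

# Build a chat-formatted prompt (placeholder message for illustration).
conversation = [{"role": "user", "content": "Say hello in one sentence."}]
input_ids = tokenizer.apply_chat_template(
    conversation, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# skip_prompt=True keeps the echoed prompt out of the streamed text.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() blocks, so it runs in a worker thread while the main thread
# iterates over the streamer and prints chunks as they arrive.
thread = Thread(
    target=model.generate,
    kwargs=dict(input_ids=input_ids, streamer=streamer, max_new_tokens=64),
)
thread.start()
for chunk in streamer:
    print(chunk, end="", flush=True)
thread.join()

In app.py the same loop yields the accumulated text back to gr.ChatInterface instead of printing it, which is what produces the token-by-token updates in the UI.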