Update app.py
app.py
CHANGED
@@ -2,63 +2,64 @@ import streamlit as st
 from gradio_client import Client
 
 # Constants
+TITLE = "Llama2 70B Chatbot"
+DESCRIPTION = """
+This Space demonstrates model [Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) by Meta,
+a Llama 2 model with 70B parameters fine-tuned for chat instructions.
+"""
 
 # Initialize client
+client = Client("https://ysharma-explore-llamav2-with-tgi.hf.space/")
+
 
 with st.sidebar:
+    system_promptSide = st.text_input("Optional system prompt:")
+    temperatureSide = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.9, step=0.05)
+    max_new_tokensSide = st.slider("Max new tokens", min_value=0.0, max_value=4096.0, value=4096.0, step=64.0)
+    ToppSide = st.slider("Top-p (nucleus sampling)", min_value=0.0, max_value=1.0, value=0.6, step=0.05)
+    RepetitionpenaltySide = st.slider("Repetition penalty", min_value=0.0, max_value=2.0, value=1.2, step=0.05)
+
 
+
 # Prediction function
+def predict(message, system_prompt, temperature, max_new_tokens, Topp, Repetitionpenalty):
+    with st.status("Requesting LLama-2"):
+        st.write("Requesting API")
+        response = client.predict(
+            message,             # str in 'Message' Textbox component
+            system_prompt,       # str in 'Optional system prompt' Textbox component
+            temperature,         # int | float (numeric value between 0.0 and 1.0)
+            max_new_tokens,      # int | float (numeric value between 0 and 4096)
+            Topp,                # int | float (numeric value between 0.0 and 1)
+            Repetitionpenalty,   # int | float (numeric value between 1.0 and 2.0)
+            api_name="/chat"
         )
         st.write("Done")
         return response
 
 # Streamlit UI
+st.title(TITLE)
+st.write(DESCRIPTION)
+
 
+if "messages" not in st.session_state:
+    st.session_state.messages = []
 
 # Display chat messages from history on app rerun
+for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
 
 # React to user input
+if prompt := st.chat_input("Ask LLama-2-70b anything..."):
     # Display user message in chat message container
+    st.chat_message("human", avatar="🧑‍💻").markdown(prompt)
     # Add user message to chat history
+    st.session_state.messages.append({"role": "human", "content": prompt})
 
+    response = predict(prompt, system_promptSide, temperatureSide, max_new_tokensSide, ToppSide, RepetitionpenaltySide)
     # Display assistant response in chat message container
     with st.chat_message("assistant", avatar='🦙'):
         st.markdown(response)
     # Add assistant response to chat history
+    st.session_state.messages.append({"role": "assistant", "content": response})
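
For quick testing outside Streamlit, the same `/chat` endpoint that `predict` wraps can be called directly with `gradio_client`. The sketch below is a minimal standalone call, not part of app.py: it assumes the public Space at https://ysharma-explore-llamav2-with-tgi.hf.space/ is reachable and still expects the six positional inputs shown in the diff; the prompt text and sampling values are only illustrative.

```python
# Standalone sketch: query the Llama-2-70B Space's /chat endpoint directly.
# Assumes the Space is up and its API signature matches the call in app.py.
from gradio_client import Client

client = Client("https://ysharma-explore-llamav2-with-tgi.hf.space/")

reply = client.predict(
    "What is the capital of France?",  # message
    "",                                # optional system prompt
    0.9,                               # temperature (0.0 - 1.0)
    256,                               # max new tokens (0 - 4096)
    0.6,                               # top-p (nucleus sampling)
    1.2,                               # repetition penalty
    api_name="/chat",
)
print(reply)
```

If the Space's API changes, `client.view_api()` lists the currently exposed endpoints and the parameters they expect.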