import streamlit as st
from gradio_client import Client

APP_TITLE = "Llama2 70B Chatbot"
APP_DESCRIPTION = """
This application demonstrates the Llama-2-70b chatbot model by Meta,
fine-tuned for chat instructions. You can interact with the model and ask questions.
"""
llama2_client = Client("https://ysharma-explore-llamav2-with-tgi.hf.space/")
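
# Sidebar controls for the system prompt and generation parameters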
with st.sidebar:
    system_prompt_input = st.text_input("Optional system prompt:")
    temperature_slider = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.9, step=0.05)
    max_new_tokens_slider = st.slider("Max new tokens", min_value=64, max_value=4096, value=4096, step=64)


def get_llama2_response(user_message, system_prompt, temperature, max_new_tokens, topp=0.6, repetition_penalty=1.2):
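    """Call the Space's /chat endpoint with the message and sampling settings, and return the reply."""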
    with st.status("Requesting Llama-2"):
        st.write("Requesting API...")
        response = llama2_client.predict(
            user_message,
            system_prompt,
            temperature,
            max_new_tokens,
            topp,
            repetition_penalty,
            api_name="/chat"
        )
        st.write("Done")
    return response


st.title(APP_TITLE)
st.write(APP_DESCRIPTION)
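
# Keep the conversation in session state so it survives Streamlit reruns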
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
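
# Replay the stored messages on every rerun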
for message in st.session_state.chat_history:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
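
# Handle a new message from the chat input box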
if user_input := st.chat_input("Ask Llama-2-70B anything..."):
    # Show the user's message and add it to the history
    st.chat_message("user", avatar="🧑‍💻").markdown(user_input)
    st.session_state.chat_history.append({"role": "user", "content": user_input})
    response = get_llama2_response(
        user_input,
        system_prompt_input,
        temperature_slider,
        max_new_tokens_slider
    )
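
    # Show the assistant's reply and add it to the history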
    with st.chat_message("assistant", avatar='🦙'):
        st.markdown(response)
    st.session_state.chat_history.append({"role": "assistant", "content": response})