Commit 1ced004
Parent(s): e4b011f

Fix: remove random sampling from bot response (#3)
- Fix: remove random sampling from bot response (d35ece6b014a24149efb046d716c40b35cca0ed8)
Co-authored-by: Anand <[email protected]>
app.py CHANGED
@@ -1,6 +1,5 @@
 import requests
 import streamlit as st
-import random
 import time
 
 st.title("Rasa Chatbot Interface")
@@ -27,11 +26,13 @@ if user_input := st.chat_input("What is up?"):
     response = requests.post('https://omdenalc-omdena-ng-lagos-chatbot-model.hf.space/webhooks/rest/webhook', json=payload)
     bot_reply = response.json()
 
+    # Extract assistant response
+    assistant_response = bot_reply[0]["text"]
+
     # Display assistant response in chat message container
     with st.chat_message("assistant"):
         message_placeholder = st.empty()
         full_response = ""
-        assistant_response = random.choice(bot_reply)["text"]
         # Simulate stream of response with milliseconds delay
         for chunk in assistant_response.split():
             full_response += chunk + " "
@@ -39,5 +40,6 @@ if user_input := st.chat_input("What is up?"):
             # Add a blinking cursor to simulate typing
             message_placeholder.markdown(full_response + "▌")
         message_placeholder.markdown(full_response)
+
     # Add assistant response to chat history
     st.session_state.messages.append({"role": "assistant", "content": full_response})
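
Note (a sketch, not part of the commit): the Rasa REST webhook typically returns a JSON list of message objects, so bot_reply[0]["text"] assumes the list is non-empty and that the first message carries a "text" key. A more defensive extraction, assuming the same endpoint and the standard {"sender": ..., "message": ...} payload (the payload construction is not shown in these hunks), could look like the hypothetical helper below:

import requests

RASA_WEBHOOK = "https://omdenalc-omdena-ng-lagos-chatbot-model.hf.space/webhooks/rest/webhook"

def get_bot_reply(sender_id: str, message: str) -> str:
    """Send a user message to the Rasa REST webhook and return the first text reply.

    Assumes the webhook returns a JSON list of message dicts, each of which
    may contain a "text" key (the shape relied on by app.py above).
    """
    payload = {"sender": sender_id, "message": message}
    response = requests.post(RASA_WEBHOOK, json=payload, timeout=30)
    response.raise_for_status()
    bot_reply = response.json()

    # Guard against an empty list or non-text messages (e.g. images)
    # instead of indexing bot_reply[0] directly.
    for item in bot_reply:
        if "text" in item:
            return item["text"]
    return "Sorry, I didn't get a response from the bot."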