N.Achyuth Reddy
committed on
Commit
·
b24c977
1
Parent(s):
39b53a1
Update app.py
Browse files
app.py
CHANGED
@@ -4,8 +4,6 @@ from st_audiorec import st_audiorec
|
|
4 |
from gtts import gTTS
|
5 |
import os
|
6 |
|
7 |
-
|
8 |
-
|
9 |
# Constants
|
10 |
TITLE = "AgriTure"
|
11 |
DESCRIPTION = """
|
@@ -17,55 +15,46 @@ Hope this will be a Successful Project!!!
|
|
17 |
"""
|
18 |
|
19 |
# Initialize client
|
20 |
-
|
21 |
-
|
22 |
-
with st.sidebar:
|
23 |
-
system_promptSide = st.text_input("Optional system prompt:")
|
24 |
-
temperatureSide = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.9, step=0.05)
|
25 |
-
max_new_tokensSide = st.slider("Max new tokens", min_value=0.0, max_value=4096.0, value=4096.0, step=64.0)
|
26 |
-
ToppSide = st.slider("Top-p (nucleus sampling)", min_value=0.0, max_value=1.0, value=0.6, step=0.05)
|
27 |
-
RepetitionpenaltySide = st.slider("Repetition penalty", min_value=0.0, max_value=2.0, value=1.2, step=0.05)
|
28 |
-
|
29 |
whisper_client = Client("https://sanchit-gandhi-whisper-large-v2.hf.space/")
|
30 |
|
|
|
|
|
|
|
|
|
|
|
31 |
|
|
|
32 |
def transcribe(wav_path):
|
33 |
-
|
34 |
return whisper_client.predict(
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
)
|
39 |
|
40 |
# Prediction function
|
41 |
-
def predict(message, system_prompt='Your name is AgriaTure. You are developed by Achyuth.
|
42 |
with st.status("Starting client"):
|
43 |
client = Client("https://huggingface-projects-llama-2-7b-chat.hf.space/")
|
44 |
st.write("Requesting Audio Transcriber")
|
45 |
with st.status("Requesting AgriTure v1"):
|
46 |
st.write("Requesting API")
|
47 |
response = client.predict(
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
)
|
57 |
st.write("Done")
|
58 |
return response
|
59 |
|
60 |
-
|
61 |
# Streamlit UI
|
62 |
st.title(TITLE)
|
63 |
st.write(DESCRIPTION)
|
64 |
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
if "messages" not in st.session_state:
|
70 |
st.session_state.messages = []
|
71 |
|
@@ -77,36 +66,50 @@ for message in st.session_state.messages:
|
|
77 |
textinput = st.chat_input("Ask AgriTure anything...")
|
78 |
wav_audio_data = st_audiorec()
|
79 |
|
80 |
-
if wav_audio_data
|
81 |
with st.status("Transcribing audio..."):
|
82 |
# save audio
|
83 |
with open("audio.wav", "wb") as f:
|
84 |
f.write(wav_audio_data)
|
85 |
prompt = transcribe("audio.wav")
|
86 |
-
|
87 |
st.write("Transcribed Given Audio ✔")
|
88 |
-
|
89 |
-
st.chat_message("human",avatar
|
90 |
st.session_state.messages.append({"role": "human", "content": prompt})
|
91 |
|
92 |
# transcribe audio
|
93 |
-
response = predict(message=
|
94 |
|
95 |
with st.chat_message("assistant", avatar='🌿'):
|
96 |
st.markdown(response)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
97 |
# Add assistant response to chat history
|
98 |
st.session_state.messages.append({"role": "assistant", "content": response})
|
99 |
|
100 |
# React to user input
|
101 |
if prompt := textinput:
|
102 |
# Display user message in chat message container
|
103 |
-
st.chat_message("human",avatar
|
104 |
# Add user message to chat history
|
105 |
st.session_state.messages.append({"role": "human", "content": prompt})
|
106 |
|
107 |
-
response = predict(message=prompt)
|
|
|
|
|
|
|
|
|
108 |
# Display assistant response in chat message container
|
109 |
with st.chat_message("assistant", avatar='🌿'):
|
110 |
st.markdown(response)
|
|
|
|
|
|
|
|
|
111 |
# Add assistant response to chat history
|
112 |
st.session_state.messages.append({"role": "assistant", "content": response})
|
|
|
4 |
from gtts import gTTS
|
5 |
import os
|
6 |
|
|
|
|
|
7 |
# Constants
|
8 |
TITLE = "AgriTure"
|
9 |
DESCRIPTION = """
|
|
|
15 |
"""
|
16 |
|
17 |
# Initialize client
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
whisper_client = Client("https://sanchit-gandhi-whisper-large-v2.hf.space/")
|
19 |
|
20 |
+
# Convert assistant text into spoken audio via Google Text-to-Speech.
def text_to_speech(text, lang='en'):
    """Render *text* as speech in *lang* and save it as an MP3.

    The audio is always written to ``response.mp3`` in the working
    directory (each call overwrites the previous response).

    Returns:
        str: path of the saved audio file ("response.mp3").
    """
    out_path = "response.mp3"
    speech = gTTS(text=text, lang=lang, slow=False)
    speech.save(out_path)
    return out_path
|
25 |
|
26 |
+
# Function to transcribe audio through the hosted Whisper Space.
def transcribe(wav_path):
    """Send the WAV file at *wav_path* to the remote Whisper client
    and return the transcription it produces."""
    result = whisper_client.predict(wav_path, "transcribe", api_name="/predict")
    return result
|
33 |
|
34 |
# Prediction function: query the hosted Llama-2-7b chat Space and return its reply.
def predict(message, system_prompt='Your name is AgriaTure. You are developed by Achyuth. Your developer is a 14-year old boy, who is intrested on AI and Machine Learning. Dont reveal your developers age, until the user asks for your developers age. When the user asks you for information, never exclaim the words, and directly come to point and answer to the user. You need to provide short text-answers, meaningful answers and directly to the point. Never miscalculate the calculations. You need to check and verify the answer and then provide to user, the correct answer. You need to provide accurate and correct information. If the user conveys with you in any language, you also should convey in the same language. For example, the user said, "నమస్తే", you need to reply "నమస్తే! నేను మీ అగ్రిట్యూర్ని. ఈ రోజు మీకు సహాయం చేయడానికి నేను ఇక్కడ ఉన్నాను. ఈ రోజు విషయం ఏమిటి? ఏదైనా విషయంలో ఏమైనా సందేహాలు ఉన్నాయా?". You need to definitely reply in the same language!', temperature=0.5, max_new_tokens=2048, Topp=0.5, Repetitionpenalty=1.2):
    """Send *message* (plus the system prompt and sampling settings) to the
    remote chat Space and return the generated response text.

    Opens two Streamlit status widgets so the user sees progress while the
    remote calls run.
    """
    with st.status("Starting client"):
        client = Client("https://huggingface-projects-llama-2-7b-chat.hf.space/")
        st.write("Requesting Audio Transcriber")
    with st.status("Requesting AgriTure v1"):
        st.write("Requesting API")
        # NOTE(review): arguments are passed positionally, so their order must
        # match the Space's /chat signature exactly. Here max_new_tokens comes
        # before temperature, and the bare literal 500 sits between Topp and
        # Repetitionpenalty with no explanation — confirm both against the
        # Space's API before changing anything here.
        response = client.predict(
            message,
            system_prompt,
            max_new_tokens,
            temperature,
            Topp,
            500,
            Repetitionpenalty,
            api_name="/chat"
        )
        st.write("Done")
    return response
|
53 |
|
|
|
54 |
# Streamlit UI: page title and description banner.
st.title(TITLE)
st.write(DESCRIPTION)

# Initialise the chat transcript once per browser session.
if "messages" not in st.session_state:
    st.session_state.messages = []
60 |
|
|
|
66 |
# Two input channels: typed chat text and recorded microphone audio.
textinput = st.chat_input("Ask AgriTure anything...")
wav_audio_data = st_audiorec()

# Voice path: st_audiorec returns raw WAV bytes once a recording finishes.
if wav_audio_data is not None:
    with st.status("Transcribing audio..."):
        # save audio to disk so the Whisper Space can be given a file path
        with open("audio.wav", "wb") as f:
            f.write(wav_audio_data)
        prompt = transcribe("audio.wav")
        st.write("Transcribed Given Audio ✔")

    # Show the transcribed prompt as the user's chat message and record it.
    st.chat_message("human", avatar="🌿").markdown(prompt)
    st.session_state.messages.append({"role": "human", "content": prompt})

    # Ask the model with the transcribed prompt.
    response = predict(message=prompt)

    with st.chat_message("assistant", avatar='🌿'):
        st.markdown(response)

    # Convert AI response to speech
    speech_file = text_to_speech(response)

    # Play the generated speech
    st.audio(speech_file, format="audio/mp3")

    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})

# Text path: react to typed user input (walrus skips the branch when empty).
if prompt := textinput:
    # Display user message in chat message container
    st.chat_message("human", avatar="🌿").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "human", "content": prompt})

    response = predict(message=prompt)

    # Convert AI response to speech
    speech_file = text_to_speech(response)

    # Display assistant response in chat message container
    with st.chat_message("assistant", avatar='🌿'):
        st.markdown(response)

    # Play the generated speech
    st.audio(speech_file, format="audio/mp3")

    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
|