# FlawlessAI / app.py — "AgriTure" Streamlit chat app (Hugging Face Space)
# Author: N. Achyuth Reddy
# (Header reconstructed from scraped Space page metadata: commit ac37763, 3.8 kB)
import streamlit as st
from gradio_client import Client
from st_audiorec import st_audiorec
from gtts import gTTS
from IPython.display import Audio, display
# Constants
# Page title rendered by st.title below.
TITLE = "AgriTure"
# Intro text rendered verbatim by st.write under the title.
DESCRIPTION = """
----
This Project demonstrates a model fine-tuned by Achyuth. This Model is named as "AgriaTure". This Model helps the farmers and scientists to develop the art of agriculture and farming.
Hope this will be a Successful Project!!!
~Achyuth
----
"""
# Initialize client
# Module-level client for the hosted Whisper large-v2 Space, created once at
# import time — requires network access when the app starts.
whisper_client = Client("https://sanchit-gandhi-whisper-large-v2.hf.space/")
# Function to convert text to speech using gTTS
def text_to_speech(text, lang='en', out_path="response.mp3"):
    """Synthesize *text* to an MP3 file with gTTS and return the file path.

    Args:
        text: Text to speak.
        lang: gTTS language code (default English).
        out_path: Output MP3 path. Defaults to the original hard-coded
            "response.mp3" so existing callers are unaffected; the file is
            overwritten on every call.

    Returns:
        The path of the written MP3 file.
    """
    tts = gTTS(text=text, lang=lang, slow=False)
    tts.save(out_path)
    return out_path
# Function to transcribe audio
def transcribe(wav_path):
    """Send the WAV file at *wav_path* to the Whisper Space and return its transcription."""
    transcription = whisper_client.predict(
        wav_path,
        "transcribe",
        api_name="/predict",
    )
    return transcription
# Prediction function
def predict(message, system_prompt='Your name is AgriaTure...', temperature=0.7, max_new_tokens=4096, Topp=0.5, Repetitionpenalty=1.2):
    """Send *message* to the hosted Llama-2-7b chat Space and return its reply.

    All generation parameters are forwarded positionally to the Space's
    /chat endpoint, so only the argument ORDER matters here.
    """
    with st.status("Starting client"):
        # A fresh client is created on every call (no caching).
        client = Client("https://huggingface-projects-llama-2-7b-chat.hf.space/")
        st.write("Requesting Audio Transcriber")
    with st.status("Requesting AgriTure v1"):
        st.write("Requesting API")
        # NOTE(review): the literal 500 is an extra positional argument between
        # Topp and Repetitionpenalty — presumably top_k for the remote endpoint;
        # TODO confirm against the Space's /chat API signature.
        response = client.predict(
            message,
            system_prompt,
            max_new_tokens,
            temperature,
            Topp,
            500,
            Repetitionpenalty,
            api_name="/chat"
        )
        st.write("Done")
    return response
# Streamlit UI
st.title(TITLE)
st.write(DESCRIPTION)
# Chat history persists across Streamlit reruns via session_state.
if "messages" not in st.session_state:
    st.session_state.messages = []
# Replay the stored conversation so it survives Streamlit's rerun model.
for message in st.session_state.messages:
    role = message["role"]
    avatar = "🧑‍💻" if role == 'human' else '🦙'
    with st.chat_message(role, avatar=avatar):
        st.markdown(message["content"])
textinput = st.chat_input("Ask AgriTure anything...")
# In-browser audio recorder; yields the recording as WAV bytes (or None).
wav_audio_data = st_audiorec()
# Voice input path: transcribe the recording, query the model, speak the reply.
if wav_audio_data is not None:
    with st.status("Transcribing audio..."):
        # Persist the raw bytes so the Whisper client can be given a file path.
        with open("audio.wav", "wb") as f:
            f.write(wav_audio_data)
        prompt = transcribe("audio.wav")
        st.write("Transcribed Given Audio ✔")

    st.chat_message("human", avatar="🌿").markdown(prompt)
    st.session_state.messages.append({"role": "human", "content": prompt})

    response = predict(message=prompt)

    # Render the assistant reply exactly once (the original displayed it in
    # two separate chat bubbles) and play the synthesized speech inline.
    with st.chat_message("assistant", avatar='🌿'):
        st.markdown(response)
        speech_file = text_to_speech(response)
        # IPython.display has no effect inside a Streamlit app; st.audio
        # embeds a player in the page instead.
        st.audio(speech_file)

    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
# React to user input (typed path): echo the prompt, query the model, speak the reply.
if prompt := textinput:
    # Display user message in chat message container
    st.chat_message("human", avatar="🌿").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "human", "content": prompt})

    response = predict(message=prompt)

    # Display assistant response in chat message container
    with st.chat_message("assistant", avatar='🌿'):
        st.markdown(response)
        speech_file = text_to_speech(response)
        # IPython.display has no effect inside a Streamlit app; st.audio
        # embeds a player in the page instead.
        st.audio(speech_file)

    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})