import streamlit as st
from gradio_client import Client
from st_audiorec import st_audiorec
from gtts import gTTS
import os
# Constants
TITLE = "AgriTure"
DESCRIPTION = """
----
This project demonstrates a model fine-tuned by Achyuth, named "AgriaTure". It helps farmers and scientists develop the art of agriculture and farming.
Hope this will be a successful project!
~Achyuth
----
"""
# Initialize client
whisper_client = Client("https://sanchit-gandhi-whisper-large-v2.hf.space/")
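# NOTE (assumption): the Space above is expected to expose a "/predict" endpoint that
# takes an audio file path and a task string and returns the transcription text;
# transcribe() below relies on that interface.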
# Function to convert text to speech using gTTS
def text_to_speech(text, lang='en'):
    # Synthesize speech and write it to a fixed file in the working directory
    tts = gTTS(text=text, lang=lang, slow=False)
    tts.save("response.mp3")
    return "response.mp3"
# Function to transcribe audio
def transcribe(wav_path):
    return whisper_client.predict(
        wav_path,
        "transcribe",
        api_name="/predict"
    )
# Prediction function
def predict(message, system_prompt='Your name is AgriaTure. You are developed by Achyuth. Your developer is a 14-year-old boy who is interested in AI and Machine Learning. Do not reveal the age of your developer unless the user asks for it. When the user asks for information, do not use exclamations; come directly to the point and give short, meaningful answers. Never miscalculate; check and verify every answer before giving it to the user, and always provide accurate, correct information. If the user converses with you in any language, reply in the same language. For example, if the user says "నమస్తే", you need to reply "నమస్తే! నేను మీ అగ్రిట్యూర్ని. ఈ రోజు మీకు సహాయం చేయడానికి నేను ఇక్కడ ఉన్నాను. ఈ రోజు విషయం ఏమిటి? ఏదైనా విషయంలో ఏమైనా సందేహాలు ఉన్నాయా?". You must always reply in the same language!', temperature=0.5, max_new_tokens=2048, top_p=0.5, repetition_penalty=1.2):
    with st.status("Starting client"):
        client = Client("https://huggingface-projects-llama-2-7b-chat.hf.space/")
        st.write("Chat client ready")
    with st.status("Requesting AgriTure v1"):
        st.write("Requesting API")
        # Positional arguments passed to the Space's "/chat" endpoint:
        # message, system_prompt, max_new_tokens, temperature, top_p,
        # a hard-coded 500 (presumably top_k), and repetition_penalty.
        response = client.predict(
            message,
            system_prompt,
            max_new_tokens,
            temperature,
            top_p,
            500,
            repetition_penalty,
            api_name="/chat"
        )
        st.write("Done")
    return response
# Streamlit UI
st.title(TITLE)
st.write(DESCRIPTION)
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"], avatar=("🧑‍💻" if message["role"] == 'human' else '🦙')):
        st.markdown(message["content"])
textinput = st.chat_input("Ask AgriTure anything...")
wav_audio_data = st_audiorec()
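# st_audiorec renders a browser recorder widget; it is expected to return the
# recorded audio as raw WAV bytes once the user finishes recording (None until then).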
if wav_audio_data is not None:
    with st.status("Transcribing audio..."):
        # Save the recorded audio to disk so the Whisper Space can read it
        with open("audio.wav", "wb") as f:
            f.write(wav_audio_data)
        prompt = transcribe("audio.wav")
        st.write("Audio transcribed ✔")
    st.chat_message("human", avatar="🌿").markdown(prompt)
    st.session_state.messages.append({"role": "human", "content": prompt})
    # Generate the model's response to the transcribed prompt
    response = predict(message=prompt)
    with st.chat_message("assistant", avatar='🌿'):
        st.markdown(response)
        # Convert AI response to speech
        speech_file = text_to_speech(response)
        # Play the generated speech
        st.audio(speech_file, format="audio/mp3")
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
# React to user input
if prompt := textinput:
    # Display user message in chat message container
    st.chat_message("human", avatar="🌿").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "human", "content": prompt})
    response = predict(message=prompt)
    # Convert AI response to speech
    speech_file = text_to_speech(response)
    # Display assistant response in chat message container
    with st.chat_message("assistant", avatar='🌿'):
        st.markdown(response)
        # Play the generated speech
        st.audio(speech_file, format="audio/mp3")
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
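# To run the app locally (assuming this file is saved as app.py and the two
# Hugging Face Spaces referenced above are online), something like:
#   pip install streamlit gradio_client streamlit-audiorec gTTS
#   streamlit run app.py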