# ocean_helper/app.py
import pandas as pd

# Load the Q&A dataset and flatten each row into a single "column: value" string
# that can be embedded and retrieved as context.
df = pd.read_csv('./combined_oceanography_questions.csv')

context_data = []
for i in range(len(df)):
    context = ""
    for j in range(len(df.columns)):
        context += df.columns[j]
        context += ": "
        context += str(df.iloc[i, j])
        context += " "
    context_data.append(context)
import os

# Read the Groq API key from the Space secret / environment variable 'bero'.
groq_key = os.environ.get('bero')
## LLM used for RAG
from langchain_groq import ChatGroq
llm = ChatGroq(model="llama-3.1-70b-versatile", api_key=groq_key)
## Embedding model!
from langchain_huggingface import HuggingFaceEmbeddings
embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1")
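# The embedding model turns each context string (and later each query) into a
# dense vector so semantically similar texts end up close together in the store.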
# create vector store!
from langchain_chroma import Chroma
vectorstore = Chroma(
collection_name="ocean_dataset_store",
embedding_function=embed_model,
)
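# Note: with no persist_directory the collection is held in memory only and is
# rebuilt from scratch on every app start.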
# Add the flattened rows to the vector store (they are embedded on insert).
vectorstore.add_texts(context_data)
retriever = vectorstore.as_retriever()
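# The retriever wraps the vector store and returns the most relevant context
# strings for a given question.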
from langchain_core.prompts import PromptTemplate
template = ("""You are an ocean expert.
Use the provided context to answer the question.
If you don't know the answer, say so. Explain your answer in detail.
Do not discuss the context in your response; just provide the answer directly.
Context: {context}
Question: {question}
Answer:""")
rag_prompt = PromptTemplate.from_template(template)
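# PromptTemplate.from_template infers the input variables ({context} and
# {question}) from the placeholders in the template string.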
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | rag_prompt
    | llm
    | StrOutputParser()
)
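# For each incoming question the retriever fills {context} with the best-matching
# rows while RunnablePassthrough() forwards the question itself into {question};
# the filled prompt goes to the LLM and the reply is parsed to a plain string.
# Example (not executed here): rag_chain.invoke("What is the thermocline?")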
import gradio as gr
import numpy as np

# Leftover sepia-filter demo; the `demo` it creates is overwritten by the
# ChatInterface defined below and is never launched.
def sepia(input_img):
    sepia_filter = np.array([
        [0.393, 0.769, 0.189],
        [0.349, 0.686, 0.168],
        [0.272, 0.534, 0.131]
    ])
    sepia_img = input_img.dot(sepia_filter.T)
    sepia_img /= sepia_img.max()
    return sepia_img

demo = gr.Interface(sepia, gr.Image(), "image")
def rag_memory_stream(message, history):
    # Stream the answer chunk by chunk so the chat UI updates as text arrives.
    partial_text = ""
    for new_text in rag_chain.stream(message):
        partial_text += new_text
        yield partial_text
css = """ html, body {
margin: 0;
padding: 0;
height: 100%;
overflow: hidden;
}
body::before {
content: '';
position: fixed;
top: 0;
left: 0;
width: 100vw;
height: 100vh;
background-image: url('https://thumbs.dreamstime.com/z/sea-water-ocean-wave-surfing-surface-colorful-vibrant-sunset-barrel-shape-124362369.jpg?ct=jpeg');
background-size: cover;
background-repeat: no-repeat;
opacity: 0.35; /* Faint background image */
background-position: center;
z-index: -1; /* Keep the background behind text */
}
.gradio-container {
display: flex;
justify-content: center;
align-items: center;
height: 100vh; /* Ensure the content is vertically centered */
}"""
examples = [
    "How many oceans exist?",
    "What is ocean temperature?",
    "What is the thermocline?",
    "What species of fish are found in Lake Kivu?",
    "How do local communities catch fish in Lake Nokoué?"
]
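# These examples show up as clickable starter prompts beneath the chat box.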
description = "Ocean Sciences Assistant"
title = "I can answer any question related to the ocean. Try me!"
demo = gr.ChatInterface(
    fn=rag_memory_stream,
    type="messages",
    title=title,
    description=description,
    fill_height=True,
    css=css,
    examples=examples,
    theme="glass",
)
if __name__ == "__main__":
    demo.launch()