Spaces:
Sleeping
Sleeping
File size: 1,736 Bytes
304ffc8 3eea98c 304ffc8 0900847 304ffc8 289b1cb 65097ed 289b1cb 304ffc8 6720791 304ffc8 289b1cb ddbe1e0 cff8c76 289b1cb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 |
#Generic
import os
import keyfile
import streamlit as st
import warnings
warnings.filterwarnings("ignore")
#Langchain Packages
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.schema import HumanMessage, SystemMessage, AIMessage
# --- Page setup: first message that will pop on the screen ---
st.set_page_config(page_title="Magical Healer")
st.header("Welcome, How can I help you?")

# --- Conversation state ---
# Seed the chat history exactly once per session with the system instruction.
# BUG FIXED: the original had two identical `if "sessionMessages" not in
# st.session_state` guards; the second branch (which stored the SystemMessage
# under the misspelled key "sessionMessage") could never run, so the system
# prompt was never added to the conversation.
if "sessionMessages" not in st.session_state:
    st.session_state["sessionMessages"] = [
        SystemMessage(content="You are a medievel magical healer known for your peculiar sarcasm")
    ]

# --- Configure the API key ---
# keyfile is a project-local module holding the credential; the Google client
# reads GOOGLE_API_KEY from the environment.
os.environ["GOOGLE_API_KEY"] = keyfile.GOOGLEKEY
# Create the model
# Chat client for Gemini; temperature 0.7 gives moderately creative replies.
# NOTE(review): convert_system_message_to_human is deprecated in newer
# langchain-google-genai releases — confirm the pinned version still needs it
# before removing (it folds the SystemMessage into the first human turn).
llm = ChatGoogleGenerativeAI(
model="gemini-1.5-pro",
temperature=0.7,
convert_system_message_to_human= True
)
#User message
def get_text():
    """Render the chat text box and return whatever the user typed.

    NOTE(review): an identical ``get_text`` is defined again later in this
    file; Python keeps only the last definition, so this one is dead code —
    consider deleting one of the two.

    BUG FIXED: ``key=input`` passed the *builtin* ``input`` function as the
    widget key; Streamlit widget keys should be stable strings.
    """
    input_text = st.text_input("You: ", key="user_input")
    return input_text
#Working with Responses
def load_answer(question):
    """Send *question* plus the full conversation history to the model.

    Appends the user's message and the model's reply to
    ``st.session_state.sessionMessages`` and returns the reply text.
    """
    # Add the user's new message to the running conversation.
    st.session_state.sessionMessages.append(HumanMessage(content=question))
    # llm.invoke returns an AIMessage whose .content is the reply text.
    assistant_answer = llm.invoke(st.session_state.sessionMessages)
    # BUG FIXED: the original did AIMessage(content=assistant_answer), nesting
    # the whole AIMessage object inside another AIMessage's content field and
    # corrupting the history sent to the model on the next turn.
    st.session_state.sessionMessages.append(AIMessage(content=assistant_answer.content))
    return assistant_answer.content
def get_text():
    """Render the chat text box and return whatever the user typed.

    NOTE(review): this is a duplicate of the ``get_text`` defined earlier in
    the file; this later definition is the one Python actually uses —
    consider deleting the other.

    BUG FIXED: ``key=input`` passed the *builtin* ``input`` function as the
    widget key; Streamlit widget keys should be stable strings.
    """
    input_text = st.text_input("You: ", key="user_input")
    return input_text
#Implementing
user_input = get_text()
submit = st.button("Generate")

if submit:
    # Guard against an empty prompt so we don't send blank messages to the
    # model (the original called load_answer("") on an empty box).
    if user_input:
        resp = load_answer(user_input)
        st.subheader("Answer: ")
        # BUG FIXED: st.write() takes no `key` argument; the stray key=1
        # raises/ignores depending on Streamlit version.
        st.write(resp)
|