File size: 2,202 Bytes
304ffc8
 
 
 
 
3eea98c
304ffc8
 
 
 
 
 
0900847
304ffc8
 
6a2930d
 
 
 
65097ed
 
6a2930d
289b1cb
 
 
 
304ffc8
6720791
 
 
 
 
 
 
 
 
 
 
304ffc8
289b1cb
 
6a2930d
 
 
 
 
 
 
 
 
 
 
289b1cb
6a2930d
 
 
 
 
 
 
289b1cb
 
 
 
 
 
 
 
 
 
 
 
 
ddbe1e0
cff8c76
3d3ad6c
289b1cb
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
#Generic
import os
import keyfile
import streamlit as st
import warnings
warnings.filterwarnings("ignore")

#Langchain Packages
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.schema import HumanMessage, SystemMessage, AIMessage

# Page chrome: tab title and the greeting shown at the top of the app.
PAGE_TITLE = "Magical Healer"
st.set_page_config(page_title=PAGE_TITLE)
st.header("Welcome, How can I help you?")


# Bug fix: the previous `class AIMessage(BaseModel)` crashed at import time
# (`BaseModel` was never imported) and shadowed the langchain `AIMessage`
# imported above. The langchain class already provides `.content` and is the
# type `llm.invoke` expects in the message history, so it is used directly.

#General Instruction: seed the conversation exactly once per session.
# Bug fix: the original had two identical `not in st.session_state` guards;
# the first set the key to [], which made the second (persona-seeding) branch
# dead code — and that branch also assigned the misspelled attribute
# `sessionMessage`. A single guard now seeds the system persona directly.
if "sessionMessages" not in st.session_state:
    st.session_state.sessionMessages = [
        # NOTE(review): "medievel" is a typo in the prompt text; left as-is
        # because changing it alters what the model receives — confirm first.
        SystemMessage(content="You are a medievel magical healer known for your peculiar sarcasm")
    ]

#Configure the key: the Google client reads the API key from the environment.
os.environ["GOOGLE_API_KEY"] = keyfile.GOOGLEKEY


# Create the model. Keeping the settings in one mapping makes them easy to
# scan; system messages are folded into human turns for Gemini compatibility.
_model_settings = dict(
    model="gemini-1.5-pro",
    temperature=0.7,
    convert_system_message_to_human=True,
)
llm = ChatGoogleGenerativeAI(**_model_settings)

#User message
def load_answer(question):
    """Append the user's question to the session history, query the model,
    record the reply, and return the reply text (or a fallback message)."""
    history = st.session_state.sessionMessages
    history.append(HumanMessage(content=question))
    reply = llm.invoke(history)

    # Only trust replies that expose a string `.content` attribute.
    content = getattr(reply, 'content', None)
    if isinstance(content, str):
        history.append(AIMessage(content=content))
        return content

    st.error("Invalid response received from AI.")
    return "Sorry, I couldn't process your request."

#Working with Responses
#def load_answer(question):
    
#    st.session_state.sessionMessages.append(HumanMessage(content=question))
#    assistant_answer=llm.invoke(st.session_state.sessionMessages)
#    st.session_state.sessionMessages.append(AIMessage(content = assistant_answer))
#    return assistant_answer.content

def get_text():
    """Render the chat input box and return the current text the user typed."""
    # Bug fix: the original passed `key=input` — the *builtin* `input`
    # function — as the widget key. Streamlit keys should be stable strings.
    return st.text_input("You: ", key="user_input")

#Implementing: wire the input box and button to the model call.
user_input = get_text()
submit = st.button("Generate")

# Only query the model when the button was pressed AND there is actual text —
# the original called load_answer("") on an empty submission.
if submit and user_input:
    resp = load_answer(user_input)
    st.subheader("Answer: ")
    # Bug fix: `st.write(resp, key=1)` passed a `key` kwarg that st.write
    # does not accept; the key is unnecessary for a plain write.
    st.write(resp)