# app.py — university-admissions advisor chatbot (LangChain + Chroma + Gradio)
# Provenance: Hugging Face Space by jdmorzan, commit 9f7d1a3 ("Update app.py", 3.4 kB)
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory, ConversationSummaryMemory
from langchain.chat_models import ChatOpenAI
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
import os
import sys
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
from langchain.prompts import ChatPromptTemplate
from langchain.prompts.chat import SystemMessagePromptTemplate, HumanMessagePromptTemplate
# Embedding model used to vectorize queries against the persisted Chroma index.
# SECURITY: the previous revision hard-coded an OpenAI API key here — that key
# is leaked and must be revoked. Read the key from the environment instead
# (raises KeyError early and loudly if OPENAI_API_KEY is not set).
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
embeddings = OpenAIEmbeddings(api_key=OPENAI_API_KEY)

# Re-open the vector store persisted on disk (the index was built offline).
vectordb = Chroma(persist_directory="./ai_vocacional_v2", embedding_function=embeddings)

# Deterministic (temperature=0) chat model for grounded question answering.
llm = ChatOpenAI(temperature=0, model_name='gpt-4o-mini', api_key=OPENAI_API_KEY)

# Conversation memory; 'chat_history' is the key ConversationalRetrievalChain expects.
memory = ConversationBufferMemory(
    memory_key='chat_history',
    return_messages=True)
# Prompt pieces for the retrieval chain: the system message injects the
# retrieved documents as {context}; the human message wraps the user's
# {question}. Both placeholders are filled in by the chain at run time.
general_system_template = r"""
Eres un asistente AI avanzado especializado en asesorar a alumnos con su ingreso a la universidad.
Toma los siguientes documentos de contexto {context} y responde únicamente basado en este contexto.
"""

general_user_template = "Pregunta:```{question}```"

messages = [
    SystemMessagePromptTemplate.from_template(general_system_template),
    HumanMessagePromptTemplate.from_template(general_user_template),
]

# Combined chat prompt handed to the chain's combine-docs step below.
qa_prompt = ChatPromptTemplate.from_messages(messages)
# Retrieval-augmented conversational chain: fetches the top-16 chunks from the
# vector store, answers through the custom qa_prompt, and tracks the dialogue
# in `memory` so callers only need to pass the current question.
pdf_qa = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=vectordb.as_retriever(search_kwargs={'k': 16}),
    combine_docs_chain_kwargs={'prompt': qa_prompt},
    memory=memory,  # max_tokens_limit=4000 could cap the stuffed context if needed
)
#Clarification: Si la pregunta del usuario es vaga o le faltan detalles importantes para dar una respuesta, debes realizar preguntas de clarificación para entender sus necesidades y darle la asistencia adecuada.
#Constraints: Debes responder solamente con la información disponible y saludar solo una vez. En caso no tengas una respuesta o no estés seguro, no inventes respuesta.
import gradio as gr

# --- Gradio chat UI --------------------------------------------------------
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")

    def user(query, chat_history):
        """Handle one user turn: run the QA chain and extend the display history.

        Args:
            query: text the user typed into the textbox.
            chat_history: list of (user, assistant) tuples held by the Chatbot
                component. The chain's own ConversationBufferMemory tracks the
                real conversation state, so this list is display-only.

        Returns:
            A cleared-textbox update and the updated display history.
        """
        print("User query:", query)
        print("Chat history:", chat_history)
        # Only the question is passed: the chain's `memory` supplies the
        # history internally. (The previous revision built an unused
        # chat_history_tuples list here — dead code, removed.)
        result = pdf_qa({"question": query})
        chat_history.append((query, result["answer"]))
        print("Updated chat history:", chat_history)
        return gr.update(value=""), chat_history

    # Submitting the textbox routes (msg, chatbot) through user() and writes
    # back the cleared textbox and updated chat; Clear resets the chat pane.
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False)
    clear.click(lambda: None, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.launch()