import os

from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.prompts import ChatPromptTemplate
from langchain.prompts.chat import SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

# Alternative local embeddings (requires: from langchain.embeddings import HuggingFaceEmbeddings):
# embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2')
embeddings = OpenAIEmbeddings(api_key=os.environ["OPENAI_API_KEY"])  # never hardcode API keys in source

# The persisted Chroma index is loaded from disk; to (re)build it from source
# documents, see build_vectordb() just below.
vectordb = Chroma(persist_directory="./ai_vocacional_v2", embedding_function=embeddings)
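
# One-time ingestion sketch (defined but never called at import time). Hedged
# assumptions, not from the original script: source PDFs live under './docs'
# (hypothetical path) and the chunking parameters are illustrative defaults;
# adjust both before use.
def build_vectordb(source_dir='./docs', persist_dir='./ai_vocacional_v2'):
    from langchain.document_loaders import PyPDFDirectoryLoader

    # Load every PDF in the directory, then split into overlapping chunks
    # so each embedded piece stays within a useful context size.
    docs = PyPDFDirectoryLoader(source_dir).load()
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    document_chunks = splitter.split_documents(docs)

    # Embed the chunks and persist the Chroma index to disk for later loading.
    db = Chroma.from_documents(document_chunks, embedding=embeddings,
                               persist_directory=persist_dir)
    db.persist()
    return db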

llm = ChatOpenAI(temperature=0, model_name='gpt-4o-mini', api_key=os.environ["OPENAI_API_KEY"])

memory = ConversationBufferMemory(
    memory_key='chat_history',
    return_messages=True)
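
# Hedged alternative, not used here: ConversationSummaryMemory keeps an LLM-written
# running summary instead of the full transcript, which bounds prompt growth in
# long chats (requires: from langchain.memory import ConversationSummaryMemory).
# memory = ConversationSummaryMemory(
#     llm=llm,
#     memory_key='chat_history',
#     return_messages=True)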

# System prompt (in Spanish, matching the app's audience): "You are an advanced AI
# assistant specialized in advising students on university admission. Take the
# following context documents {context} and answer based only on this context."
general_system_template = r"""
Eres un asistente AI avanzado especializado en asesorar a alumnos con su ingreso a la universidad.
Toma los siguientes documentos de contexto {context} y responde únicamente basado en este contexto.
"""

general_user_template = "Pregunta:```{question}```"  # "Question: ..." wrapper (Spanish) for the user turn
messages = [
    SystemMessagePromptTemplate.from_template(general_system_template),
    HumanMessagePromptTemplate.from_template(general_user_template),
]
qa_prompt = ChatPromptTemplate.from_messages(messages)

pdf_qa = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=vectordb.as_retriever(search_kwargs={'k': 16}),
    combine_docs_chain_kwargs={'prompt': qa_prompt},
    memory=memory,
    # max_tokens_limit=4000,  # optional cap on the stuffed retrieval context
)
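
# Quick smoke test of the chain (hypothetical question; assumes the Chroma index
# already exists on disk):
# print(pdf_qa({"question": "¿Qué necesito para postular a la universidad?"})["answer"])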

# Prompt rules that could be appended to the system template:
# Clarification: if the user's question is vague or missing important details, ask
# clarifying questions to understand their needs and give proper assistance.
# Constraints: answer only with the available information and greet only once.
# If you have no answer or are unsure, do not invent one.

import gradio as gr

# Minimal chat UI: a Chatbot pane, a textbox for questions, and a clear button.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")

    def user(query, chat_history):
        # The chain's ConversationBufferMemory tracks the dialogue internally, so
        # only the new question is passed in. (History could instead be supplied
        # explicitly as a list of (human, ai) tuples via a "chat_history" key,
        # but the memory already provides it.)
        result = pdf_qa({"question": query})

        # Append the (question, answer) pair so the Chatbot widget displays it.
        chat_history.append((query, result["answer"]))
        return gr.update(value=""), chat_history

    def clear_all():
        # Reset the chain's memory along with the UI; otherwise stale history
        # keeps influencing answers after the chat window is cleared.
        memory.clear()
        return None

    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False)
    clear.click(clear_all, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.launch()