from langchain_community.document_loaders.csv_loader import CSVLoader
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain.embeddings import CacheBackedEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.storage import LocalFileStore
from langchain.chains import RetrievalQA
from langchain_core.callbacks import StdOutCallbackHandler
import os

def create_index():
    # Load the training data from the CSV file next to this script
    base_dir = os.path.dirname(__file__)
    csv_path = os.path.join(base_dir, 'data', 'train.csv')
    loader = CSVLoader(file_path=csv_path)
    data = loader.load()

    # Create the embeddings model
    embeddings_model = OpenAIEmbeddings()

    # Wrap the model in a cache-backed embedder so repeated runs reuse
    # embeddings stored on disk instead of re-calling the OpenAI API
    store = LocalFileStore("./cache")
    cached_embedder = CacheBackedEmbeddings.from_bytes_store(
        embeddings_model, store, namespace=embeddings_model.model
    )
    vector_store = FAISS.from_documents(data, cached_embedder)

    return vector_store.as_retriever()

def setup(openai_key):
    # Set the API key for OpenAI
    os.environ["OPENAI_API_KEY"] = openai_key
    retriever = create_index()
    llm = ChatOpenAI(model="gpt-4")
    return retriever, llm

def ai_doctor(openai_key, query):
    # Set up the retriever and the chat model
    retriever, llm = setup(openai_key)

    # Create the QA chain, echoing intermediate steps to stdout
    handler = StdOutCallbackHandler()
    qa_with_sources_chain = RetrievalQA.from_chain_type(
        llm=llm,
        retriever=retriever,
        callbacks=[handler],
        return_source_documents=True
    )

    # Ask a question and return the answer text
    res = qa_with_sources_chain.invoke({"query": query})
    return res['result']
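
# A minimal usage sketch, not part of the original module: it assumes the
# OPENAI_API_KEY environment variable holds a valid key and takes an
# optional question from the command line. The fallback question below
# is purely illustrative.
if __name__ == "__main__":
    import sys

    key = os.environ.get("OPENAI_API_KEY", "")
    question = sys.argv[1] if len(sys.argv) > 1 else "What can cause a persistent headache?"
    print(ai_doctor(key, question))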