Spaces:
Sleeping
Sleeping
Danielrahmai1991
committed on
Commit
•
e1ce179
1
Parent(s):
6afb9b1
Update main.py
Browse files
main.py
CHANGED
@@ -50,40 +50,40 @@ llm_chain_model = LLMChain(prompt=prompt1, llm=llm)
|
|
50 |
# for retriver
|
51 |
|
52 |
|
53 |
-
def format_docs(docs):
|
54 |
-
|
55 |
-
|
56 |
-
model_name = "BAAI/bge-base-en-v1.5"
|
57 |
-
model_kwargs = {"device":'cpu'}
|
58 |
-
encode_kwargs = {'normalize_embeddings':True}
|
59 |
-
|
60 |
-
hf = HuggingFaceEmbeddings(
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
)
|
65 |
-
|
66 |
-
|
67 |
-
vectorstore = Chroma(
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
)
|
72 |
-
|
73 |
-
retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 6})
|
74 |
-
template = """you are the financial ai assistant
|
75 |
-
{context}
|
76 |
-
Question: {question}
|
77 |
-
Helpful Answer:"""
|
78 |
-
custom_rag_prompt = PromptTemplate.from_template(template)
|
79 |
-
|
80 |
-
rag_chain = (
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
)
|
86 |
-
print("retriver done")
|
87 |
|
88 |
from fastapi import FastAPI
|
89 |
|
@@ -106,7 +106,7 @@ class Item(BaseModel):
|
|
106 |
|
107 |
@app.post("/prompt/")
|
108 |
def create_item(item: Item):
|
109 |
-
message_response =
|
110 |
return {"item": item, "message": "LLm response", 'response': message_response}
|
111 |
|
112 |
|
|
|
50 |
# for retriver
|
51 |
|
52 |
|
53 |
+
# def format_docs(docs):
|
54 |
+
# return "\n\n".join(doc.page_content for doc in docs)
|
55 |
+
|
56 |
+
# model_name = "BAAI/bge-base-en-v1.5"
|
57 |
+
# model_kwargs = {"device":'cpu'}
|
58 |
+
# encode_kwargs = {'normalize_embeddings':True}
|
59 |
+
|
60 |
+
# hf = HuggingFaceEmbeddings(
|
61 |
+
# model_name = model_name,
|
62 |
+
# model_kwargs = model_kwargs,
|
63 |
+
# encode_kwargs = encode_kwargs
|
64 |
+
# )
|
65 |
+
|
66 |
+
|
67 |
+
# vectorstore = Chroma(
|
68 |
+
# collection_name="example_collection",
|
69 |
+
# embedding_function=hf,
|
70 |
+
# persist_directory="./chroma_langchain_db", # Where to save data locally, remove if not neccesary
|
71 |
+
# )
|
72 |
+
|
73 |
+
# retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 6})
|
74 |
+
# template = """you are the financial ai assistant
|
75 |
+
# {context}
|
76 |
+
# Question: {question}
|
77 |
+
# Helpful Answer:"""
|
78 |
+
# custom_rag_prompt = PromptTemplate.from_template(template)
|
79 |
+
|
80 |
+
# rag_chain = (
|
81 |
+
# {"context": retriever | format_docs, "question": RunnablePassthrough()}
|
82 |
+
# | custom_rag_prompt
|
83 |
+
# | llm
|
84 |
+
# | StrOutputParser()
|
85 |
+
# )
|
86 |
+
# print("retriver done")
|
87 |
|
88 |
from fastapi import FastAPI
|
89 |
|
|
|
106 |
|
107 |
@app.post("/prompt/")
|
108 |
def create_item(item: Item):
|
109 |
+
message_response = llm_chain_model.run(item.get('question'))
|
110 |
return {"item": item, "message": "LLm response", 'response': message_response}
|
111 |
|
112 |
|