Update app.py
app.py CHANGED
@@ -4,6 +4,7 @@ from langchain.text_splitter import CharacterTextSplitter
 from langchain.chains.question_answering import load_qa_chain
 from langchain.llms import OpenAI
 from dotenv import load_dotenv
+import logging
 import os
 import subprocess
 
@@ -61,55 +62,8 @@ doc_retriever = vectordb.as_retriever()
 from langchain.chains import RetrievalQA
 shakespeare_qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=doc_retriever)
 
-# from langchain.utilities import SerpAPIWrapper
-# search = SerpAPIWrapper()
-
-# from langchain.agents import initialize_agent, Tool
-# from langchain.agents import AgentType
-# from langchain.tools import BaseTool
-# from langchain.llms import OpenAI
-# from langchain import LLMMathChain, SerpAPIWrapper
-
-# tools = [
-# Tool(
-# name = "Shakespeare QA System",
-# func=shakespeare_qa.run,
-# description="useful for when you need to answer questions about Shakespeare's works. Input should be a fully formed question."
-# ),
-# Tool(
-# name = "SERP API Search",
-# func=search.run,
-# description="useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question."
-# ),
-# ]
-
-# from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
-# memory = ConversationBufferMemory(memory_key="chat_history")
-# readonlymemory = ReadOnlySharedMemory(memory=memory)
-
-# from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
-
-# prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
-# suffix = """Begin!"
-
-# {chat_history}
-# Question: {input}
-# {agent_scratchpad}"""
-
-# prompt = ZeroShotAgent.create_prompt(
-# tools,
-# prefix=prefix,
-# suffix=suffix,
-# input_variables=["input", "chat_history", "agent_scratchpad"]
-# )
-
-# from langchain import OpenAI, LLMChain, PromptTemplate
-# llm_chain = LLMChain(llm=llm, prompt=prompt)
-# agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
-# agent_chain = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, memory=memory)
-
 def make_inference(query):
+    logging.info(query)
     return(shakespeare_qa.run(input=query))
 
 if __name__ == "__main__":
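Note on the new logging.info(query) call: nothing in the visible hunks configures the logging module, and Python's root logger defaults to the WARNING level, so the message may be silently dropped unless logging is set up elsewhere in the app or by the hosting environment. Below is a minimal sketch of one way to surface it; the basicConfig arguments and the sample query are assumptions for illustration, not part of this commit.

import logging

# Assumed setup (not in the diff): raise the root logger to INFO so the
# query line logged inside make_inference() actually appears in the logs.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(message)s",
)

# Hypothetical call once the chain is built; the query string is made up:
# make_inference("Which play features the character Ophelia?")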