removed some commented out code and moved import statements to the top of the file
mixtral_agent.py  CHANGED  +42 -48
@@ -12,9 +12,15 @@ from langchain.agents.format_scratchpad import format_log_to_str
 from langchain.agents.output_parsers import (
     ReActJsonSingleInputOutputParser,
 )
+# Import things that are needed generically
+from langchain.pydantic_v1 import BaseModel, Field
+from langchain.tools import BaseTool, StructuredTool, tool
+from typing import List, Dict
+from datetime import datetime
 from langchain.tools.render import render_text_description
 import os
 
+
 import dotenv
 
 dotenv.load_dotenv()
@@ -31,39 +37,9 @@ llm = ChatOllama(
 )
 prompt = ChatPromptTemplate.from_template("Tell me a short joke about {topic}")
 
-
-# learn more about the LCEL on
-# https://python.langchain.com/docs/expression_language/why
-chain = prompt | llm | StrOutputParser()
+arxiv_retriever = ArxivRetriever(load_max_docs=2)
 
-# for brevity, response is printed in terminal
-# You can use LangServe to deploy your application for
-# production
-print(chain.invoke({"topic": "Space travel"}))
 
-retriever = ArxivRetriever(load_max_docs=2)
-
-# Import things that are needed generically
-from langchain.pydantic_v1 import BaseModel, Field
-from langchain.tools import BaseTool, StructuredTool, tool
-
-
-global all_sources
-
-# @tool
-# def search(query: str) -> str:
-#     """Look up things online."""
-#     # return "LangChain"
-#     data = retriever.invoke(query)
-#     meta_data = [i.metadata for i in data]
-#     # meta_data += all_sources
-#     # all_sources += meta_data
-#     all_sources += meta_data
-#     # all_sources = []
-#     return meta_data
-
-from typing import List, Dict
-from datetime import datetime
 
 def format_info_list(info_list: List[Dict[str, str]]) -> str:
     """
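
For reference, a minimal sketch of what the newly named arxiv_retriever returns (the import path and metadata keys are assumptions based on the usual ArxivRetriever behaviour and may differ from the unchanged parts of this file):

    from langchain_community.retrievers import ArxivRetriever  # assumed import path

    arxiv_retriever = ArxivRetriever(load_max_docs=2)

    # Each result is a Document; the paper details live in .metadata,
    # which is what the search tools below collect into all_sources.
    docs = arxiv_retriever.invoke("text to 3d generation")
    for doc in docs:
        # Key names such as 'Title' and 'Entry ID' are typical but version-dependent.
        print(doc.metadata.get("Title"), doc.metadata.get("Entry ID"))
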
@@ -87,11 +63,11 @@ def format_info_list(info_list: List[Dict[str, str]]) -> str:
     return '\n'.join(formatted_strings)
 
 @tool
-def search(query: str) -> str:
-    """
+def arxiv_search(query: str) -> str:
+    """Using the arxiv search and collects metadata."""
     # return "LangChain"
     global all_sources
-    data = retriever.invoke(query)
+    data = arxiv_retriever.invoke(query)
     meta_data = [i.metadata for i in data]
     # meta_data += all_sources
     # all_sources += meta_data
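
A small illustrative check of what the rename means for the agent: the @tool decorator takes the tool name from the function name and the description from the docstring, and both feed into the rendered tool list in the ReAct prompt (the printed values are indicative, not captured from a run):

    from langchain.tools.render import render_text_description

    print(arxiv_search.name)         # "arxiv_search" instead of the old "search"
    print(arxiv_search.description)  # derived from the docstring above
    print(render_text_description([arxiv_search]))
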
@@ -102,11 +78,24 @@ def search(query: str) -> str:
     # formatted_info = format_info_list(all_sources)
 
     return meta_data.__str__()
+
+@tool
+def google_search(query: str) -> str:
+    """Using the google search and collects metadata."""
+    # return "LangChain"
+    global all_sources
+
+    x = SerpAPIWrapper()
+    search_results:dict = x.results(query)
+
+
+    organic_source = search_results['organic_results']
+    return organic_source
+
+
 
-    # all_sources = []
-    # return meta_data
 
-tools = [search]
+tools = [arxiv_search,google_search]
 
 # tools = [
 #     create_retriever_tool(
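
The new google_search tool returns search_results['organic_results'] as-is (a list, despite the -> str annotation) and assumes that key is always present in the SerpAPI response. A hedged sketch of a slightly more defensive variant, using the same SerpAPIWrapper API (SERPAPI_API_KEY still required); the all_sources line mirrors the commented-out bookkeeping elsewhere in the file and is only a suggestion:

    from langchain.tools import tool
    from langchain_community.utilities import SerpAPIWrapper  # assumed import path

    @tool
    def google_search(query: str) -> str:
        """Google search via SerpAPI; returns organic results and records sources."""
        global all_sources
        search = SerpAPIWrapper()  # reads SERPAPI_API_KEY from the environment
        search_results: dict = search.results(query)
        organic = search_results.get("organic_results", [])
        all_sources += organic  # optional source tracking, mirroring arxiv_search
        return str(organic)
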
@@ -125,7 +114,6 @@ tools = [search]
 # ]
 
 
-
 prompt = hub.pull("hwchase17/react-json")
 prompt = prompt.partial(
     tools=render_text_description(tools),
@@ -160,19 +148,25 @@ if __name__ == "__main__":
     # global variable for collecting sources
     all_sources = []
 
-    input = agent_executor.invoke(
-        {
-            "input": "How to generate videos from images using state of the art macchine learning models; Using the axriv retriever " +
-            "add the urls of the papers used in the final answer using the metadata from the retriever"
-            # f"Please prioritize the newest papers this is the current data {get_current_date()}"
-        }
-    )
+    # input = agent_executor.invoke(
+    #     {
+    #         "input": "How to generate videos from images using state of the art macchine learning models; Using the axriv retriever " +
+    #         "add the urls of the papers used in the final answer using the metadata from the retriever"
+    #         # f"Please prioritize the newest papers this is the current data {get_current_date()}"
+    #     }
+    # )
+
+    # input_1 = agent_executor.invoke(
+    #     {
+    #         "input": "I am looking for a text to 3d model; Using the axriv retriever " +
+    #         "add the urls of the papers used in the final answer using the metadata from the retriever"
+    #         # f"Please prioritize the newest papers this is the current data {get_current_date()}"
+    #     }
+    # )
 
-    x = 0
-
     input_1 = agent_executor.invoke(
         {
-            "input": "I am looking for a text to 3d model; Using the axriv retriever " +
+            "input": "I am looking for a text to 3d model; Using the google retriever " +
             "add the urls of the papers used in the final answer using the metadata from the retriever"
             # f"Please prioritize the newest papers this is the current data {get_current_date()}"
         }
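
For context, the imports touched in this commit (format_log_to_str, ReActJsonSingleInputOutputParser, render_text_description, hub) are the standard ingredients of LangChain's ReAct-JSON agent. A minimal sketch of how they are typically wired together, following the stock LangChain pattern rather than the unchanged parts of this file (the model name and stop token are illustrative):

    from langchain import hub
    from langchain.agents import AgentExecutor
    from langchain.agents.format_scratchpad import format_log_to_str
    from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser
    from langchain.tools.render import render_text_description
    from langchain_community.chat_models import ChatOllama

    llm = ChatOllama(model="mistral")  # illustrative model name
    tools = [arxiv_search, google_search]

    prompt = hub.pull("hwchase17/react-json")
    prompt = prompt.partial(
        tools=render_text_description(tools),
        tool_names=", ".join([t.name for t in tools]),
    )

    # Stop before the model starts writing its own "Observation:" lines.
    llm_with_stop = llm.bind(stop=["\nObservation"])

    agent = (
        {
            "input": lambda x: x["input"],
            "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
        }
        | prompt
        | llm_with_stop
        | ReActJsonSingleInputOutputParser()
    )

    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)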