creating an agent
mixtral_agent.py  ADDED  (+95 -0)
@@ -0,0 +1,95 @@
# LangChain supports many other chat models. Here, we're using Ollama
from langchain_community.chat_models import ChatOllama
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain.tools.retriever import create_retriever_tool
from langchain_community.utilities import SerpAPIWrapper
from langchain.retrievers import ArxivRetriever
from langchain_core.tools import Tool
from langchain import hub
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import (
    ReActJsonSingleInputOutputParser,
)
from langchain.tools.render import render_text_description

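# NOTE (assumed environment): running this file needs the langchain,
# langchain-core and langchain-community packages, plus the `arxiv` package for
# ArxivRetriever and `google-search-results` for SerpAPIWrapper.
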
# supports many more optional parameters. Hover on your `ChatOllama(...)`
# class to view the latest available supported parameters
llm = ChatOllama(
    model="mistral",
    base_url="https://0013-35-201-206-176.ngrok-free.app"
)
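# NOTE: the base_url above is a temporary ngrok tunnel; it assumes an Ollama
# server is reachable at that URL with the `mistral` model already pulled
# (`ollama pull mistral`). Point it at your own endpoint, or omit base_url to
# use a local Ollama instance on the default port 11434.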
prompt = ChatPromptTemplate.from_template("Tell me a short joke about {topic}")

# using LangChain Expression Language (LCEL) chain syntax
# learn more about LCEL at
# https://python.langchain.com/docs/expression_language/why
chain = prompt | llm | StrOutputParser()

# for brevity, the response is printed to the terminal
# You can use LangServe to deploy your application for production
print(chain.invoke({"topic": "Space travel"}))

retriever = ArxivRetriever(load_max_docs=2)

tools = [
    create_retriever_tool(
        retriever,
        "arxiv_search",
        "Use this to recommend the user a paper to read. Unless stated otherwise, please choose the most recent models.",
        # "Searches and returns excerpts from the 2022 State of the Union.",
    ),
    Tool(
        name="SerpAPI",
        description="A low-cost Google Search API. Useful for when you need to answer questions about current events. Input should be a search query.",
        func=SerpAPIWrapper().run,
    ),
]
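
# NOTE: SerpAPIWrapper reads its API key from the SERPAPI_API_KEY environment
# variable (or a serpapi_api_key argument), so export it before running:
#   export SERPAPI_API_KEY="<your-key>"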
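# The hwchase17/react-json hub prompt is a ReAct-style prompt that expects
# {tools} and {tool_names} placeholders (filled in below via .partial) plus
# {input} and {agent_scratchpad} at runtime.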
prompt = hub.pull("hwchase17/react-json")
prompt = prompt.partial(
    tools=render_text_description(tools),
    tool_names=", ".join([t.name for t in tools]),
)

chat_model = llm
# define the agent
chat_model_with_stop = chat_model.bind(stop=["\nObservation"])
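# Binding the "\nObservation" stop sequence keeps the model from writing its
# own Observation lines; the AgentExecutor injects the real tool output instead.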
agent = (
    {
        "input": lambda x: x["input"],
        "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
    }
    | prompt
    | chat_model_with_stop
    | ReActJsonSingleInputOutputParser()
)

# instantiate the AgentExecutor
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)
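# handle_parsing_errors=True feeds malformed model output back to the LLM as an
# observation instead of raising, which helps smaller models stay on track.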

# agent_executor.invoke(
#     {
#         "input": "Who is the current holder of the speed skating world record on 500 meters? What is her current age raised to the 0.43 power?"
#     }
# )

# agent_executor.invoke(
#     {
#         "input": "what are large language models and why are they so expensive to run?"
#     }
# )

agent_executor.invoke(
    {
        "input": "How to generate videos from images using state of the art machine learning models"
    }
)