Commit d1f097e
Parent(s): a71f0e3
Upload 5 files
- crewai/tools/__init__.py +0 -0
- crewai/tools/browser_tools.py +38 -0
- crewai/tools/calculator_tools.py +13 -0
- crewai/tools/search_tools.py +57 -0
- crewai/tools/sec_tools.py +108 -0
crewai/tools/__init__.py
ADDED
File without changes
crewai/tools/browser_tools.py
ADDED
@@ -0,0 +1,38 @@
import json
import os

import requests
from crewai import Agent, Task
from langchain.tools import tool
from unstructured.partition.html import partition_html


class BrowserTools():

  @tool("Scrape website content")
  def scrape_and_summarize_website(website):
    """Useful to scrape and summarize a website's content"""
    url = f"https://chrome.browserless.io/content?token={os.environ['BROWSERLESS_API_KEY']}"
    payload = json.dumps({"url": website})
    headers = {'cache-control': 'no-cache', 'content-type': 'application/json'}
    response = requests.request("POST", url, headers=headers, data=payload)
    elements = partition_html(text=response.text)
    content = "\n\n".join([str(el) for el in elements])
    content = [content[i:i + 8000] for i in range(0, len(content), 8000)]
    summaries = []
    for chunk in content:
      agent = Agent(
          role='Principal Researcher',
          goal=
          'Do amazing research and summaries based on the content you are working with',
          backstory=
          "You're a Principal Researcher at a big company and you need to do research about a given topic.",
          allow_delegation=False)
      task = Task(
          agent=agent,
          description=
          f'Analyze and summarize the content below, make sure to include the most relevant information in the summary, return only the summary nothing else.\n\nCONTENT\n----------\n{chunk}'
      )
      summary = task.execute()
      summaries.append(summary)
    return "\n\n".join(summaries)
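As a rough orientation for how this tool is meant to be consumed, a minimal sketch follows. The agent role, goal, backstory, and target URL are illustrative only (not part of this commit), and it assumes BROWSERLESS_API_KEY plus an OpenAI key are set in the environment:

from crewai import Agent
from crewai.tools.browser_tools import BrowserTools

# Hypothetical agent wiring; only the tool reference comes from this commit.
researcher = Agent(
    role='Company Researcher',
    goal='Summarize the landing page of a company website',
    backstory='You research companies from their public web pages.',
    tools=[BrowserTools().scrape_and_summarize_website],
    allow_delegation=False)

# The decorated function is also a LangChain tool object, so it can be
# invoked directly through .run():
summary = BrowserTools().scrape_and_summarize_website.run("https://example.com")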
crewai/tools/calculator_tools.py
ADDED
@@ -0,0 +1,13 @@
from langchain.tools import tool


class CalculatorTools():

  @tool("Make a calculation")
  def calculate(operation):
    """Useful to perform any mathematical calculations,
    like sum, minus, multiplication, division, etc.
    The input to this tool should be a mathematical
    expression, a couple examples are `200*7` or `5000/2*10`
    """
    return eval(operation)
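For reference, a couple of illustrative calls. The decorated function is exposed as a LangChain tool object, so it is invoked through .run(); note that eval executes arbitrary Python, so the input expressions should come from a trusted agent:

from crewai.tools.calculator_tools import CalculatorTools

calc = CalculatorTools().calculate
print(calc.run("200*7"))      # 1400
print(calc.run("5000/2*10"))  # 25000.0 (true division returns a float)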
crewai/tools/search_tools.py
ADDED
@@ -0,0 +1,57 @@
import json
import os

import requests
from langchain.tools import tool


class SearchTools():
  @tool("Search the internet")
  def search_internet(query):
    """Useful to search the internet
    about a given topic and return relevant results"""
    top_result_to_return = 4
    url = "https://google.serper.dev/search"
    payload = json.dumps({"q": query})
    headers = {
        'X-API-KEY': os.environ['SERPER_API_KEY'],
        'content-type': 'application/json'
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    results = response.json()['organic']
    string = []
    for result in results[:top_result_to_return]:
      try:
        string.append('\n'.join([
            f"Title: {result['title']}", f"Link: {result['link']}",
            f"Snippet: {result['snippet']}", "\n-----------------"
        ]))
      except KeyError:
        continue

    return '\n'.join(string)

  @tool("Search news on the internet")
  def search_news(query):
    """Useful to search news about a company, stock or any other
    topic and return relevant results"""
    top_result_to_return = 4
    url = "https://google.serper.dev/news"
    payload = json.dumps({"q": query})
    headers = {
        'X-API-KEY': os.environ['SERPER_API_KEY'],
        'content-type': 'application/json'
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    results = response.json()['news']
    string = []
    for result in results[:top_result_to_return]:
      try:
        string.append('\n'.join([
            f"Title: {result['title']}", f"Link: {result['link']}",
            f"Snippet: {result['snippet']}", "\n-----------------"
        ]))
      except KeyError:
        continue

    return '\n'.join(string)
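A minimal usage sketch, assuming SERPER_API_KEY is set in the environment; the query strings below are illustrative:

from crewai.tools.search_tools import SearchTools

tools = SearchTools()
# Each call returns up to 4 results formatted as Title / Link / Snippet blocks.
print(tools.search_internet.run("NVIDIA latest earnings"))
print(tools.search_news.run("NVIDIA"))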
crewai/tools/sec_tools.py
ADDED
@@ -0,0 +1,108 @@
import os

import requests

from langchain.tools import tool
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

from sec_api import QueryApi
from unstructured.partition.html import partition_html

class SECTools():
  @tool("Search 10-Q form")
  def search_10q(data):
    """
    Useful to search information from the latest 10-Q form for a
    given stock.
    The input to this tool should be a pipe (|) separated text of
    length two, representing the stock ticker you are interested in
    and what question you have about it.
    For example, `AAPL|what was last quarter's revenue`.
    """
    stock, ask = data.split("|")
    queryApi = QueryApi(api_key=os.environ['SEC_API_API_KEY'])
    query = {
      "query": {
        "query_string": {
          "query": f"ticker:{stock} AND formType:\"10-Q\""
        }
      },
      "from": "0",
      "size": "1",
      "sort": [{ "filedAt": { "order": "desc" }}]
    }

    filings = queryApi.get_filings(query)['filings']
    link = filings[0]['linkToFilingDetails']
    answer = SECTools.__embedding_search(link, ask)
    return answer

  @tool("Search 10-K form")
  def search_10k(data):
    """
    Useful to search information from the latest 10-K form for a
    given stock.
    The input to this tool should be a pipe (|) separated text of
    length two, representing the stock ticker you are interested in
    and what question you have about it.
    For example, `AAPL|what was last year's revenue`.
    """
    stock, ask = data.split("|")
    queryApi = QueryApi(api_key=os.environ['SEC_API_API_KEY'])
    query = {
      "query": {
        "query_string": {
          "query": f"ticker:{stock} AND formType:\"10-K\""
        }
      },
      "from": "0",
      "size": "1",
      "sort": [{ "filedAt": { "order": "desc" }}]
    }

    filings = queryApi.get_filings(query)['filings']
    link = filings[0]['linkToFilingDetails']
    answer = SECTools.__embedding_search(link, ask)
    return answer

  def __embedding_search(url, ask):
    text = SECTools.__download_form_html(url)
    elements = partition_html(text=text)
    content = "\n".join([str(el) for el in elements])
    text_splitter = CharacterTextSplitter(
        separator = "\n",
        chunk_size = 1000,
        chunk_overlap = 150,
        length_function = len,
        is_separator_regex = False,
    )
    docs = text_splitter.create_documents([content])
    retriever = FAISS.from_documents(
      docs, OpenAIEmbeddings()
    ).as_retriever()
    answers = retriever.get_relevant_documents(ask, top_k=4)
    answers = "\n\n".join([a.page_content for a in answers])
    return answers

  def __download_form_html(url):
    headers = {
      'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
      'Accept-Encoding': 'gzip, deflate, br',
      'Accept-Language': 'en-US,en;q=0.9,pt-BR;q=0.8,pt;q=0.7',
      'Cache-Control': 'max-age=0',
      'Dnt': '1',
      'Sec-Ch-Ua': '"Not_A Brand";v="8", "Chromium";v="120"',
      'Sec-Ch-Ua-Mobile': '?0',
      'Sec-Ch-Ua-Platform': '"macOS"',
      'Sec-Fetch-Dest': 'document',
      'Sec-Fetch-Mode': 'navigate',
      'Sec-Fetch-Site': 'none',
      'Sec-Fetch-User': '?1',
      'Upgrade-Insecure-Requests': '1',
      'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
    }

    response = requests.get(url, headers=headers)
    return response.text
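Finally, a sketch of how the pipe-separated input is expected to look, assuming SEC_API_API_KEY and OPENAI_API_KEY are set; the ticker and questions are taken from the docstring examples:

from crewai.tools.sec_tools import SECTools

sec = SECTools()
# Input format: "<TICKER>|<question>"
print(sec.search_10q.run("AAPL|what was last quarter's revenue"))
print(sec.search_10k.run("AAPL|what was last year's revenue"))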