Upload s.py
s.py
ADDED
@@ -0,0 +1,85 @@
from flask import Flask, request, jsonify
import os
from langchain.prompts import ChatPromptTemplate
from langchain.agents import Tool, create_openai_tools_agent, AgentExecutor
from langchain_core.prompts import MessagesPlaceholder
from skills.vision import get_chat_completion
from skills.serch import search_tool
from skills.wiki import wikipedia_tool
from langchain_groq import ChatGroq

# Initialize the Groq API key
GROQ_API_KEY = "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"  # Replace with your actual API key
os.environ["GROQ_API_KEY"] = GROQ_API_KEY

# Initialize the Groq LLM
llm = ChatGroq(
    model_name="mixtral-8x7b-32768",
    temperature=0.7,
    max_tokens=4096
)

# Module-level state shared between the Flask route and the image tool
image_file = None
query = None

def analyse(query):
    # Forward the query and the uploaded image (stored globally by /analyze)
    # to the vision model.
    response = get_chat_completion(query, image_file)
    return response


# Initialize external tools
tools = [
    Tool(name="analyseimage", func=analyse, description="Tool for image vision LLM model queries"),
    Tool(name="Search", func=search_tool, description="Search the internet for current information"),
    Tool(name="Wikipedia", func=wikipedia_tool, description="Query Wikipedia for detailed topic information"),
]

# Create the agent prompt
template_messages = [
    ("system", "You are an AI assistant capable of using tools to analyze images, search the internet, and query Wikipedia."),
    ("user", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
]
prompt = ChatPromptTemplate.from_messages(template_messages)

# Create the agent
agent = create_openai_tools_agent(llm=llm, tools=tools, prompt=prompt)

# Create the agent executor
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,
    handle_parsing_errors=True
)

# Initialize Flask app
app = Flask(__name__)

@app.route('/analyze', methods=['POST'])
def analyze():
    global image_file
    global query
    try:
        query = request.form.get('query')
        if not query:
            return jsonify({"error": "Query is required"}), 400
        image_file = request.files.get('image')

        response = agent_executor.invoke({"input": query})

        return jsonify({"response": response["output"]})

    except Exception as e:
        return jsonify({"error": f"Server error: {str(e)}"}), 500

# CORS Headers
@app.after_request
def after_request(response):
    response.headers.add('Access-Control-Allow-Origin', '*')
    response.headers.add('Access-Control-Allow-Headers', 'Content-Type')
    response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
    return response

if __name__ == "__main__":
    app.run(debug=True, host='0.0.0.0', port=5000)
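
For quick testing, a minimal client sketch for the /analyze endpoint above, assuming the server is running locally on port 5000; the requests library, the sample question, and the sample.jpg path are illustrative placeholders, while the query and image field names match the route's request.form and request.files lookups.

    import requests

    # Hypothetical client for the /analyze endpoint; "sample.jpg" is a placeholder path.
    url = "http://localhost:5000/analyze"
    data = {"query": "What objects are visible in this picture?"}

    with open("sample.jpg", "rb") as f:
        files = {"image": ("sample.jpg", f, "image/jpeg")}
        resp = requests.post(url, data=data, files=files)

    print(resp.status_code)
    print(resp.json())  # {"response": "..."} on success, {"error": "..."} on failure

Note that the route stashes the uploaded file in a module-level global before invoking the agent, so concurrent requests could overwrite each other's image; that is acceptable for a single-user demo but worth keeping in mind.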