from flask import Flask, request, jsonify
import os
from langchain.prompts import ChatPromptTemplate
from langchain.agents import Tool, create_openai_tools_agent, AgentExecutor
from langchain_core.prompts import MessagesPlaceholder
from skills.vision import get_chat_completion
from skills.serch import search_tool
from skills.wiki import wikipedia_tool
from skills.control_mode import manual_control_mode, deactivate_manual_control_mode
from langchain_groq import ChatGroq
import requests
# Initialize the Groq API key
GROQ_API_KEY = "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" # Replace with your actual API key
os.environ["GROQ_API_KEY"] = GROQ_API_KEY
# Initialize the Groq LLM
llm = ChatGroq(model_name="mixtral-8x7b-32768", temperature=0.7, max_tokens=4096)
# ____________________tools section_____________________
def analyse(query):
    # Relies on the module-level image_file set by the /analyze route below.
    response = get_chat_completion(query, image_file)
    return response
# Initialize external tools
tools = [
    Tool(
        name="analyseimage",
        func=analyse,
        description="Answer questions about the most recently uploaded image using the vision model",
    ),
    Tool(
        name="Search",
        func=search_tool,
        description="Search the internet for current information",
    ),
    Tool(
        name="Wikipedia",
        func=wikipedia_tool,
        description="Query Wikipedia for detailed topic information",
    ),
    Tool(
        name="activate_manual_control",
        func=manual_control_mode,
        description="Activate manual remote control mode when the user asks to turn it on",
    ),
    Tool(
        name="deactivate_manual_control",
        func=deactivate_manual_control_mode,
        description="Deactivate manual remote control mode when the user asks to turn it off",
    ),
]
# Create the agent prompt
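# The "agent_scratchpad" placeholder is where the tools agent records its
# intermediate tool calls and results between reasoning steps.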
template_messages = [
    (
        "system",
        "You are an AI assistant capable of using tools to analyze images, search the internet, and query Wikipedia.",
    ),
    ("user", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
]
prompt = ChatPromptTemplate.from_messages(template_messages)
# Create the agent
agent = create_openai_tools_agent(llm=llm, tools=tools, prompt=prompt)
# Create the agent executor
agent_executor = AgentExecutor(
    agent=agent, tools=tools, verbose=True, handle_parsing_errors=True
)
# Initialize Flask app
app = Flask(__name__)
@app.route("/analyze", methods=["POST"])
def analyze():
global image_file
global query
try:
query = request.form.get("query")
if not query:
return jsonify({"error": "Query is required"}), 400
image_file = request.files.get("image")
response = agent_executor.invoke({"input": query})
return jsonify({"response": response["output"]})
except Exception as e:
return jsonify({"error": f"Server error: {str(e)}"}), 500
# CORS Headers
@app.after_request
def after_request(response):
    response.headers.add("Access-Control-Allow-Origin", "*")
    response.headers.add("Access-Control-Allow-Headers", "Content-Type")
    response.headers.add("Access-Control-Allow-Methods", "GET,PUT,POST,DELETE,OPTIONS")
    return response
if __name__ == "__main__":
    app.run(debug=True, host="0.0.0.0", port=5000)
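# Example request (a sketch, assuming the server runs locally on port 5000 and
# the multipart field names "query" and "image" match the /analyze handler above):
#   curl -X POST http://localhost:5000/analyze \
#     -F "query=What is in this image?" \
#     -F "image=@photo.jpg"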