# triAGI-Coder / app.py
# (Hugging Face Space page metadata, kept as a comment so the file parses:
#  uploaded by acecalisto3, commit 09fc863 verified, 20.4 kB)
import streamlit as st
from huggingface_hub import InferenceClient
import os
import sys
import pickle
import json
st.title("CODEFUSSION ☄")
# --- Agent Definitions ---
class Agent:
    """An autonomous worker with a role, a toolbox, and a simple memory log."""

    def __init__(self, name, role, tools, knowledge_base=None):
        self.name = name
        self.role = role
        self.tools = tools
        self.knowledge_base = knowledge_base
        # Chronological log of (prompt, context) pairs this agent has handled.
        self.memory = []

    def act(self, prompt, context):
        """Record the request in memory, then decide on and return an action."""
        self.memory.append((prompt, context))
        return self.choose_action(prompt, context)

    def choose_action(self, prompt, context):
        """Select the next action.

        Placeholder: a real agent would choose among its tools based on the
        prompt and its role; for now a fixed code-generation action is returned.
        """
        return {"tool": "Code Generation", "arguments": {"language": "python", "code": "print('Hello, World!')"}}

    def observe(self, observation):
        """Process a tool's output. Placeholder — intentionally a no-op."""
        pass

    def learn(self, data):
        """Update internal state from feedback data. Placeholder — no-op."""
        pass

    def __str__(self):
        return f"Agent: {self.name} (Role: {self.role})"
# --- Tool Definitions ---
class Tool:
    """Base class for all tools: a named capability with a run() entry point."""

    def __init__(self, name, description):
        self.name = name
        self.description = description

    def run(self, arguments):
        """Execute the tool with *arguments*. Placeholder returning a canned payload."""
        return {"output": "Tool Output"}
# --- Tool Examples ---
class CodeGenerationTool(Tool):
    """Produces code snippets (stubbed — wraps the requested snippet in a fence)."""

    def __init__(self):
        super().__init__("Code Generation", "Generates code snippets in various languages.")

    def run(self, arguments):
        # Simplified stand-in for a real code-generation model.
        lang = arguments.get("language", "python")
        snippet = arguments.get("code", "print('Hello, World!')")
        return {"output": f"```{lang}\n{snippet}\n```"}
class DataRetrievalTool(Tool):
    """Fetches data from a source (stubbed — echoes the source back)."""

    def __init__(self):
        super().__init__("Data Retrieval", "Accesses data from APIs, databases, or files.")

    def run(self, arguments):
        # Simplified stand-in for real API/database/file access.
        origin = arguments.get("source", "https://example.com/data")
        return {"output": f"Data from {origin}"}
class CodeExecutionTool(Tool):
    """Runs code snippets (stubbed — echoes the snippet back)."""

    def __init__(self):
        super().__init__("Code Execution", "Runs code snippets in various languages.")

    def run(self, arguments):
        # Simplified stand-in for a real sandboxed execution engine.
        snippet = arguments.get("code", "print('Hello, World!')")
        return {"output": f"Code executed: {snippet}"}
class CodeDebuggingTool(Tool):
    """Finds and fixes errors in code (stubbed — echoes the snippet back)."""

    def __init__(self):
        super().__init__("Code Debugging", "Identifies and resolves errors in code snippets.")

    def run(self, arguments):
        # Simplified stand-in for a real code debugger.
        snippet = arguments.get("code", "print('Hello, World!')")
        return {"output": f"Code debugged: {snippet}"}
class CodeSummarizationTool(Tool):
    """Summarizes code (stubbed — echoes the snippet back)."""

    def __init__(self):
        super().__init__("Code Summarization", "Provides a concise overview of the functionality of a code snippet.")

    def run(self, arguments):
        # Simplified stand-in for a real code-summarization model.
        snippet = arguments.get("code", "print('Hello, World!')")
        return {"output": f"Code summary: {snippet}"}
class CodeTranslationTool(Tool):
    """Translates code between languages (stubbed — echoes the snippet back)."""

    def __init__(self):
        super().__init__("Code Translation", "Translates code snippets between different programming languages.")

    def run(self, arguments):
        # Simplified stand-in for a real code-translation model.
        snippet = arguments.get("code", "print('Hello, World!')")
        return {"output": f"Translated code: {snippet}"}
class CodeOptimizationTool(Tool):
    """Optimizes code (stubbed — echoes the snippet back)."""

    def __init__(self):
        super().__init__("Code Optimization", "Optimizes code for performance and efficiency.")

    def run(self, arguments):
        # Simplified stand-in for a real code-optimization model.
        snippet = arguments.get("code", "print('Hello, World!')")
        return {"output": f"Optimized code: {snippet}"}
class CodeDocumentationTool(Tool):
    """Generates documentation for code (stubbed — echoes the snippet back)."""

    def __init__(self):
        super().__init__("Code Documentation", "Generates documentation for code snippets.")

    def run(self, arguments):
        # Simplified stand-in for a real documentation generator.
        snippet = arguments.get("code", "print('Hello, World!')")
        return {"output": f"Code documentation: {snippet}"}
class ImageGenerationTool(Tool):
    """Generates images from text (stubbed — echoes the description back)."""

    def __init__(self):
        super().__init__("Image Generation", "Generates images based on text descriptions.")

    def run(self, arguments):
        # Simplified stand-in for a real image-generation model.
        text = arguments.get("description", "A cat sitting on a couch")
        return {"output": f"Generated image based on: {text}"}
class ImageEditingTool(Tool):
    """Edits existing images (stubbed — echoes the path back)."""

    def __init__(self):
        super().__init__("Image Editing", "Modifying existing images.")

    def run(self, arguments):
        # Simplified stand-in for a real image-editing library.
        path = arguments.get("image_path", "path/to/image.jpg")
        return {"output": f"Image edited: {path}"}
class ImageAnalysisTool(Tool):
    """Extracts information from images (stubbed — echoes the path back)."""

    def __init__(self):
        super().__init__("Image Analysis", "Extracting information from images, such as objects, scenes, and emotions.")

    def run(self, arguments):
        # Simplified stand-in for a real image-analysis model.
        path = arguments.get("image_path", "path/to/image.jpg")
        return {"output": f"Image analysis results: {path}"}
# --- Agent Pool ---
# BUG FIX: the original wiring referenced SentimentAnalysisTool() and
# TextGenerationTool(), neither of which is defined anywhere in this file,
# so importing the module raised NameError. IdeaIntake now only carries
# tools that actually exist.
agent_pool = {
    "IdeaIntake": Agent("IdeaIntake", "Idea Intake", [DataRetrievalTool(), CodeGenerationTool(), CodeSummarizationTool()], knowledge_base=""),
    "CodeBuilder": Agent("CodeBuilder", "Code Builder", [CodeGenerationTool(), CodeDebuggingTool(), CodeOptimizationTool()], knowledge_base=""),
    "ImageCreator": Agent("ImageCreator", "Image Creator", [ImageGenerationTool(), ImageEditingTool()], knowledge_base=""),
}
# --- Workflow Definitions ---
class Workflow:
    """A named sequence of agents cooperating on a single task."""

    def __init__(self, name, agents, task, description):
        self.name = name
        self.agents = agents
        self.task = task
        self.description = description

    def run(self, prompt, context):
        """Let each agent act in turn, executing the tool it selects.

        Each tool's output dict is merged into the shared *context*, which
        is returned once every agent has taken its turn.
        """
        for agent in self.agents:
            action = agent.act(prompt, context)
            tool_name = action.get("tool")
            if tool_name:
                # Find the first of the agent's tools matching the chosen name.
                matches = [t for t in agent.tools if t.name == tool_name]
                if matches:
                    output = matches[0].run(action["arguments"])
                    context.update(output)
                    agent.observe(output)
        return context
# --- Workflow Examples ---
class AppBuildWorkflow(Workflow):
    """Pipeline for turning an idea into a mobile application."""

    def __init__(self):
        team = [agent_pool["IdeaIntake"], agent_pool["CodeBuilder"]]
        super().__init__("App Build", team, "Build a mobile application", "A workflow for building a mobile application.")
class WebsiteBuildWorkflow(Workflow):
    """Pipeline for turning an idea into a website."""

    def __init__(self):
        team = [agent_pool["IdeaIntake"], agent_pool["CodeBuilder"]]
        super().__init__("Website Build", team, "Build a website", "A workflow for building a website.")
class GameBuildWorkflow(Workflow):
    """Pipeline for turning an idea into a game."""

    def __init__(self):
        team = [agent_pool["IdeaIntake"], agent_pool["CodeBuilder"]]
        super().__init__("Game Build", team, "Build a game", "A workflow for building a game.")
class PluginBuildWorkflow(Workflow):
    """Pipeline for turning an idea into a plugin."""

    def __init__(self):
        team = [agent_pool["IdeaIntake"], agent_pool["CodeBuilder"]]
        super().__init__("Plugin Build", team, "Build a plugin", "A workflow for building a plugin.")
class DevSandboxWorkflow(Workflow):
    """Free-form pipeline for experimenting with code."""

    def __init__(self):
        team = [agent_pool["IdeaIntake"], agent_pool["CodeBuilder"]]
        super().__init__("Dev Sandbox", team, "Experiment with code", "A workflow for experimenting with code.")
# --- Model Definitions ---
class Model:
    """Wraps a hosted LLM behind the HF InferenceClient with a text API."""

    def __init__(self, name, description, model_link):
        self.name = name
        self.description = description
        self.model_link = model_link
        # Client bound to one hub repo; all generation goes through it.
        self.inference_client = InferenceClient(model=model_link)

    def generate_text(self, prompt, temperature=0.5, max_new_tokens=2048):
        """Stream a completion for *prompt* and return it as one string.

        On a hub ValueError (e.g. prompt too long) a human-readable error
        message string is returned instead of raising.
        """
        try:
            stream = self.inference_client.text_generation(
                prompt,
                temperature=temperature,
                max_new_tokens=max_new_tokens,
                stream=True
            )
            return "".join(stream)
        except ValueError as e:
            # The hub raises ValueError for prompts exceeding the model limit.
            if "Input validation error" in str(e):
                return "Error: The input prompt is too long. Please try a shorter prompt."
            return f"An error occurred: {e}"
# --- Model Examples ---
class LegacyLiftModel(Model):
    """Mistral-7B-Instruct-v0.2 presented under the 'LegacyLift' persona."""

    def __init__(self):
        blurb = "The LegacyLift model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model is best for minimal problem-solving, content writing, and daily tips."
        super().__init__("LegacyLift🚀", blurb, "mistralai/Mistral-7B-Instruct-v0.2")
class ModernMigrateModel(Model):
    """Mixtral-8x7B-Instruct-v0.1 presented under the 'ModernMigrate' persona."""

    def __init__(self):
        blurb = "The ModernMigrate model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model excels in coding, logical reasoning, and high-speed inference."
        super().__init__("ModernMigrate⭐", blurb, "mistralai/Mixtral-8x7B-Instruct-v0.1")
class RetroRecodeModel(Model):
    """Phi-3-mini-4k-instruct presented under the 'RetroRecode' persona."""

    def __init__(self):
        blurb = "The RetroRecode model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model is best suited for critical development, practical knowledge, and serverless inference."
        super().__init__("RetroRecode🔄", blurb, "microsoft/Phi-3-mini-4k-instruct")
# --- Streamlit Interface ---
# Maps the display name shown in the sidebar to its Hugging Face repo id.
model_links = {
    "LegacyLift🚀": "mistralai/Mistral-7B-Instruct-v0.2",
    "ModernMigrate⭐": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "RetroRecode🔄": "microsoft/Phi-3-mini-4k-instruct"
}
# Sidebar metadata per model: a human-readable description and a local logo
# image path (relative to the app's working directory).
model_info = {
    "LegacyLift🚀": {
        'description': "The LegacyLift model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model is best for minimal problem-solving, content writing, and daily tips.",
        'logo': './11.jpg'
    },
    "ModernMigrate⭐": {
        'description': "The ModernMigrate model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model excels in coding, logical reasoning, and high-speed inference.",
        'logo': './2.jpg'
    },
    "RetroRecode🔄": {
        'description': "The RetroRecode model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model is best suited for critical development, practical knowledge, and serverless inference.",
        'logo': './3.jpg'
    },
}
def format_prompt(message, conversation_history, custom_instructions=None):
    """Build one prompt string in Mistral-style [INST] ... [/INST] format.

    Args:
        message: The user's current message.
        conversation_history: Iterable of (role, content) pairs for prior turns.
        custom_instructions: Optional system-style instruction prepended first.

    Returns:
        The assembled prompt string, ending with a "[RESPONSE]" marker.

    BUG FIX: the original used "\\[INST\\]" (invalid escape sequences that left
    literal backslashes in the prompt and raise SyntaxWarning on modern
    Python) and closed with "$$/INST$$", which does not match the opener.
    Delimiters are now the plain [INST] ... [/INST] tags instruct models expect.
    """
    prompt = ""
    if custom_instructions:
        prompt += f"[INST] {custom_instructions} [/INST]\n"
    # Add conversation history to the prompt.
    prompt += "[CONV_HISTORY]\n"
    for role, content in conversation_history:
        prompt += f"{role.upper()}: {content}\n"
    prompt += "[/CONV_HISTORY]\n"
    # Add the current message.
    prompt += f"[INST] {message} [/INST]\n"
    # Marker after which the model's reply is expected.
    prompt += "[RESPONSE]\n"
    return prompt
def reset_conversation():
    '''
    Resets Conversation

    Clears the chat transcript in session state and flags the app to rerun
    in the "reset" state (handled at the bottom of the main script).
    '''
    st.session_state.conversation = []
    st.session_state.messages = []
    st.session_state.chat_state = "reset"
def load_conversation_history():
    """Load saved chat messages from the local pickle file.

    Returns:
        The unpickled message list, or an empty list when the file is
        missing, truncated, or otherwise unreadable (a corrupt history
        should not crash the app — original code did).

    NOTE(review): pickle is unsafe on untrusted input; this assumes the
    file is only ever written by save_conversation_history() below.
    """
    history_file = "conversation_history.pickle"
    try:
        with open(history_file, "rb") as f:
            return pickle.load(f)
    except FileNotFoundError:
        return []
    except (pickle.UnpicklingError, EOFError, AttributeError):
        # Truncated or corrupt history file — start fresh instead of crashing.
        return []
def save_conversation_history(conversation_history):
    """Persist the chat message list to the local pickle file."""
    history_file = "conversation_history.pickle"
    with open(history_file, "wb") as sink:
        pickle.dump(conversation_history, sink)
# --- Sidebar: model selection and chat controls ---
models = [key for key in model_links.keys()]
selected_model = st.sidebar.selectbox("Select Model", models)
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown(model_info[selected_model]['description'])
st.sidebar.image(model_info[selected_model]['logo'])
st.sidebar.markdown("\*Generating the code might go slow if you are using low power resources \*")

# Clear the transcript when the user switches to a different model.
if "prev_option" not in st.session_state:
    st.session_state.prev_option = selected_model
if st.session_state.prev_option != selected_model:
    st.session_state.messages = []
    st.session_state.prev_option = selected_model
if "chat_state" not in st.session_state:
    st.session_state.chat_state = "normal"
# Load the conversation history from the file on first run.
if "messages" not in st.session_state:
    st.session_state.messages = load_conversation_history()

repo_id = model_links[selected_model]
st.subheader(f'{selected_model}')

if st.session_state.chat_state == "normal":
    # Replay the conversation so far.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"):
        custom_instruction = "Act like a Human in conversation"
        with st.chat_message("user"):
            st.markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})
        conversation_history = [(message["role"], message["content"]) for message in st.session_state.messages]
        # BUG FIX: original called undefined `format_promt(...)` (typo) -> NameError.
        formated_text = format_prompt(prompt, conversation_history, custom_instruction)
        with st.chat_message("assistant"):
            # Select the appropriate model class based on the user's choice.
            model_classes = {
                "LegacyLift🚀": LegacyLiftModel,
                "ModernMigrate⭐": ModernMigrateModel,
                "RetroRecode🔄": RetroRecodeModel,
            }
            model_cls = model_classes.get(selected_model)
            if model_cls is None:
                st.error("Invalid model selection.")
                # BUG FIX: original used a bare `return` at module level (SyntaxError).
                st.stop()
            model = model_cls()
            response = model.generate_text(formated_text, temperature=temp_values)
            st.markdown(response)
            st.session_state.messages.append({"role": "assistant", "content": response})
            save_conversation_history(st.session_state.messages)
elif st.session_state.chat_state == "reset":
    st.session_state.chat_state = "normal"
    # st.experimental_rerun was removed in modern Streamlit; prefer st.rerun
    # when available, falling back for older versions.
    if hasattr(st, "rerun"):
        st.rerun()
    else:
        st.experimental_rerun()
# --- Agent-Based Workflow Execution ---
def execute_workflow(workflow, prompt, context):
    """Run *workflow* on *prompt*, render each agent's memory, return context."""
    context = workflow.run(prompt, context)
    # Show what every agent saw and did during the run.
    for agent in workflow.agents:
        st.write(f"{agent}: {agent.memory}")
        for action in agent.memory:
            st.write(f" Action: {action}")
    return context
# --- Example Usage ---
# Each button kicks off the matching workflow with a canned example prompt.
# (label, workflow class, task for the initial context, example prompt)
_EXAMPLE_RUNS = [
    ("Build an App", AppBuildWorkflow, "Build a mobile application", "Build a mobile app for ordering food."),
    ("Build a Website", WebsiteBuildWorkflow, "Build a website", "Build a website for a restaurant."),
    ("Build a Game", GameBuildWorkflow, "Build a game", "Build a simple 2D platformer game."),
    ("Build a Plugin", PluginBuildWorkflow, "Build a plugin", "Build a plugin for a text editor that adds a new syntax highlighting theme."),
    ("Dev Sandbox", DevSandboxWorkflow, "Experiment with code", "Write a Python function to reverse a string."),
]
for _label, _workflow_cls, _task, _example_prompt in _EXAMPLE_RUNS:
    if st.button(_label):
        _context = {"task": _task}
        _context = execute_workflow(_workflow_cls(), _example_prompt, _context)
        st.write(f"Workflow Output: {_context}")
# --- Displaying Agent and Tool Information ---
st.subheader("Agent Pool")
for agent_name, agent in agent_pool.items():
    st.write(f"**{agent_name}**")
    st.write(f" Role: {agent.role}")
    st.write(f" Tools: {', '.join([tool.name for tool in agent.tools])}")

st.subheader("Workflows")
# One (label, class) pair per workflow keeps the listing in a single loop.
for _wf_label, _wf_cls in [
    ("App Build", AppBuildWorkflow),
    ("Website Build", WebsiteBuildWorkflow),
    ("Game Build", GameBuildWorkflow),
    ("Plugin Build", PluginBuildWorkflow),
    ("Dev Sandbox", DevSandboxWorkflow),
]:
    st.write(f"**{_wf_label}**")
    st.write(f" Description: {_wf_cls().description}")

# --- Displaying Tool Definitions ---
st.subheader("Tool Definitions")
for tool_class in [CodeGenerationTool, DataRetrievalTool, CodeExecutionTool, CodeDebuggingTool, CodeSummarizationTool, CodeTranslationTool, CodeOptimizationTool, CodeDocumentationTool, ImageGenerationTool, ImageEditingTool, ImageAnalysisTool]:
    tool = tool_class()
    st.write(f"**{tool.name}**")
    st.write(f" Description: {tool.description}")
# --- Displaying Example Output ---
st.subheader("Example Output")
# Run each tool once against a canned argument set and show the result.
# Every label in the original matched f"{tool.name} Tool Output", so a single
# loop reproduces the exact same lines.
_DEMO_RUNS = [
    (CodeGenerationTool, {'language': 'python', 'code': "print('Hello, World!')"}),
    (DataRetrievalTool, {'source': 'https://example.com/data'}),
    (CodeExecutionTool, {'code': "print('Hello, World!')"}),
    (CodeDebuggingTool, {'code': "print('Hello, World!')"}),
    (CodeSummarizationTool, {'code': "print('Hello, World!')"}),
    (CodeTranslationTool, {'code': "print('Hello, World!')"}),
    (CodeOptimizationTool, {'code': "print('Hello, World!')"}),
    (CodeDocumentationTool, {'code': "print('Hello, World!')"}),
    (ImageGenerationTool, {'description': 'A cat sitting on a couch'}),
    (ImageEditingTool, {'image_path': 'path/to/image.jpg'}),
    (ImageAnalysisTool, {'image_path': 'path/to/image.jpg'}),
]
for _demo_cls, _demo_args in _DEMO_RUNS:
    _demo_tool = _demo_cls()
    st.write(f"{_demo_tool.name} Tool Output: {_demo_tool.run(_demo_args)}")