# triAGI-Coder / app.py
import contextlib
import io
import os
import pickle

import streamlit as st
from huggingface_hub import InferenceClient
from langchain.agents import AgentType, initialize_agent
from langchain.chains import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
from langchain.schema import Document
from langchain.tools import Tool as LangChainTool
from langchain_community.llms import HuggingFaceHub
st.title("CODEFUSSION ☄")
# --- Agent Definitions ---
class Agent:
    def __init__(self, name, role, tools, knowledge_base=None):
        self.name = name
        self.role = role
        self.tools = tools
        self.knowledge_base = knowledge_base
        self.memory = []
        # Language model used for action selection.
        self.llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 0.5})
        # Wrap the custom tools in LangChain Tool objects so initialize_agent
        # can dispatch to them. Note: the ReAct agent passes a single string to
        # each tool, while the tools below expect a dict of arguments; a
        # production version would parse that string into the expected dict.
        langchain_tools = [
            LangChainTool(name=t.name, description=t.description, func=t.run)
            for t in self.tools
        ]
        self.agent = initialize_agent(
            tools=langchain_tools,
            llm=self.llm,
            agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
            verbose=True
        )

    def act(self, prompt, context):
        self.memory.append((prompt, context))
        # AgentExecutor.run expects a single input string, so fold the context in.
        action = self.agent.run(f"{prompt}\nContext: {context}")
        return action
    def observe(self, observation):
        # Minimal observation handling: record the observation in memory.
        # A fuller implementation would also update the knowledge base.
        self.memory.append(("observation", observation))

    def learn(self, data):
        # Placeholder for learning logic; depends on the agent's capabilities
        # and the type of data (e.g., user feedback or curated examples).
        pass
def __str__(self):
return f"Agent: {self.name} (Role: {self.role})"
# --- Tool Definitions ---
class Tool:
    def __init__(self, name, description):
        self.name = name
        self.description = description

    def run(self, arguments):
        # Subclasses override this with tool-specific logic. `arguments` is a
        # dict of named inputs; the return value is a dict with an "output" key.
        return {"output": "Tool Output"}
# --- Tool Examples ---
class CodeGenerationTool(Tool):
def __init__(self):
super().__init__("Code Generation", "Generates code snippets in various languages.")
self.llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 0.5})
self.prompt_template = PromptTemplate(
input_variables=["language", "code_description"],
template="Generate {language} code for: {code_description}"
)
self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
def run(self, arguments):
language = arguments.get("language", "python")
code_description = arguments.get("code_description", "print('Hello, World!')")
code = self.chain.run(language=language, code_description=code_description)
return {"output": code}
class DataRetrievalTool(Tool):
def __init__(self):
super().__init__("Data Retrieval", "Accesses data from APIs, databases, or files.")
self.llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 0.5})
self.prompt_template = PromptTemplate(
input_variables=["data_source", "data_query"],
template="Retrieve data from {data_source} based on: {data_query}"
)
self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
    def run(self, arguments):
        data_source = arguments.get("data_source", "https://example.com/data")
        data_query = arguments.get("data_query", "some information")
        # Note: this only asks the LLM about the data source; a real
        # implementation would fetch from the API, database, or file directly.
        data = self.chain.run(data_source=data_source, data_query=data_query)
        return {"output": data}
class TextGenerationTool(Tool):
def __init__(self):
super().__init__("Text Generation", "Generates human-like text based on a given prompt.")
self.llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 0.5})
self.prompt_template = PromptTemplate(
input_variables=["text_prompt"],
template="Generate text based on: {text_prompt}"
)
self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
def run(self, arguments):
text_prompt = arguments.get("text_prompt", "Write a short story about a cat.")
text = self.chain.run(text_prompt=text_prompt)
return {"output": text}
class CodeExecutionTool(Tool):
    def __init__(self):
        super().__init__("Code Execution", "Runs Python code snippets.")

    def run(self, arguments):
        code = arguments.get("code", "print('Hello, World!')")
        # WARNING: exec runs arbitrary Python in-process; do not expose this
        # without sandboxing. Stdout is captured so the result can be displayed.
        buffer = io.StringIO()
        try:
            with contextlib.redirect_stdout(buffer):
                exec(code)
            return {"output": f"Code executed. Output:\n{buffer.getvalue()}"}
        except Exception as e:
            return {"output": f"Error executing code: {e}"}
class CodeDebuggingTool(Tool):
def __init__(self):
super().__init__("Code Debugging", "Identifies and resolves errors in code snippets.")
self.llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 0.5})
self.prompt_template = PromptTemplate(
input_variables=["code", "error_message"],
template="Debug the following code:\n{code}\n\nError message: {error_message}"
)
self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
    def run(self, arguments):
        code = arguments.get("code", "print('Hello, World!')")
        try:
            exec(code)  # Same caveat as CodeExecutionTool: unsandboxed exec.
            return {"output": f"Code ran without errors:\n{code}"}
        except Exception as e:
            error_message = str(e)
            debugged_code = self.chain.run(code=code, error_message=error_message)
            return {"output": f"Debugged code:\n{debugged_code}"}
class CodeSummarizationTool(Tool):
def __init__(self):
super().__init__("Code Summarization", "Provides a concise overview of the functionality of a code snippet.")
self.llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 0.5})
self.prompt_template = PromptTemplate(
input_variables=["code"],
template="Summarize the functionality of the following code:\n{code}"
)
self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
def run(self, arguments):
code = arguments.get("code", "print('Hello, World!')")
summary = self.chain.run(code=code)
return {"output": f"Code summary: {summary}"}
class CodeTranslationTool(Tool):
def __init__(self):
super().__init__("Code Translation", "Translates code snippets between different programming languages.")
self.llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 0.5})
self.prompt_template = PromptTemplate(
input_variables=["code", "target_language"],
template="Translate the following code to {target_language}:\n{code}"
)
self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
def run(self, arguments):
code = arguments.get("code", "print('Hello, World!')")
target_language = arguments.get("target_language", "javascript")
translated_code = self.chain.run(code=code, target_language=target_language)
return {"output": f"Translated code:\n{translated_code}"}
class CodeOptimizationTool(Tool):
def __init__(self):
super().__init__("Code Optimization", "Optimizes code for performance and efficiency.")
self.llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 0.5})
self.prompt_template = PromptTemplate(
input_variables=["code"],
template="Optimize the following code for performance and efficiency:\n{code}"
)
self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
def run(self, arguments):
code = arguments.get("code", "print('Hello, World!')")
optimized_code = self.chain.run(code=code)
return {"output": f"Optimized code:\n{optimized_code}"}
class CodeDocumentationTool(Tool):
def __init__(self):
super().__init__("Code Documentation", "Generates documentation for code snippets.")
self.llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 0.5})
self.prompt_template = PromptTemplate(
input_variables=["code"],
template="Generate documentation for the following code:\n{code}"
)
self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
def run(self, arguments):
code = arguments.get("code", "print('Hello, World!')")
documentation = self.chain.run(code=code)
return {"output": f"Code documentation:\n{documentation}"}
class ImageGenerationTool(Tool):
def __init__(self):
super().__init__("Image Generation", "Generates images based on text descriptions.")
self.llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 0.5})
self.prompt_template = PromptTemplate(
input_variables=["description"],
template="Generate an image based on the description: {description}"
)
self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
    def run(self, arguments):
        description = arguments.get("description", "A cat sitting on a couch")
        # Note: flan-t5 is a text-only model, so this chain returns text rather
        # than a real image; see the text-to-image sketch below this class.
        image_url = self.chain.run(description=description)
        return {"output": f"Generated image: {image_url}"}
class ImageEditingTool(Tool):
def __init__(self):
super().__init__("Image Editing", "Modifying existing images.")
self.llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 0.5})
self.prompt_template = PromptTemplate(
input_variables=["image_url", "editing_instructions"],
template="Edit the image at {image_url} according to the instructions: {editing_instructions}"
)
self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
def run(self, arguments):
image_url = arguments.get("image_url", "https://example.com/image.jpg")
editing_instructions = arguments.get("editing_instructions", "Make the cat smile")
edited_image_url = self.chain.run(image_url=image_url, editing_instructions=editing_instructions)
return {"output": f"Edited image: {edited_image_url}"}
class ImageAnalysisTool(Tool):
def __init__(self):
super().__init__("Image Analysis", "Extracting information from images, such as objects, scenes, and emotions.")
self.llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 0.5})
self.prompt_template = PromptTemplate(
input_variables=["image_url"],
template="Analyze the image at {image_url} and provide information about objects, scenes, and emotions."
)
self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)
def run(self, arguments):
image_url = arguments.get("image_url", "https://example.com/image.jpg")
analysis_results = self.chain.run(image_url=image_url)
return {"output": f"Image analysis results:\n{analysis_results}"}
class QuestionAnsweringTool(Tool):
    def __init__(self):
        super().__init__("Question Answering", "Answers questions based on provided context.")
        self.llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 0.5})
        self.qa_chain = load_qa_chain(self.llm, chain_type="stuff")  # Use a question answering chain

    def run(self, arguments):
        question = arguments.get("question", "What is the capital of France?")
        context = arguments.get("context", "France is a country in Western Europe. Its capital is Paris.")
        # load_qa_chain expects the context as a list of Documents.
        answer = self.qa_chain.run(input_documents=[Document(page_content=context)], question=question)
        return {"output": answer}
# --- Agent Pool ---
agent_pool = {
    "IdeaIntake": Agent("IdeaIntake", "Idea Intake", [DataRetrievalTool(), CodeGenerationTool(), TextGenerationTool(), QuestionAnsweringTool()], knowledge_base=""),
    "CodeBuilder": Agent("CodeBuilder", "Code Builder", [CodeGenerationTool(), CodeDebuggingTool(), CodeOptimizationTool(), CodeExecutionTool(), CodeSummarizationTool(), CodeTranslationTool(), CodeDocumentationTool()], knowledge_base=""),
    "ImageCreator": Agent("ImageCreator", "Image Creator", [ImageGenerationTool(), ImageEditingTool(), ImageAnalysisTool()], knowledge_base=""),
}
# --- Workflow Definitions ---
class Workflow:
    def __init__(self, name, agents, task, description):
        self.name = name
        self.agents = agents
        self.task = task
        self.description = description

    def run(self, prompt, context):
        for agent in self.agents:
            action = agent.act(prompt, context)
            # agent.act may return a plain string (the agent's final answer) or
            # a structured action dict; only dispatch to a tool in the latter case.
            if isinstance(action, dict) and action.get("tool"):
                tool = next((t for t in agent.tools if t.name == action["tool"]), None)
                if tool:
                    output = tool.run(action.get("arguments", {}))
                    context.update(output)
                    agent.observe(output)
        return context
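
# Sketch of composing an ad-hoc workflow from the agent pool (hypothetical):
#
#   review = Workflow("Review", [agent_pool["CodeBuilder"]], "Review code",
#                     "A one-agent workflow for reviewing code.")
#   review.run("Review this function for bugs.", {"code": "def f(): pass"})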
# --- Workflow Examples ---
class AppBuildWorkflow(Workflow):
def __init__(self):
super().__init__("App Build", [agent_pool["IdeaIntake"], agent_pool["CodeBuilder"]], "Build a mobile application", "A workflow for building a mobile application.")
class WebsiteBuildWorkflow(Workflow):
def __init__(self):
super().__init__("Website Build", [agent_pool["IdeaIntake"], agent_pool["CodeBuilder"]], "Build a website", "A workflow for building a website.")
class GameBuildWorkflow(Workflow):
def __init__(self):
super().__init__("Game Build", [agent_pool["IdeaIntake"], agent_pool["CodeBuilder"]], "Build a game", "A workflow for building a game.")
class PluginBuildWorkflow(Workflow):
def __init__(self):
super().__init__("Plugin Build", [agent_pool["IdeaIntake"], agent_pool["CodeBuilder"]], "Build a plugin", "A workflow for building a plugin.")
class DevSandboxWorkflow(Workflow):
def __init__(self):
super().__init__("Dev Sandbox", [agent_pool["IdeaIntake"], agent_pool["CodeBuilder"]], "Experiment with code", "A workflow for experimenting with code.")
# --- Model Definitions ---
class Model:
def __init__(self, name, description, model_link):
self.name = name
self.description = description
self.model_link = model_link
self.inference_client = InferenceClient(model=model_link)
    def generate_text(self, prompt, temperature=0.5, max_new_tokens=4096):
        try:
            # Stream tokens from the Inference API and join them into one string.
            # The API requires a strictly positive temperature, so clamp it.
            output = self.inference_client.text_generation(
                prompt,
                temperature=max(temperature, 0.01),
                max_new_tokens=max_new_tokens,
                stream=True
            )
            response = "".join(output)
        except ValueError as e:
            if "Input validation error" in str(e):
                return "Error: The input prompt is too long. Please try a shorter prompt."
            return f"An error occurred: {e}"
        return response
# --- Model Examples ---
class LegacyLiftModel(Model):
def __init__(self):
super().__init__("LegacyLift🚀", "The LegacyLift model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model is best for minimal problem-solving, content writing, and daily tips.", "mistralai/Mistral-7B-Instruct-v0.2")
class ModernMigrateModel(Model):
def __init__(self):
super().__init__("ModernMigrate⭐", "The ModernMigrate model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model excels in coding, logical reasoning, and high-speed inference.", "mistralai/Mixtral-8x7B-Instruct-v0.1")
class RetroRecodeModel(Model):
def __init__(self):
super().__init__("RetroRecode🔄", "The RetroRecode model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model is best suited for critical development, practical knowledge, and serverless inference.", "microsoft/Phi-3-mini-4k-instruct")
# --- Streamlit Interface ---
model_links = {
"LegacyLift🚀": "mistralai/Mistral-7B-Instruct-v0.2",
"ModernMigrate⭐": "mistralai/Mixtral-8x7B-Instruct-v0.1",
"RetroRecode🔄": "microsoft/Phi-3-mini-4k-instruct"
}
model_info = {
"LegacyLift🚀": {
'description': "The LegacyLift model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model is best for minimal problem-solving, content writing, and daily tips.",
'logo': './11.jpg'
},
"ModernMigrate⭐": {
'description': "The ModernMigrate model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model excels in coding, logical reasoning, and high-speed inference.",
'logo': './2.jpg'
},
"RetroRecode🔄": {
'description': "The RetroRecode model is a Large Language Model (LLM) that's able to have question and answer interactions.\n \n\nThis model is best suited for critical development, practical knowledge, and serverless inference.",
'logo': './3.jpg'
},
}
def format_prompt(message, conversation_history, custom_instructions=None):
    prompt = ""
    if custom_instructions:
        prompt += f"[INST] {custom_instructions} [/INST]\n"
    # Add the conversation history to the prompt.
    prompt += "[CONV_HISTORY]\n"
    for role, content in conversation_history:
        prompt += f"{role.upper()}: {content}\n"
    prompt += "[/CONV_HISTORY]\n"
    # Add the current message.
    prompt += f"[INST] {message} [/INST]\n"
    # Mark where the response should begin.
    prompt += "[RESPONSE]\n"
    return prompt
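
# Illustrative output of format_prompt for a two-turn history:
#
#   format_prompt("Hi", [("user", "Hello"), ("assistant", "Hey!")], "Be brief")
#
# produces:
#
#   [INST] Be brief [/INST]
#   [CONV_HISTORY]
#   USER: Hello
#   ASSISTANT: Hey!
#   [/CONV_HISTORY]
#   [INST] Hi [/INST]
#   [RESPONSE]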
def reset_conversation():
    '''Reset the conversation state.'''
    st.session_state.conversation = []
    st.session_state.messages = []
    st.session_state.chat_state = "reset"
def load_conversation_history():
history_file = "conversation_history.pickle"
if os.path.exists(history_file):
with open(history_file, "rb") as f:
conversation_history = pickle.load(f)
else:
conversation_history = []
return conversation_history
def save_conversation_history(conversation_history):
history_file = "conversation_history.pickle"
with open(history_file, "wb") as f:
pickle.dump(conversation_history, f)
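
# The history round-trips through pickle, e.g.:
#
#   save_conversation_history([{"role": "user", "content": "hi"}])
#   assert load_conversation_history()[0]["content"] == "hi"
#
# (Pickle files should only be loaded from trusted sources.)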
models = list(model_links.keys())
selected_model = st.sidebar.selectbox("Select Model", models)
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown(model_info[selected_model]['description'])
st.sidebar.image(model_info[selected_model]['logo'])
st.sidebar.markdown("*Generating the code might be slow if you are using low-power resources.*")
if "prev_option" not in st.session_state:
st.session_state.prev_option = selected_model
if st.session_state.prev_option != selected_model:
st.session_state.messages = []
st.session_state.prev_option = selected_model
if "chat_state" not in st.session_state:
st.session_state.chat_state = "normal"
# Load the conversation history from the file
if "messages" not in st.session_state:
st.session_state.messages = load_conversation_history()
repo_id = model_links[selected_model]
st.subheader(f'{selected_model}')
if st.session_state.chat_state == "normal":
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
    if prompt := st.chat_input(f"Hi, I'm {selected_model}. How can I help you today?"):
custom_instruction = "Act like a Human in conversation"
with st.chat_message("user"):
st.markdown(prompt)
st.session_state.messages.append({"role": "user", "content": prompt})
conversation_history = [(message["role"], message["content"]) for message in st.session_state.messages]
        formatted_text = format_prompt(prompt, conversation_history, custom_instruction)
with st.chat_message("assistant"):
# Select the appropriate model based on the user's choice
if selected_model == "LegacyLift🚀":
model = LegacyLiftModel()
elif selected_model == "ModernMigrate⭐":
model = ModernMigrateModel()
elif selected_model == "RetroRecode🔄":
model = RetroRecodeModel()
else:
st.error("Invalid model selection.")
st.stop() # Stop the Streamlit app execution
            response = model.generate_text(formatted_text, temperature=temp_values)
st.markdown(response)
st.session_state.messages.append({"role": "assistant", "content": response})
save_conversation_history(st.session_state.messages)
elif st.session_state.chat_state == "reset":
st.session_state.chat_state = "normal"
st.experimental_rerun()
# --- Agent-Based Workflow Execution ---
def execute_workflow(workflow, prompt, context):
    # Run the workflow, then display each agent's recorded actions.
    context = workflow.run(prompt, context)
    for agent in workflow.agents:
        st.write(f"{agent}:")
        for action in agent.memory:
            st.write(f"  Action: {action}")
    return context
# --- Example Usage ---
if st.button("Build an App"):
app_build_workflow = AppBuildWorkflow()
context = {"task": "Build a mobile application"}
context = execute_workflow(app_build_workflow, "Build a mobile app for ordering food.", context)
st.write(f"Workflow Output: {context}")
if st.button("Build a Website"):
website_build_workflow = WebsiteBuildWorkflow()
context = {"task": "Build a website"}
context = execute_workflow(website_build_workflow, "Build a website for a restaurant.", context)
st.write(f"Workflow Output: {context}")
if st.button("Build a Game"):
game_build_workflow = GameBuildWorkflow()
context = {"task": "Build a game"}
context = execute_workflow(game_build_workflow, "Build a simple 2D platformer game.", context)
st.write(f"Workflow Output: {context}")
if st.button("Build a Plugin"):
plugin_build_workflow = PluginBuildWorkflow()
context = {"task": "Build a plugin"}
context = execute_workflow(plugin_build_workflow, "Build a plugin for a text editor that adds a new syntax highlighting theme.", context)
st.write(f"Workflow Output: {context}")
if st.button("Dev Sandbox"):
dev_sandbox_workflow = DevSandboxWorkflow()
context = {"task": "Experiment with code"}
context = execute_workflow(dev_sandbox_workflow, "Write a Python function to reverse a string.", context)
st.write(f"Workflow Output: {context}")
# --- Displaying Agent and Tool Information ---
st.subheader("Agent Pool")
for agent_name, agent in agent_pool.items():
st.write(f"**{agent_name}**")
st.write(f" Role: {agent.role}")
st.write(f" Tools: {', '.join([tool.name for tool in agent.tools])}")
st.subheader("Workflows")
st.write("**App Build**")
st.write(f""" Description: {AppBuildWorkflow().description}""")
st.write("**Website Build**")
st.write(f""" Description: {WebsiteBuildWorkflow().description}""")
st.write("**Game Build**")
st.write(f""" Description: {GameBuildWorkflow().description}""")
st.write("**Plugin Build**")
st.write(f""" Description: {PluginBuildWorkflow().description}""")
st.write("**Dev Sandbox**")
st.write(f""" Description: {DevSandboxWorkflow().description}""")
# --- Displaying Tool Definitions ---
st.subheader("Tool Definitions")
for tool_class in [CodeGenerationTool, DataRetrievalTool, CodeExecutionTool, CodeDebuggingTool, CodeSummarizationTool, CodeTranslationTool, CodeOptimizationTool, CodeDocumentationTool, ImageGenerationTool, ImageEditingTool, ImageAnalysisTool, TextGenerationTool, QuestionAnsweringTool]:
tool = tool_class()
st.write(f"**{tool.name}**")
st.write(f" Description: {tool.description}")
# --- Displaying Example Output ---
st.subheader("Example Output")
code_generation_tool = CodeGenerationTool()
st.write(f"""Code Generation Tool Output: {code_generation_tool.run({'language': 'python', 'code_description': "print('Hello, World!')"})}""")
data_retrieval_tool = DataRetrievalTool()
st.write(f"""Data Retrieval Tool Output: {data_retrieval_tool.run({'data_source': 'https://example.com/data', 'data_query': 'some information'})}""")
code_execution_tool = CodeExecutionTool()
st.write(f"""Code Execution Tool Output: {code_execution_tool.run({'code': "print('Hello, World!')"})}""")
code_debugging_tool = CodeDebuggingTool()
st.write(f"""Code Debugging Tool Output: {code_debugging_tool.run({'code': "print('Hello, World!')"})}""")
code_summarization_tool = CodeSummarizationTool()
st.write(f"""Code Summarization Tool Output: {code_summarization_tool.run({'code': "print('Hello, World!')"})}""")
code_translation_tool = CodeTranslationTool()
st.write(f"""Code Translation Tool Output: {code_translation_tool.run({'code': "print('Hello, World!')", 'target_language': 'javascript'})}""")
code_optimization_tool = CodeOptimizationTool()
st.write(f"""Code Optimization Tool Output: {code_optimization_tool.run({'code': "print('Hello, World!')"})}""")
code_documentation_tool = CodeDocumentationTool()
st.write(f"""Code Documentation Tool Output: {code_documentation_tool.run({'code': "print('Hello, World!')"})}""")
image_generation_tool = ImageGenerationTool()
st.write(f"""Image Generation Tool Output: {image_generation_tool.run({'description': 'A cat sitting on a couch'})}""")
image_editing_tool = ImageEditingTool()
st.write(f"""Image Editing Tool Output: {image_editing_tool.run({'image_url': 'https://example.com/image.jpg', 'editing_instructions': 'Make the cat smile'})}""")
image_analysis_tool = ImageAnalysisTool()
st.write(f"""Image Analysis Tool Output: {image_analysis_tool.run({'image_url': 'https://example.com/image.jpg'})}""")
question_answering_tool = QuestionAnsweringTool()
st.write(f"""Question Answering Tool Output: {question_answering_tool.run({'question': 'What is the capital of France?', 'context': 'France is a country in Western Europe. Its capital is Paris.'})}""")