# TEST / app.py
# Hugging Face Space page header (author Reality123b, commit 55168fa,
# "Update app.py", raw / history / blame, 12.6 kB) accidentally captured
# along with the source; commented out so the file remains valid Python.
import os
import gradio as gr
from huggingface_hub import InferenceClient
import json
class XylariaChat:
    """Stateful chat assistant backed by a HuggingFace inference endpoint.

    Holds the running conversation, a small persistent key/value memory,
    and persists both to a local JSON file between sessions.
    """

    def __init__(self):
        # Securely load HuggingFace token; the client cannot run without it.
        self.hf_token = os.getenv("HF_TOKEN")
        if not self.hf_token:
            raise ValueError("HuggingFace token not found in environment variables")
        # Initialize the inference client for the fixed Qwen preview model.
        self.client = InferenceClient(
            model="Qwen/Qwen-32B-Preview",
            api_key=self.hf_token
        )
        # Initialize conversation history and persistent memory.
        self.conversation_history = []  # list of {"role": ..., "content": ...} dicts
        self.persistent_memory = {}  # remembered facts injected into prompts
        self.chat_file_path = "chat_history.txt"  # File to save chats (JSON content despite .txt)
        # System prompt prepended to every request.
        self.system_prompt = """You are a helpful and harmless AI assistant you are Xylaria 1.4 Senoa, Made by Sk Md Saad Amin you think step by step
"""
def store_information(self, key, value):
"""Store important information in persistent memory"""
self.persistent_memory[key] = value
def retrieve_information(self, key):
"""Retrieve information from persistent memory"""
return self.persistent_memory.get(key)
def save_chat(self):
"""Saves the current chat history to a text file."""
try:
with open(self.chat_file_path, "w") as f:
chat_data = {
"conversation_history": self.conversation_history,
"persistent_memory": self.persistent_memory
}
json.dump(chat_data, f)
except Exception as e:
print(f"Error saving chat history: {e}")
def load_chat(self):
"""Loads chat history from a text file."""
try:
with open(self.chat_file_path, "r") as f:
chat_data = json.load(f)
self.conversation_history = chat_data.get("conversation_history", [])
self.persistent_memory = chat_data.get("persistent_memory", {})
return self.conversation_history, self.persistent_memory
except FileNotFoundError:
print("Chat history file not found.")
return [], {}
except Exception as e:
print(f"Error loading chat history: {e}")
return [], {}
def reset_conversation(self):
"""
Completely reset the conversation history, persistent memory,
and clear API-side memory
"""
# Clear local memory
self.conversation_history = []
self.persistent_memory.clear()
# Clear API-side memory by resetting the conversation
try:
# Attempt to clear any API-side session or context
self.client = InferenceClient(
model="Qwen/Qwen-32B-Preview",
api_key=self.hf_token
)
except Exception as e:
print(f"Error resetting API client: {e}")
self.save_chat() # Save the empty chat history
return None # To clear the chatbot interface
def get_response(self, user_input):
# Prepare messages with conversation context and persistent memory
messages = [
{"role": "system", "content": self.system_prompt},
*self.conversation_history,
{"role": "user", "content": user_input}
]
# Add persistent memory context if available
if self.persistent_memory:
memory_context = "Remembered Information:\n" + "\n".join(
[f"{k}: {v}" for k, v in self.persistent_memory.items()]
)
messages.insert(1, {"role": "system", "content": memory_context})
# Generate response with streaming
try:
stream = self.client.chat.completions.create(
messages=messages,
temperature=0.5,
max_tokens=10240,
top_p=0.7,
stream=True
)
return stream
except Exception as e:
return f"Error generating response: {str(e)}"
def create_interface(self):
def streaming_response(message, chat_history):
response_stream = self.get_response(message)
if isinstance(response_stream, str):
return "", chat_history + [[message, response_stream]]
full_response = ""
updated_history = chat_history + [[message, ""]]
for chunk in response_stream:
if chunk.choices[0].delta.content:
chunk_content = chunk.choices[0].delta.content
full_response += chunk_content
updated_history[-1][1] = full_response
yield "", updated_history
self.conversation_history.append(
{"role": "user", "content": message}
)
self.conversation_history.append(
{"role": "assistant", "content": full_response}
)
if len(self.conversation_history) > 10:
self.conversation_history = self.conversation_history[-10:]
self.save_chat() # Save after each interaction
def load_chat_interface():
self.load_chat()
return self.conversation_history
# Custom CSS for improved colors and styling
custom_css = """
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
body, .gradio-container {
font-family: 'Inter', sans-serif !important;
background-color: #f8f8f8; /* Light background */
}
/* Chatbot styling */
.chatbot-container .message {
font-family: 'Inter', sans-serif !important;
padding: 10px 15px;
border-radius: 10px;
margin-bottom: 8px; /* Add margin between messages */
}
.chatbot-container .user {
background-color: #e0f2f7; /* Light blue for user messages */
border: 1px solid #a7d9ed; /* Light blue border */
}
.chatbot-container .assistant {
background-color: #f0f0f0; /* Light gray for assistant messages */
border: 1px solid #d3d3d3; /* Light gray border */
}
.chatbot-container .message-tools {
margin-right: 10px; /* Add some space between text and buttons */
}
/* Sidebar styling */
#sidebar {
background-color: #f2f2f2;
border-right: 1px solid #ccc;
padding: 10px;
height: 100vh;
overflow-y: auto;
}
/* Main chat area */
#main-chat {
padding: 20px;
}
/* Textbox and buttons */
.gradio-container input,
.gradio-container textarea,
.gradio-container button {
font-family: 'Inter', sans-serif !important;
border-radius: 5px; /* Rounded corners */
}
.gradio-container button {
background-color: #4CAF50; /* Green button */
color: white;
transition: background-color 0.2s; /* Smooth transition for hover effect */
}
.gradio-container button:hover {
background-color: #3e8e41; /* Darker green on hover */
}
"""
# Example prompts
example_prompts = [
"How do I get started with coding?",
"Tell me a fun fact about science.",
"What are some good books to read?"
]
# Function to forward prompt to the textbox
def forward_prompt(prompt):
return prompt
with gr.Blocks(theme='soft', css=custom_css) as demo:
with gr.Row():
# Sidebar for displaying chat history
with gr.Column(elem_id="sidebar", scale=1):
gr.Markdown("### Chat History")
load_button = gr.Button("Load Chat History")
chat_list = gr.Markdown("No chat history found.")
load_button.click(
fn=lambda: gr.Markdown.update(value=self.format_chat_history()),
inputs=None,
outputs=[chat_list]
)
# Main chat interface
with gr.Column(elem_id="main-chat", scale=3):
# Input row (stays visible)
with gr.Row():
txt = gr.Textbox(
show_label=False,
placeholder="Type your message...",
container=False,
scale=4
)
btn = gr.Button("Send", scale=1)
# Xylaria welcome and example prompts (initially visible)
with gr.Column(visible=True) as start_page:
gr.Markdown("# Xylaria")
with gr.Row():
for prompt in example_prompts:
gr.Button(prompt).click(
fn=forward_prompt,
inputs=gr.State(prompt),
outputs=txt
)
# Chat interface (initially hidden)
with gr.Column(visible=False) as chat_page:
chatbot = gr.Chatbot(
label="Xylaria 1.4 Senoa",
height=500,
show_copy_button=True,
avatar_images=("user.png", "xylaria.png"), # Replace with your image paths
bubble_full_width=False
)
# Clear history and memory buttons
clear = gr.Button("Clear Conversation")
clear_memory = gr.Button("Clear Memory")
# Toggle between start and chat pages
def toggle_page(choice):
return gr.Column.update(visible=choice == "chat"), gr.Column.update(visible=choice == "start")
# Submit prompt
btn.click(
fn=streaming_response,
inputs=[txt, chatbot],
outputs=[txt, chatbot]
).then(
fn=lambda: toggle_page("chat"),
inputs=gr.State("chat"),
outputs=[chat_page, start_page]
)
txt.submit(
fn=streaming_response,
inputs=[txt, chatbot],
outputs=[txt, chatbot]
).then(
fn=lambda: toggle_page("chat"),
inputs=gr.State("chat"),
outputs=[chat_page, start_page]
)
# Clear conversation
clear.click(
fn=lambda: None,
inputs=None,
outputs=[chatbot],
queue=False
).then(
fn=lambda: toggle_page("start"),
inputs=gr.State("start"),
outputs=[chat_page, start_page]
)
# Clear memory
clear_memory.click(
fn=self.reset_conversation,
inputs=None,
outputs=[chatbot],
queue=False
).then(
fn=lambda: toggle_page("start"),
inputs=gr.State("start"),
outputs=[chat_page, start_page]
)
# Load chat history on interface load
demo.load(self.reset_conversation, None, None)
return demo
def format_chat_history(self):
"""Formats the chat history for display in the sidebar."""
self.load_chat() # Load the chat history first
if not self.conversation_history:
return "No chat history found."
formatted_history = ""
for chat in self.conversation_history:
if chat["role"] == "user":
formatted_history += f"**You:** {chat['content']}\n\n"
elif chat["role"] == "assistant":
formatted_history += f"**Xylaria:** {chat['content']}\n\n"
return formatted_history
# Launch the interface
def main():
    """Instantiate the chat app and serve the Gradio UI."""
    assistant = XylariaChat()
    ui = assistant.create_interface()
    # share=False keeps the server local; debug=True surfaces tracebacks.
    ui.launch(
        share=False,
        debug=True
    )


if __name__ == "__main__":
    main()