import gradio as gr
import openai
import os
import json
import markdown
import re
import time

# OpenAI-compatible client setup for Groq.
# NOTE: this uses the module-level configuration of the pre-1.0 openai SDK
# (openai<1.0). The API key is read from the environment instead of being
# hard-coded, so it is not shipped with the source.
openai.api_key = os.environ.get("GROQ_API_KEY")
openai.api_base = "https://api.groq.com/openai/v1"

# File to store conversation history
CONVERSATION_FILE = "conversation_history.json"

# Function to load conversation history
def load_history():
    if not os.path.exists(CONVERSATION_FILE):
        # Create the file with an empty list as default content
        with open(CONVERSATION_FILE, "w") as file:
            json.dump([], file)
    try:
        with open(CONVERSATION_FILE, "r") as file:
            return json.load(file)
    except json.JSONDecodeError:
        # Corrupted or empty file: start with a fresh history
        return []

# Function to save conversation history
def save_history(history):
    try:
        with open(CONVERSATION_FILE, "w") as file:
            json.dump(history, file, indent=4)
    except Exception as e:
        print(f"Error saving history: {e}")

# Function to clear conversation history
def clear_conversation_history():
    try:
        with open(CONVERSATION_FILE, "w") as file:
            json.dump([], file)
        # Returns (status message, cleared chat display)
        return "Conversation history cleared successfully.", ""
    except Exception as e:
        return f"Error clearing history: {e}", ""

# Function to format a code block
def format_code_block(code):
    """Wraps the provided code in <pre> and <code> tags for proper display."""
    return f"<pre><code>{code}</code></pre>"

def format_code_and_markdown(response):
    """
    Handles both code blocks and markdown formatting simultaneously.
    - Converts code blocks into HTML <pre><code> tags.
    - Converts markdown syntax (e.g., bold, italics) into HTML tags.
    """
    # Convert fenced Python code blocks to HTML <pre><code>...</code></pre>
    def process_code_blocks(text):
        code_block_pattern = re.compile(r'```python\n(.*?)```', re.DOTALL)
        return re.sub(code_block_pattern, r'<pre><code>\1</code></pre>', text)

    # Handle code blocks first, then the remaining Markdown
    response = process_code_blocks(response)

    # Convert Markdown to HTML
    html_response = markdown.markdown(response)
    return html_response
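
# Illustration (not part of the app): the transformation above would, for
# example, turn a model reply such as
#   "Here is a loop:\n```python\nfor i in range(3):\n    print(i)\n```"
# into
#   "Here is a loop:\n<pre><code>for i in range(3):\n    print(i)\n</code></pre>"
# before the remaining text is run through markdown.markdown(). Note that the
# regex only matches fences tagged exactly ```python and does not HTML-escape
# the code body.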

# Function to get a response from the LLM
def get_groq_response(message, history=None):
    # Avoid a mutable default argument; fall back to an empty history.
    history = history or []
    try:
        messages = (
            [{"role": "system", "content": "Precise answer"}]
            + history
            + [{"role": "user", "content": message}]
        )
        # Pre-1.0 openai SDK call style, pointed at Groq's OpenAI-compatible
        # endpoint via openai.api_base above.
        response = openai.ChatCompletion.create(
            model="llama-3.1-70b-versatile",
            messages=messages
        )
        return format_code_and_markdown(response.choices[0].message["content"])
    except Exception as e:
        return f"Error: {str(e)}"
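
# If the environment has openai>=1.0 installed instead, the module-level
# configuration and openai.ChatCompletion no longer exist; a minimal sketch of
# the equivalent call with the 1.x client API (same Groq base URL and model
# assumed) would be:
#
#   from openai import OpenAI
#   client = OpenAI(api_key=os.environ.get("GROQ_API_KEY"),
#                   base_url="https://api.groq.com/openai/v1")
#   resp = client.chat.completions.create(
#       model="llama-3.1-70b-versatile",
#       messages=messages,
#   )
#   text = resp.choices[0].message.content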

# Function to simulate typing effect
def simulate_typing_effect(response, delay=1):
    time.sleep(delay)
    return response

# Chatbot function
def chatbot(user_input, history):
    # Load conversation history (a list of (user, bot) pairs)
    conversation_history = history or load_history()

    # Format history for the LLM: each stored pair becomes a user turn
    # followed by the assistant's reply, in order.
    formatted_history = []
    for user_msg, bot_msg in conversation_history:
        formatted_history.append({"role": "user", "content": user_msg})
        formatted_history.append({"role": "assistant", "content": bot_msg})

    # Get bot response
    bot_response = get_groq_response(user_input, formatted_history)

    # Simulate typing delay
    bot_response = simulate_typing_effect(bot_response)

    # Update history with the new exchange
    conversation_history.append((user_input, bot_response))

    # Save the updated history
    save_history(conversation_history)

    # Format for HTML display
    display_html = "".join(
        f"<div class='user-message'><b>User:</b> {user}</div>"
        f"<div class='bot-message'><b>Bot:</b> {bot}</div>"
        for user, bot in conversation_history
    )

    return conversation_history, display_html, ""  # Clear the user input field
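
# For reference, conversation_history.json ends up holding a JSON list of
# [user_message, bot_response_html] pairs, e.g. (illustrative content only):
#
#   [
#       ["What is 2 + 2?", "<p>4</p>"],
#       ["Name a prime.", "<p>7</p>"]
#   ]
#
# The bot side is stored after format_code_and_markdown(), i.e. as HTML.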

# Gradio Interface with enhanced UI/UX styling
with gr.Blocks(css="""
    .user-message {
        background-color: #9ACBD0;
        padding: 10px;
        margin: 10px;
        border-radius: 8px;
        max-width: 60%;
        float: right;
        clear: both;
    }
    .bot-message {
        background-color: #F2EFE7;
        padding: 10px;
        margin: 10px;
        border-radius: 8px;
        max-width: 60%;
        float: left;
        clear: both;
    }
    .user-message:hover, .bot-message:hover {
        transform: scale(1.02);
        box-shadow: 0px 4px 10px rgba(0, 0, 0, 0.1);
    }
    .chat-container {
        max-height: 500px;
        overflow-y: auto;
        margin-bottom: 20px;
    }
    .gradio-button {
        background-color: #4CAF50;
        color: white;
        border-radius: 5px;
        padding: 10px 20px;
        font-size: 16px;
    }
    .gradio-button:hover {
        background-color: #45a049;
    }
""") as demo:
    gr.Markdown("""# Mom: We have ChatGPT at Home, \n ChatGPT at Home: """)

    chat_display = gr.HTML(label="Conversation")
    user_input = gr.Textbox(
        label="Type your message here. Feel free to ask questions; when you're done, remember to clear the history for privacy."
    )
    clear_button = gr.Button("Clear History")
    system_message = gr.Textbox(label="System Message", interactive=False)
    history_state = gr.State(load_history())

    # Chat interaction
    user_input.submit(
        chatbot,
        inputs=[user_input, history_state],
        outputs=[history_state, chat_display, user_input],
    )

    # Clear-history button actions
    clear_button.click(clear_conversation_history, inputs=None, outputs=[system_message, chat_display])
    clear_button.click(lambda: [], outputs=history_state)  # Reset the history state

# Launch the app
demo.launch()