File size: 4,769 Bytes
bc5cf4c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4abb58b
 
 
 
 
 
 
 
 
 
 
 
 
bc5cf4c
 
 
 
 
 
 
 
4abb58b
bc5cf4c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4abb58b
 
 
bc5cf4c
 
 
 
 
 
4abb58b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c9b44ee
bc5cf4c
 
c9b44ee
bc5cf4c
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
import json
import os
import re

import gradio as gr
import openai

# OpenAI API setup: point the client at Groq's OpenAI-compatible endpoint.
# NOTE(review): this uses the pre-1.0 `openai` module-level configuration
# style (openai.api_key / openai.api_base) — confirm openai<1.0 is pinned,
# since 1.x removed these attributes.
openai.api_key = os.getenv("GROQ_API_KEY")  # read key from env, never hard-code
openai.api_base = "https://api.groq.com/openai/v1"

# File to store conversation history (list of [user, bot] pairs as JSON)
CONVERSATION_FILE = "conversation_history.json"

# Function to load conversation history
# Function to load conversation history
def load_history():
    """Load the saved conversation history from CONVERSATION_FILE.

    Returns:
        list: the stored (user, bot) message pairs, or ``[]`` when the
        file is missing, unreadable, or contains invalid JSON.
    """
    # EAFP: try the read directly instead of the original
    # os.path.exists() check followed by a write and an immediate
    # re-open/re-parse of the file we just created (redundant I/O,
    # race-prone between the existence check and the open).
    try:
        with open(CONVERSATION_FILE, "r") as file:
            return json.load(file)
    except FileNotFoundError:
        # First run: seed the file so later saves have somewhere to go.
        with open(CONVERSATION_FILE, "w") as file:
            json.dump([], file)
        return []
    except (json.JSONDecodeError, OSError):
        # Corrupt or unreadable file — fall back to an empty history,
        # matching the original's JSONDecodeError behavior.
        return []

# Function to save conversation history
# Function to save conversation history
def save_history(history):
    """Persist *history* to CONVERSATION_FILE as pretty-printed JSON.

    Failures are reported to stdout instead of raised, so a disk error
    never interrupts the chat loop.
    """
    try:
        serialized = json.dumps(history, indent=4)
        with open(CONVERSATION_FILE, "w") as fp:
            fp.write(serialized)
    except Exception as err:
        print(f"Error saving history: {err}")

# Function to clear conversation history
# Function to clear conversation history
def clear_conversation_history():
    """Reset the on-disk history file to an empty JSON list.

    Returns:
        tuple[str, str]: a status message plus an empty string used to
        blank the chat display component.
    """
    try:
        with open(CONVERSATION_FILE, "w") as fp:
            fp.write(json.dumps([]))
    except Exception as err:
        return f"Error clearing history: {err}", ""
    return "Conversation history cleared successfully.", ""

# Function to format bot response
# Function to format bot response
def format_bot_response(response):
    """Convert lightweight markdown in *response* to an HTML fragment.

    ``**bold**`` spans become ``<b>...</b>`` and numbered list markers
    ``1.``–``5.`` are pushed onto their own indented line.

    Args:
        response: raw text returned by the LLM.

    Returns:
        str: the text wrapped in a ``<div>`` with HTML formatting applied.
    """
    # Pair-wise bold conversion. The original chained
    # .replace("**", "<b>").replace("**", "</b>"): the first call consumed
    # every "**", so the second was a no-op and </b> was never emitted.
    response = re.sub(r"\*\*(.+?)\*\*", r"<b>\1</b>", response)
    # Break numbered list items onto indented lines (same crude prefix
    # match as the original — "1."–"5." anywhere in the text).
    for num in range(1, 6):
        response = response.replace(f"{num}.", f"<br>&nbsp;&nbsp;{num}.")
    return f"<div>{response}</div>"

# Function to get response from the LLM
# Function to get response from the LLM
def get_groq_response(message, history=None):
    """Send *message* (plus prior turns) to the Groq-hosted model.

    Args:
        message: the new user message.
        history: optional list of chat-format dicts
            (``{"role": ..., "content": ...}``) for prior turns.

    Returns:
        str: the model's reply formatted as HTML, or an ``"Error: ..."``
        string when the API call fails.
    """
    # history=None instead of the original mutable default `history=[]`
    # (shared across calls); behavior for callers is unchanged.
    try:
        messages = (
            [{"role": "system", "content": "Precise answer"}]
            + (history or [])
            + [{"role": "user", "content": message}]
        )
        response = openai.ChatCompletion.create(
            model="llama-3.1-70b-versatile",
            messages=messages,
        )
        return format_bot_response(response.choices[0].message["content"])
    except Exception as e:
        return f"Error: {str(e)}"

# Chatbot function
# Chatbot function
def chatbot(user_input, history):
    """Handle one chat turn: query the LLM, update and persist history.

    Args:
        user_input: text the user just submitted.
        history: current list of (user, bot) pairs from the Gradio state,
            or a falsy value to reload from disk.

    Returns:
        tuple: (updated history, HTML transcript, "") — the trailing empty
        string clears the input textbox.
    """
    # Load conversation history (fall back to the on-disk copy)
    conversation_history = history or load_history()

    # Interleave each (user, bot) pair in order for the LLM. The original
    # guessed roles from the pair index over user messages only, then
    # appended ALL assistant replies after them, scrambling turn order.
    formatted_history = []
    for user_msg, bot_msg in conversation_history:
        formatted_history.append({"role": "user", "content": user_msg})
        formatted_history.append({"role": "assistant", "content": bot_msg})

    # Get bot response
    bot_response = get_groq_response(user_input, formatted_history)

    # Update and persist the conversation
    conversation_history.append((user_input, bot_response))
    save_history(conversation_history)

    # Render the full transcript as alternating styled divs
    display_html = "".join(
        f"<div class='user-message'><b>User:</b> {user}</div>"
        f"<div class='bot-message'><b>Bot:</b> {bot}</div>"
        for user, bot in conversation_history
    )

    return conversation_history, display_html, ""  # Clear the user input field

# Gradio Interface
# Gradio Interface: chat bubbles styled via inline CSS — user messages
# float right, bot messages float left, with a hover lift effect.
with gr.Blocks(css="""
    .user-message { 
        background-color: #9ACBD0; 
        padding: 10px; 
        margin: 10px; 
        border-radius: 8px; 
        max-width: 60%; 
        float: right; 
        clear: both; 
    }
    .bot-message { 
        background-color: #F2EFE7; 
        padding: 10px; 
        margin: 10px; 
        border-radius: 8px; 
        max-width: 60%; 
        float: left; 
        clear: both; 
    }
    .user-message:hover, .bot-message:hover {
        transform: scale(1.02);
        box-shadow: 0px 4px 10px rgba(0, 0, 0, 0.1);
    }
""") as demo:
    gr.Markdown("""# Mom: We have ChatGPT at Home, \n ChatGPT at Home: """)
    
    # Transcript area, message input, clear button, and a read-only
    # status line for clear-history feedback.
    chat_display = gr.HTML(label="Conversation")
    user_input = gr.Textbox(label="Type your message here: Feel free to ask questions. After you're done, remember to clear the history for privacy. ")
    clear_button = gr.Button("Clear History")
    system_message = gr.Textbox(label="System Message", interactive=False)

    # Per-session history state, seeded from the on-disk file at app start.
    history_state = gr.State(load_history())

    # Chat interaction: Enter in the textbox runs one turn; the third
    # output ("") clears the textbox.
    user_input.submit(chatbot, inputs=[user_input, history_state], outputs=[history_state, chat_display, user_input])
    
    # Clear history button action: wipe the file and status/display,
    # then reset the in-memory state with a second handler.
    clear_button.click(clear_conversation_history, inputs=None, outputs=[system_message, chat_display])
    clear_button.click(lambda: [], outputs=history_state)  # Reset the history state

# Launch the app
demo.launch()