""" TypeGPT
@author: NiansuhAI
@email: [email protected]

"""
import os

import numpy as np
import streamlit as st
from dotenv import load_dotenv
from openai import OpenAI

# Load environment variables (e.g. API_KEY) from a local .env file.
load_dotenv()

# Initialize an OpenAI-compatible client pointed at the Hugging Face
# Inference API; the token is read from the API_KEY environment variable.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get('API_KEY'),
)
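
# Note: the Hugging Face Inference API exposes an OpenAI-compatible /v1
# endpoint, which is why the standard openai client can be used here.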

# Map display names to Hugging Face model repo IDs
model_links = {
    "Mistral-Nemo-Instruct-2407": "mistralai/Mistral-Nemo-Instruct-2407",
    "Meta-Llama-3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct",
    "Meta-Llama-3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "Meta-Llama-3.1-405B-Instruct-FP8": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
    "Meta-Llama-3.1-405B-Instruct": "meta-llama/Meta-Llama-3.1-405B-Instruct",
    "Meta-Llama-3-70B-Instruct": "meta-llama/Meta-Llama-3-70B-Instruct",
    "Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
    "C4ai-command-r-plus": "CohereForAI/c4ai-command-r-plus",
    "Aya-23-35B": "CohereForAI/aya-23-35B",
    "Zephyr-orpo-141b-A35b-v0.1": "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
    "Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "Codestral-22B-v0.1": "mistralai/Codestral-22B-v0.1",
    "Nous-Hermes-2-Mixtral-8x7B-DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
    "Yi-1.5-34B-Chat": "01-ai/Yi-1.5-34B-Chat",
    "Gemma-2-27b-it": "google/gemma-2-27b-it",
    "Meta-Llama-2-70B-Chat-HF": "meta-llama/Llama-2-70b-chat-hf",
    "Meta-Llama-2-7B-Chat-HF": "meta-llama/Llama-2-7b-chat-hf",
    "Meta-Llama-2-13B-Chat-HF": "meta-llama/Llama-2-13b-chat-hf",
    "Mistral-7B-Instruct-v0.1": "mistralai/Mistral-7B-Instruct-v0.1",
    "Mistral-7B-Instruct-v0.2": "mistralai/Mistral-7B-Instruct-v0.2",
    "Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
    "Falcon-7b-Instruct": "tiiuae/falcon-7b-instruct",
    "Starchat2-15b-v0.1": "HuggingFaceH4/starchat2-15b-v0.1",
    "Gemma-1.1-7b-it": "google/gemma-1.1-7b-it",
    "Gemma-1.1-2b-it": "google/gemma-1.1-2b-it",
    "Zephyr-7B-Beta": "HuggingFaceH4/zephyr-7b-beta",
    "Zephyr-7B-Alpha": "HuggingFaceH4/zephyr-7b-alpha",
    "Phi-3-mini-128k-instruct": "microsoft/Phi-3-mini-128k-instruct",
    "Phi-3-mini-4k-instruct": "microsoft/Phi-3-mini-4k-instruct",
}
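
# Note: these are Hugging Face Hub repo IDs served through the serverless
# Inference API; individual models may be gated or become unavailable over
# time, in which case requests fall through to the error path below.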

# Random dog images (filenames hosted on https://random.dog/) for error messages
random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
              "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
              "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
              "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
              "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
              "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
              "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
              "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
              "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
              "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
              "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
              "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
              "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"]



def reset_conversation():
    '''Clear the stored conversation and message history.'''
    st.session_state.conversation = []
    st.session_state.messages = []



# Define the available models
models = list(model_links.keys())

# Create the sidebar with the dropdown for model selection
selected_model = st.sidebar.selectbox("Select Model", models)

# Create a temperature slider (0.0 = most deterministic, 1.0 = most random)
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)


# Add a reset button to clear the conversation
st.sidebar.button('Reset Chat', on_click=reset_conversation)


# Show which model is active, plus a content disclaimer
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
st.sidebar.markdown("\n[TypeGPT](https://typegpt.net).")




if "prev_option" not in st.session_state:
    st.session_state.prev_option = selected_model

if st.session_state.prev_option != selected_model:
    st.session_state.messages = []
    # st.write(f"Changed to {selected_model}")
    st.session_state.prev_option = selected_model
    reset_conversation()



# Pull in the repo ID for the selected model
repo_id = model_links[selected_model]


st.subheader(f'TypeGPT.net - {selected_model}')

# Cache the selected model's repo ID in session state
if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []


# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
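
# Streamlit reruns this script from the top on every interaction, so the
# loop above replays the saved history into the UI on each rerun.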



# Accept user input
if prompt := st.chat_input(f"Hi, I'm {selected_model}. Ask me a question"):
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        try:
            # Stream the completion from the inference API
            stream = client.chat.completions.create(
                model=repo_id,
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                temperature=temp_values,
                stream=True,
                max_tokens=3000,
            )

            # write_stream renders chunks as they arrive and returns the full text
            response = st.write_stream(stream)

        except Exception as e:
            response = ("😵‍💫 Looks like someone unplugged something!"
                        "\nEither the model space is being updated or something is down."
                        "\n\nTry again later."
                        "\n\nHere's a random pic of a 🐶:")
            st.write(response)
            random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
            st.image(random_dog_pick)
            st.write("This was the error message:")
            st.write(e)

    # Record the assistant response (or the error text) in the chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
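
# To run the app locally (assuming this file is saved as app.py and the
# streamlit, openai, numpy, and python-dotenv packages are installed, with
# API_KEY set in .env or the environment):
#   streamlit run app.py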