saikub committed on
Commit 3132846 · verified · 1 Parent(s): a69da5a

Update app.py

Files changed (1)
  1. app.py +108 -58
app.py CHANGED
@@ -1,54 +1,73 @@
 import numpy as np
 import streamlit as st
 from openai import OpenAI
 import os
- from dotenv import load_dotenv
-
- # Load environment variables
 load_dotenv()

- # Initialize the Hugging Face client
- hf_api_key = os.getenv('HF_API_KEY') # Replace with your Hugging Face API key
- openai_api_key = os.getenv('OPENAI_API_KEY') # Replace with your OpenAI API key

 client = OpenAI(
-     api_key=openai_api_key
- )
-
- # Create supported models
- model_links = {
-     "Meta-Llama-3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct",
-     "Meta-Llama-3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct",
-     "Meta-Llama-3.1-405B-Instruct-FP8": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
-     "Meta-Llama-3.1-405B-Instruct": "meta-llama/Meta-Llama-3.1-405B-Instruct",
-     "Mistral-Nemo-Instruct-2407": "mistralai/Mistral-Nemo-Instruct-2407",
-     "Meta-Llama-3-70B-Instruct": "meta-llama/Meta-Llama-3-70B-Instruct",
-     "Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
-     "C4ai-command-r-plus": "CohereForAI/c4ai-command-r-plus",
-     "Aya-23-35B": "CohereForAI/aya-23-35B",
-     "Zephyr-orpo-141b-A35b-v0.1": "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
-     "Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
-     "Codestral-22B-v0.1": "mistralai/Codestral-22B-v0.1",
-     "Nous-Hermes-2-Mixtral-8x7B-DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
-     "Yi-1.5-34B-Chat": "01-ai/Yi-1.5-34B-Chat",
-     "Gemma-2-27b-it": "google/gemma-2-27b-it",
-     "Meta-Llama-2-70B-Chat-HF": "meta-llama/Llama-2-70b-chat-hf",
-     "Meta-Llama-2-7B-Chat-HF": "meta-llama/Llama-2-7b-chat-hf",
-     "Meta-Llama-2-13B-Chat-HF": "meta-llama/Llama-2-13b-chat-hf",
-     "Mistral-7B-Instruct-v0.1": "mistralai/Mistral-7B-Instruct-v0.1",
-     "Mistral-7B-Instruct-v0.2": "mistralai/Mistral-7B-Instruct-v0.2",
-     "Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
-     "Falcon-7b-Instruct": "tiiuae/falcon-7b-instruct",
-     "Starchat2-15b-v0.1": "HuggingFaceH4/starchat2-15b-v0.1",
-     "Gemma-1.1-7b-it": "google/gemma-1.1-7b-it",
-     "Gemma-1.1-2b-it": "google/gemma-1.1-2b-it",
-     "Zephyr-7B-Beta": "HuggingFaceH4/zephyr-7b-beta",
-     "Zephyr-7B-Alpha": "HuggingFaceH4/zephyr-7b-alpha",
-     "Phi-3-mini-128k-instruct": "microsoft/Phi-3-mini-128k-instruct",
-     "Phi-3-mini-4k-instruct": "microsoft/Phi-3-mini-4k-instruct",
 }

- # Random dog images for error message
 random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
               "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
               "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
@@ -63,65 +82,90 @@ random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
               "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
               "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"]

- # Reset conversation
 def reset_conversation():
     st.session_state.conversation = []
     st.session_state.messages = []

 # Define the available models
- models = [key for key in model_links.keys()]

 # Create the sidebar with the dropdown for model selection
 selected_model = st.sidebar.selectbox("Select Model", models)

- # Create a temperature slider
- temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)

- # Add reset button to clear conversation
- st.sidebar.button('Reset Chat', on_click=reset_conversation) # Reset button

 # Create model description
 st.sidebar.write(f"You're now chatting with **{selected_model}**")
 st.sidebar.markdown("*Generated content may be inaccurate or false.*")
- st.sidebar.markdown("\n[TypeGPT](https://typegpt.net).")

- # Initialize previous option and messages
 if "prev_option" not in st.session_state:
     st.session_state.prev_option = selected_model

 if st.session_state.prev_option != selected_model:
     st.session_state.messages = []
     st.session_state.prev_option = selected_model
     reset_conversation()

- # Pull in the model we want to use
 repo_id = model_links[selected_model]

- st.subheader(f'TypeGPT.net - {selected_model}')

 # Set a default model
 if selected_model not in st.session_state:
-     st.session_state[selected_model] = model_links[selected_model]

 # Initialize chat history
 if "messages" not in st.session_state:
     st.session_state.messages = []

 # Display chat messages from history on app rerun
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])

 # Accept user input
 if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
     # Display user message in chat message container
     with st.chat_message("user"):
         st.markdown(prompt)
     # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": prompt})

     # Display assistant response in chat message container
     with st.chat_message("assistant"):
         try:
             stream = client.chat.completions.create(
                 model=model_links[selected_model],
@@ -129,25 +173,31 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
                     {"role": m["role"], "content": m["content"]}
                     for m in st.session_state.messages
                 ],
-                 temperature=temp_values,
                 stream=True,
                 max_tokens=3000,
             )
             response = st.write_stream(stream)

         except Exception as e:
-             response = ("😵‍💫 Looks like someone unplugged something! "
-                         "Either the model space is being updated or something is down. "
-                         "Try again later. Here's a random pic of a 🐶:")
             st.write(response)
-             random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
             st.image(random_dog_pick)
             st.write("This was the error message:")
             st.write(e)

-     st.session_state.messages.append({"role": "assistant", "content": response})



 # import gradio as gr
 # from huggingface_hub import InferenceClient
 
+
 import numpy as np
 import streamlit as st
 from openai import OpenAI
 import os
+ import sys
+ from dotenv import load_dotenv, dotenv_values
 load_dotenv()

+ # Initialize the client
 client = OpenAI(
+     base_url="https://api-inference.huggingface.co/v1",
+     api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')  # "hf_xxx"  # Replace with your token
+ )
+
+ # Create supported models
+ model_links = {
+     "Meta-Llama-3-8B": "meta-llama/Meta-Llama-3-8B-Instruct",
+     "Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.2",
+     "Gemma-7B": "google/gemma-1.1-7b-it",
+     "Gemma-2B": "google/gemma-1.1-2b-it",
+     "Zephyr-7B-β": "HuggingFaceH4/zephyr-7b-beta",
+ }
+
+ # Pull info about the model to display
+ model_info = {
+     "Mistral-7B": {
+         'description': """The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+         \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team and has over **7 billion parameters.** \n""",
+         'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
+     "Gemma-7B": {
+         'description': """The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+         \nIt was created by [**Google's AI Team**](https://blog.google/technology/developers/gemma-open-models/) and has over **7 billion parameters.** \n""",
+         'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
+     "Gemma-2B": {
+         'description': """The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+         \nIt was created by [**Google's AI Team**](https://blog.google/technology/developers/gemma-open-models/) and has over **2 billion parameters.** \n""",
+         'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
+     "Zephyr-7B": {
+         'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+         \nFrom Hugging Face: \n\
+         Zephyr is a series of language models that are trained to act as helpful assistants. \
+         [Zephyr 7B Gemma](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1) \
+         is the third model in the series, and is a fine-tuned version of google/gemma-7b \
+         that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
+         'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1/resolve/main/thumbnail.png'},
+     "Zephyr-7B-β": {
+         'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+         \nFrom Hugging Face: \n\
+         Zephyr is a series of language models that are trained to act as helpful assistants. \
+         [Zephyr-7B-β](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) \
+         is the second model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1 \
+         that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
+         'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha/resolve/main/thumbnail.png'},
+     "Meta-Llama-3-8B": {
+         'description': """The Llama (3) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+         \nIt was created by the [**Meta AI**](https://llama.meta.com/) team and has over **8 billion parameters.** \n""",
+         'logo': 'Llama_logo.png'},
68
 
69
+
70
+ #Random dog images for error message
71
  random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
72
  "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
73
  "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
 
82
  "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
83
  "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"]
84
 
85
+
86
+
87
  def reset_conversation():
88
+ '''
89
+ Resets Conversation
90
+ '''
91
  st.session_state.conversation = []
92
  st.session_state.messages = []
93
+ return None
94
+
95
+
96
+
97
 
98
  # Define the available models
99
+ models =[key for key in model_links.keys()]
100
 
101
  # Create the sidebar with the dropdown for model selection
102
  selected_model = st.sidebar.selectbox("Select Model", models)
103
 
104
+ #Create a temperature slider
105
+ temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
106
+
107
+
108
+ #Add reset button to clear conversation
109
+ st.sidebar.button('Reset Chat', on_click=reset_conversation) #Reset button
110
 
 
 
111
 
112
  # Create model description
113
  st.sidebar.write(f"You're now chatting with **{selected_model}**")
114
+ st.sidebar.markdown(model_info[selected_model]['description'])
115
+ st.sidebar.image(model_info[selected_model]['logo'])
116
  st.sidebar.markdown("*Generated content may be inaccurate or false.*")
 
117
 
118
+
119
+
120
+
121
+
122
  if "prev_option" not in st.session_state:
123
  st.session_state.prev_option = selected_model
124
 
125
  if st.session_state.prev_option != selected_model:
126
  st.session_state.messages = []
127
+ # st.write(f"Changed to {selected_model}")
128
  st.session_state.prev_option = selected_model
129
  reset_conversation()
130
 
131
+
132
+
133
+ #Pull in the model we want to use
134
  repo_id = model_links[selected_model]
135
 
136
+
137
+ st.subheader(f'AI - {selected_model}')
138
+ # st.title(f'ChatBot Using {selected_model}')
139
 
140
  # Set a default model
141
  if selected_model not in st.session_state:
142
+ st.session_state[selected_model] = model_links[selected_model]
143
 
144
  # Initialize chat history
145
  if "messages" not in st.session_state:
146
  st.session_state.messages = []
147
 
148
+
149
  # Display chat messages from history on app rerun
150
  for message in st.session_state.messages:
151
  with st.chat_message(message["role"]):
152
  st.markdown(message["content"])
153
 
154
+
155
+
156
  # Accept user input
157
  if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
158
+
159
  # Display user message in chat message container
160
  with st.chat_message("user"):
161
  st.markdown(prompt)
162
  # Add user message to chat history
163
  st.session_state.messages.append({"role": "user", "content": prompt})
164
 
165
+
166
  # Display assistant response in chat message container
167
  with st.chat_message("assistant"):
168
+
169
  try:
170
  stream = client.chat.completions.create(
171
  model=model_links[selected_model],
 
173
  {"role": m["role"], "content": m["content"]}
174
  for m in st.session_state.messages
175
  ],
176
+ temperature=temp_values,#0.5,
177
  stream=True,
178
  max_tokens=3000,
179
  )
180
+
181
  response = st.write_stream(stream)
182
 
183
  except Exception as e:
184
+ # st.empty()
185
+ response = "😵‍💫 Looks like someone unplugged something!\
186
+ \n Either the model space is being updated or something is down.\
187
+ \n\
188
+ \n Try again later. \
189
+ \n\
190
+ \n Here's a random pic of a 🐶:"
191
  st.write(response)
192
+ random_dog_pick = 'https://random.dog/'+ random_dog[np.random.randint(len(random_dog))]
193
  st.image(random_dog_pick)
194
  st.write("This was the error message:")
195
  st.write(e)
196
 
 
197
 
198
 
199
+
200
+ st.session_state.messages.append({"role": "assistant", "content": response})
201
 
202
  # import gradio as gr
  # from huggingface_hub import InferenceClient
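
The substance of this commit is that app.py now talks to Hugging Face's OpenAI-compatible Inference API (base_url="https://api-inference.huggingface.co/v1") instead of OpenAI's own backend, trims model_links down to five chat models, and drives the sidebar description and logo from the new model_info dict. Below is a minimal standalone sketch of the client pattern the app relies on; the model id and prompt are illustrative, and it assumes HUGGINGFACEHUB_API_TOKEN is set in the environment, as the app expects.

import os

from openai import OpenAI

# Point the OpenAI SDK at the Hugging Face Inference API, as app.py does.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ["HUGGINGFACEHUB_API_TOKEN"],  # assumed to be set
)

# Stream a chat completion with the same parameters the app uses;
# the model id is one of the values in model_links (illustrative choice).
stream = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3-8B-Instruct",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    temperature=0.5,
    max_tokens=3000,
    stream=True,
)

# In the app this iterator goes straight to st.write_stream; outside
# Streamlit, print each incremental delta as it arrives.
for chunk in stream:
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)

In the app itself the same stream is handed to st.write_stream, and the accumulated text is appended to st.session_state.messages so the conversation survives Streamlit reruns.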