Du Mingzhe committed on
Commit
57c83ef
1 Parent(s): eaac40d
Files changed (3) hide show
  1. app.py +14 -23
  2. components.py +26 -0
  3. requirements.txt +4 -1
app.py CHANGED
@@ -1,42 +1,33 @@
 
1
import streamlit as st
import random
import time


# Canned replies the fake assistant picks from at random.
_CANNED_REPLIES = (
    "Hello there! How can I assist you today?",
    "Hi, human! Is there anything I can help you with?",
    "Do you need help?",
)


def response_generator():
    """Yield one randomly chosen canned reply word-by-word.

    A short sleep between words lets st.write_stream render the text
    as if it were being typed live.
    """
    reply = random.choice(_CANNED_REPLIES)
    for token in reply.split():
        yield token + " "
        time.sleep(0.05)


st.title("Talk with Mingzhe")

# Conversation history lives in the session so it survives reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the stored conversation on every rerun.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])

user_text = st.chat_input("What is up?")
if user_text:
    # Record and echo the user's turn.
    st.session_state.messages.append({"role": "user", "content": user_text})
    with st.chat_message("user"):
        st.markdown(user_text)

    # Stream the fake assistant reply and capture the full text it produced.
    with st.chat_message("assistant"):
        response = st.write_stream(response_generator())
    st.session_state.messages.append({"role": "assistant", "content": response})
 
1
from openai import OpenAI
import streamlit as st


st.title("ChatGPT-like clone")

# One API client per rerun; the key comes from .streamlit/secrets.toml.
client = OpenAI(api_key=st.secrets["OPENAI_API_KEY"])

# Session defaults: model name and conversation history.
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-3.5-turbo"
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the stored conversation on every rerun.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])

user_text = st.chat_input("What is up?")
if user_text:
    # Record and echo the user's turn.
    st.session_state.messages.append({"role": "user", "content": user_text})
    with st.chat_message("user"):
        st.markdown(user_text)

    # Send the whole history to the model and stream the reply into the UI;
    # st.write_stream returns the concatenated text once the stream ends.
    with st.chat_message("assistant"):
        history_payload = [
            {"role": m["role"], "content": m["content"]}
            for m in st.session_state.messages
        ]
        stream = client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=history_payload,
            stream=True,
        )
        response = st.write_stream(stream)
    st.session_state.messages.append({"role": "assistant", "content": response})
components.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Author: Du Mingzhe ([email protected])
2
+ # Date: 2024/03/09
3
+
4
+ from openai import OpenAI
5
+
6
class LLMClient:
    """Thin wrapper around the OpenAI chat-completions API.

    Builds a chat message list from conversation history plus the current
    query and returns the model's text reply.
    """

    def __init__(self, api_key, model_name="gpt-3.5-turbo") -> None:
        # Model used for every completion; configurable per client instance.
        self.model_name = model_name
        self.llm_client = OpenAI(api_key=api_key)

    def prompt_list_generate(self, query, history, web_results, personal_results):
        """Return the chat message list for a completion request.

        Args:
            query: the user's current question (appended last as a "user" turn).
            history: prior messages as dicts with "role"/"content" keys, or None.
            web_results, personal_results: accepted but not yet folded into the
                prompt — NOTE(review): presumably retrieval context; TODO confirm
                intended use before wiring them in.
        """
        # BUG FIX: the original had a trailing comma after `prompt_list += [...]`,
        # which extended the list with a one-element *tuple* containing the whole
        # history list (nesting the messages), and it never added `query` at all.
        # Also guard against history=None (the default in generate()).
        prompt_list = [
            {"role": m["role"], "content": m["content"]} for m in (history or [])
        ]
        prompt_list.append({"role": "user", "content": query})
        return prompt_list

    def generate(self, query, history=None, web_results=None, personal_results=None):
        """Send the prompt to the model and return its reply text."""
        prompt_list = self.prompt_list_generate(query, history, web_results, personal_results)

        response = self.llm_client.chat.completions.create(
            model=self.model_name,
            messages=prompt_list,
        )
        # Single (non-streamed) completion: first choice holds the reply.
        return response.choices[0].message.content
requirements.txt CHANGED
@@ -1 +1,4 @@
1
- streamlit-chat
 
 
 
 
1
+ openai
2
+ requests
3
+ streamlit
4
+ pinecone-client