sainathBelagavi committed on
Commit 3010e69 · verified · 1 Parent(s): ac15cea

Update app.py
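Persist the chat history to disk with pickle (load_conversation_history / save_conversation_history on conversation_history.pickle), rebuild the prompt's conversation history from st.session_state.messages, and stream the model's reply with st.write_stream instead of joining the chunks and rendering them in one markdown call.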

Files changed (1)
  1. app.py +59 -37
app.py CHANGED
@@ -1,83 +1,99 @@
 import streamlit as st
 from huggingface_hub import InferenceClient
 import os
+import sys
+import pickle
 
 st.title("CODEFUSSION ☄")
 
 base_url = "https://api-inference.huggingface.co/models/"
 API_KEY = os.environ.get('HUGGINGFACE_API_KEY')
 
 model_links = {
     "LegacyLift🚀": base_url + "mistralai/Mistral-7B-Instruct-v0.2",
     "ModernMigrate⭐": base_url + "mistralai/Mixtral-8x7B-Instruct-v0.1",
     "RetroRecode🔄": base_url + "microsoft/Phi-3-mini-4k-instruct"
 }
 
 model_info = {
     "LegacyLift🚀": {
-        'description': """The LegacyLift model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nThis model is best for minimal problem-solving, content writing, and daily tips.\n""",
+        'description': """The LegacyLift model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \n\nThis model is best for minimal problem-solving, content writing, and daily tips.\n""",
         'logo': './11.jpg'
     },
     "ModernMigrate⭐": {
-        'description': """The ModernMigrate model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nThis model excels in coding, logical reasoning, and high-speed inference. \n""",
+        'description': """The ModernMigrate model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \n\nThis model excels in coding, logical reasoning, and high-speed inference. \n""",
         'logo': './2.jpg'
     },
     "RetroRecode🔄": {
-        'description': """The RetroRecode model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nThis model is best suited for critical development, practical knowledge, and serverless inference.\n""",
+        'description': """The RetroRecode model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \n\nThis model is best suited for critical development, practical knowledge, and serverless inference.\n""",
         'logo': './3.jpg'
     },
 }
 
-def format_prompt(message, conversation_history, custom_instructions=None):
+def format_promt(message, conversation_history, custom_instructions=None):
     prompt = ""
     if custom_instructions:
         prompt += f"[INST] {custom_instructions} [/INST]"
 
+    # Add conversation history to the prompt
     prompt += "[CONV_HISTORY]\n"
     for role, content in conversation_history:
         prompt += f"{role.upper()}: {content}\n"
     prompt += "[/CONV_HISTORY]"
 
+    # Add the current message
     prompt += f"[INST] {message} [/INST]"
+
     return prompt
 
 def reset_conversation():
     '''
     Resets Conversation
     '''
+    st.session_state.conversation = []
     st.session_state.messages = []
-    st.session_state.conversation_history = []
-
-if "messages" not in st.session_state:
-    st.session_state.messages = []
-    st.session_state.conversation_history = []
+    return None
+
+def load_conversation_history():
+    history_file = "conversation_history.pickle"
+    if os.path.exists(history_file):
+        with open(history_file, "rb") as f:
+            conversation_history = pickle.load(f)
+    else:
+        conversation_history = []
+    return conversation_history
+
+def save_conversation_history(conversation_history):
+    history_file = "conversation_history.pickle"
+    with open(history_file, "wb") as f:
+        pickle.dump(conversation_history, f)
 
 models = [key for key in model_links.keys()]
-
 selected_model = st.sidebar.selectbox("Select Model", models)
-temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
-st.sidebar.button('Reset Chat', on_click=reset_conversation)
+temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
+st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
 
 st.sidebar.write(f"You're now chatting with **{selected_model}**")
 st.sidebar.markdown(model_info[selected_model]['description'])
 st.sidebar.image(model_info[selected_model]['logo'])
+
 st.sidebar.markdown("*Generating the code might go slow if you are using low power resources *")
 
 if "prev_option" not in st.session_state:
     st.session_state.prev_option = selected_model
 
 if st.session_state.prev_option != selected_model:
     st.session_state.messages = []
-    st.session_state.conversation_history = []
     st.session_state.prev_option = selected_model
 
+reset_conversation()
+
 repo_id = model_links[selected_model]
-
 st.subheader(f'{selected_model}')
 
+# Load the conversation history from the file
+st.session_state.messages = load_conversation_history()
+
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
@@ -88,15 +104,21 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"
         st.markdown(prompt)
 
     st.session_state.messages.append({"role": "user", "content": prompt})
-    st.session_state.conversation_history.append(("user", prompt))
+    conversation_history = [(message["role"], message["content"]) for message in st.session_state.messages]
 
-    formatted_text = format_prompt(prompt, st.session_state.conversation_history, custom_instruction)
+    formated_text = format_promt(prompt, conversation_history, custom_instruction)
 
     with st.chat_message("assistant"):
-        client = InferenceClient(model=model_links[selected_model])
-        output = client.text_generation(formatted_text, temperature=temp_values, max_new_tokens=3000, stream=True)
-        response = "".join([chunk for chunk in output])
-        st.markdown(response)
-
-    st.session_state.messages.append({"role": "assistant", "content": response})
-    st.session_state.conversation_history.append(("assistant", response))
+        client = InferenceClient(
+            model=model_links[selected_model], )
+        output = client.text_generation(
+            formated_text,
+            temperature=temp_values,  # 0.5
+            max_new_tokens=3000,
+            stream=True
+        )
+        response = st.write_stream(output)
+    st.session_state.messages.append({"role": "assistant", "content": response})
+
+    # Save the updated conversation history to the file
+    save_conversation_history(st.session_state.messages)