sainathBelagavi committed
Commit ac15cea · verified · 1 Parent(s): 1c2e9be

Update app.py

Files changed (1):
  1. app.py +25 -36

app.py CHANGED
@@ -1,15 +1,11 @@
 import streamlit as st
 from huggingface_hub import InferenceClient
 import os
-import sys
 
 st.title("CODEFUSSION ☄")
 
 base_url = "https://api-inference.huggingface.co/models/"
-
 API_KEY = os.environ.get('HUGGINGFACE_API_KEY')
-# print(API_KEY)
-# headers = {"Authorization":"Bearer "+API_KEY}
 
 model_links = {
     "LegacyLift🚀": base_url + "mistralai/Mistral-7B-Instruct-v0.2",
@@ -17,20 +13,17 @@ model_links = {
     "RetroRecode🔄": base_url + "microsoft/Phi-3-mini-4k-instruct"
 }
 
-# Pull info about the model to display
 model_info = {
     "LegacyLift🚀": {
         'description': """The LegacyLift model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
         \nThis model is best for minimal problem-solving, content writing, and daily tips.\n""",
         'logo': './11.jpg'
     },
-
     "ModernMigrate⭐": {
         'description': """The ModernMigrate model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
         \nThis model excels in coding, logical reasoning, and high-speed inference. \n""",
         'logo': './2.jpg'
     },
-
     "RetroRecode🔄": {
         'description': """The RetroRecode model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
         \nThis model is best suited for critical development, practical knowledge, and serverless inference.\n""",
@@ -38,10 +31,16 @@ model_info = {
     },
 }
 
-def format_promt(message, custom_instructions=None):
+def format_prompt(message, conversation_history, custom_instructions=None):
     prompt = ""
     if custom_instructions:
         prompt += f"[INST] {custom_instructions} [/INST]"
+
+    prompt += "[CONV_HISTORY]\n"
+    for role, content in conversation_history:
+        prompt += f"{role.upper()}: {content}\n"
+    prompt += "[/CONV_HISTORY]"
+
     prompt += f"[INST] {message} [/INST]"
     return prompt
 
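Note on the new prompt builder: the `[CONV_HISTORY]` / `[/CONV_HISTORY]` markers are delimiters invented by this app, not tags that the Mistral or Phi-3 instruction format defines, so the models will see them as plain text. For reference, a minimal sketch of what `format_prompt` emits, using a made-up two-turn history:

    # Sketch only: assumes format_prompt as defined in the hunk above;
    # the history and messages are hypothetical examples.
    history = [
        ("user", "Write hello world in C"),
        ("assistant", "#include <stdio.h> ..."),
    ]
    print(format_prompt("Now in Rust", history, "Act like a Human in conversation"))
    # [INST] Act like a Human in conversation [/INST][CONV_HISTORY]
    # USER: Write hello world in C
    # ASSISTANT: #include <stdio.h> ...
    # [/CONV_HISTORY][INST] Now in Rust [/INST]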
@@ -49,40 +48,35 @@ def reset_conversation():
     '''
     Resets Conversation
     '''
-    st.session_state.conversation = []
     st.session_state.messages = []
-    return None
+    st.session_state.conversation_history = []
+
+if "messages" not in st.session_state:
+    st.session_state.messages = []
+    st.session_state.conversation_history = []
 
 models = [key for key in model_links.keys()]
 
 selected_model = st.sidebar.selectbox("Select Model", models)
-
-temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
-
-st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
+temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
+st.sidebar.button('Reset Chat', on_click=reset_conversation)
 
 st.sidebar.write(f"You're now chatting with **{selected_model}**")
 st.sidebar.markdown(model_info[selected_model]['description'])
 st.sidebar.image(model_info[selected_model]['logo'])
 st.sidebar.markdown("*Generating the code might go slow if you are using low power resources *")
 
-
 if "prev_option" not in st.session_state:
     st.session_state.prev_option = selected_model
 
 if st.session_state.prev_option != selected_model:
     st.session_state.messages = []
-    # st.write(f"Changed to {selected_model}")
+    st.session_state.conversation_history = []
     st.session_state.prev_option = selected_model
-    reset_conversation()
 
 repo_id = model_links[selected_model]
 
 st.subheader(f'{selected_model}')
-# st.title(f'ChatBot Using {selected_model}')
-
-if "messages" not in st.session_state:
-    st.session_state.messages = []
 
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
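Review note on the session-state initialization above: `conversation_history` is only created inside the `if "messages" not in st.session_state` guard, so a live session that already has `messages` but predates `conversation_history` would hit an AttributeError on the first append. A slightly more defensive sketch (hypothetical, not part of this commit), guarding each key independently:

    # Initialize each key on its own so a session that already has
    # "messages" still gets "conversation_history" before it is used.
    for key in ("messages", "conversation_history"):
        if key not in st.session_state:
            st.session_state[key] = []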
@@ -90,24 +84,19 @@ for message in st.session_state.messages:
 
 if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"):
     custom_instruction = "Act like a Human in conversation"
-
     with st.chat_message("user"):
         st.markdown(prompt)
-
+
     st.session_state.messages.append({"role": "user", "content": prompt})
+    st.session_state.conversation_history.append(("user", prompt))
 
-    formated_text = format_promt(prompt, custom_instruction)
+    formatted_text = format_prompt(prompt, st.session_state.conversation_history, custom_instruction)
 
     with st.chat_message("assistant"):
-        client = InferenceClient(
-            model=model_links[selected_model], )
-
-        output = client.text_generation(
-            formated_text,
-            temperature=temp_values,  # 0.5
-            max_new_tokens=3000,
-            stream=True
-        )
-
-        response = st.write_stream(output)
-        st.session_state.messages.append({"role": "assistant", "content": response})
+        client = InferenceClient(model=model_links[selected_model])
+        output = client.text_generation(formatted_text, temperature=temp_values, max_new_tokens=3000, stream=True)
+        response = "".join([chunk for chunk in output])
+        st.markdown(response)
+
+        st.session_state.messages.append({"role": "assistant", "content": response})
+        st.session_state.conversation_history.append(("assistant", response))
 
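One behavioral change worth flagging in the final hunk: the old code streamed tokens to the page with `st.write_stream(output)`, while the new code drains the generator with `"".join(...)` and renders once via `st.markdown`, so nothing appears until the full `max_new_tokens=3000` generation finishes. If that is unintentional, streaming and capture can be combined, since `st.write_stream` (available in Streamlit 1.31+) both renders chunks as they arrive and returns the concatenated text. A sketch reusing the variables from the diff above:

    # Sketch only: assumes client, formatted_text and temp_values
    # as defined in the new version of app.py.
    output = client.text_generation(
        formatted_text,
        temperature=temp_values,
        max_new_tokens=3000,
        stream=True,
    )
    response = st.write_stream(output)  # renders chunks, returns the full string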