Commit 3703473
justinxzhao committed
Parent(s): c0a5a18

Fixed aggregator prompt.
Files changed:
- .gitignore  +2 -1
- app.py  +22 -5
.gitignore CHANGED

@@ -1 +1,2 @@
-env/
+env/
+client_secret.json
app.py CHANGED

@@ -54,7 +54,7 @@ PROVIDER_TO_AVATAR_MAP = {
     "anthropic://claude-3-haiku-20240307": "data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIxZW0iIGhlaWdodD0iMWVtIiB2aWV3Qm94PSIwIDAgMjQgMjQiPjxwYXRoIGZpbGw9ImN1cnJlbnRDb2xvciIgZD0iTTE3LjMwNCAzLjU0MWgtMy42NzJsNi42OTYgMTYuOTE4SDI0Wm0tMTAuNjA4IDBMMCAyMC40NTloMy43NDRsMS4zNy0zLjU1M2g3LjAwNWwxLjM2OSAzLjU1M2gzLjc0NEwxMC41MzYgMy41NDFabS0uMzcxIDEwLjIyM0w4LjYxNiA3LjgybDIuMjkxIDUuOTQ1WiIvPjwvc3ZnPg==",
 }
 
-AGGREGATORS = ["openai://gpt-
+AGGREGATORS = ["openai://gpt-4o-mini", "openai://gpt-4o"]
 
 
 def anthropic_streamlit_streamer(stream):
@@ -147,6 +147,10 @@ def get_llm_response(model_identifier, prompt):
         return None
 
 
+def get_response_key(model):
+    return model + ".response"
+
+
 # Main Streamlit App
 def main():
     st.set_page_config(
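The new helper gives each council model a stable st.session_state key for its completed response, so the aggregator step can read the responses back later in the run. A minimal standalone sketch of the keying scheme:

    def get_response_key(model):
        return model + ".response"

    # Each model's streamed reply is stored under "<model identifier>.response":
    assert get_response_key("openai://gpt-4o") == "openai://gpt-4o.response"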
@@ -226,22 +230,30 @@ def main():
                 stream = google_streamlit_streamer(stream)
             elif model.startswith("together"):
                 stream = together_streamlit_streamer(stream)
-            message_placeholder.write_stream(stream)
+            st.session_state[get_response_key(model)] = (
+                message_placeholder.write_stream(stream)
+            )
 
         # Constructing the aggregator prompt
         aggregator_prompt = f"User prompt: {prompt}\n\n"
-        aggregator_prompt += "Responses from other LLMs:\n"
+        aggregator_prompt += "Responses from other LLMs:\n\n"
         aggregator_prompt += "\n".join(
             [
-                f"{model}: {st.session_state.get(model
+                f"{model}: {st.session_state.get(get_response_key(model))} \n\n"
                 for model in selected_models
             ]
         )
         aggregator_prompt += "\n\nPlease provide an aggregated response."
 
+        with st.expander("Aggregator Prompt"):
+            st.write(aggregator_prompt)
+
         # Fetching and streaming response from the aggregator
         st.write(f"Aggregated response from {selected_aggregator}:")
-        with st.chat_message(
+        with st.chat_message(
+            selected_aggregator,
+            avatar=PROVIDER_TO_AVATAR_MAP[selected_aggregator],
+        ):
             message_placeholder = st.empty()
             aggregator_stream = get_llm_response(
                 selected_aggregator, aggregator_prompt
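This hunk carries the actual fix: each response is now looked up via get_response_key(model) rather than the bare model name, and entries are padded with blank lines. A standalone sketch of the prompt the aggregator now receives; the user prompt, model list, and stored responses below are made up for illustration:

    # Standalone sketch of the aggregator prompt format after this change.
    def get_response_key(model):
        return model + ".response"

    session_state = {
        "openai://gpt-4o-mini.response": "Paris is the capital of France.",
        "anthropic://claude-3-haiku-20240307.response": "France's capital is Paris.",
    }
    selected_models = ["openai://gpt-4o-mini", "anthropic://claude-3-haiku-20240307"]
    prompt = "What is the capital of France?"

    aggregator_prompt = f"User prompt: {prompt}\n\n"
    aggregator_prompt += "Responses from other LLMs:\n\n"
    aggregator_prompt += "\n".join(
        [
            f"{model}: {session_state.get(get_response_key(model))} \n\n"
            for model in selected_models
        ]
    )
    aggregator_prompt += "\n\nPlease provide an aggregated response."
    print(aggregator_prompt)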
@@ -257,6 +269,11 @@ if __name__ == "__main__":
     main()
 
 
+# Fix the aggregator step.
+# Add a judging step.
+# Add visualizations.
+
+
 # import streamlit as st
 # from components import llm_council_selector
 
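One detail the new session-state capture relies on: write_stream both renders chunks incrementally and returns the fully concatenated text once the stream is exhausted. A minimal sketch under that assumption (Streamlit 1.31+, which introduced write_stream; the stream and key below are stand-ins):

    import streamlit as st

    def fake_stream():
        # Stand-in for a real LLM token stream.
        yield from ["The capital ", "of France ", "is Paris."]

    placeholder = st.empty()
    full_text = placeholder.write_stream(fake_stream())  # renders as it streams
    st.session_state["demo-model.response"] = full_text  # persists the final text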