Alvarez committed on
Commit 71c6d3b
Parents (2): 9e3047b 94e6643

Merge branch 'main' of https://huggingface.co/spaces/Intel/powered_by_intel_llm_leaderboard

Files changed (1):
  app.py (+58, -64)
app.py CHANGED
@@ -3,12 +3,6 @@ import requests
 import os
 
 import gradio
-
-# work around due to HF Spaces bug
-#if gradio.__version__ != '4.16.0':
-#    os.system("pip uninstall -y gradio")
-#    os.system("pip install gradio==4.16.0")
-
 import gradio as gr
 
 from info.train_a_model import (
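An aside on the hunk above, not part of the commit itself: the removed workaround reinstalled Gradio at runtime with os.system, which is slow and fragile. On Hugging Face Spaces the declarative equivalent is a version pin in the Space's requirements.txt, which is installed before app.py runs. A minimal sketch, assuming that exact version were still wanted:

    # requirements.txt (hypothetical pin matching the removed workaround)
    gradio==4.16.0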
@@ -77,62 +71,62 @@ with demo:
         except Exception as e:
             return f"❌Failed to submit due to an error: {str(e)}"
 
-    with gr.Accordion("Chat with Top Models on the Leaderboard Here 💬", open=False):
-
-        chat_model_dropdown = gr.Dropdown(
-            choices=VALIDATED_CHAT_MODELS,
-            label="Select a leaderboard model to chat with. ",
-            multiselect=False,
-            value=VALIDATED_CHAT_MODELS[0],
-            interactive=True,
-        )
-
-        #chat_model_selection = chat_model_dropdown.value
-        chat_model_selection = 'yuriachermann/My_AGI_llama_2_7B'
-
-        def call_api_and_stream_response(query, chat_model):
-            """
-            Call the API endpoint and yield characters as they are received.
-            This function simulates streaming by yielding characters one by one.
-            """
-            url = inference_endpoint_url
-            params = {"query": query, "selected_model": chat_model}
-            with requests.get(url, json=params, stream=True) as r:  # Use params for query parameters
-                for chunk in r.iter_content(chunk_size=1):
-                    if chunk:
-                        yield chunk.decode()
-
-        def get_response(query, history):
-            """
-            Wrapper function to call the streaming API and compile the response.
-            """
-            response = ''
-            for char in call_api_and_stream_response(query, chat_model=chat_model_selection):
-                if char == '<':  # This is stopping condition; adjust as needed.
-                    break
-                response += char
-                yield [(f"🤖 Response from LLM: {chat_model_selection}", response)]  # Correct format for Gradio Chatbot
+    #with gr.Accordion("Chat with Top Models on the Leaderboard Here 💬", open=False):
+    #
+    #    chat_model_dropdown = gr.Dropdown(
+    #        choices=VALIDATED_CHAT_MODELS,
+    #        label="Select a leaderboard model to chat with. ",
+    #        multiselect=False,
+    #        value=VALIDATED_CHAT_MODELS[0],
+    #        interactive=True,
+    #    )
+    #
+    #    #chat_model_selection = chat_model_dropdown.value
+    #    chat_model_selection = 'yuriachermann/My_AGI_llama_2_7B'
+    #
+    #    def call_api_and_stream_response(query, chat_model):
+    #        """
+    #        Call the API endpoint and yield characters as they are received.
+    #        This function simulates streaming by yielding characters one by one.
+    #        """
+    #        url = inference_endpoint_url
+    #        params = {"query": query, "selected_model": chat_model}
+    #        with requests.get(url, json=params, stream=True) as r:  # Use params for query parameters
+    #            for chunk in r.iter_content(chunk_size=1):
+    #                if chunk:
+    #                    yield chunk.decode()
     #
-
-        chatbot = gr.Chatbot()
-        msg = gr.Textbox()
-        submit = gr.Button("Submit")
-        clear = gr.Button("Clear")
-        def user(user_message, history):
-            return "", history + [[user_message, None]]
-        def clear_chat(*args):
-            return []  # Returning an empty list to signify clearing the chat, adjust as per Gradio's capabilities
-        submit.click(
-            fn=get_response,
-            inputs=[msg, chatbot],
-            outputs=chatbot
-        )
-        clear.click(
-            fn=clear_chat,
-            inputs=None,
-            outputs=chatbot
-        )
-
+    #    def get_response(query, history):
+    #        """
+    #        Wrapper function to call the streaming API and compile the response.
+    #        """
+    #        response = ''
+    #        for char in call_api_and_stream_response(query, chat_model=chat_model_selection):
+    #            if char == '<':  # This is stopping condition; adjust as needed.
+    #                break
+    #            response += char
+    #            yield [(f"🤖 Response from LLM: {chat_model_selection}", response)]  # Correct format for Gradio Chatbot
+    ##
+    #
+    #    chatbot = gr.Chatbot()
+    #    msg = gr.Textbox()
+    #    submit = gr.Button("Submit")
+    #    clear = gr.Button("Clear")
+    #    def user(user_message, history):
+    #        return "", history + [[user_message, None]]
+    #    def clear_chat(*args):
+    #        return []  # Returning an empty list to signify clearing the chat, adjust as per Gradio's capabilities
+    #    submit.click(
+    #        fn=get_response,
+    #        inputs=[msg, chatbot],
+    #        outputs=chatbot
+    #    )
+    #    clear.click(
+    #        fn=clear_chat,
+    #        inputs=None,
+    #        outputs=chatbot
+    #    )
+    #
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("🏆 LLM Leaderboard", elem_id="llm-benchmark-table", id=0):
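The accordion this commit comments out streams a model reply by reading the HTTP response one byte at a time. A minimal self-contained sketch of that same pattern, assuming a hypothetical ENDPOINT in place of the Space's inference_endpoint_url (which is defined elsewhere in app.py):

    import requests

    ENDPOINT = "https://example.com/generate"  # hypothetical placeholder

    def stream_chars(query, model):
        """Yield the response body character by character."""
        params = {"query": query, "selected_model": model}
        # Like the original, this sends a JSON body with a GET request;
        # requests allows it, though many servers expect POST for bodies.
        with requests.get(ENDPOINT, json=params, stream=True) as r:
            r.raise_for_status()
            for chunk in r.iter_content(chunk_size=1):
                if chunk:
                    # errors="ignore" guards against multi-byte UTF-8
                    # characters being split across 1-byte chunks
                    yield chunk.decode(errors="ignore")

    def collect_response(query, model):
        """Accumulate streamed characters, stopping at the '<' sentinel
        that the original code treats as an end-of-output marker."""
        response = ""
        for char in stream_chars(query, model):
            if char == "<":
                break
            response += char
        return response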
@@ -152,10 +146,10 @@ with demo:
                                                value=["No Affiliation","Intel Innovator","Student Ambassador","Intel Liftoff", "Intel Engineering", "Other"])
 
             with gr.Column():
-                filter_size = gr.CheckboxGroup(choices=[1,2,3,5,7,13,35,60,70,100],
+                filter_size = gr.CheckboxGroup(choices=[1,2,3,5,7,8,13,35,60,70,100],
                                                label="Model Sizes (Billion of Parameters)",
                                                elem_id="parameter_size",
-                                               value=[1,2,3,5,7,13,35,60,70,100])
+                                               value=[1,2,3,5,7,8,13,35,60,70,100])
                 filter_precision = gr.CheckboxGroup(choices=["fp32","fp16","bf16","int8","fp8", "int4"],
                                                     label="Model Precision",
                                                     elem_id="precision",
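The only change in this hunk adds 8 to both choices and value, so 8B-parameter models become selectable and are checked by default. For context, a runnable sketch of how a gr.CheckboxGroup can drive this kind of table filter; the sample rows and the params_b column are invented for illustration, not taken from the Space:

    import gradio as gr
    import pandas as pd

    SIZES = [1, 2, 3, 5, 7, 8, 13, 35, 60, 70, 100]
    data = pd.DataFrame({"model": ["a-7b", "b-8b", "c-70b"],
                         "params_b": [7, 8, 70]})

    def filter_by_size(selected):
        # keep only rows whose size is among the checked values
        return data[data["params_b"].isin(selected)]

    with gr.Blocks() as demo:
        sizes = gr.CheckboxGroup(choices=SIZES, value=SIZES,
                                 label="Model Sizes (Billion of Parameters)")
        table = gr.Dataframe(value=data)
        sizes.change(fn=filter_by_size, inputs=sizes, outputs=table)

    demo.launch()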
@@ -187,7 +181,7 @@ with demo:
     initial_filtered_df = update_df(["Gaudi","Xeon","GPU Max","Arc GPU","Core Ultra"],
                                     ["Intel Developer Cloud","AWS","Azure","Google Cloud Platform","Local"],
                                     ["No Affiliation","Intel Innovator","Student Ambassador","Intel Liftoff", "Intel Engineering", "Other"],
-                                    [1,2,3,5,7,13,35,60,70,100],
+                                    [1,2,3,5,7,8,13,35,60,70,100],
                                     ["fp32","fp16","bf16","int8","fp8", "int4"],
                                     ["pretrained","fine-tuned","chat-models","merges/moerges"])
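For reference, the chat UI disabled in this commit follows Gradio's standard Chatbot wiring: a Textbox feeds a callback whose outputs update the Chatbot history. A minimal runnable sketch using the same tuples-style history as the original, with an echo function standing in for the real streaming model call:

    import gradio as gr

    def respond(message, history):
        # placeholder for the real model call; append a (user, bot) pair
        history = history + [(message, f"echo: {message}")]
        return "", history  # clear the textbox, update the chat history

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        msg = gr.Textbox()
        submit = gr.Button("Submit")
        clear = gr.Button("Clear")
        submit.click(fn=respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
        clear.click(fn=lambda: [], inputs=None, outputs=chatbot)

    demo.launch()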
 
 
187