mrfakename committed
Commit 5d5cc81 · Parent: ac443d1

* New leaderboard styling with custom table (instead of Gradio dataframe)

Files changed (7):
  1. .gitignore +176 -0
  2. README.md +6 -1
  3. app/config.py +7 -1
  4. app/leaderboard.py +225 -12
  5. app/models.py +30 -0
  6. app/ui.py +26 -1
  7. app/ui_leaderboard.py +45 -7
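
The main change is the leaderboard rendering path: instead of binding a pandas DataFrame to a gr.Dataframe component, app/leaderboard.py now builds an HTML string (with its own <style> block) and app/ui_leaderboard.py displays it through gr.HTML, which gives full CSS control over the table. A minimal sketch of that pattern, with simplified columns and styling rather than the exact code from this commit:

    # Sketch only: render a DataFrame as custom HTML for a gr.HTML component.
    # Column names ('order', 'name', 'win_rate') mirror the diff below; the CSS is simplified.
    import gradio as gr
    import pandas as pd

    def render_leaderboard_html(df: pd.DataFrame) -> str:
        html = """
        <style>
        .lb-table { width: 100%; border-collapse: collapse; }
        .lb-table th, .lb-table td { padding: 8px 12px; text-align: left; }
        </style>
        <table class="lb-table">
        <thead><tr><th>Rank</th><th>Model</th><th>Win Rate</th></tr></thead>
        <tbody>
        """
        for _, row in df.iterrows():
            html += f"<tr><td>{row['order']}</td><td>{row['name']}</td><td>{row['win_rate']}%</td></tr>\n"
        return html + "</tbody></table>"

    with gr.Blocks() as demo:
        table = gr.HTML()  # free-form HTML instead of gr.Dataframe
        demo.load(
            lambda: render_leaderboard_html(
                pd.DataFrame({'order': ['#1'], 'name': ['Example TTS'], 'win_rate': [61.2]})
            ),
            outputs=[table],
        )

The trade-off is styling control in exchange for losing gr.Dataframe's built-in interactivity: the table is plain HTML, so every filter change re-renders the whole string server-side.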
.gitignore ADDED
@@ -0,0 +1,176 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ #uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+ .pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # Ruff stuff:
+ .ruff_cache/
+
+ # PyPI configuration file
+ .pypirc
+
+ *.db
README.md CHANGED
@@ -13,4 +13,9 @@ header: mini
  sdk_version: 5.1.0
  short_description: Vote on the latest TTS models!
  ---
- # TTS Arena
+
+ # TTS Arena
+
+ The codebase for TTS Arena v2.
+
+ The TTS Arena is a Gradio app with several components. Please refer to the `app` directory for more information.
app/config.py CHANGED
@@ -1,5 +1,10 @@
  import os
 
+ RUNNING_LOCALLY = os.getenv('RUNNING_LOCALLY', '0').lower() in ('true', '1', 't')
+ if RUNNING_LOCALLY:
+     print("Running locally, not syncing DB to HF dataset")
+ else:
+     print("Running in HF Space, syncing DB to HF dataset")
  # NOTE: Configure models in `models.py`
 
  #########################
@@ -17,7 +22,8 @@ DB_PATH = f"/data/{DB_NAME}" if os.path.isdir("/data") else DB_NAME # If /data a
 
  ROUTER_ID = "TTS-AGI/tts-router" # You should use a router space to route TTS models to avoid exposing your API keys!
 
- SYNC_DB = True # Sync DB to HF dataset?
+ SYNC_DB = not RUNNING_LOCALLY # Sync DB to HF dataset?
+
  DB_DATASET_ID = os.getenv('DATASET_ID') # HF dataset ID, can be None if not syncing
 
  SPACE_ID = os.getenv('SPACE_ID') # Don't change this! It detects if we're running in a HF Space
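
A behavioral note on the new flag in app/config.py: only '1', 'true', and 't' (case-insensitive) enable local mode; anything else, including an unset variable (which defaults to '0') or a value like 'yes', keeps SYNC_DB on. A quick standalone check of that parsing rule, with hypothetical values:

    # Mirrors the expression: os.getenv('RUNNING_LOCALLY', '0').lower() in ('true', '1', 't')
    def is_truthy(value: str) -> bool:
        return value.lower() in ('true', '1', 't')

    for raw in ('1', 'True', 't', '0', 'yes', ''):
        print(repr(raw), is_truthy(raw))
    # '1', 'True', 't' -> True; '0', 'yes', '' -> False (syncing stays enabled)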
app/leaderboard.py CHANGED
@@ -3,7 +3,7 @@ from .db import *
  from .models import *
 
  import pandas as pd
- def get_leaderboard(reveal_prelim = False, hide_battle_votes = False):
+ def get_leaderboard(reveal_prelim = False, hide_battle_votes = False, sort_by_elo = False, hide_proprietary = False):
      conn = get_db()
      cursor = conn.cursor()
 
@@ -30,30 +30,243 @@ def get_leaderboard(reveal_prelim = False, hide_battle_votes = False):
      data = cursor.fetchall()
      df = pd.DataFrame(data, columns=['name', 'upvote', 'downvote'])
      df['name'] = df['name'].replace(model_names).replace('Anonymous Sparkle', 'Fish Speech v1.5')
+
+     # Calculate total votes and win rate
      df['votes'] = df['upvote'] + df['downvote']
+     df['win_rate'] = (df['upvote'] / df['votes'] * 100).round(1)
+
+     # Remove models with no votes
+     df = df[df['votes'] > 0]
 
      # Filter out rows with insufficient votes if not revealing preliminary results
      if not reveal_prelim:
          df = df[df['votes'] > 500]
 
-     ## ELO SCORE
-     df['score'] = 1200
+     ## Calculate ELO SCORE (kept as secondary metric)
+     df['elo'] = 1200
      for i in range(len(df)):
          for j in range(len(df)):
              if i != j:
                  try:
-                     expected_a = 1 / (1 + 10 ** ((df['score'].iloc[j] - df['score'].iloc[i]) / 400))
-                     expected_b = 1 / (1 + 10 ** ((df['score'].iloc[i] - df['score'].iloc[j]) / 400))
+                     expected_a = 1 / (1 + 10 ** ((df['elo'].iloc[j] - df['elo'].iloc[i]) / 400))
+                     expected_b = 1 / (1 + 10 ** ((df['elo'].iloc[i] - df['elo'].iloc[j]) / 400))
                      actual_a = df['upvote'].iloc[i] / df['votes'].iloc[i] if df['votes'].iloc[i] > 0 else 0.5
                      actual_b = df['upvote'].iloc[j] / df['votes'].iloc[j] if df['votes'].iloc[j] > 0 else 0.5
-                     df.iloc[i, df.columns.get_loc('score')] += 32 * (actual_a - expected_a)
-                     df.iloc[j, df.columns.get_loc('score')] += 32 * (actual_b - expected_b)
+                     df.iloc[i, df.columns.get_loc('elo')] += 32 * (actual_a - expected_a)
+                     df.iloc[j, df.columns.get_loc('elo')] += 32 * (actual_b - expected_b)
                  except Exception as e:
                      print(f"Error in ELO calculation for rows {i} and {j}: {str(e)}")
                      continue
-     df['score'] = round(df['score'])
-     ## ELO SCORE
-     df = df.sort_values(by='score', ascending=False)
+     df['elo'] = round(df['elo'])
+
+     # Sort based on user preference
+     sort_column = 'elo' if sort_by_elo else 'win_rate'
+     df = df.sort_values(by=sort_column, ascending=False)
      df['order'] = ['#' + str(i + 1) for i in range(len(df))]
-     df = df[['order', 'name', 'score', 'votes']]
-     return df
+
+     # Select and order columns for display
+     df = df[['order', 'name', 'win_rate', 'votes', 'elo']]
+
+     # Remove proprietary models if filter is enabled
+     if hide_proprietary:
+         df = df[~df['name'].isin(closed_source)]
+
+     # Convert DataFrame to markdown table with CSS styling
+     markdown_table = """
+     <style>
+     /* Reset any Gradio table styles */
+     .leaderboard-table,
+     .leaderboard-table th,
+     .leaderboard-table td {
+         border: none !important;
+         border-collapse: separate !important;
+         border-spacing: 0 !important;
+     }
+
+     .leaderboard-container {
+         background: var(--background-fill-primary);
+         border: 1px solid var(--border-color-primary);
+         border-radius: 12px;
+         padding: 4px;
+         margin: 10px 0;
+         width: 100%;
+         overflow-x: auto; /* Enable horizontal scroll */
+     }
+
+     .leaderboard-scroll {
+         max-height: 600px;
+         overflow-y: auto;
+         border-radius: 8px;
+     }
+
+     .leaderboard-table {
+         width: 100%;
+         border-spacing: 0;
+         border-collapse: separate;
+         font-size: 15px;
+         line-height: 1.5;
+         table-layout: auto; /* Allow flexible column widths */
+     }
+
+     .leaderboard-table th {
+         background: var(--background-fill-secondary);
+         color: var(--body-text-color);
+         font-weight: 600;
+         text-align: left;
+         padding: 12px 16px;
+         position: sticky;
+         top: 0;
+         z-index: 1;
+     }
+
+     .leaderboard-table th:after {
+         content: '';
+         position: absolute;
+         left: 0;
+         bottom: 0;
+         width: 100%;
+         border-bottom: 1px solid var(--border-color-primary);
+     }
+
+     .leaderboard-table td {
+         padding: 12px 16px;
+         color: var(--body-text-color);
+     }
+
+     .leaderboard-table tr td {
+         border-bottom: 1px solid var(--border-color-primary);
+     }
+
+     .leaderboard-table tr:last-child td {
+         border-bottom: none;
+     }
+
+     .leaderboard-table tr:hover td {
+         background: var(--background-fill-secondary);
+     }
+
+     /* Column-specific styles */
+     .leaderboard-table .col-rank {
+         width: 70px;
+         min-width: 70px; /* Prevent rank from shrinking */
+     }
+
+     .leaderboard-table .col-model {
+         min-width: 200px; /* Minimum width before scrolling */
+     }
+
+     .leaderboard-table .col-winrate {
+         width: 100px;
+         min-width: 100px; /* Prevent win rate from shrinking */
+     }
+
+     .leaderboard-table .col-votes {
+         width: 100px;
+         min-width: 100px; /* Prevent votes from shrinking */
+     }
+
+     .leaderboard-table .col-arena {
+         width: 100px;
+         min-width: 100px; /* Prevent arena score from shrinking */
+     }
+
+     .win-rate {
+         display: inline-block;
+         font-weight: 600;
+         padding: 4px 8px;
+         border-radius: 6px;
+         min-width: 65px;
+         text-align: center;
+     }
+
+     .win-rate-excellent {
+         background-color: var(--color-accent);
+         color: var(--color-accent-foreground);
+     }
+
+     .win-rate-good {
+         background-color: var(--color-accent-soft);
+         color: var(--body-text-color);
+     }
+
+     .win-rate-average {
+         background-color: var(--background-fill-secondary);
+         color: var(--body-text-color);
+         border: 1px solid var(--border-color-primary);
+     }
+
+     .win-rate-below {
+         background-color: var(--error-background-fill);
+         color: var(--body-text-color);
+     }
+
+     .model-link {
+         color: var(--body-text-color) !important;
+         text-decoration: none !important;
+         border-bottom: 1px dashed var(--border-color-primary);
+     }
+
+     .model-link:hover {
+         color: var(--color-accent) !important;
+         border-bottom-color: var(--color-accent) !important;
+     }
+
+     .proprietary-badge {
+         display: inline-block;
+         font-size: 12px;
+         padding: 2px 6px;
+         border-radius: 4px;
+         background-color: var(--background-fill-secondary);
+         color: var(--body-text-color);
+         margin-left: 6px;
+         border: 1px solid var(--border-color-primary);
+     }
+     </style>
+     <div class="leaderboard-container">
+     <div class="leaderboard-scroll">
+     <table class="leaderboard-table">
+     <thead>
+     <tr>
+     <th class="col-rank">Rank</th>
+     <th class="col-model">Model</th>
+     <th class="col-winrate">Win Rate</th>
+     <th class="col-votes">Votes</th>
+     """ + ("""<th class="col-arena">Arena Score</th>""" if sort_by_elo else "") + """
+     </tr>
+     </thead>
+     <tbody>
+     """
+
+     def get_win_rate_class(win_rate):
+         if win_rate >= 60:
+             return "win-rate-excellent"
+         elif win_rate >= 55:
+             return "win-rate-good"
+         elif win_rate >= 45:
+             return "win-rate-average"
+         else:
+             return "win-rate-below"
+
+     for _, row in df.iterrows():
+         win_rate_class = get_win_rate_class(row['win_rate'])
+         win_rate_html = f'<span class="win-rate {win_rate_class}">{row["win_rate"]}%</span>'
+
+         # Add link to model name if available and proprietary badge if closed source
+         model_name = row['name']
+         original_model_name = model_name
+         if model_name in model_links:
+             model_name = f'<a href="{model_links[model_name]}" target="_blank" class="model-link">{model_name}</a>'
+
+         if original_model_name in closed_source:
+             model_name += '<span class="proprietary-badge">Proprietary</span>'
+
+         markdown_table += f'''<tr>
+             <td class="col-rank">{row['order']}</td>
+             <td class="col-model">{model_name}</td>
+             <td class="col-winrate">{win_rate_html}</td>
+             <td class="col-votes">{row['votes']:,}</td>''' + (
+             f'''<td class="col-arena">{int(row['elo'])}</td>''' if sort_by_elo else ""
+         ) + "</tr>\n"
+
+     markdown_table += "</tbody></table></div></div>"
+     return markdown_table
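
For context on the scoring above: every pair of models exchanges one Elo-style update in which the "actual" result is each model's overall upvote share rather than a head-to-head outcome, with ratings starting at 1200 and a K-factor of 32. A standalone sketch of a single update, using hypothetical upvote shares:

    # One pairwise update as in get_leaderboard (K=32, logistic scale 400).
    # share_a / share_b are overall upvote fractions, not head-to-head results.
    def elo_update(elo_a, elo_b, share_a, share_b, k=32):
        expected_a = 1 / (1 + 10 ** ((elo_b - elo_a) / 400))
        expected_b = 1 / (1 + 10 ** ((elo_a - elo_b) / 400))
        return elo_a + k * (share_a - expected_a), elo_b + k * (share_b - expected_b)

    a, b = elo_update(1200, 1200, 0.62, 0.48)
    print(round(a), round(b))  # equal ratings give expected 0.5, so: 1204 1199

Because updates are applied in a single pass over index pairs and depend on the ratings accumulated so far, the resulting 'elo' column is order-dependent; the leaderboard now sorts by win rate by default and shows this value only as a secondary "Arena Score".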
app/models.py CHANGED
@@ -22,6 +22,36 @@ AVAILABLE_MODELS = {
      #'Fish Speech v1.4': 'fish',
  }
 
+ model_links = {
+     'ElevenLabs': 'https://elevenlabs.io/',
+     'Play.HT 2.0': 'https://play.ht/',
+     'Play.HT 3.0 Mini': 'https://play.ht/',
+     'XTTSv2': 'https://huggingface.co/coqui/XTTS-v2',
+     'MeloTTS': 'https://github.com/myshell-ai/MeloTTS',
+     'StyleTTS 2': 'https://github.com/yl4579/StyleTTS2',
+     'Parler TTS Large': 'https://github.com/huggingface/parler-tts',
+     'Parler TTS': 'https://github.com/huggingface/parler-tts',
+     'Fish Speech v1.5': 'https://github.com/fishaudio/fish-speech',
+     'Fish Speech v1.4': 'https://github.com/fishaudio/fish-speech',
+     'GPT-SoVITS': 'https://github.com/RVC-Boss/GPT-SoVITS',
+     'WhisperSpeech': 'https://github.com/WhisperSpeech/WhisperSpeech',
+     'VoiceCraft 2.0': 'https://github.com/jasonppy/VoiceCraft',
+     'PlayDialog': 'https://play.ht/',
+     'Kokoro v0.19': 'https://huggingface.co/hexgrad/Kokoro-82M',
+     'CosyVoice 2.0': 'https://github.com/FunAudioLLM/CosyVoice',
+     'MetaVoice': 'https://github.com/metavoiceio/metavoice-src',
+     'OpenVoice': 'https://github.com/myshell-ai/OpenVoice',
+     'OpenVoice V2': 'https://github.com/myshell-ai/OpenVoice',
+     'Pheme': 'https://github.com/PolyAI-LDN/pheme',
+     'Vokan TTS': 'https://huggingface.co/ShoukanLabs/Vokan',
+ }
+
+ closed_source = [
+     'ElevenLabs',
+     'Play.HT 2.0',
+     'Play.HT 3.0 Mini',
+     'PlayDialog',
+ ]
 
  # Model name mapping, can include models that users cannot vote on
  model_names = {
app/ui.py CHANGED
@@ -9,7 +9,32 @@ from .ui_leaderboard import *
  with gr.Blocks() as about:
      gr.Markdown(ABOUT)
 
- with gr.Blocks(css="footer {visibility: hidden}textbox{resize:none}", title="TTS Arena") as app:
+ CSS = """
+ footer {visibility: hidden}
+ textbox {resize: none}
+
+ /* Custom scrollbar styles */
+ ::-webkit-scrollbar {
+     width: 8px;
+     height: 8px;
+ }
+
+ ::-webkit-scrollbar-track {
+     background: var(--background-fill-primary);
+     border-radius: 4px;
+ }
+
+ ::-webkit-scrollbar-thumb {
+     background: var(--border-color-primary);
+     border-radius: 4px;
+ }
+
+ ::-webkit-scrollbar-thumb:hover {
+     background: var(--body-text-color);
+ }
+ """
+
+ with gr.Blocks(css=CSS, theme=gr.themes.Default(font=[gr.themes.GoogleFont("Geist"), "sans-serif"]), title="TTS Arena") as app:
      gr.Markdown(DESCR)
      gr.TabbedInterface([vote, battle, leaderboard, about], ['Vote', 'Battle', 'Leaderboard', 'About'])
      if CITATION_TEXT:
app/ui_leaderboard.py CHANGED
@@ -5,13 +5,51 @@ from .messages import *
 
  with gr.Blocks() as leaderboard:
      gr.Markdown(LDESC)
-     df = gr.Dataframe(interactive=False, min_width=0, wrap=True, column_widths=[30, 200, 50, 50])
+     table = gr.HTML()  # Changed to HTML to support custom CSS
      reloadbtn = gr.Button("Refresh")
      with gr.Row():
-         reveal_prelim = gr.Checkbox(label="Reveal preliminary results", info="Show all models, including models with very few human ratings.", scale=1)
-         hide_battle_votes = gr.Checkbox(label="Hide Battle Mode votes", info="Exclude votes obtained through Battle Mode.", scale=1)
-     reveal_prelim.input(get_leaderboard, inputs=[reveal_prelim, hide_battle_votes], outputs=[df])
-     hide_battle_votes.input(get_leaderboard, inputs=[reveal_prelim, hide_battle_votes], outputs=[df])
-     leaderboard.load(get_leaderboard, inputs=[reveal_prelim, hide_battle_votes], outputs=[df])
-     reloadbtn.click(get_leaderboard, inputs=[reveal_prelim, hide_battle_votes], outputs=[df])
+         with gr.Column(scale=1):
+             reveal_prelim = gr.Checkbox(
+                 label="Reveal preliminary results",
+                 info="Show all models, including models with very few human ratings.",
+             )
+         with gr.Column(scale=1):
+             hide_battle_votes = gr.Checkbox(
+                 label="Hide Battle Mode votes",
+                 info="Exclude votes obtained through Battle Mode.",
+             )
+         with gr.Column(scale=1):
+             sort_by_elo = gr.Checkbox(
+                 label="Sort by Arena Score",
+                 info="Sort models by Arena Score instead of win rate",
+                 value=False,
+             )
+         with gr.Column(scale=1):
+             hide_proprietary = gr.Checkbox(
+                 label="Hide proprietary models",
+                 info="Show only open models",
+                 value=False,
+             )
+
+     def update_leaderboard(*args):
+         return get_leaderboard(*args)
+
+     reveal_prelim.input(update_leaderboard,
+                         inputs=[reveal_prelim, hide_battle_votes, sort_by_elo, hide_proprietary],
+                         outputs=[table])
+     hide_battle_votes.input(update_leaderboard,
+                         inputs=[reveal_prelim, hide_battle_votes, sort_by_elo, hide_proprietary],
+                         outputs=[table])
+     sort_by_elo.input(update_leaderboard,
+                         inputs=[reveal_prelim, hide_battle_votes, sort_by_elo, hide_proprietary],
+                         outputs=[table])
+     hide_proprietary.input(update_leaderboard,
+                         inputs=[reveal_prelim, hide_battle_votes, sort_by_elo, hide_proprietary],
+                         outputs=[table])
+     leaderboard.load(update_leaderboard,
+                         inputs=[reveal_prelim, hide_battle_votes, sort_by_elo, hide_proprietary],
+                         outputs=[table])
+     reloadbtn.click(update_leaderboard,
+                         inputs=[reveal_prelim, hide_battle_votes, sort_by_elo, hide_proprietary],
+                         outputs=[table])
      # gr.Markdown("DISCLAIMER: The licenses listed may not be accurate or up to date, you are responsible for checking the licenses before using the models. Also note that some models may have additional usage restrictions.")