awacke1 committed
Commit 5ccd3ae · verified · 1 Parent(s): f94988e

Update app.py

Files changed (1): app.py +21 -21
app.py CHANGED
@@ -29,25 +29,6 @@ with gr.Blocks() as block:
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("📊 MEGA-Bench", elem_id="qa-tab-table1", id=0):
-            with gr.Row():
-                with gr.Accordion("Citation", open=True):
-                    citation_button = gr.Textbox(
-                        value=CITATION_BUTTON_TEXT,
-                        label=CITATION_BUTTON_LABEL,
-                        elem_id="citation-button",
-                        lines=10,
-                    )
-                gr.Markdown(
-                    TABLE_INTRODUCTION
-                )
-
-            with gr.Row():
-                table_selector = gr.Radio(
-                    choices=["Default", "Single Image"],
-                    label="Select table to display. Default: all MEGA-Bench tasks; Single Image: single-image tasks only.",
-                    value="Default"
-                )
-
             # Define different captions for each table
             default_caption = "**Table 1: MEGA-Bench full results.** The number in the parentheses is the number of tasks of each keyword. <br> The Core set contains $N_{\\text{core}} = 440$ tasks evaluated by rule-based metrics, and the Open-ended set contains $N_{\\text{open}} = 65$ tasks evaluated by a VLM judge (we use GPT-4o-0806). <br> Different from the results in our paper, we only use the Core results with CoT prompting here for clarity and compatibility with the released data. <br> $\\text{Overall} \\ = \\ \\frac{\\text{Core} \\ \\cdot \\ N_{\\text{core}} \\ + \\ \\text{Open-ended} \\ \\cdot \\ N_{\\text{open}}}{N_{\\text{core}} \\ + \\ N_{\\text{open}}}$ <br> * indicates self-reported results from the model authors."
 
@@ -76,7 +57,7 @@ with gr.Blocks() as block:
                 value=initial_data,
                 headers=initial_headers,
                 datatype=["number", "html"] + ["number"] * (len(initial_headers) - 2),
-                interactive=False,
+                interactive=True,
                 elem_classes="custom-dataframe",
                 max_height=2400,
                 column_widths=["100px", "240px"] + ["160px"] * 3 + ["210px"] * (len(initial_headers) - 5),
@@ -95,13 +76,32 @@ with gr.Blocks() as block:
                     value=data,
                     headers=headers,
                     datatype=["number", "html"] + ["number"] * (len(headers) - 2),
-                    interactive=False,
+                    interactive=True,
                     column_widths=["100px", "240px"] + ["160px"] * 3 + ["210px"] * (len(headers) - 5),
                 ),
                 caption,
                 f"<style>{base_css}\n{table_css}</style>"
             ]
 
+            with gr.Row():
+                with gr.Accordion("Citation", open=False):
+                    citation_button = gr.Textbox(
+                        value=CITATION_BUTTON_TEXT,
+                        label=CITATION_BUTTON_LABEL,
+                        elem_id="citation-button",
+                        lines=10,
+                    )
+                gr.Markdown(
+                    TABLE_INTRODUCTION
+                )
+
+            with gr.Row():
+                table_selector = gr.Radio(
+                    choices=["Default", "Single Image"],
+                    label="Select table to display. Default: all MEGA-Bench tasks; Single Image: single-image tasks only.",
+                    value="Default"
+                )
+
             def update_selectors(table_type):
                 loader = default_loader if table_type == "Default" else si_loader
                 return [
 
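Net effect of the diff: the citation accordion and table selector move from above the tables to below them, the accordion now starts collapsed (open=False), and both dataframes become interactive. Below is a minimal, self-contained sketch of the resulting layout, assuming Gradio 4.x; the toy load_table(), the TABLES data, the placeholder citation string, and the .change() wiring are illustrative stand-ins, not the app's actual default_loader / si_loader or CSS handling.

# A minimal sketch (assuming Gradio 4.x) of the pattern this commit rearranges:
# the selector and citation accordion render *below* the table, the accordion
# starts collapsed, and the dataframe is interactive.
import gradio as gr

# Hypothetical stand-in for the app's default_loader / si_loader outputs.
TABLES = {
    "Default": (["Rank", "Model", "Overall"], [[1, "model-a", 52.3]]),
    "Single Image": (["Rank", "Model", "Single Image"], [[1, "model-a", 49.8]]),
}

def load_table(table_type):
    headers, rows = TABLES[table_type]
    # Returning a fresh component instance updates the rendered dataframe,
    # swapping both rows and headers at once.
    return gr.Dataframe(value=rows, headers=headers, interactive=True)

with gr.Blocks() as block:
    with gr.Tabs(elem_classes="tab-buttons"):
        with gr.TabItem("📊 MEGA-Bench", id=0):
            headers, rows = TABLES["Default"]
            table = gr.Dataframe(value=rows, headers=headers, interactive=True)
            # Post-commit order: controls and citation sit under the table.
            table_selector = gr.Radio(
                choices=["Default", "Single Image"],
                label="Select table to display.",
                value="Default",
            )
            with gr.Accordion("Citation", open=False):
                gr.Textbox(value="@article{...}", label="Citation", lines=10)
            table_selector.change(load_table, inputs=table_selector, outputs=table)

if __name__ == "__main__":
    block.launch()

Rebuilding the component in the callback, rather than returning a plain value, is what lets the handler change the header set along with the rows; the app's own update path does the same, returning a new gr.Dataframe alongside the caption and CSS.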