apolinario committed
Commit 06fdab0 · 1 Parent(s): 9512f11
Files changed (1)
  1. app.py +140 -0
app.py ADDED
@@ -0,0 +1,140 @@
+ import gradio as gr
+ import random
+ import time
+
+ MAX_QUESTIONS = 10  # Maximum number of questions to support
+
+ ######
+ # Fix the models
+ #
+ MODELS = [
+     "anthropic/claude-3-opus",
+     "anthropic/claude-3-sonnet",
+     "google/gemini-pro",
+     "meta-llama/llama-2-70b-chat",
+     "mistral/mistral-medium",
+     "deepseek/deepseek-coder",
+     "deepseek/deepseek-r1",
+ ]
+ #
+ ######
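+ # Note: these identifiers use an OpenRouter-style "provider/model" slug
+ # format, in line with the OpenRouter integration stubbed out below; exact
+ # slugs may need checking against the provider's current model list.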
+
+ ######
+ # Add OpenRouter here
+ #
+ def get_response(question, model):
+     # Simulate an API call with a random delay
+     time.sleep(random.uniform(0.5, 1.5))
+     return f"Sample response from {model} for: {question}"
+ #
+ ######
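+
+ # A minimal sketch of what the real call above could look like, using
+ # OpenRouter's OpenAI-compatible endpoint. Not part of this commit: the
+ # helper name, the `openai` dependency, and the OPENROUTER_API_KEY env
+ # var are assumptions to adapt when replacing the stub.
+ #
+ # import os
+ # from openai import OpenAI
+ #
+ # def get_response_openrouter(question, model):
+ #     client = OpenAI(
+ #         base_url="https://openrouter.ai/api/v1",   # OpenRouter's OpenAI-compatible API
+ #         api_key=os.environ["OPENROUTER_API_KEY"],  # assumed env var
+ #     )
+ #     completion = client.chat.completions.create(
+ #         model=model,
+ #         messages=[{"role": "user", "content": question}],
+ #     )
+ #     return completion.choices[0].message.content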
+
+ def read_questions(file_obj):
+     """Read questions from uploaded file and return as list"""
+     with open(file_obj.name, 'r') as file:
+         questions = [q.strip() for q in file.readlines() if q.strip()]
+     if len(questions) > MAX_QUESTIONS:
+         raise gr.Error(f"Maximum {MAX_QUESTIONS} questions allowed.")
+     return questions
+
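+ # For reference, the upload is a plain-text file with one question per
+ # line, up to MAX_QUESTIONS lines, e.g. (illustrative contents only):
+ #   What is the capital of France?
+ #   Write a haiku about debugging.
+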
+ with gr.Blocks() as demo:
+     gr.Markdown("# Vibes Benchmark\nUpload a `.txt` file with **one question per line**.")
+
+     file_input = gr.File(label="Upload your questions (.txt)")
+     run_button = gr.Button("Run Benchmark", variant="primary")
+
+     # Create dynamic response areas
+     response_areas = []
+     for i in range(MAX_QUESTIONS):
+         with gr.Group(visible=False) as group_i:
+             gr.Markdown(f"### Question {i+1}")
+             with gr.Row():
+                 with gr.Column():
+                     # Accordion for Model 1
+                     with gr.Accordion("Model 1", open=False):
+                         model1_i = gr.Markdown("")
+                     response1_i = gr.Textbox(label="Response 1", interactive=False, lines=4)
+                 with gr.Column():
+                     # Accordion for Model 2
+                     with gr.Accordion("Model 2", open=False):
+                         model2_i = gr.Markdown("")
+                     response2_i = gr.Textbox(label="Response 2", interactive=False, lines=4)
+             gr.Markdown("---")
+
+         response_areas.append({
+             'group': group_i,
+             'model1': model1_i,
+             'response1': response1_i,
+             'model2': model2_i,
+             'response2': response2_i
+         })
+
+     def process_file(file):
+         """Show/hide question groups depending on how many questions are in the file."""
+         if file is None:
+             raise gr.Error("Please upload a file first.")
+         questions = read_questions(file)
+
+         # Show as many question groups as needed; hide the rest
+         updates = []
+         for i in range(MAX_QUESTIONS):
+             updates.append(gr.update(visible=(i < len(questions))))
+
+         return updates
+
+     def run_benchmark(file):
+         """Generator function yielding partial updates in real time."""
+         questions = read_questions(file)
+
+         # Initialize all update values as blank
+         # We have 4 fields per question (model1, response1, model2, response2)
+         # => total of MAX_QUESTIONS * 4 output components
+         updates = [gr.update(value="")] * (MAX_QUESTIONS * 4)
+
+         # Process each question, 2 models per question
+         for i, question in enumerate(questions):
+             # 1) Pick first model, yield it
+             model_1 = random.choice(MODELS)
+             updates[i*4] = gr.update(value=f"**{model_1}**")  # model1 for question i
+             yield updates  # partial update (reveal model_1 accordion)
+
+             # 2) Get response from model_1
+             response_1 = get_response(question, model_1)
+             updates[i*4 + 1] = gr.update(value=response_1)  # response1
+             yield updates
+
+             # 3) Pick second model (ensure different from first), yield it
+             remaining_models = [m for m in MODELS if m != model_1]
+             model_2 = random.choice(remaining_models)
+             updates[i*4 + 2] = gr.update(value=f"**{model_2}**")  # model2
+             yield updates
+
+             # 4) Get response from model_2
+             response_2 = get_response(question, model_2)
+             updates[i*4 + 3] = gr.update(value=response_2)  # response2
+             yield updates
+
+     # The outputs we update after each yield
+     update_targets = []
+     for area in response_areas:
+         update_targets.append(area['model1'])
+         update_targets.append(area['response1'])
+         update_targets.append(area['model2'])
+         update_targets.append(area['response2'])
+
+     # Connect events
+     file_input.change(
+         fn=process_file,
+         inputs=file_input,
+         outputs=[area['group'] for area in response_areas]
+     )
+
+     run_button.click(
+         fn=run_benchmark,
+         inputs=file_input,
+         outputs=update_targets
+     )
+
+ # Enable queue for partial outputs to appear as they are yielded
+ demo.queue()
+ demo.launch()