Tristan Thrush committed
Commit · 9c04f1c
1 Parent(s): dbe89c5

changed interface according to comments
app.py CHANGED
@@ -1,5 +1,3 @@
-# Basic example for doing model-in-the-loop dynamic adversarial data collection
-# using Gradio Blocks.
 import json
 import os
 import threading
@@ -72,6 +70,10 @@ def random_sample_with_least_annotated_examples_first():
     example["outputs"] = random.sample(example["outputs"], 2)
     return example
 
+def prompt_pretty_markdown(prompt):
+    prompt = prompt.replace("Input:", "\n\n<b>Input:</b>\n\n")
+    return prompt
+
 
 with demo:
     dummy = gr.Textbox(visible=False)  # dummy for passing assignmentId
@@ -87,13 +89,13 @@ with demo:
     }
     state = gr.JSON(state_dict, visible=False)
 
-    gr.Markdown("# Choose the
+    gr.Markdown("# Choose the more helpful response for the input")
 
     state_display = gr.Markdown(f"Your messages: 0/{TOTAL_CNT}")
 
     def _select_response(selected_response, state, dummy):
         if selected_response == "":
-            # Don't do anything if the worker didn't select
+            # Don't do anything if the worker didn't select things yet.
             return (
                 gr.update(),
                 gr.update(),
@@ -101,6 +103,7 @@ with demo:
                 gr.update(),
                 gr.update(),
                 gr.update(),
+                gr.update(),
                 state,
                 dummy,
             )
@@ -135,18 +138,20 @@ with demo:
         toggle_final_submit_preview = gr.update(visible=done)
         toggle_final_submit = gr.update(visible=False)
 
-
+        toggle_submit_response_button = gr.update(visible=not done)
 
         new_sample = random_sample_with_least_annotated_examples_first()
         new_outputs = [obj["output"] for obj in new_sample["outputs"]]
         state["data"].append(new_sample)
-        past_conversation = gr.update(
-
+        past_conversation = gr.update(
+            value=prompt_pretty_markdown(new_sample["prompt"])
+        )
+        select_response = gr.update(choices=["(a) " + new_outputs[0], "(b) " + new_outputs[1], "(c) Both (a) and (b) are similarly good", "(d) Both (a) and (b) are similarly bad"], value="")
 
         return (
             past_conversation,
             select_response,
-
+            toggle_submit_response_button,
             toggle_final_submit,
             toggle_final_submit_preview,
             state_display,
@@ -155,12 +160,19 @@ with demo:
     )
 
     # Input fields
-
+    gr.Markdown('<span style="padding:7px;color:black;background:#ffd21e;border-radius:10px"><b>Prompt</b></span>')
+
+    past_conversation = gr.Markdown(
+        value=prompt_pretty_markdown(initial_sample["prompt"])
+    )
     initial_outputs = [obj["output"] for obj in initial_sample["outputs"]]
+
+    gr.Markdown('<span style="padding:7px;color:black;background:#ffd21e;border-radius:10px"><b>Select the most helpful response</b></span>')
     select_response = gr.Radio(
-        choices=initial_outputs,
+        choices=["(a) " + initial_outputs[0], "(b) " + initial_outputs[1], "(c) Both (a) and (b) are similarly good", "(d) Both (a) and (b) are similarly bad"], label="",
     )
-
+
+    submit_response_button = gr.Button("Submit Response")
     submit_hit_button = gr.Button("Submit HIT", visible=False)
     submit_hit_button_preview = gr.Button(
         "Submit Work (preview mode; no MTurk HIT credit, but your examples will still be stored)",
@@ -174,13 +186,13 @@ with demo:
     }
     """
 
-
+    submit_response_button.click(
         _select_response,
         inputs=[select_response, state, dummy],
         outputs=[
             past_conversation,
             select_response,
-
+            submit_response_button,
             submit_hit_button,
             submit_hit_button_preview,
             state_display,