Clémentine committed
Commit ce98da8 · 1 Parent(s): f278160

forbid duplicates

Files changed (1)
  1. app.py +7 -1
app.py CHANGED
@@ -106,6 +106,7 @@ def add_new_eval(
     file_path = path_to_file.name
     scores = {"all": 0, 1: 0, 2: 0, 3: 0}
     num_questions = {"all": 0, 1: 0, 2: 0, 3: 0}
+    task_ids = []
     with open(f"scored/{organisation}_{model}.jsonl", "w") as scored_file:
         with open(file_path, 'r') as f:
             for ix, line in enumerate(f):
@@ -133,12 +134,17 @@ def add_new_eval(
                         "level": level
                     }) + "\n"
                 )
+                task_ids.append(task_id)
 
                 scores["all"] += score
                 scores[level] += score
                 num_questions["all"] += 1
                 num_questions[level] += 1
 
+    # Check if there's any duplicate in the submission
+    if len(task_ids) != len(set(task_ids)):
+        return format_error("There are duplicates in your submission. Please check your file and resubmit it.")
+
     # Save scored file
     api.upload_file(
         repo_id=SUBMISSION_DATASET,
@@ -165,7 +171,7 @@ def add_new_eval(
         "system_prompt": system_prompt,
         "url": url,
         "organisation": organisation,
-        "score": scores["all"]/num_questions["all"],
+        "score": scores["all"]/ref_scores_len,
         "score_level1": scores[1]/num_questions[1],
         "score_level2": scores[2]/num_questions[2],
         "score_level3": scores[3]/num_questions[3],