t0-0 committed on
Commit
1aa7802
·
verified ·
1 Parent(s): b81ce92

Update src/display/utils.py

Browse files
Files changed (1) hide show
  1. src/display/utils.py +1 -13
src/display/utils.py CHANGED
@@ -39,7 +39,7 @@ auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B
39
  auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
40
  auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
41
  auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
42
- auto_eval_column_dict.append(["num_few_shots", ColumnContent, ColumnContent("Few-shot", "str", False)])
43
 
44
  # We use make dataclass to dynamically fill the scores from Tasks
45
  AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
@@ -101,17 +101,6 @@ class Precision(Enum):
101
  return Precision.bfloat16
102
  return Precision.Unknown
103
 
104
- class NumFewShotsType(Enum):
105
- shot0 = ModelDetails("0-shots")
106
- shot4 = ModelDetails("4-shots")
107
-
108
- def from_str(num_few_shots):
109
- if num_few_shots=="0":
110
- return NumFewShotsType.shot0
111
- if num_few_shots=="4":
112
- return NumFewShotsType.shot4
113
- assert True
114
-
115
  # Column selection
116
  COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
117
 
@@ -119,4 +108,3 @@ EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
119
  EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
120
 
121
  BENCHMARK_COLS = [t.value.col_name for t in Tasks]
122
-
 
39
  auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
40
  auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
41
  auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
42
+ auto_eval_column_dict.append(["num_few_shots", ColumnContent, ColumnContent("Few-shot", "number", False)])
43
 
44
  # We use make dataclass to dynamically fill the scores from Tasks
45
  AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
 
101
  return Precision.bfloat16
102
  return Precision.Unknown
103
 
 
 
 
 
 
 
 
 
 
 
 
104
  # Column selection
105
  COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
106
 
 
108
  EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
109
 
110
  BENCHMARK_COLS = [t.value.col_name for t in Tasks]