Update MetricTemplate1 to use string values for predictions and references
Browse files- metric_template_1.py +8 -2
metric_template_1.py
CHANGED
@@ -73,8 +73,8 @@ class MetricTemplate1(evaluate.Metric):
|
|
73 |
# This defines the format of each prediction and reference
|
74 |
features=datasets.Features(
|
75 |
{
|
76 |
-
"predictions": datasets.Value("int64"),
|
77 |
-
"references": datasets.Value("int64"),
|
78 |
}
|
79 |
),
|
80 |
# Homepage of the module for documentation
|
@@ -92,6 +92,12 @@ class MetricTemplate1(evaluate.Metric):
|
|
92 |
def _compute(self, predictions, references):
|
93 |
"""Returns the scores"""
|
94 |
# TODO: Compute the different scores of the module
|
|
|
|
|
|
|
|
|
|
|
|
|
95 |
accuracy = sum(i == j for i, j in zip(predictions, references)) / len(
|
96 |
predictions
|
97 |
)
|
|
|
73 |
# This defines the format of each prediction and reference
|
74 |
features=datasets.Features(
|
75 |
{
|
76 |
+
"predictions": datasets.Value("string"),
|
77 |
+
"references": datasets.Value("string"),
|
78 |
}
|
79 |
),
|
80 |
# Homepage of the module for documentation
|
|
|
92 |
def _compute(self, predictions, references):
|
93 |
"""Returns the scores"""
|
94 |
# TODO: Compute the different scores of the module
|
95 |
+
|
96 |
+
# Convert the inputs to strings
|
97 |
+
predictions = [str(p) for p in predictions]
|
98 |
+
references = [str(r) for r in references]
|
99 |
+
|
100 |
+
# Calculate accuracy
|
101 |
accuracy = sum(i == j for i, j in zip(predictions, references)) / len(
|
102 |
predictions
|
103 |
)
|