Upload metric.py with huggingface_hub
metric.py CHANGED
@@ -1,95 +1,147 @@
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""TODO: Add a description here."""
 
 import evaluate
 import datasets
 
 
-# TODO: Add BibTeX citation
-_CITATION = """\
-@InProceedings{huggingface:module,
-title = {A great new module},
-authors={huggingface, Inc.},
-year={2020}
-}
-"""
-
-# TODO: Add description of the module here
-_DESCRIPTION = """\
-This new module is designed to solve this great ML task and is crafted with a lot of care.
-"""
-
-
-# TODO: Add description of the arguments of the module here
-_KWARGS_DESCRIPTION = """
-Calculates how good are predictions given some references, using certain scores
-Args:
-    predictions: list of predictions to score. Each predictions
-        should be a string with tokens separated by spaces.
-    references: list of reference for each prediction. Each
-        reference should be a string with tokens separated by spaces.
-Returns:
-    accuracy: description of the first score,
-    another_score: description of the second score,
-Examples:
-    Examples should be written in doctest format, and should illustrate how
-    to use the function.
-
-    >>> my_new_module = evaluate.load("my_new_module")
-    >>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
-    >>> print(results)
-    {'accuracy': 1.0}
-"""
-
-# TODO: Define external resources urls if needed
-BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
-
-
-@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
-class Metric(evaluate.Metric):
-    """TODO: Short description of my evaluation module."""
 
     def _info(self):
-        # TODO: Specifies the evaluate.EvaluationModuleInfo object
         return evaluate.MetricInfo(
-            # This is the description that will appear on the modules page.
-            module_type="metric",
-            description=_DESCRIPTION,
-            citation=_CITATION,
-            inputs_description=_KWARGS_DESCRIPTION,
-            # This defines the format of each prediction and reference
-            features=datasets.Features({
-                'predictions': datasets.Value('int64'),
-                'references': datasets.Value('int64'),
-            }),
-            # Homepage of the module for documentation
-            homepage="http://module.homepage",
-            # Additional links to the codebase or references
-            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
-            reference_urls=["http://path.to.reference.url/new_module"]
         )
 
-    def _download_and_prepare(self, dl_manager):
-        """Optional: download external resources useful to compute the scores"""
-        # TODO: Download external resources if needed
-        pass
-
-    def _compute(self, predictions, references):
-        """Returns the scores"""
-        # TODO: Compute the different scores of the module
-        accuracy = sum(i == j for i, j in zip(predictions, references)) / len(predictions)
-        return {
-            "accuracy": accuracy,
-        }
+#####
+# imports for hf system:
+#####
+from .artifact import __file__ as _
+from .blocks import __file__ as _
+from .card import __file__ as _
+from .catalog import __file__ as _
+from .collections import __file__ as _
+from .common import __file__ as _
+from .file_utils import __file__ as _
+
+# from .fusion import __file__
+from .generator_utils import __file__ as _
+from .instructions import __file__ as _
+from .loaders import __file__ as _
+from .load import __file__ as _
+from .metrics import __file__ as _
+from .normalizers import __file__ as _
+from .operator import __file__ as _
+from .operators import __file__ as _
+from .processors import __file__ as _
+from .recipe import __file__ as _
+from .register import __file__ as _
+from .splitters import __file__ as _
+from .split_utils import __file__ as _
+from .stream import __file__ as _
+from .task import __file__ as _
+from .templates import __file__ as _
+from .text_utils import __file__ as _
+from .schema import __file__ as _
+
+# from .utilize import __file__ as _
+# from .validate import __file__ as _
+#############
+
+from .stream import MultiStream, Stream
+
+from .operator import SequntialOperator, SequntialOperatorInitilizer, MultiStreamOperator, StreamInitializerOperator
+
+from .operators import (
+    ApplyValueOperatorsField,
+    ApplyStreamOperatorsField,
+    SplitByValue,
+    MergeStreams,
+    FlattenInstances,
+)
 
 import evaluate
 import datasets
 
+from datasets import (
+    Features,
+    Value,
+    Sequence,
+)
+
+from dataclasses import field
+from typing import List, Union, Dict, Optional, Generator, Any, Iterable
+
+
+class MultiStreamScoreMean(MultiStreamOperator):
+    def aggegate_results(self, multi_stream: MultiStream):
+        scores = []
+        for stream in multi_stream.values():
+            instance = stream.peak()
+            scores.append(instance["score"]["global"]["score"])
+
+        from statistics import mean
+
+        return mean(scores)
+
+    def spread_results(self, stream: Stream, score: float):
+        for instance in stream:
+            instance["score"]["global"]["groups_mean_score"] = score
+            yield instance
+
+    def process(self, multi_stream: MultiStream) -> MultiStream:
+        mean_score = self.aggegate_results(multi_stream)
+
+        result = {}
+        for stream_name, stream in multi_stream.items():
+            result[stream_name] = Stream(self.spread_results, gen_kwargs={"stream": stream, "score": mean_score})
+
+        return MultiStream(result)
+
+
+class FromPredictionsAndOriginalData(StreamInitializerOperator):
+    def zip(self, predictions, references):
+        for prediction, original in zip(predictions, references):
+            yield {**original, "prediction": prediction}
+
+    def process(self, predictions: List[str], references: Iterable, split_name: str = "all") -> MultiStream:
+        return MultiStream(
+            {split_name: Stream(self.zip, gen_kwargs={"predictions": predictions, "references": references})}
+        )
+
+
+from .schema import UNITXT_DATASET_SCHEMA
+
+
+class MetricRecipe(SequntialOperatorInitilizer):
+    def prepare(self):
+        self.steps = [
+            FromPredictionsAndOriginalData(),
+            ApplyValueOperatorsField(
+                value_field="prediction", operators_field="processors", default_operators=["to_string"]
+            ),
+            SplitByValue(["group"]),
+            ApplyStreamOperatorsField(
+                "metrics",
+                reversed=True,
+            ),
+            MultiStreamScoreMean(),
+            MergeStreams(),
+        ]
+
+
+UNITXT_METRIC_SCHEMA = Features({"predictions": Value("string"), "references": dict(UNITXT_DATASET_SCHEMA)})
+
+
+# @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+class UnitextMetric(evaluate.Metric):
     def _info(self):
         return evaluate.MetricInfo(
+            description="_DESCRIPTION",
+            citation="_CITATION",
+            # inputs_description=_KWARGS_DESCRIPTION,
+            features=UNITXT_METRIC_SCHEMA,
+            codebase_urls=["https://"],
+            reference_urls=[
+                "https://",
+                "https://",
+            ],
         )
 
+    def _compute(self, predictions: List[str], references: Iterable, flatten: bool = False, split_name: str = "all"):
+        recipe = MetricRecipe()
+
+        multi_stream = recipe(predictions=predictions, references=references, split_name=split_name)
+
+        if flatten:
+            operator = FlattenInstances()
+            multi_stream = operator(multi_stream)
+
+        stream = multi_stream[split_name]
+
+        return list(stream)
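For context, the uploaded module replaces the stock `evaluate` template with a thin wrapper around the unitxt stream pipeline: `_compute` zips each prediction with its original dataset row, applies the row's "processors" to the prediction, splits instances into streams by their "group" field, runs each group's "metrics", averages the per-group global scores with `MultiStreamScoreMean`, and merges the streams back. A minimal usage sketch under stated assumptions follows — the repo id and the `load_unitxt_rows` helper are hypothetical, and the exact reference fields come from `UNITXT_DATASET_SCHEMA`, which is not shown in this diff:

import evaluate

# Hypothetical repo id: use the repository this metric.py was uploaded to.
metric = evaluate.load("unitxt/metric")

# Per UNITXT_METRIC_SCHEMA, each prediction is a string and each reference is a
# full dataset row. The recipe reads three fields from every row:
#   "processors" -> operator names applied to the prediction (default ["to_string"])
#   "metrics"    -> metric operators applied to each group's stream
#   "group"      -> value used by SplitByValue to split rows into streams
references = load_unitxt_rows()          # hypothetical helper returning such rows
predictions = ["positive", "negative"]   # one string per row

results = metric.compute(
    predictions=predictions,
    references=references,
    flatten=False,      # True runs FlattenInstances before collecting
    split_name="all",   # name of the single stream _compute builds and reads back
)

# _compute returns the merged stream as a list of instances; MultiStreamScoreMean
# writes the across-group average to score["global"]["groups_mean_score"].
print(results[0]["score"]["global"]["groups_mean_score"])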