arnaud committed on
Commit 9ef2a3b · 1 Parent(s): 64f83a5

First commit

Files changed (3)
  1. app.py +10 -0
  2. requirements.txt +3 -0
  3. seqeval.py +158 -0
app.py ADDED
@@ -0,0 +1,10 @@
+ import sys
+ import evaluate
+ from evaluate.utils import launch_gradio_widget
+
+
+ sys.path = [p for p in sys.path if p != "/home/user/app"]
+ module = evaluate.load("seqeval")
+ sys.path = ["/home/user/app"] + sys.path
+
+ launch_gradio_widget(module)
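
The sys.path shuffle above appears to be there because this repository also ships a seqeval.py, which would otherwise shadow the installed seqeval package while evaluate.load("seqeval") builds the metric; the original path is restored afterwards for the widget. As a minimal sketch, the loaded module can also be exercised without the Gradio widget (the label sequences below are made up for illustration):

    import evaluate

    # Load the "seqeval" metric from the Hub, as app.py does.
    seqeval_metric = evaluate.load("seqeval")

    # Toy IOB2-tagged sequences (hypothetical, for illustration only).
    predictions = [["O", "B-PER", "I-PER", "O"], ["B-LOC", "O"]]
    references = [["O", "B-PER", "I-PER", "O"], ["B-LOC", "O"]]

    results = seqeval_metric.compute(predictions=predictions, references=references)
    print(results["overall_f1"])  # 1.0 here, since predictions equal references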
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ git+https://github.com/huggingface/evaluate@eef73da23f52e3cc40e92a50cffeab538f9dd99b
+ git+https://github.com/huggingface/transformers.git
+ seqeval
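
As a quick environment check (a hypothetical snippet, not part of the commit), the git-pinned installs can be verified from Python before launching the Space:

    import evaluate
    import transformers

    # requirements.txt installs both packages from git; printing the resolved
    # versions confirms the environment picked up those builds.
    print("evaluate:", evaluate.__version__)
    print("transformers:", transformers.__version__)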
seqeval.py ADDED
@@ -0,0 +1,158 @@
+ # Copyright 2020 The HuggingFace Evaluate Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ seqeval metric. """
+
+ import importlib
+ from typing import List, Optional, Union
+
+ import datasets
+ from seqeval.metrics import accuracy_score, classification_report
+
+ import evaluate
+
+
+ _CITATION = """\
+ @inproceedings{ramshaw-marcus-1995-text,
+     title = "Text Chunking using Transformation-Based Learning",
+     author = "Ramshaw, Lance and
+       Marcus, Mitch",
+     booktitle = "Third Workshop on Very Large Corpora",
+     year = "1995",
+     url = "https://www.aclweb.org/anthology/W95-0107",
+ }
+ @misc{seqeval,
+     title={{seqeval}: A Python framework for sequence labeling evaluation},
+     url={https://github.com/chakki-works/seqeval},
+     note={Software available from https://github.com/chakki-works/seqeval},
+     author={Hiroki Nakayama},
+     year={2018},
+ }
+ """
+
+ _DESCRIPTION = """\
+ seqeval is a Python framework for sequence labeling evaluation.
+ seqeval can evaluate the performance of chunking tasks such as named-entity recognition, part-of-speech tagging, semantic role labeling and so on.
+ This is well-tested by using the Perl script conlleval, which can be used for
+ measuring the performance of a system that has processed the CoNLL-2000 shared task data.
+ seqeval supports the following formats:
+ IOB1
+ IOB2
+ IOE1
+ IOE2
+ IOBES
+ See the [README.md] file at https://github.com/chakki-works/seqeval for more information.
+ """
+
+ _KWARGS_DESCRIPTION = """
+ Produces labelling scores along with its sufficient statistics
+ from a source against one or more references.
+ Args:
+     predictions: List of List of predicted labels (Estimated targets as returned by a tagger)
+     references: List of List of reference labels (Ground truth (correct) target values)
+     suffix: True if the IOB prefix is after type, False otherwise. default: False
+     scheme: Specify target tagging scheme. Should be one of ["IOB1", "IOB2", "IOE1", "IOE2", "IOBES", "BILOU"].
+         default: None
+     mode: Whether to count correct entity labels with incorrect I/B tags as true positives or not.
+         If you want to only count exact matches, pass mode="strict". default: None.
+     sample_weight: Array-like of shape (n_samples,), weights for individual samples. default: None
+     zero_division: Which value to substitute as a metric value when encountering zero division. Should be one of 0, 1,
+         "warn". "warn" acts as 0, but the warning is raised.
+ Returns:
+     'scores': dict. Summary of the scores for overall and per type
+         Overall:
+             'accuracy': accuracy,
+             'precision': precision,
+             'recall': recall,
+             'f1': F1 score, also known as balanced F-score or F-measure,
+         Per type:
+             'precision': precision,
+             'recall': recall,
+             'f1': F1 score, also known as balanced F-score or F-measure
+ Examples:
+     >>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
+     >>> references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
+     >>> seqeval = evaluate.load("seqeval")
+     >>> results = seqeval.compute(predictions=predictions, references=references)
+     >>> print(list(results.keys()))
+     ['MISC', 'PER', 'overall_precision', 'overall_recall', 'overall_f1', 'overall_accuracy']
+     >>> print(results["overall_f1"])
+     0.5
+     >>> print(results["PER"]["f1"])
+     1.0
+ """
+
+
+ @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+ class Seqeval(evaluate.Metric):
+     def _info(self):
+         return evaluate.MetricInfo(
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             homepage="https://github.com/chakki-works/seqeval",
+             inputs_description=_KWARGS_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "predictions": datasets.Sequence(datasets.Value("string", id="label"), id="sequence"),
+                     "references": datasets.Sequence(datasets.Value("string", id="label"), id="sequence"),
+                 }
+             ),
+             codebase_urls=["https://github.com/chakki-works/seqeval"],
+             reference_urls=["https://github.com/chakki-works/seqeval"],
+         )
+
+     def _compute(
+         self,
+         predictions,
+         references,
+         suffix: bool = False,
+         scheme: Optional[str] = None,
+         mode: Optional[str] = None,
+         sample_weight: Optional[List[int]] = None,
+         zero_division: Union[str, int] = "warn",
+     ):
+         if scheme is not None:
+             try:
+                 scheme_module = importlib.import_module("seqeval.scheme")
+                 scheme = getattr(scheme_module, scheme)
+             except AttributeError:
+                 raise ValueError(f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {scheme}")
+         report = classification_report(
+             y_true=references,
+             y_pred=predictions,
+             suffix=suffix,
+             output_dict=True,
+             scheme=scheme,
+             mode=mode,
+             sample_weight=sample_weight,
+             zero_division=zero_division,
+         )
+         report.pop("macro avg")
+         report.pop("weighted avg")
+         overall_score = report.pop("micro avg")
+
+         scores = {
+             type_name: {
+                 "precision": score["precision"],
+                 "recall": score["recall"],
+                 "f1": score["f1-score"],
+                 "number": score["support"],
+             }
+             for type_name, score in report.items()
+         }
+         scores["overall_precision"] = overall_score["precision"]
+         scores["overall_recall"] = overall_score["recall"]
+         scores["overall_f1"] = overall_score["f1-score"]
+         scores["overall_accuracy"] = accuracy_score(y_true=references, y_pred=predictions)
+
+         return scores
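
Usage note (a sketch, not part of the commit): the scheme and mode arguments are passed straight through to seqeval.metrics.classification_report, with the scheme string resolved to a class from seqeval.scheme inside _compute, so strict IOB2 scoring can be requested like this:

    from evaluate import load

    seqeval_metric = load("seqeval")

    # Hypothetical sequences where one entity boundary disagrees.
    predictions = [["B-PER", "I-PER", "O", "B-ORG"]]
    references = [["B-PER", "I-PER", "O", "I-ORG"]]

    # mode="strict" with an explicit scheme counts only exact entity matches.
    strict = seqeval_metric.compute(
        predictions=predictions,
        references=references,
        mode="strict",
        scheme="IOB2",
    )
    print(strict["overall_precision"], strict["overall_recall"], strict["overall_f1"])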