init
Browse files
- data/tweet_ner7/test.jsonl +0 -0
- data/tweet_ner7/train.jsonl +0 -0
- data/tweet_ner7/validation.jsonl +0 -0
- super_tweet_eval.py +12 -89
data/tweet_ner7/test.jsonl
ADDED
The diff for this file is too large to render. See raw diff.
data/tweet_ner7/train.jsonl
ADDED
The diff for this file is too large to render. See raw diff.
data/tweet_ner7/validation.jsonl
ADDED
The diff for this file is too large to render. See raw diff.
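
The three added files are line-delimited JSON, which is why the viewer cannot render their diffs. A minimal inspection sketch (the local path is hypothetical and assumes a checkout of this repository; the field names printed depend on the actual tweet_ner7 schema, which is not shown here):

    import json

    # Hypothetical local path into a checkout of this dataset repository.
    path = "data/tweet_ner7/validation.jsonl"

    # Each line of a .jsonl file is one JSON object, so decoding the first
    # line is enough to see the record schema without loading the whole file.
    with open(path, encoding="utf-8") as f:
        first_record = json.loads(next(f))

    print(sorted(first_record))  # field names
    print(first_record)          # one full example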
super_tweet_eval.py
CHANGED
@@ -1,9 +1,5 @@
 """The SuperTweetEval benchmark."""
-
-
 import json
-import os
-
 import datasets
 
 
@@ -102,7 +98,7 @@ class SuperTweetEval(datasets.GeneratorBasedBuilder):
             description=_TWEET_TOPIC_DESCRIPTION,
             citation=_TWEET_TOPIC_CITATION,
             features=["text", "label_list", "id", "date"],
-            data_url="https://
+            data_url="https://huggingface.co/datasets/cardiffnlp/super_tweet_eval/resolve/main/data/tweet_topic",
         ),
         SuperTweetEvalConfig(
             name="tweet_ner7",
@@ -142,87 +138,14 @@ class SuperTweetEval(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-
-
-
-
-
-
-
-
-
-
-
-            ),
-        ]
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "data_file": os.path.join(dl_dir, "train.jsonl"),
-                    "split": datasets.Split.TRAIN,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "data_file": os.path.join(dl_dir, "val.jsonl"),
-                    "split": datasets.Split.VALIDATION,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "data_file": os.path.join(dl_dir, "test.jsonl"),
-                    "split": datasets.Split.TEST,
-                },
-            ),
-        ]
-
-    def _generate_examples(self, data_file, split):
-        with open(data_file, encoding="utf-8") as f:
-            for line in f:
-                row = json.loads(line)
-
-                if self.config.name == "multirc":
-                    paragraph = row["passage"]
-                    for question in paragraph["questions"]:
-                        for answer in question["answers"]:
-                            label = answer.get("label")
-                            key = "%s_%s_%s" % (row["idx"], question["idx"], answer["idx"])
-                            yield key, {
-                                "paragraph": paragraph["text"],
-                                "question": question["question"],
-                                "answer": answer["text"],
-                                "label": -1 if label is None else _cast_label(bool(label)),
-                                "idx": {"paragraph": row["idx"], "question": question["idx"], "answer": answer["idx"]},
-                            }
-                elif self.config.name == "record":
-                    passage = row["passage"]
-                    entity_texts, entity_spans = _get_record_entities(passage)
-                    for qa in row["qas"]:
-                        yield qa["idx"], {
-                            "passage": passage["text"],
-                            "query": qa["query"],
-                            "entities": entity_texts,
-                            "entity_spans": entity_spans,
-                            "answers": _get_record_answers(qa),
-                            "idx": {"passage": row["idx"], "query": qa["idx"]},
-                        }
-                else:
-                    if self.config.name.startswith("wsc"):
-                        row.update(row["target"])
-                    example = {feature: row[feature] for feature in self.config.features}
-                    if self.config.name == "wsc.fixed":
-                        example = _fix_wst(example)
-                    example["idx"] = row["idx"]
-
-                    if "label" in row:
-                        if self.config.name == "copa":
-                            example["label"] = "choice2" if row["label"] else "choice1"
-                        else:
-                            example["label"] = _cast_label(row["label"])
-                    else:
-                        assert split == datasets.Split.TEST, row
-                        example["label"] = -1
-
-                    yield example["idx"], example
+        downloaded_file = dl_manager.download_and_extract({s: f"{self.config.data_url}/{s}.jsonl" for s in ["train", "test", "validation"]})
+        return [datasets.SplitGenerator(name=s, gen_kwargs={"filepath": downloaded_file[s]}) for s in ["train", "test", "validation"]]
+
+    def _generate_examples(self, filepath):
+        _key = 0
+        with open(filepath, encoding="utf-8") as f:
+            _list = [i for i in f.read().split('\n') if len(i) > 0]
+            for i in _list:
+                data = json.loads(i)
+                yield _key, data
+                _key += 1
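
The new _generate_examples reads the whole file into memory (f.read().split('\n')) before decoding. For line-delimited JSON, a streaming variant (a sketch only, not part of this commit) would yield the same records while keeping memory flat:

    def _generate_examples(self, filepath):
        # Iterate line by line instead of reading the whole file; enumerate
        # supplies the unique integer key that GeneratorBasedBuilder expects.
        with open(filepath, encoding="utf-8") as f:
            for key, line in enumerate(l for l in f if l.strip()):
                yield key, json.loads(line)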
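
Once this commit is pushed, the loader can be exercised end to end. A usage sketch, assuming the repository id implied by the data_url in the diff above (cardiffnlp/super_tweet_eval); recent versions of the datasets library additionally require trust_remote_code=True for script-based repositories:

    from datasets import load_dataset

    # Config name matches the tweet_ner7 files added in this commit.
    dataset = load_dataset("cardiffnlp/super_tweet_eval", "tweet_ner7")

    # _split_generators registers train/test/validation, so all three
    # splits should be present.
    print(dataset)
    print(dataset["train"][0])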