fixed token-label alignment
iwslt2011.py  CHANGED  +17 -16
@@ -146,7 +146,7 @@ class IWSLT11(datasets.GeneratorBasedBuilder):
             features=datasets.Features(
                 {
                     "ids": datasets.Sequence(datasets.Value("int32")),
-                    "tokens": datasets.Sequence(datasets.Value("
+                    "tokens": datasets.Sequence(datasets.Value("int32")),
                     "labels": datasets.Sequence(
                         datasets.features.ClassLabel(
                             names=[p.name for p in self.config.punctuation]
@@ -194,12 +194,12 @@ class IWSLT11(datasets.GeneratorBasedBuilder):
             ]
         else:
             return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    gen_kwargs={
-                        "filepath": files["train"]
-                    },
-                ),
+                # datasets.SplitGenerator(
+                #     name=datasets.Split.TRAIN,
+                #     gen_kwargs={
+                #         "filepath": files["train"]
+                #     },
+                # ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={
@@ -252,23 +252,24 @@ class IWSLT11(datasets.GeneratorBasedBuilder):
             )
             ids = apply_window(np.arange(len(tokens)))
             tokens = apply_window(tokens)
+            tokens = self.config.tokenizer([t.tolist() for t in tokens], is_split_into_words=True, return_offsets_mapping=True, padding=True, truncation=True)
             labels = apply_window(labels)
-            for i, (ids,
+            for i, (ids, labels) in enumerate(zip(ids, labels)):
                 if self.config.tokenizer is None:
                     raise ValueError('tokenizer argument has to be passed to load_dataset')
                 else:
-
-                    offsets = np.array(
+                    words = tokens[i].words
+                    offsets = np.array(tokens['offset_mapping'][i])
                     enc_labels = np.array([self.config.label_subword.name]*len(offsets), dtype=object)
-
-
-
-
-
+                    count = 0
+                    for j, word_id in enumerate(words):
+                        if word_id is not None and (j == 0 or words[j-1] != word_id):
+                            enc_labels[j] = labels[count].name
+                            count += 1
                     labels = enc_labels
                 yield i, {
                     "ids": ids,
-                    "tokens": tokens,
+                    "tokens": tokens['input_ids'][i],
                     "labels": labels,
                 }
            logging.info(f"Loaded number of tokens = {token_len}")
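The last hunk implements first-subword label alignment: each window of pre-split tokens is run through the fast tokenizer with is_split_into_words=True, only the first subword of every word keeps that word's punctuation label, and special tokens plus continuation subwords fall back to the label_subword placeholder so the label sequence stays aligned with input_ids. Below is a minimal standalone sketch of the same idea; the bert-base-uncased checkpoint and the toy label names are illustrative assumptions, not the dataset's actual config, and it indexes labels via word_ids() instead of the running counter used in the diff.

# Sketch of first-subword label alignment with a HuggingFace fast tokenizer.
# The checkpoint and label names below are illustrative, not the IWSLT11 config.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

words = ["hello", "world", "how", "are", "you"]             # one window of pre-split tokens
word_labels = ["none", "comma", "none", "none", "period"]   # one label per word
label_subword = "none"                                      # placeholder for non-initial subwords

enc = tokenizer(words, is_split_into_words=True, truncation=True)
word_ids = enc.word_ids()  # subword position -> source word index (None for special tokens)

aligned = []
prev = None
for wid in word_ids:
    if wid is None or wid == prev:
        # special token ([CLS], [SEP]) or continuation subword: use the placeholder label
        aligned.append(label_subword)
    else:
        # first subword of a new word: carry over the word-level label
        aligned.append(word_labels[wid])
    prev = wid

print(list(zip(tokenizer.convert_ids_to_tokens(enc["input_ids"]), aligned)))

Indexing word_labels by word_ids() makes the running count from the diff unnecessary, with the same result as long as a window never starts mid-word. As the error message in the diff indicates, the tokenizer is expected to be supplied through load_dataset, presumably as something like load_dataset("iwslt2011.py", tokenizer=tokenizer).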