cdminix committed
Commit 1b737b4 · 1 Parent(s): 1022f4b

fixed token-label alignment

Files changed (1)
  1. iwslt2011.py +17 -16
iwslt2011.py CHANGED
@@ -146,7 +146,7 @@ class IWSLT11(datasets.GeneratorBasedBuilder):
             features=datasets.Features(
                 {
                     "ids": datasets.Sequence(datasets.Value("int32")),
-                    "tokens": datasets.Sequence(datasets.Value("string")),
+                    "tokens": datasets.Sequence(datasets.Value("int32")),
                     "labels": datasets.Sequence(
                         datasets.features.ClassLabel(
                             names=[p.name for p in self.config.punctuation]
@@ -194,12 +194,12 @@ class IWSLT11(datasets.GeneratorBasedBuilder):
             ]
         else:
             return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    gen_kwargs={
-                        "filepath": files["train"]
-                    },
-                ),
+                # datasets.SplitGenerator(
+                #     name=datasets.Split.TRAIN,
+                #     gen_kwargs={
+                #         "filepath": files["train"]
+                #     },
+                # ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={
@@ -252,23 +252,24 @@ class IWSLT11(datasets.GeneratorBasedBuilder):
             )
         ids = apply_window(np.arange(len(tokens)))
         tokens = apply_window(tokens)
+        tokens = self.config.tokenizer([t.tolist() for t in tokens], is_split_into_words=True, return_offsets_mapping=True, padding=True, truncation=True)
         labels = apply_window(labels)
-        for i, (ids, tokens, labels) in enumerate(zip(ids, tokens, labels)):
+        for i, (ids, labels) in enumerate(zip(ids, labels)):
             if self.config.tokenizer is None:
                 raise ValueError('tokenizer argument has to be passed to load_dataset')
             else:
-                tokenized = self.config.tokenizer([tokens.tolist()], is_split_into_words=True, return_offsets_mapping=True, padding=True, truncation=True)
-                offsets = np.array(tokenized['offset_mapping'][0])
+                words = tokens[i].words
+                offsets = np.array(tokens['offset_mapping'][i])
                 enc_labels = np.array([self.config.label_subword.name]*len(offsets), dtype=object)
-                # todo: check if performance changes if in-word is set to NONE
-                enc_labels[(offsets[:,0] == 0) & (offsets[:,1] != 0)] = [l.name for l in labels]
-                #print(enc_labels)
-                # not needed as long as the same tokenizer is used later?
-                # tokens = {k:v[0] for k,v in tokenized if k != 'offset_mapping'}
+                count = 0
+                for j, word_id in enumerate(words):
+                    if word_id is not None and (j == 0 or words[j-1] != word_id):
+                        enc_labels[j] = labels[count].name
+                        count += 1
                 labels = enc_labels
             yield i, {
                 "ids": ids,
-                "tokens": tokens,
+                "tokens": tokens['input_ids'][i],
                 "labels": labels,
             }
         logging.info(f"Loaded number of tokens = {token_len}")
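To see the new alignment strategy in isolation: the commit replaces the offset-mapping heuristic with the tokenizer's word indices, so the first subword of each word keeps that word's punctuation label and every other subword (and special token) falls back to the filler label. Below is a minimal, self-contained sketch of that idea, assuming a fast Hugging Face tokenizer (bert-base-uncased here) and toy label names (NONE/COMMA/PERIOD plus a SUBWORD filler standing in for self.config.label_subword.name); none of these names come from the commit itself.

# Minimal sketch of first-subword label alignment (assumed tokenizer and toy labels).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # any fast tokenizer works

words = ["hello", "world", "punctuation", "restoration"]  # toy pre-tokenized input
word_labels = ["NONE", "COMMA", "NONE", "PERIOD"]         # one label per word

encoded = tokenizer(words, is_split_into_words=True)
word_ids = encoded.word_ids()  # one entry per subword position; None for special tokens

aligned = []
prev = None
for word_id in word_ids:
    if word_id is not None and word_id != prev:
        aligned.append(word_labels[word_id])  # first subword of a word gets the word's label
    else:
        aligned.append("SUBWORD")             # continuation subwords and specials get the filler
    prev = word_id

for token, label in zip(tokenizer.convert_ids_to_tokens(encoded["input_ids"]), aligned):
    print(f"{token:15s} {label}")

Because the generator now yields the tokenizer's padded input ids rather than raw word strings, the "tokens" feature switches from string to int32 in the first hunk.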