system HF staff committed on
Commit
95dbac8
1 Parent(s): d6ef070

import from S3

Browse files
Files changed (1) hide show
  1. mgb1.py +138 -0
mgb1.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """The MGB Challenge Dataset."""
2
+
3
+ from __future__ import absolute_import, division, print_function
4
+
5
+ import logging
6
+
7
+ import datasets
8
+ from collections import deque
9
+
10
+
11
+ _CITATION = """\
12
+ @inproceedings{bell2015mgb,
13
+ title={The MGB challenge: Evaluating multi-genre broadcast media recognition},
14
+ author={Bell, Peter and Gales, Mark JF and Hain, Thomas and Kilgour, Jonathan and Lanchantin, Pierre and Liu, Xunying and McParland, Andrew and Renals, Steve and Saz, Oscar and Wester, Mirjam and others},
15
+ booktitle={2015 IEEE Workshop on Automatic Speech Recognition and Understanding (ASRU)},
16
+ pages={687--693},
17
+ year={2015},
18
+ organization={IEEE}
19
+ }
20
+
21
+ """
22
+
23
+ _DESCRIPTION = """\
24
+ The first edition of the Multi-Genre Broadcast (MGB-1) Challenge is an evaluation of speech recognition, speaker diarization, and lightly supervised alignment using TV recordings in English.
25
+
26
+ The speech data is broad and multi-genre, spanning the whole range of TV output, and represents a challenging task for speech technology.
27
+
28
+ In 2015, the challenge used data from the British Broadcasting Corporation (BBC).
29
+ """
30
+
31
+ _LM_FILE = "lm.txt"
32
+ _TRAINING_FILE = "train.txt"
33
+ _DEV_FILE = "dev.txt"
34
+
35
class MGB_1Config(datasets.BuilderConfig):
    """BuilderConfig for the MGB-1 dataset."""

    def __init__(self, with_dots=False, **kwargs):
        """BuilderConfig for MGB-1.

        Args:
            with_dots: bool, default False. When False, '<dots>' punctuation
                markers are collapsed to '<full_stop>' during example
                generation; when True, '<dots>' is kept as its own label.
            **kwargs: keyword arguments forwarded to super
                (datasets.BuilderConfig).
        """
        self.with_dots = with_dots
        super(MGB_1Config, self).__init__(**kwargs)
45
+
46
+
47
class MGB_1(datasets.GeneratorBasedBuilder):
    """The MGB-1 (Multi-Genre Broadcast) punctuation-annotation dataset."""
    # was: "The WNUT 17 Emerging Entities Dataset." — copy-paste error.

    BUILDER_CONFIG_CLASS = MGB_1Config

    # Inline punctuation marker tokens used in the transcripts.
    # Hoisted to a class constant; previously this list was rebuilt for
    # every input line inside _generate_examples.
    _PUNCT = (
        '<full_stop>',
        '<dots>',
        '<comma>',
        '<exclamation_mark>',
        '<question_mark>',
    )

    def _info(self):
        """Return the DatasetInfo: per-example id, word tokens, and one
        punctuation label per word."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    "punctuation": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage="http://www.mgb-challenge.org/MGB-1.html",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        NOTE(review): `_URL` is not defined anywhere in this file, so this
        method raises NameError when called — the base URL of the data
        files must be added at module level.
        """
        urls_to_download = {
            "lm": f"{_URL}{_LM_FILE}",
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        # start_index=1 drops the leading utterance-id token present in the
        # train/dev files; the LM file has no id column (start_index=0).
        return [
            datasets.SplitGenerator(name=datasets.Split('lm'), gen_kwargs={"filepath": downloaded_files["lm"], "start_index": 0}),
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"], "start_index": 1}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"], "start_index": 1}),
        ]

    def _generate_examples(self, filepath, start_index):
        """Yield (key, example) pairs from a transcript file.

        Each non-empty line is lower-cased, split on spaces, and turned into
        one example: "words" holds the non-punctuation tokens and
        "punctuation" holds one label per word — the marker token that
        follows the word, or '<none>'. Runs of consecutive punctuation
        markers are collapsed to the first one, and '<dots>' is mapped to
        '<full_stop>' unless the config sets with_dots=True.

        Args:
            filepath: path to a UTF-8 text file, one utterance per line.
            start_index: number of leading tokens to drop from each line
                (1 for train/dev lines that begin with an utterance id,
                0 for the LM file).
        """
        logging.getLogger(__name__).info("⏳ Generating examples from = %s", filepath)
        punct = self._PUNCT
        with open(filepath, encoding="utf-8") as f:
            current_tokens = deque()
            current_labels = deque()
            sentence_counter = 0
            for row in f:
                row = row.rstrip()
                if row:
                    tokens = row.lower().split(" ")
                    tokens = tokens[start_index:]
                    # Guard added: with start_index=1 a one-token line (id
                    # only) leaves `tokens` empty and `tokens[0]` would
                    # raise IndexError; skip such lines instead.
                    if not tokens or tokens[0] in punct:
                        # we cannot interpret lines starting with punctuation
                        continue
                    prev_tok = None
                    for i, t in enumerate(tokens):
                        if t in punct and (i == 0 or prev_tok not in punct):
                            if not self.config.with_dots and t == '<dots>':
                                current_labels.append('<full_stop>')
                            else:
                                current_labels.append(t)
                        elif t not in punct:
                            current_tokens.append(t)
                            # Word not followed by a marker gets '<none>'.
                            if i == len(tokens) - 1 or tokens[i + 1] not in punct:
                                current_labels.append('<none>')
                        prev_tok = t
                # New sentence
                if not current_tokens:
                    # Consecutive empty lines will cause empty sentences
                    continue
                assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
                sentence = (
                    sentence_counter,
                    {
                        "id": str(sentence_counter),
                        "words": current_tokens,
                        "punctuation": current_labels,
                    },
                )
                sentence_counter += 1
                current_tokens = deque()
                current_labels = deque()
                yield sentence
            # Don't forget last sentence in dataset 🧐
            if current_tokens:
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "words": current_tokens,
                    "punctuation": current_labels,
                }