rdiehlmartinez committed on
Commit
3f5de1d
1 Parent(s): 917f59b

v1 of dataset loading script

Browse files
Files changed (1) hide show
  1. BabyLM.py +99 -0
BabyLM.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import List

import datasets
2
+
3
# Human-readable summary shown on the dataset's Hub page (see _info below).
_DESCRIPTION = """\
Dataset for the shared baby language modeling task.
The goal is to train a language model from scratch on this data which represents
roughly the amount of text and speech data a young child observes.
"""

# Project page of the BabyLM shared task.
_HOMEPAGE = "https://babylm.github.io"
10
+
11
class BabyLM(datasets.GeneratorBasedBuilder):
    """Loading script for the BabyLM shared-task corpus.

    Two configurations are exposed:
      * ``strict_small`` -- ~10M-word training set (directory ``10M``)
      * ``strict``       -- ~100M-word training set (directory ``100M``)

    Both share the same ``dev`` and ``test`` splits and a single ``text``
    feature (one example per line of the source ``.txt`` files).
    """

    BUILDER_CONFIGS = [
        # NOTE: `datasets.BuilderConfig` does not accept a `features` kwarg
        # (its parameters are name/version/data_dir/data_files/description),
        # so passing one raises TypeError.  The features are declared once in
        # `_info` instead, which is the supported mechanism.
        datasets.BuilderConfig(
            name="strict_small",
            description="Small version of the dataset with 10M words",
            version="1.0.0",
            data_dir="10M",
        ),
        datasets.BuilderConfig(
            name="strict",
            description="Full version of the dataset with 100M words",
            version="1.0.0",
            data_dir="100M",
        ),
    ]

    DEFAULT_CONFIG_NAME = "strict_small"

    def _info(self):
        """Return the dataset metadata (description, features, homepage)."""
        # Both configurations share the same single-feature schema.
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the per-split text files and describe the train/dev/test splits."""
        # The training directory depends on the configuration; dev/test are shared.
        if self.config.name == "strict_small":
            train_data_dir = "10M"
        else:
            train_data_dir = "100M"

        # NOTE(review): these are glob-style relative paths; this assumes the
        # download manager resolves the wildcard against the repo contents --
        # confirm against the hosting layout.
        urls_to_download = {
            "train": f"{train_data_dir}/*.txt",
            "dev": "dev/*.txt",
            "test": "test/*.txt",
        }

        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split": "train",
                    "filepaths": downloaded_files["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "split": "dev",
                    "filepaths": downloaded_files["dev"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "split": "test",
                    "filepaths": downloaded_files["test"],
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, split, filepaths):
        """Yield ``(key, example)`` pairs, one example per line of each file.

        The `key` is for legacy reasons (tfds) and is not important in itself,
        but must be unique for each example.
        """
        # `filepaths` may arrive as a single path; normalize to a list.
        if isinstance(filepaths, str):
            filepaths = [filepaths]

        global_idx = 0

        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                for row in f:
                    yield global_idx, {"text": row}
                    # BUG FIX: the counter was never incremented, so every
                    # example shared key 0 and `datasets` would reject the
                    # duplicate keys.  Increment to keep keys unique.
                    global_idx += 1