Create emnist.py
Browse files
emnist.py
ADDED
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import datasets
|
3 |
+
import scipy.io as sio
|
4 |
+
import os
|
5 |
+
|
6 |
+
|
7 |
+
class EMNISTConfig(datasets.BuilderConfig):
    """BuilderConfig for one EMNIST split variant (byclass, bymerge, ...)."""

    def __init__(self, variant, **kwargs):
        """Create a config for the given EMNIST *variant* name.

        Args:
            variant: Which EMNIST split this config selects
                (e.g. ``"byclass"``, ``"letters"``, ``"mnist"``).
            **kwargs: Forwarded to ``datasets.BuilderConfig``.
        """
        # Pin the dataset version; remaining kwargs (name, etc.) pass through.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.variant = variant
|
11 |
+
|
12 |
+
|
13 |
+
class EMNIST(datasets.GeneratorBasedBuilder):
    """Dataset builder for EMNIST, loaded from the official NIST MATLAB archive.

    Each config selects one of the six EMNIST variants; examples are yielded
    as 28x28 uint8 grayscale images with integer class labels.
    """

    # Class counts per variant, per the EMNIST paper (Cohen et al., 2017).
    _NUM_CLASSES = {
        "byclass": 62,
        "bymerge": 47,
        "balanced": 47,
        "letters": 26,
        "digits": 10,
        "mnist": 10,
    }

    BUILDER_CONFIGS = [
        EMNISTConfig(name="byclass", variant="byclass"),
        EMNISTConfig(name="bymerge", variant="bymerge"),
        EMNISTConfig(name="balanced", variant="balanced"),
        EMNISTConfig(name="letters", variant="letters"),
        EMNISTConfig(name="digits", variant="digits"),
        EMNISTConfig(name="mnist", variant="mnist"),
    ]

    def _info(self):
        """Return dataset metadata (features, description, citation).

        Raises:
            ValueError: If the config's variant is not a known EMNIST split.
                (The original if/elif chain had no fallback and would fail
                later with a NameError on an unbound ``num_classes``.)
        """
        variant = self.config.variant
        try:
            num_classes = self._NUM_CLASSES[variant]
        except KeyError:
            raise ValueError(
                f"Unknown EMNIST variant: {variant!r}; "
                f"expected one of {sorted(self._NUM_CLASSES)}"
            ) from None

        return datasets.DatasetInfo(
            description="The EMNIST dataset is a set of handwritten character and digit samples derived from the NIST Special Database 19. It provides multiple splits designed for different classification tasks, formatted to be directly compatible with the original MNIST dataset.",
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.ClassLabel(num_classes=num_classes)
                }
            ),
            supervised_keys=("image", "label"),
            homepage="https://www.nist.gov/itl/products-and-services/emnist-dataset",
            citation="""@article{cohen2017emnist,
title={EMNIST: Extending MNIST to handwritten letters},
author={Gregory Cohen and Saeed Afshar and Jonathan Tapson and André van Schaik},
journal={arXiv preprint arXiv:1702.05373},
year={2017},
url={https://arxiv.org/abs/1702.05373}}"""
        )

    def _split_generators(self, dl_manager):
        """Download/extract the MATLAB archive and define train/test splits."""
        variant = self.config.variant
        # Download and extract the matlab.zip file
        extracted_path = dl_manager.download_and_extract("https://biometrics.nist.gov/cs_links/EMNIST/matlab.zip")

        # The archive unpacks into a "matlab" directory holding one
        # emnist-<variant>.mat file per split variant.
        mat_dir = os.path.join(extracted_path, "matlab")
        mat_file = f"emnist-{variant}.mat"
        mat_path = os.path.join(mat_dir, mat_file)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"mat_path": mat_path, "split": "train"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"mat_path": mat_path, "split": "test"}
            ),
        ]

    def _generate_examples(self, mat_path, split):
        """Yield ``(index, example)`` pairs from the .mat file for one split.

        Args:
            mat_path: Path to the ``emnist-<variant>.mat`` file.
            split: Either ``"train"`` or ``"test"`` (field name inside the
                MATLAB struct).
        """
        data = sio.loadmat(mat_path)
        dataset = data['dataset'][0, 0]
        subset = dataset[split][0, 0]

        images = subset['images']
        labels = subset['labels']

        # Each image is stored as a flat 784 vector in MATLAB column-major
        # order; a plain reshape(-1, 28, 28) yields transposed glyphs, so swap
        # the two spatial axes to make them upright (MNIST orientation).
        images = np.array(images, dtype=np.uint8).reshape(-1, 28, 28).transpose(0, 2, 1)
        labels = np.array(labels, dtype=np.int64).flatten()

        # The "letters" variant labels are 1-indexed (1-26) in the .mat file;
        # shift to 0-25 so they fit ClassLabel(num_classes=26).
        if self.config.variant == "letters":
            labels = labels - 1

        for idx, (img, lbl) in enumerate(zip(images, labels)):
            yield idx, {
                "image": img,
                "label": int(lbl)
            }
|