sps44 commited on
Commit
722bcc5
·
1 Parent(s): 99185dd

first dataset version

Browse files
data/food101-metadata.parquet.gzip ADDED
Binary file (389 kB). View file
 
data/food101_raw.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0747e577985e9886d13f7c7399c397282ffeb36dd36efa52504effbae4a05a94
3
+ size 4139803338
food101-enriched.py ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Dataset class for Food-101 dataset."""
16
+
17
+ import datasets
18
+ from datasets.tasks import ImageClassification
19
+
20
+ import pandas as pd
21
+
22
+ from pathlib import Path
23
+ from datasets import load_dataset
24
+
25
+
26
# Original Food-101 archive hosted by ETH Zurich.
# NOTE(review): appears unused by this script — data is loaded from the local
# files declared in `_DATA_URLS` below; kept for provenance. TODO confirm.
_BASE_URL = "http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz"

# Upstream train/test split listings for the original Food-101 release.
# NOTE(review): also appears unused here — the split assignment comes from the
# parquet metadata file instead. TODO confirm before removing.
_METADATA_URLS = {
    "train": "https://s3.amazonaws.com/datasets.huggingface.co/food101/meta/train.txt",
    "test": "https://s3.amazonaws.com/datasets.huggingface.co/food101/meta/test.txt",
}

# Homepage of the original Food-101 dataset.
_HOMEPAGE = "https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/"

# Human-readable dataset description shown in the dataset card / DatasetInfo.
_DESCRIPTION = (
    "This dataset consists of 101 food categories, with 101'000 images. For "
    "each class, 250 manually reviewed test images are provided as well as 750"
    " training images. On purpose, the training images were not cleaned, and "
    "thus still contain some amount of noise. This comes mostly in the form of"
    " intense colors and sometimes wrong labels. All images were rescaled to "
    "have a maximum side length of 512 pixels."
)

# BibTeX citation for the original Food-101 paper (Bossard et al., ECCV 2014).
_CITATION = """\
@inproceedings{bossard14,
  title = {Food-101 -- Mining Discriminative Components with Random Forests},
  author = {Bossard, Lukas and Guillaumin, Matthieu and Van Gool, Luc},
  booktitle = {European Conference on Computer Vision},
  year = {2014}
}
"""

# License text reproduced verbatim from the upstream dataset distribution.
_LICENSE = """\
LICENSE AGREEMENT
=================
- The Food-101 data set consists of images from Foodspotting [1] which are not
  property of the Federal Institute of Technology Zurich (ETHZ). Any use beyond
  scientific fair use must be negociated with the respective picture owners
  according to the Foodspotting terms of use [2].

[1] http://www.foodspotting.com/
[2] http://www.foodspotting.com/terms/
"""

# Class names for the ClassLabel feature. Order matters: the integer id of
# each label is its position in this list.
_NAMES = [
    "apple_pie",
    "baby_back_ribs",
    "baklava",
    "beef_carpaccio",
    "beef_tartare",
    "beet_salad",
    "beignets",
    "bibimbap",
    "bread_pudding",
    "breakfast_burrito",
    "bruschetta",
    "caesar_salad",
    "cannoli",
    "caprese_salad",
    "carrot_cake",
    "ceviche",
    "cheesecake",
    "cheese_plate",
    "chicken_curry",
    "chicken_quesadilla",
    "chicken_wings",
    "chocolate_cake",
    "chocolate_mousse",
    "churros",
    "clam_chowder",
    "club_sandwich",
    "crab_cakes",
    "creme_brulee",
    "croque_madame",
    "cup_cakes",
    "deviled_eggs",
    "donuts",
    "dumplings",
    "edamame",
    "eggs_benedict",
    "escargots",
    "falafel",
    "filet_mignon",
    "fish_and_chips",
    "foie_gras",
    "french_fries",
    "french_onion_soup",
    "french_toast",
    "fried_calamari",
    "fried_rice",
    "frozen_yogurt",
    "garlic_bread",
    "gnocchi",
    "greek_salad",
    "grilled_cheese_sandwich",
    "grilled_salmon",
    "guacamole",
    "gyoza",
    "hamburger",
    "hot_and_sour_soup",
    "hot_dog",
    "huevos_rancheros",
    "hummus",
    "ice_cream",
    "lasagna",
    "lobster_bisque",
    "lobster_roll_sandwich",
    "macaroni_and_cheese",
    "macarons",
    "miso_soup",
    "mussels",
    "nachos",
    "omelette",
    "onion_rings",
    "oysters",
    "pad_thai",
    "paella",
    "pancakes",
    "panna_cotta",
    "peking_duck",
    "pho",
    "pizza",
    "pork_chop",
    "poutine",
    "prime_rib",
    "pulled_pork_sandwich",
    "ramen",
    "ravioli",
    "red_velvet_cake",
    "risotto",
    "samosa",
    "sashimi",
    "scallops",
    "seaweed_salad",
    "shrimp_and_grits",
    "spaghetti_bolognese",
    "spaghetti_carbonara",
    "spring_rolls",
    "steak",
    "strawberry_shortcake",
    "sushi",
    "tacos",
    "takoyaki",
    "tiramisu",
    "tuna_tartare",
    "waffles",
]

# Repository-local data files actually consumed by this loading script:
# 'raw' is an LFS tarball of images; 'metadata' is a gzip-compressed parquet
# table mapping each image to its label and split.
_DATA_URLS = {
    'raw': "data/food101_raw.tar.gz",
    'metadata': 'data/food101-metadata.parquet.gzip'
}
173
+
174
+
175
+
176
class Food101Enriched(datasets.GeneratorBasedBuilder):
    """Builder for the enriched Food-101 dataset.

    Loads images from a repository-local tar archive and a parquet metadata
    table (see ``_DATA_URLS``) that assigns each image a relative path, a
    class label, and a train/test split.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="enriched",
            version=datasets.Version("1.0.0", ""),
            description="Import of enriched Food 101 Data Set",
        )
    ]

    def _info(self):
        """Return the dataset metadata (features, citation, license, task)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # NOTE(review): exposed as a plain string path, not a
                    # datasets.Image() feature — consumers get the file path,
                    # not decoded image data. Confirm this is intentional.
                    "image": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=_NAMES),
                    "split": datasets.Value("string"),
                }
            ),
            supervised_keys=("image", "label"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            task_templates=[ImageClassification(image_column="image", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        """Download/extract the raw archive and metadata, declare the splits.

        The same extracted archive and metadata frame are shared by both
        splits; filtering happens in ``_generate_examples``.
        """
        archive_path = dl_manager.download_and_extract(_DATA_URLS['raw'])
        metadata = pd.read_parquet(dl_manager.download(_DATA_URLS['metadata']))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"archive_path": archive_path, 'metadata': metadata, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"archive_path": archive_path, 'metadata': metadata, "split": "test"},
            ),
        ]

    def _generate_examples(self, archive_path, metadata, split):
        """Yield ``(index, example)`` pairs for the requested split.

        Fix over the original: an unrecognized ``split`` value previously
        left ``df`` unbound, crashing with a confusing NameError at
        ``df.iterrows()``; now it raises an explicit ValueError up front.
        Commented-out debug code was removed.
        """
        if split == "all":
            df = metadata
        elif split in ("train", "test"):
            df = metadata[metadata['split'] == split]
        else:
            raise ValueError(f"Unknown split: {split!r}; expected 'train', 'test' or 'all'.")

        for index, row in df.iterrows():
            # Metadata stores the image path relative to the extracted archive.
            yield index, {
                'image': archive_path + "/" + row['image'],
                'label': row['label'],
                'split': split,
            }
247
+
248
+
249
+
250
+
251
+
252
+
253
+
254
if __name__ == "__main__":
    # Manual smoke test: run this script directly to build the dataset from
    # the local loading script (triggers download/extraction of the archive).
    dataset = load_dataset("food101-enriched.py", split="all")