bstds committed on
Commit
0fb0281
·
1 Parent(s): 204a2af

Create abo_listings.py

Browse files
Files changed (1) hide show
  1. abo_listings.py +105 -0
abo_listings.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ import gzip
4
+ import json
5
+ from functools import partial
6
+ from pathlib import Path
7
+
8
+ import datasets
9
+ from datasets import DatasetDict, load_dataset
10
+
11
+
12
# Download URL for each supported dataset configuration, keyed by the
# BuilderConfig name ('main' is the only configuration).
AVAILABLE_DATASETS = {
    'main': 'https://amazon-berkeley-objects.s3.amazonaws.com/archives/abo-listings.tar'
}

# Version attached to every generated BuilderConfig.
VERSION = datasets.Version("0.0.1")

# Metadata keys extracted from each JSON listing record; keys absent from a
# record come back as None via `dict.get` in `_generate_examples`.
_FIELDS = [
    "item_id",
    "brand",
    "bullet_point",
    "color",
    "item_name",
    "model_name",
    "model_number",
    "model_year",
    "product_type",
    "style",
    "main_image_id",
    "other_image_id",
    "item_keywords",
    "country",
    "marketplace",
    "domain_name",
    "node",
]
37
+
38
+
39
class AbolistingsDataset(datasets.GeneratorBasedBuilder):
    """Builder for the Amazon Berkeley Objects (ABO) product-listings metadata.

    Downloads and extracts the ``abo-listings.tar`` archive, then yields one
    example per product listing read from the gzipped JSON-lines files under
    ``listings/metadata/``. Only the keys in ``_FIELDS`` are kept.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=data_name, version=VERSION, description=f"{data_name} abolistings dataset"
        )
        for data_name in AVAILABLE_DATASETS
    ]

    @staticmethod
    def load(data_name_config: str = "main") -> DatasetDict:
        """Convenience wrapper: load this builder via `load_dataset`.

        Args:
            data_name_config: configuration name; must be a key of
                AVAILABLE_DATASETS (default "main").

        Returns:
            The loaded DatasetDict.
        """
        return load_dataset(__file__, data_name_config)

    def _info(self):
        """Declare the feature schema (one entry per key in _FIELDS)."""
        return datasets.DatasetInfo(
            description="",
            features=datasets.Features(
                {
                    "item_id": datasets.Value("string"),
                    "brand": datasets.Sequence(datasets.Value("string")),
                    "bullet_point": datasets.Sequence(datasets.Value("string")),
                    "color": datasets.Sequence(datasets.Value("string")),
                    "item_name": datasets.Sequence(datasets.Value("string")),
                    "model_name": datasets.Sequence(datasets.Value("string")),
                    "model_number": datasets.Sequence(datasets.Value("string")),
                    "model_year": datasets.Sequence(datasets.Value("string")),
                    "product_type": datasets.Sequence(datasets.Value("string")),
                    "style": datasets.Sequence(datasets.Value("string")),
                    "main_image_id": datasets.Value("string"),
                    "other_image_id": datasets.Sequence(datasets.Value("string")),
                    "item_keywords": datasets.Sequence(datasets.Value("string")),
                    "country": datasets.Value("string"),
                    "marketplace": datasets.Value("string"),
                    "domain_name": datasets.Value("string"),
                    "node": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage="https://amazon-berkeley-objects.s3.amazonaws.com/index.html#download",
            citation="",
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and expose a single TRAIN split."""
        # FIX: the original wrapped this call in `partial(lambda: ...)` and
        # invoked it once — a no-op indirection. Call the manager directly.
        archive_root = dl_manager.download_and_extract(AVAILABLE_DATASETS[self.config.name])
        # There is no predefined train/val/test split for this dataset.
        root_path = Path(archive_root) / 'listings' / 'metadata'
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"root_path": root_path}
            ),
        ]

    def _generate_examples(self, root_path):
        """Yield (index, example) pairs from every ``*.json.gz`` metadata file.

        Args:
            root_path: directory containing the gzipped JSON-lines files.

        Yields:
            (idx, example) where example maps each key in _FIELDS to the
            record's value, or None when the record lacks that key.
        """
        root_path = Path(root_path)
        idx = 0
        # FIX: sort the glob results — glob order is filesystem-dependent,
        # which made example indices nondeterministic across runs.
        for file in sorted(root_path.glob("*.json.gz")):
            # Text-mode gzip.open replaces the manual per-line byte decode.
            with gzip.open(file, "rt", encoding="utf-8") as f_in:
                for line in f_in:
                    sample = json.loads(line)
                    yield idx, {k: sample.get(k) for k in _FIELDS}
                    idx += 1