#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import gzip
import json
from pathlib import Path

import datasets
from datasets import DatasetDict, load_dataset


AVAILABLE_DATASETS = {
    'main': 'https://amazon-berkeley-objects.s3.amazonaws.com/archives/abo-listings.tar'
}

VERSION = datasets.Version("0.0.1")

_FIELDS = [
    "item_id",
    "brand",
    "bullet_point",
    "color",
    "item_name",
    "model_name",
    "model_number",
    "model_year",
    "product_type",
    "style",
    "main_image_id",
    "other_image_id",
    "item_keywords",
    "country",
    "marketplace",
    "domain_name",
    "node",
]


class AbolistingsDataset(datasets.GeneratorBasedBuilder):
    """AbolistingsDataset dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=data_name, version=VERSION, description=f"{data_name} abolistings dataset"
        )
        for data_name in AVAILABLE_DATASETS
    ]

    @staticmethod
    def load(data_name_config: str = "main") -> DatasetDict:
        ds = load_dataset(__file__, data_name_config)
        return ds

    def _info(self):
        return datasets.DatasetInfo(
            description="Product listings metadata from the Amazon Berkeley Objects (ABO) dataset.",
            features=datasets.Features(
                {
                    "item_id": datasets.Value("string"),
                    "brand": datasets.Sequence(datasets.Value("string")),
                    "bullet_point": datasets.Sequence(datasets.Value("string")),
                    "color": datasets.Sequence(datasets.Value("string")),
                    "item_name": datasets.Sequence(datasets.Value("string")),
                    "model_name": datasets.Sequence(datasets.Value("string")),
                    "model_number": datasets.Sequence(datasets.Value("string")),
                    "model_year": datasets.Sequence(datasets.Value("string")),
                    "product_type": datasets.Sequence(datasets.Value("string")),
                    "style": datasets.Sequence(datasets.Value("string")),
                    "main_image_id": datasets.Value("string"),
                    "other_image_id": datasets.Sequence(datasets.Value("string")),
                    "item_keywords": datasets.Sequence(datasets.Value("string")),
                    "country": datasets.Value("string"),
                    "marketplace": datasets.Value("string"),
                    "domain_name": datasets.Value("string"),
                    "node": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage="https://amazon-berkeley-objects.s3.amazonaws.com/index.html#download",
            citation="",
        )

    def _split_generators(self, dl_manager):
        # Download and extract the listings archive for the selected config.
        archive_root = dl_manager.download_and_extract(AVAILABLE_DATASETS[self.config.name])
        # There is no predefined train/val/test split for this dataset, so all
        # metadata files are exposed as a single TRAIN split.
        root_path = Path(archive_root) / "listings" / "metadata"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"root_path": root_path}
            ),
        ]

    def _generate_examples(self, root_path):
        root_path = Path(root_path)
        # Each metadata shard is a gzipped JSON Lines file (one listing per line).
        # Sorting keeps example ordering deterministic across runs.
        files = sorted(root_path.glob("*.json.gz"))
        idx = 0
        for file in files:
            with gzip.open(file, "rt", encoding="utf-8") as f_in:
                for line in f_in:
                    sample = json.loads(line)
                    # Keep only the declared fields; missing keys default to None.
                    yield idx, {k: sample.get(k) for k in _FIELDS}
                    idx += 1
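

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original loading script: load the
    # "main" config through the helper above. Note that this downloads and
    # extracts the full abo-listings.tar archive on first use.
    ds = AbolistingsDataset.load("main")
    print(ds)                         # DatasetDict with a single "train" split
    print(ds["train"][0]["item_id"])  # any field listed in _FIELDS is available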