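"""Hugging Face `datasets` loading script for the Amazon Berkeley Objects (ABO) listings metadata.

The archive is downloaded from the public ABO S3 bucket and each record in
`listings/metadata/*.json.gz` is emitted as one example.
"""
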
import gzip
import json
from pathlib import Path

import datasets
from datasets import DatasetDict, load_dataset

# Archive URL for each builder config.
AVAILABLE_DATASETS = {
    'main': 'https://amazon-berkeley-objects.s3.amazonaws.com/archives/abo-listings.tar'
}

VERSION = datasets.Version("0.0.1")

_FIELDS = [
    "item_id",
    "brand",
    "bullet_point",
    "color",
    "item_name",
    "model_name",
    "model_number",
    "model_year",
    "product_type",
    "style",
    "main_image_id",
    "other_image_id",
    "item_keywords",
    "country",
    "marketplace",
    "domain_name",
    "node",
]


class AbolistingsDataset(datasets.GeneratorBasedBuilder):
    """Amazon Berkeley Objects (ABO) product listings dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=data_name, version=VERSION, description=f"{data_name} abolistings dataset"
        )
        for data_name in AVAILABLE_DATASETS
    ]

    @staticmethod
    def load(data_name_config: str = "main") -> DatasetDict:
        """Load the requested config with `load_dataset`, using this file as the loading script."""
        ds = load_dataset(__file__, data_name_config)
        return ds

    def _info(self):
        return datasets.DatasetInfo(
            description="",
            features=datasets.Features(
                {
                    "item_id": datasets.Value("string"),
                    "brand": datasets.Sequence(datasets.Value("string")),
                    "bullet_point": datasets.Sequence(datasets.Value("string")),
                    "color": datasets.Sequence(datasets.Value("string")),
                    "item_name": datasets.Sequence(datasets.Value("string")),
                    "model_name": datasets.Sequence(datasets.Value("string")),
                    "model_number": datasets.Sequence(datasets.Value("string")),
                    "model_year": datasets.Sequence(datasets.Value("string")),
                    "product_type": datasets.Sequence(datasets.Value("string")),
                    "style": datasets.Sequence(datasets.Value("string")),
                    "main_image_id": datasets.Value("string"),
                    "other_image_id": datasets.Sequence(datasets.Value("string")),
                    "item_keywords": datasets.Sequence(datasets.Value("string")),
                    "country": datasets.Value("string"),
                    "marketplace": datasets.Value("string"),
                    "domain_name": datasets.Value("string"),
                    "node": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage="https://amazon-berkeley-objects.s3.amazonaws.com/index.html#download",
            citation="",
        )

    def _split_generators(self, dl_manager):
        # Download and extract the listings archive for the selected config.
        archive_root = dl_manager.download_and_extract(AVAILABLE_DATASETS[self.config.name])

        root_path = Path(archive_root) / 'listings' / 'metadata'
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"root_path": root_path}
            ),
        ]

    def _generate_examples(self, root_path):
        root_path = Path(root_path)
        # Each metadata shard is a gzipped JSON Lines file; sort for a deterministic example order.
        files = sorted(root_path.glob("*.json.gz"))
        idx = 0
        for file in files:
            with gzip.open(file, mode="rt", encoding="utf-8") as f_in:
                for line in f_in:
                    sample = json.loads(line)
                    # Keep only the declared fields; absent keys default to None.
                    yield idx, {k: sample.get(k) for k in _FIELDS}
                    idx += 1
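

# Minimal usage sketch (not part of the original script): building the dataset end to
# end assumes network access to the public ABO archive and a `datasets` version that
# still supports script-based loading via `load_dataset(<path-to-this-file>, <config>)`.
if __name__ == "__main__":
    ds = AbolistingsDataset.load("main")
    print(ds)
    # Inspect one parsed listing record.
    print(ds["train"][0])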