import datasets
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = ""
# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
Images of ancient mortar samples, labeled as Chert, Obsidian, or Sand.
"""
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = "https://huggingface.co/apetulante/mortars_test/mortars_data.zip"
_METADATA_URLS = {
    "train": "https://huggingface.co/apetulante/mortars_test/train.txt",
    "valid": "https://huggingface.co/apetulante/mortars_test/valid.txt",
    "test": "https://huggingface.co/apetulante/mortars_test/test.txt",
}
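# Each metadata file is expected to list, one per line, the archive paths (relative to the
# images directory, without the ".bmp" extension) of the files belonging to that split;
# _generate_examples uses it to filter the archive.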
# Class names for the label column. ClassLabel expects an ordered list, so a list (not a set)
# is used to keep the label ids stable.
_NAMES = [
    "Chert",
    "Obsidian",
    "Sand",
]
# Top-level directory inside the archive that holds the images. The name is assumed from the
# archive filename (mortars_data.zip); adjust it if the actual layout differs.
_IMAGES_DIR = "mortars_data/"
# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class MortarsData(datasets.GeneratorBasedBuilder):
    """Image dataset of ancient mortar samples labeled as Chert, Obsidian, or Sand."""

    VERSION = datasets.Version("1.1.0")
    def _info(self):
        # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
        features = datasets.Features(
            {
                # These are the features of the dataset: the raw image and its class label.
                "image": datasets.Image(),
                "label": datasets.ClassLabel(names=_NAMES),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("image", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        # Download the image archive and the per-split metadata files. The archive is kept
        # compressed and streamed with iter_archive, so download() (not download_and_extract)
        # is used here.
        archive_path = dl_manager.download(_URLS)
        metadata_paths = dl_manager.download(_METADATA_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": dl_manager.iter_archive(archive_path),
                    "metadata_path": metadata_paths["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "images": dl_manager.iter_archive(archive_path),
                    "metadata_path": metadata_paths["valid"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": dl_manager.iter_archive(archive_path),
                    "metadata_path": metadata_paths["test"],
                },
            ),
        ]
    def _generate_examples(self, images, metadata_path):
        """Generate images and labels for a split."""
        # The metadata file lists which archive members belong to this split.
        with open(metadata_path, encoding="utf-8") as f:
            files_to_keep = set(f.read().splitlines())
        for file_path, file_obj in images:
            if file_path.startswith(_IMAGES_DIR):
                if file_path[len(_IMAGES_DIR) : -len(".bmp")] in files_to_keep:
                    # The parent directory is named "<label>-<suffix>", so the class label is
                    # the part of the directory name before the first dash.
                    label = file_path.split("/")[-2].split("-")[0]
                    yield file_path, {
                        "image": {"path": file_path, "bytes": file_obj.read()},
                        "label": label,
                    }
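# Minimal usage sketch (assumes this script is saved as mortars_data.py inside the
# apetulante/mortars_test dataset repository; the repo id is taken from the URLs above):
#
#     from datasets import load_dataset
#     ds = load_dataset("apetulante/mortars_test", split="train")
#     example = ds[0]  # {"image": <PIL.Image>, "label": <int>}
#     print(ds.features["label"].int2str(example["label"]))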