# datasaur-OGZiZWFjODM-MjFmYmJjMTI / datasaur-OGZiZWFjODM-MjFmYmJjMTI.py
# NOTE(review): the lines below are Hugging Face Hub page residue captured
# when this file was scraped ("Tangguh's picture", upload note, commit hash);
# commented out so the file parses as Python.
# Upload datasaur-OGZiZWFjODM-MjFmYmJjMTI.py with huggingface_hub
# 3cd0a67 verified
import os
import json
import datasets
from sklearn.model_selection import train_test_split
# IOB2 tag inventory for the ClassLabel feature declared in Custom._info:
# 'O' plus B-/I- pairs for each entity type (OntoNotes-style categories such
# as PERSON, ORG, GPE, DATE, MONEY, ...). The list order fixes the integer
# id assigned to each label.
_DATASET_LABELS = ['O', 'B-NORP', 'I-NORP', 'B-DATE', 'I-DATE', 'B-PRODUCT', 'I-PRODUCT', 'B-WORK_OF_ART', 'I-WORK_OF_ART', 'B-PERCENT', 'I-PERCENT', 'B-MONEY', 'I-MONEY', 'B-LAW', 'I-LAW', 'B-TIME', 'I-TIME', 'B-CARDINAL', 'I-CARDINAL', 'B-LANGUAGE', 'I-LANGUAGE', 'B-ORDINAL', 'I-ORDINAL', 'B-LOC', 'I-LOC', 'B-GPE', 'I-GPE', 'B-PERSON', 'I-PERSON', 'B-ORG', 'I-ORG']
class Custom(datasets.GeneratorBasedBuilder):
    """Token-classification dataset builder.

    Reads one JSON object per line from ``data.jsonl`` (keys ``words`` and
    ``pos``) and exposes an 80/20 train/validation split with a fixed seed,
    so repeated loads produce the same partition.
    """

    def _info(self):
        """Declare the schema: a string id, a token sequence, and a parallel
        sequence of tags drawn from ``_DATASET_LABELS``."""
        return datasets.DatasetInfo(
            description='',
            features=datasets.Features(
                {
                    'id': datasets.Value('string'),
                    'tokens': datasets.Sequence(datasets.Value('string')),
                    'ner_tags': datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=_DATASET_LABELS
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage='',
            citation='',
        )

    def _split_generators(self, dl_manager):
        """Fetch the JSONL source and partition it into train/validation.

        ``random_state=42`` makes the split deterministic across loads.
        """
        data_path = dl_manager.download_and_extract("data.jsonl")
        # Explicit encoding so the read does not depend on the platform's
        # default locale encoding.
        with open(data_path, 'r', encoding='utf-8') as file:
            lines = file.readlines()
        train_lines, valid_lines = train_test_split(lines, test_size=0.2, random_state=42)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'lines': train_lines}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'lines': valid_lines}),
        ]

    def _generate_examples(self, lines):
        """Yield ``(key, example)`` pairs from raw JSONL lines.

        Each line is a JSON object whose ``words`` value supplies the tokens
        and whose ``pos`` value supplies the per-token tags mapped onto the
        ClassLabel feature declared in ``_info``. NOTE(review): the ``pos``
        key feeding ``ner_tags`` looks like it may be a naming leftover —
        confirm against the data file that it actually holds NER labels.
        """
        for guid, line in enumerate(lines):
            # Robustness: skip blank lines (e.g. a stray trailing newline)
            # instead of crashing in json.loads.
            if not line.strip():
                continue
            data = json.loads(line)
            yield guid, {
                'id': str(guid),
                'tokens': data['words'],
                'ner_tags': data['pos'],
            }