import json
import datasets

from sklearn.model_selection import train_test_split

# BIO-scheme NER label set (OntoNotes-style entity types).
_DATASET_LABELS = ['O', 'B-NORP', 'I-NORP', 'B-DATE', 'I-DATE', 'B-PRODUCT', 'I-PRODUCT', 'B-WORK_OF_ART', 'I-WORK_OF_ART', 'B-PERCENT', 'I-PERCENT', 'B-MONEY', 'I-MONEY', 'B-LAW', 'I-LAW', 'B-TIME', 'I-TIME', 'B-CARDINAL', 'I-CARDINAL', 'B-LANGUAGE', 'I-LANGUAGE', 'B-ORDINAL', 'I-ORDINAL', 'B-LOC', 'I-LOC', 'B-GPE', 'I-GPE', 'B-PERSON', 'I-PERSON', 'B-ORG', 'I-ORG']

class Custom(datasets.GeneratorBasedBuilder):
    """Builds train/validation NER splits from a local data.jsonl file."""

    def _info(self):
        return datasets.DatasetInfo(
            description='',
            features=datasets.Features(
                {
                    'id': datasets.Value('string'),
                    'tokens': datasets.Sequence(datasets.Value('string')),
                    'ner_tags': datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=_DATASET_LABELS
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage='',
            citation='',
        )

    def _split_generators(self, dl_manager):
        # "data.jsonl" is a relative path; the datasets library resolves it
        # against the directory containing this script.
        data_path = dl_manager.download_and_extract("data.jsonl")

        with open(data_path, 'r', encoding='utf-8') as file:
            lines = file.readlines()

        # Hold out 20% of the records for validation; the fixed seed keeps the
        # split reproducible across runs.
        train_lines, valid_lines = train_test_split(lines, test_size=0.2, random_state=42)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'lines': train_lines}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'lines': valid_lines}),
        ]

    def _generate_examples(self, lines):
        for guid, line in enumerate(lines):
            data = json.loads(line)
            # Each JSONL record is expected to store the token list under
            # 'words' and the per-token tag strings under 'pos'; the tag
            # strings are encoded against _DATASET_LABELS by the ClassLabel
            # feature declared in _info().
            yield guid, {
                'id': str(guid),
                'tokens': data['words'],
                'ner_tags': data['pos'],
            }
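

# A minimal usage sketch (an assumption, not part of the original script):
# save this file next to data.jsonl, e.g. as custom.py (the filename is
# hypothetical), and load it through the standard datasets API.
#
#   from datasets import load_dataset
#
#   ds = load_dataset('./custom.py')
#   print(ds['train'][0]['tokens'])
#   print(ds['train'][0]['ner_tags'])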