Saripudin committed on
Commit
9525005
·
1 Parent(s): f5bb5d6

Upload span-based-dataset-sample.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. span-based-dataset-sample.py +50 -0
span-based-dataset-sample.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import datasets
4
+
5
+ from sklearn.model_selection import train_test_split
6
+
7
+ _DATASET_LABELS = ['O', 'B-CITY', 'I-CITY', 'B-NAMES', 'I-NAMES', 'B-DATE', 'I-DATE']  # BIO-style NER tag set used as the ClassLabel vocabulary in Custom._info
8
+
9
class Custom(datasets.GeneratorBasedBuilder):
    """Loading script for a span-based (BIO-tagged) NER sample dataset.

    Reads newline-delimited JSON from ``data.jsonl`` (each record has a
    ``words`` list and a parallel ``pos`` list of tag ids/names — TODO
    confirm the exact value type against the data file), splits it 80/20
    into train/validation with a fixed seed, and yields token/tag pairs.
    """

    def _info(self):
        """Declare the dataset schema: id, token sequence, and BIO tag sequence."""
        return datasets.DatasetInfo(
            description='',
            features=datasets.Features(
                {
                    'id': datasets.Value('string'),
                    'tokens': datasets.Sequence(datasets.Value('string')),
                    'ner_tags': datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=_DATASET_LABELS
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage='',
            citation='',
        )

    def _split_generators(self, dl_manager):
        """Download the JSONL file and split its lines into train/validation.

        The 80/20 split uses a fixed random_state so the split is
        reproducible across runs.
        """
        data_path = dl_manager.download_and_extract("data.jsonl")

        # JSONL is conventionally UTF-8; pin the encoding rather than
        # relying on the platform default (which differs e.g. on Windows).
        with open(data_path, 'r', encoding='utf-8') as file:
            lines = file.readlines()

        # NOTE(review): a trailing blank line in data.jsonl would reach
        # json.loads in _generate_examples and raise — confirm the data
        # file has no empty lines.
        train_lines, valid_lines = train_test_split(lines, test_size=0.2, random_state=42)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'lines': train_lines}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'lines': valid_lines}),
        ]

    def _generate_examples(self, lines):
        """Yield (key, example) pairs, one per JSONL line.

        Each line must be a JSON object with 'words' (tokens) and 'pos'
        (per-token tags mapped to the 'ner_tags' feature).
        """
        for guid, line in enumerate(lines):
            data = json.loads(line)
            yield guid, {
                'id': str(guid),
                'tokens': data['words'],
                'ner_tags': data['pos'],
            }