Martyna Wiącek committed
Commit: 373367f
Parent: abe1c7a

added script for loading dataset

Files changed (1):
  nlprepl.py  +161  -0
nlprepl.py ADDED
@@ -0,0 +1,161 @@
+"""NKJP1M: The manually annotated subcorpus of the National Corpus of Polish"""
+import conllu
+import datasets
+
+
+logger = datasets.logging.get_logger(__name__)
+
+
+_CITATION = ""
+
+# Split schemes: documents are assigned to train/dev/test either by name or by type.
+BY_NAME = "by_name"
+BY_TYPE = "by_type"
+
+# Supported tagsets: the native NKJP tagset and Universal Dependencies.
+TAGSET_NKJP = "nkjp"
+TAGSET_UD = "ud"
+
+_DESCRIPTION = {
+    BY_NAME: {
+        TAGSET_NKJP: "NLPrePL divided by document name for NKJP tagset",
+        TAGSET_UD: "NLPrePL divided by document name for UD tagset"
+    },
+    BY_TYPE: {
+        TAGSET_NKJP: "NLPrePL divided by document type for NKJP tagset",
+        TAGSET_UD: "NLPrePL divided by document type for UD tagset"
+    }
+}
+
+_TYPES = [BY_NAME, BY_TYPE]
+_TAGSETS = [TAGSET_NKJP, TAGSET_UD]
+
+_URLS = {
+    BY_NAME: {
+        TAGSET_NKJP: {
+            'train': "nkjp_tagset/fair_by_document_name/train_nlprepl-nkjp.conllu.gz",
+            'dev': "nkjp_tagset/fair_by_document_name/dev_nlprepl-nkjp.conllu.gz",
+            'test': "nkjp_tagset/fair_by_document_name/test_nlprepl-nkjp.conllu.gz"
+        },
+        TAGSET_UD: {
+            'train': "ud_tagset/fair_by_document_name/train_nlprepl-ud.conllu.gz",
+            'dev': "ud_tagset/fair_by_document_name/dev_nlprepl-ud.conllu.gz",
+            'test': "ud_tagset/fair_by_document_name/test_nlprepl-ud.conllu.gz"
+        }
+    },
+    BY_TYPE: {
+        TAGSET_NKJP: {
+            'train': "nkjp_tagset/fair_by_document_type/train_nlprepl-nkjp.conllu.gz",
+            'dev': "nkjp_tagset/fair_by_document_type/dev_nlprepl-nkjp.conllu.gz",
+            'test': "nkjp_tagset/fair_by_document_type/test_nlprepl-nkjp.conllu.gz"
+        },
+        TAGSET_UD: {
+            'train': "ud_tagset/fair_by_document_type/train_nlprepl-ud.conllu.gz",
+            'dev': "ud_tagset/fair_by_document_type/dev_nlprepl-ud.conllu.gz",
+            'test': "ud_tagset/fair_by_document_type/test_nlprepl-ud.conllu.gz"
+        }
+    }
+}
+
+
+class NLPrePLConfig(datasets.BuilderConfig):
+    """BuilderConfig for NLPrePL."""
+
+    def __init__(self, tagset, **kwargs):
+        """BuilderConfig for NLPrePL.
+
+        Args:
+          tagset: string, tagset of this variant ("nkjp" or "ud").
+          **kwargs: keyword arguments forwarded to super.
+        """
+        super(NLPrePLConfig, self).__init__(**kwargs)
+        self.tagset = tagset
+
+
+class NLPrePL(datasets.GeneratorBasedBuilder):
+    """NLPrePL dataset."""
+
+    # One config per (split scheme, tagset) pair, e.g. "by_name-nkjp".
+    BUILDER_CONFIGS = [
+        NLPrePLConfig(
+            name=t + "-" + tagset,
+            version=datasets.Version("1.0.0"),
+            tagset=tagset,
+            description=_DESCRIPTION[t][tagset]
+        )
+        for t in _TYPES for tagset in _TAGSETS
+    ]
+
+    def _info(self):
+        dataset, tagset = self.config.name.split("-")
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION[dataset][tagset],
+            # One example per sentence; token-level fields are aligned sequences.
+            features=datasets.Features(
+                {
+                    "sent_id": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "id": datasets.Sequence(datasets.Value("string")),
+                    "tokens": datasets.Sequence(datasets.Value("string")),
+                    "lemmas": datasets.Sequence(datasets.Value("string")),
+                    "upos": datasets.Sequence(datasets.Value("string")),
+                    "xpos": datasets.Sequence(datasets.Value("string")),
+                    "feats": datasets.Sequence(datasets.Value("string")),
+                    "head": datasets.Sequence(datasets.Value("string")),
+                    "deprel": datasets.Sequence(datasets.Value("string")),
+                    "deps": datasets.Sequence(datasets.Value("string")),
+                    "misc": datasets.Sequence(datasets.Value("string")),
+                }
+            ),
+            supervised_keys=None,
+            homepage="http://nkjp.pl/",
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        dataset, tagset = self.config.name.split("-")
+        urls = _URLS[dataset][tagset]
+        downloaded_files = dl_manager.download_and_extract(urls)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"filepath": downloaded_files["train"]}),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"filepath": downloaded_files["dev"]}),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={"filepath": downloaded_files["test"]}),
+        ]
+
+    def _generate_examples(self, filepath):
+        example_id = 0  # running index; also the fallback sentence id
+        logger.info("⏳ Generating examples from = %s", filepath)
+        with open(filepath, 'r', encoding="utf-8") as f:
+            # Stream sentences instead of materialising the whole file in memory.
+            for sent in conllu.parse_incr(f):
+                # Prefer the sentence id from the CoNLL-U metadata when present.
+                if "sent_id" in sent.metadata:
+                    idx = sent.metadata["sent_id"]
+                else:
+                    idx = example_id
+
+                tokens = [token["form"] for token in sent]
+
+                # Fall back to space-joined tokens when the raw text is missing.
+                if "text" in sent.metadata:
+                    txt = sent.metadata["text"]
+                else:
+                    txt = " ".join(tokens)
+
+                yield example_id, {
+                    "sent_id": str(idx),
+                    "text": txt,
+                    "id": [str(token["id"]) for token in sent],
+                    "tokens": tokens,
+                    "lemmas": [token["lemma"] for token in sent],
+                    "upos": [token["upos"] for token in sent],
+                    "xpos": [token["xpos"] for token in sent],
+                    "feats": [str(token["feats"]) for token in sent],
+                    "head": [str(token["head"]) for token in sent],
+                    "deprel": [str(token["deprel"]) for token in sent],
+                    "deps": [str(token["deps"]) for token in sent],
+                    "misc": [str(token["misc"]) for token in sent],
+                }
+                example_id += 1
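For reference, a minimal usage sketch of the loader added in this commit, assuming the script is hosted in this dataset repository (the repo id below is a placeholder, not confirmed by the commit; recent versions of the datasets library may also require trust_remote_code=True for script-based datasets):

from datasets import load_dataset

# Config names combine a split scheme and a tagset:
# "by_name-nkjp", "by_name-ud", "by_type-nkjp", "by_type-ud".
dataset = load_dataset("<namespace>/nlprepl", name="by_name-nkjp")

sent = dataset["train"][0]
print(sent["text"])    # raw sentence text
print(sent["tokens"])  # token forms
print(sent["xpos"])    # NKJP-tagset morphosyntactic tags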