holylovenia committed on
Commit
43acc61
1 Parent(s): 97aeda6

Upload alt_burmese_treebank.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. alt_burmese_treebank.py +151 -0
alt_burmese_treebank.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import os
16
+ from pathlib import Path
17
+ from typing import Dict, List, Tuple
18
+
19
+ import datasets
20
+
21
+ from seacrowd.sea_datasets.alt_burmese_treebank.utils.alt_burmese_treebank_utils import extract_data
22
+ from seacrowd.utils import schemas
23
+ from seacrowd.utils.configs import SEACrowdConfig
24
+ from seacrowd.utils.constants import Licenses, Tasks
25
+
26
# BibTeX entry for the accompanying paper (ACM TALLIP 2020) describing the treebank.
_CITATION = """\
@article{
10.1145/3373268,
author = {Ding, Chenchen and Yee, Sann Su Su and Pa, Win Pa and Soe, Khin Mar and Utiyama, Masao and Sumita, Eiichiro},
title = {A Burmese (Myanmar) Treebank: Guideline and Analysis},
year = {2020},
issue_date = {May 2020},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
volume = {19},
number = {3},
issn = {2375-4699},
url = {https://doi.org/10.1145/3373268},
doi = {10.1145/3373268},
abstract = {A 20,000-sentence Burmese (Myanmar) treebank on news articles has been released under a CC BY-NC-SA license.\
Complete phrase structure annotation was developed for each sentence from the morphologically annotated data\
prepared in previous work of Ding et al. [1]. As the final result of the Burmese component in the Asian\
Language Treebank Project, this is the first large-scale, open-access treebank for the Burmese language.\
The annotation details and features of this treebank are presented.\
},
journal = {ACM Trans. Asian Low-Resour. Lang. Inf. Process.},
month = {jan},
articleno = {40},
numpages = {13},
keywords = {Burmese (Myanmar), phrase structure, treebank}
}
"""

# Canonical dataset identifier; also used to build config names and subset ids below.
_DATASETNAME = "alt_burmese_treebank"

_DESCRIPTION = """\
A 20,000-sentence Burmese (Myanmar) treebank on news articles containing complete phrase structure annotation.\
As the final result of the Burmese component in the Asian Language Treebank Project, this is the first large-scale,\
open-access treebank for the Burmese language.
"""

_HOMEPAGE = "https://zenodo.org/records/3463010"

# ISO 639-3 code for Burmese (Myanmar).
_LANGUAGES = ["mya"]

_LICENSE = Licenses.CC_BY_NC_SA_4_0.value

# False: the data is fetched from a public URL, not supplied locally by the user.
_LOCAL = False

# Zenodo archive containing the treebank files; `?download=1` forces a direct download.
_URLS = {
    _DATASETNAME: "https://zenodo.org/records/3463010/files/my-alt-190530.zip?download=1",
}

_SUPPORTED_TASKS = [Tasks.CONSTITUENCY_PARSING]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"
80
+
81
class AltBurmeseTreebank(datasets.GeneratorBasedBuilder):
    """A 20,000-sentence Burmese (Myanmar) treebank on news articles with complete phrase structure annotation.

    As the final result of the Burmese component in the Asian Language Treebank Project, this is the
    first large-scale, open-access treebank for the Burmese language.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_tree",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_tree",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with the feature schema matching the selected config."""
        if self.config.schema == "source":
            # Source schema: raw id/text pairs, one bracketed tree string per line.
            features = datasets.Features({"id": datasets.Value("string"), "text": datasets.Value("string")})
        elif self.config.schema == "seacrowd_tree":
            features = schemas.tree_features
        else:
            # Fail loudly instead of letting `features` be unbound below.
            raise ValueError(f"Unsupported schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download and extract the Zenodo archive; the treebank ships a single (train) file."""
        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    # Path of the data file inside the extracted archive.
                    "filepath": os.path.join(data_dir, "my-alt-190530/data"),
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples.

        Each data line is `<id>\\t<annotated sentence>`. The file holds Burmese text, so it is
        read explicitly as UTF-8 rather than with the platform default encoding.
        """
        if self.config.schema == "source":
            with open(filepath, "r", encoding="utf-8") as f:
                idx = 0
                for line in f:
                    line = line.rstrip("\r\n")
                    if not line:
                        # Skip blank lines (e.g. a trailing newline at EOF) instead of crashing.
                        continue
                    # maxsplit=1 keeps any tabs inside the sentence text intact.
                    example_id, text = line.split("\t", 1)
                    yield idx, {"id": example_id, "text": text}
                    idx += 1

        elif self.config.schema == "seacrowd_tree":
            with open(filepath, "r", encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    # extract_data converts the bracketed tree line into the SEACrowd tree schema.
                    yield idx, extract_data(line)