Datasets:

Modalities:
Text
Libraries:
Datasets
dibyaaaaax committed on
Commit
d49b7e4
·
1 Parent(s): a09c9ce

Upload kdd.py

Browse files
Files changed (1) hide show
  1. kdd.py +145 -0
kdd.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import json

import datasets

# _SPLIT = ['test']

# BibTeX entry for the paper that introduced this dataset (EMNLP 2014).
_CITATION = """\
@inproceedings{caragea-etal-2014-citation,
title = "Citation-Enhanced Keyphrase Extraction from Research Papers: A Supervised Approach",
author = "Caragea, Cornelia and
Bulgarov, Florin Adrian and
Godea, Andreea and
Das Gollapalli, Sujatha",
booktitle = "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing ({EMNLP})",
month = oct,
year = "2014",
address = "Doha, Qatar",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/D14-1150",
doi = "10.3115/v1/D14-1150",
pages = "1435--1446",
}
"""

# TODO: fill in a short description of the dataset (currently blank).
_DESCRIPTION = """\

"""

_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# TODO: Add link to the official dataset URLs here

# Data file(s) handed to the download manager; presumably resolved
# relative to the dataset repository — TODO confirm.
_URLS = {
    "test": "test.jsonl"
}
class KDD(datasets.GeneratorBasedBuilder):
    """Keyphrase dataset loader built from a JSON-lines test file.

    Three configurations are exposed:

    * ``extraction`` - each example is a tokenized document plus BIO tags
      marking extractive keyphrases.
    * ``generation`` - each example is a tokenized document plus lists of
      extractive and abstractive keyphrases.
    * ``raw``        - all of the above plus an ``other_metadata`` field
      carried through from the source file.

    Source corpus: Caragea et al., EMNLP 2014 (see ``_CITATION``).
    """

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="extraction", version=VERSION,
                               description="This part of my dataset covers extraction"),
        datasets.BuilderConfig(name="generation", version=VERSION,
                               description="This part of my dataset covers generation"),
        datasets.BuilderConfig(name="raw", version=VERSION, description="This part of my dataset covers the raw data"),
    ]

    DEFAULT_CONFIG_NAME = "extraction"

    def _info(self):
        """Build the ``DatasetInfo``; the feature schema depends on the config."""

        def string_seq():
            # Fresh Sequence-of-strings feature per column (avoids sharing
            # one feature object across several schema entries).
            return datasets.features.Sequence(datasets.Value("string"))

        if self.config.name == "extraction":
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": string_seq(),
                    "doc_bio_tags": string_seq(),
                }
            )
        elif self.config.name == "generation":
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": string_seq(),
                    "extractive_keyphrases": string_seq(),
                    "abstractive_keyphrases": string_seq(),
                }
            )
        else:  # "raw": full record, including pass-through metadata
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": string_seq(),
                    "doc_bio_tags": string_seq(),
                    "extractive_keyphrases": string_seq(),
                    "abstractive_keyphrases": string_seq(),
                    "other_metadata": datasets.features.Sequence(
                        {
                            "text": string_seq(),
                            "bio_tags": string_seq(),
                        }
                    ),
                }
            )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the data files and declare the single ``test`` split."""
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir['test'],
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` tuples from the JSON-lines file.

        ``split`` is currently unused (only a test split exists) but is kept
        because ``_split_generators`` passes it via ``gen_kwargs``.
        Note: ``paper_id`` is accessed with ``[]`` (required), while optional
        fields use ``.get`` and may come back as None.
        """
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                if self.config.name == "extraction":
                    # Yields examples as (key, example) tuples
                    yield key, {
                        "id": data['paper_id'],
                        "document": data["document"],
                        "doc_bio_tags": data.get("doc_bio_tags"),
                    }
                elif self.config.name == "generation":
                    yield key, {
                        "id": data['paper_id'],
                        "document": data["document"],
                        "extractive_keyphrases": data.get("extractive_keyphrases"),
                        "abstractive_keyphrases": data.get("abstractive_keyphrases"),
                    }
                else:
                    yield key, {
                        "id": data['paper_id'],
                        "document": data["document"],
                        "doc_bio_tags": data.get("doc_bio_tags"),
                        "extractive_keyphrases": data.get("extractive_keyphrases"),
                        "abstractive_keyphrases": data.get("abstractive_keyphrases"),
                        "other_metadata": data["other_metadata"],
                    }