holylovenia committed
Commit 918b382
1 Parent(s): b3d4260

Upload su_emot.py with huggingface_hub

Files changed (1)
  1. su_emot.py +126 -0
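
The commit message mentions huggingface_hub. For context, a minimal sketch of how a loader script like this is typically pushed to a dataset repo with that library; the repo_id is a hypothetical placeholder, not taken from this commit:

    from huggingface_hub import HfApi

    api = HfApi()
    # Upload the loader script into a dataset repo on the Hub.
    api.upload_file(
        path_or_fileobj="su_emot.py",
        path_in_repo="su_emot.py",
        repo_id="SEACrowd/su_emot",  # hypothetical repo id
        repo_type="dataset",
        commit_message="Upload su_emot.py with huggingface_hub",
    )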
su_emot.py ADDED
@@ -0,0 +1,126 @@
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import DEFAULT_SEACROWD_VIEW_NAME, DEFAULT_SOURCE_VIEW_NAME, Tasks

_DATASETNAME = "su_emot"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME

_LANGUAGES = ["sun"]
_LOCAL = False
_CITATION = """\
@INPROCEEDINGS{9297929,
  author={Putra, Oddy Virgantara and Wasmanson, Fathin Muhammad and Harmini, Triana and Utama, Shoffin Nahwa},
  booktitle={2020 International Conference on Computer Engineering, Network, and Intelligent Multimedia (CENIM)},
  title={Sundanese Twitter Dataset for Emotion Classification},
  year={2020},
  volume={},
  number={},
  pages={391--395},
  doi={10.1109/CENIM51130.2020.9297929}
}
"""

_DESCRIPTION = """\
This is a dataset for emotion classification of Sundanese text. The dataset was gathered from the Twitter API between January and March 2019, with 2,518 tweets in total.
The tweets were filtered using hashtags that represent Sundanese emotions, for instance #persib, #corona, #saredih, #nyakakak, #garoblog, #sangsara, #gumujeng, #bungah, #sararieun, #ceurik, and #hariwang.
The dataset contains four distinct emotions: anger, joy, fear, and sadness. Each tweet is annotated with the related emotion. For data validation, the authors consulted a Sundanese language teacher for expert validation.
"""
_HOMEPAGE = "https://github.com/virgantara/sundanese-twitter-dataset"

_LICENSE = "UNKNOWN"

_URLS = {
    "datasets": "https://raw.githubusercontent.com/virgantara/sundanese-twitter-dataset/master/newdataset.csv"
}

_SUPPORTED_TASKS = [Tasks.EMOTION_CLASSIFICATION]


_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class SuEmot(datasets.GeneratorBasedBuilder):
    """Emotion classification dataset of 2,518 Sundanese tweets gathered from the Twitter API between January and March 2019."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="su_emot_source",
            version=SOURCE_VERSION,
            description="Sundanese Twitter Dataset for Emotion source schema",
            schema="source",
            subset_id="su_emot",
        ),
        SEACrowdConfig(
            name="su_emot_seacrowd_text",
            version=SEACROWD_VERSION,
            description="Sundanese Twitter Dataset for Emotion SEACrowd schema",
            schema="seacrowd_text",
            subset_id="su_emot",
        ),
    ]

    DEFAULT_CONFIG_NAME = "su_emot_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "index": datasets.Value("string"),
                    "data": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_text":
            features = schemas.text_features(["anger", "joy", "fear", "sadness"])

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # The dataset ships as a single CSV file; everything goes into the train split.
        data_dir = Path(dl_manager.download_and_extract(_URLS["datasets"]))
        data_files = {"train": data_dir}

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_files["train"],
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        # The CSV holds two columns (label, text); reset_index() adds a running id
        # that becomes the "index" column after renaming.
        df = pd.read_csv(filepath, sep=",", header="infer").reset_index()
        df.columns = ["index", "label", "data"]

        if self.config.schema == "source":
            for row in df.itertuples():
                ex = {"index": str(row.index + 1), "data": row.data, "label": row.label}
                yield row.index, ex
        elif self.config.schema == "seacrowd_text":
            for row in df.itertuples():
                ex = {"id": str(row.index + 1), "text": row.data, "label": row.label}
                yield row.index, ex
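
Once the script is in place, the loader can be exercised through the datasets library. A minimal sketch, assuming the script is available locally, the seacrowd package is installed, and a datasets version recent enough to require the trust_remote_code flag:

    import datasets

    # Load the source schema (raw index/data/label columns); "su_emot.py" is the local script path.
    ds = datasets.load_dataset("su_emot.py", name="su_emot_source", split="train", trust_remote_code=True)
    print(ds[0])  # e.g. {"index": "1", "data": "<tweet text>", "label": "<emotion>"}

Passing name="su_emot_seacrowd_text" instead yields the unified id/text/label schema defined by schemas.text_features.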