jeffnyman committed
Commit: dcb07a1
Parent: 78103b0

Initial project setup.
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.jpg filter=lfs diff=lfs merge=lfs -text
*.jpeg filter=lfs diff=lfs merge=lfs -text
*.webp filter=lfs diff=lfs merge=lfs -text
+ data/data.jsonl.gz filter=lfs diff=lfs merge=lfs -text
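The new rule routes the gzipped data file through Git LFS, so only a small pointer is committed to the repository. As a quick sanity check, a minimal sketch (assuming it runs from the repository root) that reads the LFS-tracked patterns back out of `.gitattributes`:

```python
# List the patterns that .gitattributes routes through Git LFS; after this
# commit the set should include data/data.jsonl.gz.
with open(".gitattributes", encoding="utf-8") as fh:
    lfs_patterns = [
        line.split()[0]
        for line in fh
        if "filter=lfs" in line and not line.lstrip().startswith("#")
    ]

print("data/data.jsonl.gz" in lfs_patterns)  # expected: True
```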
data/data.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8944e6b35cb42294769ac30cf17bd006231545b2eeecfa59324246e192564d1f
+ size 15388281
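What is committed here is a Git LFS pointer, not the data itself; the real archive is fetched on `git lfs pull` or when the Hub serves the file. A minimal sketch (assuming the archive has already been downloaded to the path below) for checking a local copy against the pointer's `oid`, which hashes the compressed bytes as stored:

```python
import hashlib

# Hypothetical local path; point this at wherever the archive was fetched.
path = "data/data.jsonl.gz"

digest = hashlib.sha256()
with open(path, "rb") as fh:
    # Hash the raw .gz bytes in chunks; the LFS oid refers to the stored file,
    # not its decompressed contents.
    for chunk in iter(lambda: fh.read(1 << 20), b""):
        digest.update(chunk)

print(digest.hexdigest())
# Expected, from the pointer above:
# 8944e6b35cb42294769ac30cf17bd006231545b2eeecfa59324246e192564d1f
```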
data/test.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4524468d0b7ee8eab07a088216cde7f9278f1c574669504a805ed172df6dad75
+ size 74935
data/train.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:757a0a73f1483f4b3f94783b774cdbf0831722a2b2c9abb5b820b4614ff6882a
+ size 591930
data/validation.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50783464882f450f88e61ece964a200e492495eed1472ed520d013bbcd3049be
+ size 74018
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"description": "\nEmotions is a dataset of English Twitter messages with six basic emotions:\nanger, fear, joy, love, sadness, and surprise. For more detailed information\nplease refer to the paper.\n", "citation": "@inproceedings{saravia-etal-2018-carer,\n title = \"{CARER}: Contextualized Affect Representations for Emotion Recognition\",\n author = \"Saravia, Elvis and\n Liu, Hsien-Chi Toby and\n Huang, Yen-Hao and\n Wu, Junlin and\n Chen, Yi-Shin\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing\",\n month = oct # \"-\" # nov,\n year = \"2018\",\n address = \"Brussels, Belgium\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/D18-1404\",\n doi = \"10.18653/v1/D18-1404\",\n pages = \"3687--3697\",\n abstract = \"Emotions are expressed in nuanced ways, which varies by collective or individual experiences, knowledge, and beliefs. Therefore, to understand emotion, as conveyed through text, a robust mechanism capable of capturing and modeling different linguistic nuances and phenomena is needed. We propose a semi-supervised, graph-based algorithm to produce rich structural descriptors which serve as the building blocks for constructing contextualized affect representations from text. The pattern-based representations are further enriched with word embeddings and evaluated through several emotion recognition tasks. Our experimental results demonstrate that the proposed method outperforms state-of-the-art techniques on emotion recognition tasks.\",\n}\n", "homepage": "https://huggingface.co/datasets/jeffnyman/emotions", "license": "cc-by-sa-4.0", "features": {"text": {"dtype": "string", "_type": "Value"}, "label": {"names": ["sadness", "joy", "love", "anger", "fear", "surprise"], "_type": "ClassLabel"}}, "supervised_keys": {"input": "text", "output": "label"}, "task_templates": [{"task": "text-classification", "label_column": "label"}], "builder_name": "emotions", "dataset_name": "emotions", "config_name": "split", "version": {"version_str": "1.0.0", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1741533, "num_examples": 16000, "dataset_name": "emotions"}, "validation": {"name": "validation", "num_bytes": 214695, "num_examples": 2000, "dataset_name": "emotions"}, "test": {"name": "test", "num_bytes": 217173, "num_examples": 2000, "dataset_name": "emotions"}}, "download_checksums": {"data/train.jsonl.gz": {"num_bytes": 591930, "checksum": null}, "data/validation.jsonl.gz": {"num_bytes": 74018, "checksum": null}, "data/test.jsonl.gz": {"num_bytes": 74935, "checksum": null}}, "download_size": 740883, "dataset_size": 2173401, "size_in_bytes": 2914284}
emotions.py ADDED
@@ -0,0 +1,111 @@
+ import json
+
+ import datasets
+ from datasets.tasks import TextClassification
+
+
+ _CITATION = """\
+ @inproceedings{saravia-etal-2018-carer,
+     title = "{CARER}: Contextualized Affect Representations for Emotion Recognition",
+     author = "Saravia, Elvis and
+       Liu, Hsien-Chi Toby and
+       Huang, Yen-Hao and
+       Wu, Junlin and
+       Chen, Yi-Shin",
+     booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
+     month = oct # "-" # nov,
+     year = "2018",
+     address = "Brussels, Belgium",
+     publisher = "Association for Computational Linguistics",
+     url = "https://www.aclweb.org/anthology/D18-1404",
+     doi = "10.18653/v1/D18-1404",
+     pages = "3687--3697",
+     abstract = "Emotions are expressed in nuanced ways, which varies by collective or individual experiences, knowledge, and beliefs. Therefore, to understand emotion, as conveyed through text, a robust mechanism capable of capturing and modeling different linguistic nuances and phenomena is needed. We propose a semi-supervised, graph-based algorithm to produce rich structural descriptors which serve as the building blocks for constructing contextualized affect representations from text. The pattern-based representations are further enriched with word embeddings and evaluated through several emotion recognition tasks. Our experimental results demonstrate that the proposed method outperforms state-of-the-art techniques on emotion recognition tasks.",
+ }
+ """
+
+ _DESCRIPTION = """
+ Emotions is a dataset of English Twitter messages with six basic emotions:
+ anger, fear, joy, love, sadness, and surprise. For more detailed information
+ please refer to the paper.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/jeffnyman/emotions"
+
+ _LICENSE = "cc-by-sa-4.0"
+
+ _URLS = {
+     "split": {
+         "train": "data/train.jsonl.gz",
+         "validation": "data/validation.jsonl.gz",
+         "test": "data/test.jsonl.gz",
+     },
+     "unsplit": {
+         "train": "data/data.jsonl.gz",
+     },
+ }
+
+
+ class Emotions(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.0.0")
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="split",
+             version=VERSION,
+             description="Dataset split in train, validation and test",
+         ),
+         datasets.BuilderConfig(
+             name="unsplit", version=VERSION, description="Unsplit dataset"
+         ),
+     ]
+     DEFAULT_CONFIG_NAME = "split"
+
+     def _info(self):
+         class_names = ["sadness", "joy", "love", "anger", "fear", "surprise"]
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                     "label": datasets.ClassLabel(names=class_names),
+                 }
+             ),
+             supervised_keys=("text", "label"),
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+             license=_LICENSE,
+             task_templates=[
+                 TextClassification(text_column="text", label_column="label")
+             ],
+         )
+
+     def _split_generators(self, dl_manager):
+         paths = dl_manager.download_and_extract(_URLS[self.config.name])
+
+         if self.config.name == "split":
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN, gen_kwargs={"filepath": paths["train"]}
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={"filepath": paths["validation"]},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST, gen_kwargs={"filepath": paths["test"]}
+                 ),
+             ]
+         else:
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN, gen_kwargs={"filepath": paths["train"]}
+                 )
+             ]
+
+     def _generate_examples(self, filepath):
+         with open(filepath, encoding="utf-8") as f:
+             for idx, line in enumerate(f):
+                 example = json.loads(line)
+
+                 yield idx, example
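With the loading script in place, the dataset can be pulled straight from the Hub. A brief usage sketch (assuming the repo is published as `jeffnyman/emotions`; recent `datasets` releases that still execute loading scripts may additionally require `trust_remote_code=True`):

```python
from datasets import load_dataset

# Default "split" config: train / validation / test splits.
emotions = load_dataset("jeffnyman/emotions")
print(emotions)
print(emotions["train"][0])  # {"text": ..., "label": ...}

# "unsplit" config: all examples in a single train split.
unsplit = load_dataset("jeffnyman/emotions", "unsplit")
print(unsplit["train"].num_rows)
```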