Datasets:
Update merged entity link
- collect_vocab.py +93 -0
- entity2id.json +0 -0
- entity2link.json +0 -0
- entityName2id.json +0 -0
- entityName2link.json +0 -0
- extract_name2link.py +45 -0
- id2entity.json +0 -0
- redial.py +75 -10
- vocab.json +0 -0
collect_vocab.py
ADDED
@@ -0,0 +1,93 @@
import os, sys
import nltk
from collections import Counter
import pickle
from datasets import load_dataset
from tqdm import tqdm
import csv
import json
import re


def tokenize(message):
    """
    Text processing: sentence-tokenize, then concatenate the word_tokenize of each sentence, then lowercase.
    :param message:
    :return:
    """
    sentences = nltk.sent_tokenize(message)
    tokenized = []
    for sentence in sentences:
        tokenized += nltk.word_tokenize(sentence)
    return [word.lower() for word in tokenized]


def load_movie_mappings(path):
    id2name = {}
    db2id = {}

    with open(path, 'r') as f:
        reader = csv.reader(f)
        for row in reader:
            if row[0] != "index":  # skip the header row
                id2name[int(row[0])] = row[1]
                db2id[int(row[2])] = int(row[0])

    del db2id[-1]  # drop the placeholder key used when a movie has no database id
    date_pattern = re.compile(r'\(\d{4}\)')

    # get dataset characteristics; remove the release date from each movie name
    db2name = {db: date_pattern.sub('', id2name[id]).strip(" ") for db, id in db2id.items()}
    n_redial_movies = len(db2id.values())  # number of movies mentioned in ReDial
    # name2id = {name: int(i) for i, name in id2name.items() if name != ''}

    # print("loaded {} movies from {}".format(len(name2id), path))
    return id2name, db2name


def get_vocab(dataset, db2name):
    """
    Get the vocabulary from the train data.
    :return: vocabulary
    """
    print(f"Loading vocabulary from {dataset} dataset")
    counter = Counter()
    # get vocabulary from dialogues
    datasets = load_dataset(dataset, download_mode="force_redownload")
    movie_id_pattern = re.compile(r'@(\d+)')
    for subset in ["train", "validation", "test"]:
        for conversation in tqdm(datasets[subset]):
            for message in conversation["messages"]:
                # remove movie ids
                text = tokenize(movie_id_pattern.sub(" ", message))
                counter.update([word.lower() for word in text])
    # get vocabulary from movie names
    for movieId in db2name:
        tokenized_movie = tokenize(db2name[movieId])
        counter.update([word.lower() for word in tokenized_movie])
    # keep the most common words
    kept_vocab = counter.most_common(15000)
    vocab = [x[0] for x in kept_vocab]
    print("Vocab covers {} word instances over {}".format(
        sum([x[1] for x in kept_vocab]),
        sum([counter[x] for x in counter])
    ))
    # note: the <pad> token corresponds to index 0
    vocab = ['<pad>', '<s>', '</s>', '<unk>', '\n'] + vocab

    return vocab


if __name__ == '__main__':
    dataset = 'redial'
    base_dir = os.path.dirname(os.path.abspath(__file__))
    id2entity, db2name = load_movie_mappings(os.path.join(base_dir, "movies_merged.csv"))

    with open(os.path.join(base_dir, 'id2entity.json'), 'w') as f:
        json.dump(id2entity, f)
    # vocab = get_vocab(dataset, db2name)
    # print("vocab has length:", len(vocab))
    # with open(os.path.join(base_dir, 'vocab.json'), 'w') as f:
    #     json.dump(vocab, f)
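For context, a minimal sketch of how the vocabulary written by get_vocab might be consumed downstream; the word2id mapping and the encode helper are illustrative assumptions, not part of this commit:

import json

# a minimal sketch, assuming vocab.json holds the token list produced by get_vocab
with open('vocab.json') as f:
    vocab = json.load(f)

# <pad> sits at index 0 by construction; unknown words fall back to <unk>
word2id = {word: idx for idx, word in enumerate(vocab)}

def encode(tokens):
    # hypothetical helper: map tokens to ids, defaulting to <unk>
    return [word2id.get(token, word2id['<unk>']) for token in tokens]

print(encode(['have', 'you', 'seen', 'zzzunseenword']))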
entity2id.json
ADDED (diff too large to render)

entity2link.json
ADDED (diff too large to render)

entityName2id.json
ADDED (diff too large to render)

entityName2link.json
CHANGED (diff too large to render)
extract_name2link.py
ADDED
@@ -0,0 +1,45 @@
import json
import csv
import re

# DBpedia links (e.g. "<http://dbpedia.org/resource/...>") mapped to entity ids
entityLink2id = json.load(open('entity2id.json'))

reader = csv.reader(open('movies_merged.csv'))

date_pattern = re.compile(r'\(\d+\)')

entity2link = {}

# candidate DBpedia URI templates, tried from most to least specific
temp1 = "<http://dbpedia.org/resource/{}_(film)>"
temp2 = "<http://dbpedia.org/resource/{}_({}_film)>"
temp3 = "<http://dbpedia.org/resource/{}>"


for row in reader:
    if row[0] == 'index':  # skip the header row
        continue
    entity = row[1].strip('"')
    match = date_pattern.search(entity)
    if match:
        # split "Movie Name (1999)" into name and year
        movieName = entity[:match.start()].strip(' ')
        year = match.group(0)[1:-1]
    else:
        movieName = entity.strip(' ')
        year = ''
    movieName = movieName.replace(' ', '_')
    if (t1 := temp1.format(movieName)) in entityLink2id:
        entity2link[entity] = t1
    elif (t2 := temp2.format(movieName, year)) in entityLink2id:
        entity2link[entity] = t2
    elif (t3 := temp3.format(movieName)) in entityLink2id:
        entity2link[entity] = t3


print('entity2link: ', len(entity2link))
# strip the surrounding angle brackets from each matched link
for e, link in entity2link.items():
    entity2link[e] = link[1:-1]
json.dump(entity2link, open('entity2link.json', 'w'))
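Note that the walrus-operator assignments above require Python 3.8+. As a usage sketch, the output maps raw movie names from movies_merged.csv to bare DBpedia URIs; the example title below is an illustrative assumption:

import json

# a minimal sketch, assuming entity2link.json as written by the script above
entity2link = json.load(open('entity2link.json'))

# values have the angle brackets already stripped; missing names return None
name = 'Titanic (1997)'
link = entity2link.get(name)
print(name, '->', link)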
id2entity.json
ADDED (diff too large to render)
redial.py
CHANGED
@@ -1,15 +1,22 @@
 import json
 import re
 from typing import List
-
+import html
 import datasets
 
+ENTITY = 'entity'
+ENTITY_PATTERN = r'<entity>{}</entity>'
+
 logger = datasets.logging.get_logger(__name__)
 
+
 class RedialConfig(datasets.BuilderConfig):
     """BuilderConfig for ReDIAL."""
 
-    def __init__(self, features, **kwargs):
+    def __init__(self, features,
+                 initiator_prefix='User: ',
+                 respondent_prefix='System: ',
+                 **kwargs):
         """BuilderConfig for ReDIAL.
 
         Args:
@@ -19,6 +26,9 @@ class RedialConfig(datasets.BuilderConfig):
         """
         super().__init__(version=datasets.Version("0.0.1"), **kwargs)
         self.features = features
+        self.initiator_prefix = initiator_prefix
+        self.respondent_prefix = respondent_prefix
+
 
 _URL = "./"
 _URLS = {
@@ -28,11 +38,10 @@ _URLS = {
 }
 
 
-
-
 class ReDIAL(datasets.GeneratorBasedBuilder):
     DEFAULT_CONFIG_NAME = "rec"
     BUILDER_CONFIGS = [
+
         RedialConfig(
             name="SA",
             description="For using the ReDIAL dataset to train sentiment analysis on movies in sentences",
@@ -45,7 +54,21 @@ class ReDIAL(datasets.GeneratorBasedBuilder):
                     datasets.Value("int32"), length=6
                 )
             },
+            # certain information (e.g. movie_occurrences) is model-specific, and we leave it for Dataset.map
         ),
+        # RedialConfig(
+        #     name="SA_debug",
+        #     description="For using the ReDIAL dataset to train sentiment analysis on movies in sentences",
+        #     features={
+        #         "id": datasets.Value("int32"),
+        #         "movieName": datasets.Value("string"),
+        #         "messages": datasets.features.Sequence(datasets.Value("string")),
+        #         "senders": datasets.features.Sequence(datasets.Value("int32")),
+        #         "form": datasets.features.Sequence(
+        #             datasets.Value("int32"), length=6
+        #         )
+        #     },
+        # ),
         RedialConfig(
             name="autorec",
             description="For training autorec model on ReDIAL data",
@@ -63,14 +86,19 @@ class ReDIAL(datasets.GeneratorBasedBuilder):
                 "senders": datasets.features.Sequence(datasets.Value("int32")),
             },
         ),
-
+        RedialConfig(
+            name="formatted",
+            description='Embed all information into a text sequence for each dialog',
+            features={
+                "messages": datasets.features.Sequence(datasets.Value("string")),
+            }
+        )
     ]
 
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
         self.last_sender = None
 
-
     def _processMessage(self, msg, initialId):
         """
         msg example: {
@@ -80,13 +108,13 @@ class ReDIAL(datasets.GeneratorBasedBuilder):
             "messageId": 204171
         },
         """
         res = {
             "text": msg["text"],
             "sender": 1 if msg["senderWorkerId"] == initialId else -1
         }
         return res
 
-    def _flattenMessages(self, conversation):
+    def _flattenMessages(self, conversation, add_prefix=False):
         messages = []
         senders = []
         for message in conversation["messages"]:
@@ -96,12 +124,15 @@ class ReDIAL(datasets.GeneratorBasedBuilder):
                 messages[-1] += "\n" + text
             else:
                 senders.append(role)
+                if add_prefix:
+                    prefix = self.config.initiator_prefix if role == 1 else self.config.respondent_prefix
+                    text = prefix + text
             messages.append(text)
         return messages, senders
 
     def _info(self):
         return datasets.DatasetInfo(
-            description=
+            description=self.config.description,
             features=datasets.Features(self.config.features),
         )
 
@@ -114,6 +145,28 @@ class ReDIAL(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
         ]
 
+    movie_pattern = re.compile(r'@(\d+)')
+    default_movie_entity = '<movie>'
+
+    def _process_utt(self, utt, movieid2name, replace_movieId=True, remove_movie=False):
+        def convert(match):
+            movieid = match.group(0)[1:]
+            if movieid in movieid2name:
+                if remove_movie:
+                    return '<movie>'
+                movie_name = movieid2name[movieid]
+                movie_name = ' '.join(movie_name.split())
+                return ENTITY_PATTERN.format(movie_name)
+            else:
+                return match.group(0)
+
+        if replace_movieId:
+            utt = re.sub(self.movie_pattern, convert, utt)
+        utt = ' '.join(utt.split())
+        utt = html.unescape(utt)
+
+        return utt
+
     def _generate_examples(self, filepath):
         """This function returns the examples in the raw (text) form."""
         logger.info("generating examples from = %s", filepath)
@@ -141,7 +194,7 @@ class ReDIAL(datasets.GeneratorBasedBuilder):
 
         elif "SA" in self.config.name:
             Idx = 0
-            date_pattern = re.compile(r'\(\d{4}\)')
+            date_pattern = re.compile(r'\(\d{4}\)')  # To match e.g. "(2009)"
             with open(filepath, encoding="utf-8") as f:
                 for line in f:
                     conversation = json.loads(line)
@@ -179,3 +232,15 @@ class ReDIAL(datasets.GeneratorBasedBuilder):
                         "movieIds": [int(movieId) for movieId in conversation["movieMentions"]]
                     }
                     Idx += 1
+        elif "formatted" in self.config.name:
+            Idx = 0
+            with open(filepath, encoding="utf-8") as f:
+                for line in f:
+                    dialog = json.loads(line)
+                    msgs, senders = self._flattenMessages(dialog, add_prefix=True)
+                    movieid2name = dialog['movieMentions']
+                    formatted_msgs = [self._process_utt(utt, movieid2name) for utt in msgs]
+                    yield Idx, {
+                        "messages": formatted_msgs,
+                    }
+                    Idx += 1
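To see what the new "formatted" config produces, a minimal sketch follows; the standalone replacement mirrors _process_utt, and the movie id @111776, its name, and the local script path are illustrative assumptions:

import re
import html
from datasets import load_dataset

# mirror of _process_utt on a single utterance, with illustrative values
movie_pattern = re.compile(r'@(\d+)')
movieid2name = {'111776': 'Super Troopers (2001)'}

def convert(match):
    movieid = match.group(0)[1:]
    if movieid in movieid2name:
        # collapse runs of whitespace in the name, as _process_utt does
        return '<entity>{}</entity>'.format(' '.join(movieid2name[movieid].split()))
    return match.group(0)

utt = movie_pattern.sub(convert, 'User: have you seen @111776 ?')
print(html.unescape(' '.join(utt.split())))
# -> User: have you seen <entity>Super Troopers (2001)</entity> ?

# loading the config itself, assuming redial.py works as a local dataset script
dataset = load_dataset('./redial.py', name='formatted')
print(dataset['train'][0]['messages'][:3])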
vocab.json
CHANGED (diff too large to render)