import csv
import json
import os
import pickle
import re
from collections import Counter

import nltk
from datasets import load_dataset
from tqdm import tqdm


def tokenize(message):
    """
    Sentence-tokenize the message, then concatenate the word_tokenize output
    of each sentence and lowercase every token.

    :param message: raw text string
    :return: list of lowercased tokens
    """
    sentences = nltk.sent_tokenize(message)
    tokenized = []
    for sentence in sentences:
        tokenized += nltk.word_tokenize(sentence)
    return [word.lower() for word in tokenized]
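
# Illustrative usage (assumes NLTK's "punkt" sentence tokenizer data is
# available, e.g. via nltk.download('punkt')):
#   tokenize("I loved Titanic. Have you seen it?")
#   -> ['i', 'loved', 'titanic', '.', 'have', 'you', 'seen', 'it', '?']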


def load_movie_mappings(path):
    """
    Build the movie mappings from the merged CSV file.

    :param path: path to the merged movie CSV (index, movie name, database id)
    :return: (id2name, db2name), where id2name maps the ReDial movie index to
        the full movie name and db2name maps the database id to the movie name
        with its "(YYYY)" release date stripped.
    """
    id2name = {}
    db2id = {}

    with open(path, 'r') as f:
        reader = csv.reader(f)
        for row in reader:
            # Skip the header row.
            if row[0] != "index":
                id2name[int(row[0])] = row[1]
                db2id[int(row[2])] = int(row[0])

    # Movies without a database entry are stored under id -1; drop them.
    db2id.pop(-1, None)

    # Strip the "(YYYY)" release date from the movie names.
    date_pattern = re.compile(r'\(\d{4}\)')
    db2name = {db: date_pattern.sub('', id2name[idx]).strip() for db, idx in db2id.items()}

    return id2name, db2name
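
# Assumed CSV layout, inferred from the column accesses above (the row values
# are illustrative):
#   index,movieName,databaseId
#   0,Titanic (1997),111776
# Rows whose database id is -1 have no database entry and are dropped from
# db2name.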


def get_vocab(dataset, db2name):
    """
    Build the vocabulary from the train, validation, and test conversations,
    plus the movie names.

    :param dataset: name of the dataset passed to datasets.load_dataset
    :param db2name: mapping from database id to movie name
    :return: vocabulary as a list of tokens
    """
    print(f"Loading vocabulary from {dataset} dataset")
    counter = Counter()

    datasets = load_dataset(dataset, download_mode="force_redownload")
    # Movie mentions appear in messages as "@<movieId>"; strip them out.
    mention_pattern = re.compile(r'@(\d+)')
    for subset in ["train", "validation", "test"]:
        for conversation in tqdm(datasets[subset]):
            for message in conversation["messages"]:
                # tokenize() already lowercases, so no extra lowering is needed.
                text = tokenize(mention_pattern.sub(" ", message))
                counter.update(text)

    # Also count the tokens of every movie name.
    for movieId in db2name:
        tokenized_movie = tokenize(db2name[movieId])
        counter.update(tokenized_movie)

    # Keep the 15,000 most frequent tokens.
    kept_vocab = counter.most_common(15000)
    vocab = [x[0] for x in kept_vocab]
    print("Vocab covers {} word instances over {}".format(
        sum(x[1] for x in kept_vocab),
        sum(counter.values())
    ))

    # Prepend the special tokens.
    vocab = ['<pad>', '<s>', '</s>', '<unk>', '\n'] + vocab

    return vocab
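
# Illustrative effect of the mention stripping in get_vocab() (the movie id is
# made up): mention_pattern.sub(" ", "Have you seen @111776 ?") yields
# "Have you seen   ?", which tokenize() turns into ['have', 'you', 'seen', '?'].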


if __name__ == '__main__':
    dataset = 'redial'
    base_dir = os.path.dirname(os.path.abspath(__file__))
    id2entity, db2name = load_movie_mappings(os.path.join(base_dir, "movies_merged.csv"))

    with open(os.path.join(base_dir, 'id2entity.json'), 'w') as f:
        json.dump(id2entity, f)
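
    # Hedged sketch, not in the original script: get_vocab() is defined above
    # but never called here, and the pickle import is otherwise unused. A
    # plausible completion builds the vocabulary and persists it; the
    # 'vocab.pkl' filename is an assumption.
    vocab = get_vocab(dataset, db2name)
    with open(os.path.join(base_dir, 'vocab.pkl'), 'wb') as f:
        pickle.dump(vocab, f)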