"""
- Wiki-One https://sites.cs.ucsb.edu/~xwhan/datasets/wiki.tar.gz
- NELL-One https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz

wget https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz
tar -xzf nell.tar.gz

wget https://sites.cs.ucsb.edu/~xwhan/datasets/wiki.tar.gz
tar -xzf wiki.tar.gz

"""
import os
import json
import re
from itertools import chain

data_dir_nell = "NELL"
os.makedirs("data", exist_ok=True)
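
# Optional: the archive can also be fetched from Python instead of the wget/tar commands
# in the module docstring. This is only a standard-library sketch (urllib + tarfile) and
# is not called anywhere in this script.
def download_nell(url="https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz", archive="nell.tar.gz"):
    import tarfile
    import urllib.request
    if not os.path.exists(data_dir_nell):
        urllib.request.urlretrieve(url, archive)
        # Extracting the archive is expected to produce the NELL/ directory used below.
        with tarfile.open(archive, "r:gz") as tar:
            tar.extractall(".")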

short = ['alcs', "uk", "us", "usa", "npr", "nbc", "bbc", "cnn", "abc", "cbs", "nfl", "mlb", "nba", "nhl", "pga", "ncaa",
         "wjhu", "pbs", "un"]
non_entity_types = [
    'academicfield',
    'agent',
    'agriculturalproduct',
    'amphibian',
    'animal',
    'aquarium',
    'arachnid',
    'architect',
    'arthropod',
    'bakedgood',
    'bathroomitem',
    'bedroomitem',
    'beverage',
    'bird',
    'blog',
    'bodypart',
    'bone',
    'candy',
    'cave',
    'chemical',
    'clothing',
    'coffeedrink',
    'condiment',
    'crimeorcharge',
    'crustacean',
    'date',
    'dateliteral',
    'economicsector',
    'fish',
    'food',
    'fruit',
    'fungus',
    'furniture',
    'grain',
    'hallwayitem',
    'hobby',
    'insect',
    'invertebrate',
    'jobposition',
    'kitchenitem',
    'landscapefeatures',
    'legume',
    'location',
    'mammal',
    'meat',
    'mlsoftware',
    'mollusk',
    'month',
    'nut',
    'officebuildingroom',
    'physiologicalcondition',
    'plant',
    'politicsissue',
    'profession',
    'professionalorganization',
    'reptile',
    'room',
    'sport',
    'tableitem',
    'tradeunion',
    'vegetable',
    'vehicle',
    'vertebrate',
    'weapon',
    'wine'
]


def clean(token):
    """Convert a raw NELL id of the form "prefix:type:name" into a (surface form, type) pair."""
    _, _type, token = token.split(":")
    # Underscores separate words in the raw ids.
    token = token.replace("_", " ")
    token = token.replace("__", "")
    # Drop trailing numeric disambiguation suffixes such as "001".
    token = re.sub(r"00\d\Z", "", token)
    # Strip a leading "n" in front of purely numeric names (e.g. "n1990" -> "1990").
    token = re.sub(r"\An(\d+)", r"\1", token)
    if _type in non_entity_types:
        # Non-entity types (dates, foods, ...) are kept lower-cased as-is.
        return token, _type
    # Named entities: upper-case known abbreviations, capitalize every other word.
    new_token = []
    for _t in token.split(" "):
        if len(_t) == 0:
            continue
        if _t in short:
            _t = _t.upper()
        else:
            _t = _t.capitalize()
        new_token.append(_t)
    return " ".join(new_token), _type


if not os.path.exists(data_dir_nell):
    raise ValueError("Please download the dataset first\n"
                     "wget https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz\n"
                     "tar -xzf nell.tar.gz")


def read_file(_file):
    """Load a raw task file (a JSON dict mapping each relation to a list of [head, relation, tail]
    triples) and flatten it into a single list of {"relation", "head", "tail"} records."""
    with open(_file, 'r') as f_reader:
        tmp = json.load(f_reader)
    flatten = list(chain(*[[{"relation": r, "head": h, "tail": t} for (h, r, t) in v] for v in tmp.values()]))
    return flatten


def read_vocab(_file):
    """Return the sorted entity ids (the keys of the ent2ids mapping)."""
    with open(_file) as f_reader:
        ent2ids = json.load(f_reader)
    return sorted(list(ent2ids.keys()))
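
# The ent2ids file is a JSON mapping from entity id to integer index, e.g. (hypothetical entries):
#   {"concept:athlete:tom_brady": 0, "concept:sportsteam:new_england_patriots": 1, ...}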


if __name__ == '__main__':
    # Process raw data: build the entity vocabulary as "<surface form>\t<type>" lines.
    vocab = read_vocab(f"{data_dir_nell}/ent2ids")
    # Keep only fully qualified ids of the form prefix:type:name, then clean them.
    vocab = [clean(i) for i in vocab if len(i.split(":")) > 2]
    # Drop entries where cleaning produced an empty surface form or type.
    vocab = ["\t".join(i) for i in vocab if len(i[0]) > 0 and len(i[1]) > 0]
    with open("data/nell.vocab.txt", 'w') as f:
        f.write("\n".join(vocab))
    vocab_term = [i.split('\t')[0] for i in vocab]

    # Convert each raw task file into a JSONL split with cleaned head/tail entities.
    for i, s in zip(['dev_tasks.json', 'test_tasks.json', 'train_tasks.json'], ['validation', 'test', 'train']):
        d = read_file(f"{data_dir_nell}/{i}")

        for _d in d:
            head = _d.pop("head")
            tail = _d.pop("tail")

            # Replace the raw ids with cleaned surface forms and record the entity types;
            # every cleaned entity must appear in the vocabulary built above.
            head_entity, head_type = clean(head)
            _d['head'] = head_entity
            _d['head_type'] = head_type
            assert head_entity in vocab_term, head_entity

            tail_entity, tail_type = clean(tail)
            _d['tail'] = tail_entity
            _d['tail_type'] = tail_type
            assert tail_entity in vocab_term, tail_entity

        with open(f"data/nell.{s}.jsonl", "w") as f:
            f.write("\n".join([json.dumps(_d) for _d in d]))

    # Write "nell_filter" variants that keep only triples whose head and tail types are
    # actual named-entity types (i.e. not listed in non_entity_types).
    for s in ["train", "validation", "test"]:
        with open(f"data/nell.{s}.jsonl") as f:
            data = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
            data = [i for i in data if i['head_type'] not in non_entity_types and i['tail_type'] not in non_entity_types]
        with open(f"data/nell_filter.{s}.jsonl", "w") as f:
            f.write('\n'.join([json.dumps(i) for i in data]))

    # Apply the same type filter to the vocabulary.
    with open("data/nell.vocab.txt") as f:
        vocab = [i.split("\t") for i in f.read().split('\n') if len(i) > 0]

    vocab = ["\t".join([a, b]) for a, b in vocab if b not in non_entity_types]
    with open("data/nell_filter.vocab.txt", 'w') as f:
        f.write('\n'.join(vocab))
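

# Optional: the generated splits can be loaded with the `datasets` library (assumed to be
# installed separately). This helper is only a sketch and is not called by the script.
def load_processed_splits(prefix="nell"):
    from datasets import load_dataset
    return load_dataset(
        "json",
        data_files={
            "train": f"data/{prefix}.train.jsonl",
            "validation": f"data/{prefix}.validation.jsonl",
            "test": f"data/{prefix}.test.jsonl",
        },
    )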