asahi417 committed · Commit e78f69c · Parent: 5743291
data/nell.test.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7a94b344bf4b4b721f9ca1b96813b497c1f87eac795e393e321d370c5cb1dd1e
-size 275455
+oid sha256:134f41e3db58601aba6d3f903c4e243b5b7e846d61eac37e36c68c130415981e
+size 510425
data/nell.train.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:59fcc5f746777fcda4058a0bf8cb6a55e03bde3b3ffdf6716a259a0fe2740374
-size 1071208
+oid sha256:4fd1b662dba39038af6e63cf343152430290528f8a55265194316cf252646712
+size 1997174
data/nell.validation.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dce6d2c53f2d4d9e7390033fcc787b5b405a06d13141e5affa5b3b43561657f5
-size 116970
+oid sha256:6a51f96df896ae77a46e0684f0b10c5d0b055083daec0b19647abb688c57ca39
+size 222265
data/wiki.test.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c7791ec3b056017a9c6bf0ca70714c695459840bd8bb86362c0c81ea46b7ab46
-size 938201
+oid sha256:e16fa1f1e7a2e0b6987ba4cde8944771b5b9bd3465f8b872b2cdc324eacd05f0
+size 2044049
data/wiki.train.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f50246ef47702b72d633e1807b79d4915e2e2054bdc6cb4c4c2aa9b9ab1b13f
-size 3726937
+oid sha256:56b7715a33bdcaced2fbfdb22784d172840c9f2c21fd168defd2e79cff93bd15
+size 8154793
data/wiki.validation.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c9097f4c1d08f56b050cddddbc15dba0c282412ec519bf994a768930db825316
-size 401621
+oid sha256:200139f320505ba16456a9b2c6e0b9eefd65c111fb990c9a06b6ea66d4d138bf
+size 883589
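
All six data files live in Git LFS, so the commit only rewrites the three-line pointer files: `version` names the pointer spec, `oid` is the SHA-256 of the actual payload, and `size` is its byte length (every split roughly doubles here, consistent with the extra entity/type fields added below). As a minimal sketch, assuming the real files have been fetched with `git lfs pull`, a pointer can be checked against its payload like this (path, oid, and size are copied from the new data/nell.test.jsonl pointer above):

import hashlib
import os

def verify_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a fetched LFS payload against its pointer's oid and size."""
    if os.path.getsize(path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(path, "rb") as f:
        # Hash in 1 MiB chunks so large payloads do not load into memory at once.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_oid

# Values taken from the new data/nell.test.jsonl pointer above.
print(verify_lfs_pointer(
    "data/nell.test.jsonl",
    "134f41e3db58601aba6d3f903c4e243b5b7e846d61eac37e36c68c130415981e",
    510425,
))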
fewshot_link_prediction.py CHANGED
@@ -78,7 +78,11 @@ class FewShotLinkPrediction(datasets.GeneratorBasedBuilder):
                 {
                     "relation": datasets.Value("string"),
                     "head": datasets.Value("string"),
+                    "head_entity": datasets.Value("string"),
+                    "head_type": datasets.Value("string"),
                     "tail": datasets.Value("string"),
+                    "tail_entity": datasets.Value("string"),
+                    "tail_type": datasets.Value("string"),
                 }
             ),
             supervised_keys=None,
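
This hunk widens the builder's `datasets.Features` schema so that, alongside the raw `head`/`tail` tokens, each example exposes a cleaned surface form and its NELL type. A minimal sketch of inspecting the new columns once the dataset is loaded; the repository id and the "nell" config name are illustrative assumptions, not taken from the diff:

from datasets import load_dataset

# Hypothetical dataset path and config name; substitute the real ones.
data = load_dataset("asahi417/fewshot_link_prediction", "nell", split="test")

print(list(data.features))
# After this commit the schema should contain:
# relation, head, head_entity, head_type, tail, tail_entity, tail_type
print(data[0]["head"], "->", (data[0]["head_entity"], data[0]["head_type"]))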
process.py CHANGED
@@ -11,12 +11,38 @@ tar -xzf wiki.tar.gz
 """
 import os
 import json
+import re
 from itertools import chain
 
 data_dir_nell = "NELL"
 data_dir_wiki = "Wiki"
 os.makedirs("data", exist_ok=True)
 
+short = ['alcs', "uk", "us", "usa"]
+
+
+def clean(token):
+    _, _type, token = token.split(":")
+    token = token.replace("_", " ")
+    token = token.replace("__", "")
+    token = re.sub(r"00\d\Z", "", token)
+    token = re.sub(r"\An(\d+)", r"\1", token)
+    if _type in ['animal', 'sea_creatures', 'bodypart', "arthropod", "insect", "crustacean", "invertebrate", "arachnid",
+                 "crustacean", "agriculturalproduct", "reptile", "mammal", "amphibian", "sport", "hobby", "vegetable",
+                 "beverage", "fruit", "grain", "coffeedrink", ]:
+        return token, _type
+    new_token = []
+    for _t in token.split(" "):
+        if len(_t) == 0:
+            continue
+        if _t in short:
+            _t = _t.upper()
+        else:
+            _t = _t.capitalize()
+        new_token.append(_t)
+    return " ".join(new_token), _type
+
+
 if not os.path.exists(data_dir_nell):
     raise ValueError("Please download the dataset first\n"
                      "wget https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz\n"
@@ -55,9 +81,23 @@ if __name__ == '__main__':
 
     for i, s in zip(['dev_tasks.json', 'test_tasks.json', 'train_tasks.json'], ['validation', 'test', 'train']):
         d = read_file(f"{data_dir_nell}/{i}")
+        for _d in d:
+            head_entity, head_type = clean(_d['head'])
+            _d['head_entity'] = head_entity
+            _d['head_type'] = head_type
+
+            tail_entity, tail_type = clean(_d['tail'])
+            _d['tail_entity'] = tail_entity
+            _d['tail_type'] = tail_type
+
        with open(f"data/nell.{s}.jsonl", "w") as f:
             f.write("\n".join([json.dumps(_d) for _d in d]))
 
         d = read_file(f"{data_dir_wiki}/{i}")
+        for _d in d:
+            _d['head_entity'] = ''
+            _d['head_type'] = ''
+            _d['tail_entity'] = ''
+            _d['tail_type'] = ''
         with open(f"data/wiki.{s}.jsonl", "w") as f:
             f.write("\n".join([json.dumps(_d) for _d in d]))
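
The new `clean()` helper takes NELL tokens of the form `concept:<type>:<surface>`, turns underscores into spaces, strips trailing numeric suffixes (`00\d`) and a leading `n` before digits, then either returns the surface form untouched (for the common-noun types in the pass-through list) or capitalizes it word by word, upper-casing the abbreviations in `short`. A small illustrative trace; the example tokens below are assumptions in the NELL naming style, not drawn from the dataset:

# Illustrative NELL-style tokens (hypothetical examples).
print(clean("concept:sportsteam:new_york_yankees"))
# ('New York Yankees', 'sportsteam')  -- each word capitalized

print(clean("concept:country:usa"))
# ('USA', 'country')                  -- members of `short` are upper-cased

print(clean("concept:animal:cats"))
# ('cats', 'animal')                  -- pass-through type keeps the raw surface form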