rajmohanc committed
Commit 32f370b · verified · 1 Parent(s): 4f58b4a

Upload turl_table_col_type.py


Upload TURL Table Column Type Annotation Dataset Loading Script

Files changed (1)
  1. turl_table_col_type.py +150 -0
turl_table_col_type.py ADDED
@@ -0,0 +1,150 @@
"""This dataset is for the task of column type annotation"""
import json

import datasets

_CITATION = """\
@article{deng2020turl,
  title={TURL: table understanding through representation learning},
  author={Deng, Xiang and Sun, Huan and Lees, Alyssa and Wu, You and Yu, Cong},
  journal={ACM SIGMOD Record},
  volume={51},
  number={1},
  pages={33--40},
  year={2022},
  publisher={ACM New York, NY, USA}
}"""

_DESCRIPTION = """\
Column Type Annotation (TURL)
"""

_HOMEPAGE = "https://github.com/sunlab-osu/TURL"

_GIT_ARCHIVE_URL = "https://huggingface.co/datasets/stanford-crfm/helm-scenarios/tree/main/turl-column-type-annotation"

_LICENSE = "CC BY 4.0"


class ColumnAnnotation(datasets.GeneratorBasedBuilder):
    """The TURL Column Annotation dataset"""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "page_title": datasets.Value("string"),
                    "section_title": datasets.Value("string"),
                    "table_caption": datasets.Value("string"),
                    "table": {
                        "header": datasets.features.Sequence(datasets.Value("string")),
                        "rows": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
                    },
                    "vocab": datasets.features.Sequence(datasets.Value("string")),
                    "colname": datasets.Value("string"),
                    "annotations": datasets.features.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    # To understand the JSON fields extracted here, refer to
    # https://github.com/sunlab-osu/TURL?tab=readme-ov-file#column-type-annotation
    def _load_table(self, table_data):
        # table_data[5] holds the column headers; table_data[6] holds the cells grouped
        # by column, where cell[0][0] is the cell's row index and cell[1][1] its text.
        headers = table_data[5]
        col_count = len(table_data[6])
        row_count = max(x[-1][0][0] for x in table_data[6])
        rows = []
        for i in range(row_count):
            row = []
            for j in range(col_count):
                try:
                    column_json = table_data[6][j]
                    # Scan this column's cells for one whose row index matches i.
                    k = -1
                    while k < row_count:
                        k = k + 1
                        if column_json[k][0][0] == i:
                            break
                    if k < row_count:
                        val = table_data[6][j][k][1][1]
                    else:
                        val = ""
                except Exception:
                    # Missing cells become empty strings.
                    val = ""
                row.append(val)
            if not all(value == "" for value in row):
                rows.append(row)
        return {
            "header": headers,
            "rows": rows,
        }

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""

        from huggingface_hub import hf_hub_download

        # Repository and sub-directory holding the raw TURL files
        repo_id = "stanford-crfm/helm-scenarios"
        sub_dir = "turl-column-type-annotation"

        # File names of the three splits and the column-type vocabulary
        train_file_name = "train.table_col_type.json"
        dev_file_name = "dev.table_col_type.json"
        test_file_name = "test.table_col_type.json"
        vocab_file_name = "type_vocab.txt"

        # Download the files from the Hugging Face Hub
        train_file_path = hf_hub_download(repo_id, filename=f"{sub_dir}/{train_file_name}", repo_type="dataset", revision="main")
        dev_file_path = hf_hub_download(repo_id, filename=f"{sub_dir}/{dev_file_name}", repo_type="dataset", revision="main")
        test_file_path = hf_hub_download(repo_id, filename=f"{sub_dir}/{test_file_name}", repo_type="dataset", revision="main")
        vocab_file_path = hf_hub_download(repo_id, filename=f"{sub_dir}/{vocab_file_name}", repo_type="dataset", revision="main")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"vocabpath": vocab_file_path, "filepath": train_file_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"vocabpath": vocab_file_path, "filepath": dev_file_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"vocabpath": vocab_file_path, "filepath": test_file_path},
            ),
        ]

    def _generate_examples(self, vocabpath, filepath):
        with open(filepath, "r", encoding="utf-8") as json_file:
            data = json.load(json_file)

        # The vocabulary file is tab-separated; the type label is in the second column.
        with open(vocabpath, "r", encoding="utf-8") as txt_file:
            lines = txt_file.readlines()
        vocab = [line.rstrip("\n").split("\t")[1] for line in lines]

        index = 0
        for idx, table_data in enumerate(data):
            # Load table contents using the _load_table method
            table_content = self._load_table(table_data)
            # Emit one example per column of the table.
            for col_idx in range(len(table_data[5])):
                yield index, {
                    "page_title": table_data[1],
                    "section_title": table_data[3],
                    "table_caption": table_data[4],
                    "table": table_content,
                    "vocab": vocab,
                    "colname": table_data[5][col_idx],
                    "annotations": table_data[7][col_idx],
                }
                index += 1
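For reference, a minimal usage sketch, not part of the commit: it assumes the script above is saved locally as turl_table_col_type.py and that the stanford-crfm/helm-scenarios files are downloadable; depending on the installed datasets version, trust_remote_code=True may be required to run a custom loading script (and the newest releases may not support script-based datasets at all).

# Minimal usage sketch (assumption: the script above is saved as
# ./turl_table_col_type.py and the helm-scenarios files are reachable).
from datasets import load_dataset

# trust_remote_code=True lets recent `datasets` releases execute a custom
# loading script such as this one.
dataset = load_dataset("./turl_table_col_type.py", trust_remote_code=True)

example = dataset["validation"][0]
print(example["page_title"], "|", example["colname"])
print(example["annotations"])  # gold type labels for this column
print(len(example["vocab"]))   # size of the full type vocabulary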