introvoyz041 committed on
Commit 2ec6b0e · verified · 1 Parent(s): 3fb346d

Migrated from GitHub

data/create_label_dict.py ADDED
@@ -0,0 +1,69 @@
1
+ import argparse
2
+ from pathlib import Path
3
+ from tqdm import tqdm
4
+ import json
5
+
6
+
7
+ MASK = '[mask]'
8
+ PAD = '[pad]'
9
+ UNK = '[unk]'
10
+ special_tokens = [MASK,PAD,UNK]
11
+
12
+ corpus = 'en_city' # en_city/en_disease/de_city/de_disease
13
+
14
+
15
+ def prep_data(corpus = 'en_city'):
16
+ original_path = 'data/wikisection/'
17
+ processed_path = 'processed_data/'
18
+
19
+ Path(processed_path).mkdir(parents=True, exist_ok=True)
20
+ if corpus == 'en_city':
21
+ original_path = original_path + 'wikisection_en_city_'
22
+ processed_path = processed_path + 'wikisection_en_city_'
23
+ elif corpus == 'en_disease':
24
+ original_path = original_path + 'wikisection_en_disease_'
25
+ processed_path = processed_path + 'wikisection_en_disease_'
26
+ elif corpus == 'de_city':
27
+ original_path = original_path + 'wikisection_de_city_'
28
+ processed_path = processed_path + 'wikisection_de_city_'
29
+ elif corpus == 'de_disease':
30
+ original_path = original_path + 'wikisection_de_disease_'
31
+ processed_path = processed_path + 'wikisection_de_disease_'
32
+ else:
33
+ return None
34
+ filenames = ['train.json','validation.json','test.json']
35
+
36
+ labels = set()
37
+
38
+ for fn in filenames:
39
+ print('Now processing: ' + str(fn))
40
+ with open(original_path + fn, 'r+') as f:
41
+ data = json.load(f)
42
+
43
+ processed_data = []
44
+ for data_idx in tqdm(range(0, len(data))):
45
+ data_item = data[data_idx]
46
+ t = data_item['text']
47
+ a = data_item['annotations']
48
+ sections = []
49
+ for ai in a:
50
+ labels.add(ai['sectionLabel'])
51
+
52
+ #create label dict
53
+ print('Now creating label dict')
54
+ labels = [PAD,MASK,UNK] + list(labels)
55
+ label_dict = dict([(v, i) for i, v in enumerate(labels)])
56
+ with open(processed_path + 'label_dict', 'w+') as f:
57
+ for key in label_dict:
58
+ f.write(str(key) + ' ' + str(label_dict[key]) + '\n')
59
+ print('Done!')
60
+
61
+ if __name__ == "__main__":
62
+ parser = argparse.ArgumentParser()
63
+ parser.add_argument('--corpus', help="en_city/en_disease/de_city/de_disease")
64
+
65
+ args = parser.parse_args()
66
+
67
+
68
+ prep_data(args.corpus)
69
+
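
A minimal usage sketch (not part of the commit): the script above writes one "<label> <index>" pair per line, so the resulting dictionary can be read back with a small helper. The path below is an assumption that follows the default naming scheme in prep_data.

# Sketch: run `python data/create_label_dict.py --corpus en_city` first,
# then read the label dictionary back. Path assumed from prep_data above.
def load_label_dict(path='processed_data/wikisection_en_city_label_dict'):
    label_dict = {}
    with open(path) as f:
        for line in f:
            key, value = line.strip().split()
            label_dict[key] = int(value)
    return label_dict

# e.g. {'[pad]': 0, '[mask]': 1, '[unk]': 2, ...} followed by the corpus-specific section labels
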
data/prepare_pairwise_sentence_embeddings.py ADDED
@@ -0,0 +1,236 @@
1
+ """
2
+ Create pairwise sentence embeddings.
3
+ """
4
+
5
+ import numpy as np
6
+ import json
7
+ import tensorflow as tf
8
+ from transformers import BertTokenizer, BertModel, AutoTokenizer, AutoModel, TFAutoModel, TFBertForNextSentencePrediction, BertForNextSentencePrediction
9
+ import nltk
10
+ from nltk.tokenize import sent_tokenize
11
+ import math
12
+ import argparse
13
+ import torch
14
+ from pathlib import Path
15
+ from tqdm import tqdm
16
+
17
+ nltk.download('punkt')
18
+
19
+
20
+ MASK = '[mask]'
21
+ PAD = '[pad]'
22
+ UNK = '[unk]'
23
+ special_tokens = [MASK,PAD,UNK]
24
+
25
+ corpus = 'en_city' # en_city/en_disease/de_city/de_disease
26
+
27
+ def assign_GPU_pt(Tokenizer_output):
28
+ output = {}
29
+ for i in Tokenizer_output:
30
+ output[i] = Tokenizer_output[i].to('cuda:0')
31
+
32
+ return output
33
+
34
+ def prep_data(model = 'BERT', corpus = 'en_city', max_sent = 512, test_d_size = None, is_tf = True,train=True,validation=True,test=True):
35
+ original_path = 'data/wikisection/'
36
+ processed_path = 'processed_data/' + model + '/'
37
+
38
+ Path(processed_path).mkdir(parents=True, exist_ok=True)
39
+ if corpus == 'en_city':
40
+ original_path = original_path + 'wikisection_en_city_'
41
+ processed_path = processed_path + 'wikisection_en_city_pairwise_'
42
+ elif corpus == 'en_disease':
43
+ original_path = original_path + 'wikisection_en_disease_'
44
+ processed_path = processed_path + 'wikisection_en_disease_pairwise_'
45
+ elif corpus == 'de_city':
46
+ original_path = original_path + 'wikisection_de_city_'
47
+ processed_path = processed_path + 'wikisection_de_city_pairwise_'
48
+ elif corpus == 'de_disease':
49
+ original_path = original_path + 'wikisection_de_disease_'
50
+ processed_path = processed_path + 'wikisection_de_disease_pairwise_'
51
+ else:
52
+ return None
53
+ filenames = []
54
+ if train:
55
+ filenames.append('train.json')
56
+ if validation:
57
+ filenames.append('validation.json')
58
+ if test:
59
+ filenames.append('test.json')
60
+
61
+ model_dict = {
62
+ 'tf': {
63
+ 'ALBERT': 'albert-base-v2',
64
+ 'BERT': 'bert-base-cased',
65
+ 'BERTL': 'bert-large-cased',
66
+ 'BLUEBERT': 'ttumyche/bluebert',
67
+ 'DEBERT': 'bert-base-german-cased',
68
+ 'XLNET': 'xlnet-base-cased'
69
+ },
70
+ 'pytorch': {
71
+ 'BIOCLINICALBERT': 'emilyalsentzer/Bio_ClinicalBERT',
72
+ 'BIOMED_ROBERTA': 'allenai/biomed_roberta_base',
73
+ }
74
+
75
+ }
76
+
77
+ assert (is_tf and model in model_dict['tf']) or (not is_tf and model in model_dict['pytorch']), 'No model found for ' + model + (is_tf and ' on tf' or ' on pytorch')
78
+ # if model == 'BERT':
79
+ # model_name = 'bert-base-cased'
80
+ # elif model == 'BERTL':
81
+ # model_name = 'bert-large-cased'
82
+ # elif model == 'BLUEBERT':
83
+ # model_name = 'ttumyche/bluebert'
84
+ # elif model == 'BIOCLINICALBERT':
85
+ # model_name = 'emilyalsentzer/Bio_ClinicalBERT'
86
+ # assert not is_tf, 'Not TF model found for ' + model_name
87
+ # elif model == 'DEBERT':
88
+ # model_name = 'bert-base-german-cased'
89
+ # elif model == 'XLNET':
90
+ # model_name = 'xlnet-base-cased'
91
+ # elif model == 'ALBERT':
92
+ # model_name = 'albert-base-v2'
93
+ # elif model == 'ROBERTA':
94
+ # model_name = 'roberta-base'
95
+ # elif model == 'BIOMED_ROBERTA':
96
+ # model_name = 'allenai/biomed_roberta_base'
97
+ # assert not is_tf, 'Not TF model found for ' + model_name
98
+
99
+ model_name = is_tf and model_dict['tf'][model] or model_dict['pytorch'][model]
100
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
101
+
102
+ if is_tf and (model=='BERT' or model=='DEBERT'):
103
+ Model = TFBertForNextSentencePrediction.from_pretrained(model_name)
104
+ elif is_tf:
105
+ Model = TFAutoModel.from_pretrained(model_name)
106
+ elif not is_tf and model=='BIOCLINICALBERT':
107
+ Model = BertForNextSentencePrediction.from_pretrained(model_name)
108
+ else:
109
+ Model = AutoModel.from_pretrained(model_name, return_dict=True)
110
+
111
+
112
+ labels = set()
113
+ sep_id = tokenizer.convert_tokens_to_ids(tokenizer.sep_token)
114
+
115
+ for fn in filenames:
116
+ print('Now processing: ' + str(fn))
117
+ with open(original_path + fn, 'r+') as f:
118
+ data = json.load(f)
119
+
120
+ # test
121
+ if test_d_size and test_d_size > 0:
122
+ data = data[:test_d_size]
123
+
124
+ processed_data = []
125
+ for data_idx in tqdm(range(0, len(data))):
126
+ data_item = data[data_idx]
127
+ t = data_item['text']
128
+ a = data_item['annotations']
129
+ sections = []
130
+ for ai in a:
131
+ si = {}
132
+ si['text'] = t[ai['begin']: ai['begin'] + ai['length']]
133
+ si['label'] = ai['sectionLabel']
134
+ sections.append(si)
135
+ labels.add(ai['sectionLabel'])
136
+ sents = []
137
+ current_section = ''
138
+
139
+ sentences = []
140
+ for si in sections:
141
+ st = sent_tokenize(si['text'])
142
+ sentences.extend(st)
143
+
144
+ sentences = sentences[:max_sent]
145
+
146
+ for i in range(len(sentences) - 1):
147
+ # create pairwise sentence embeddings
148
+ if is_tf:
149
+ encoding = tokenizer(sentences[i],sentences[i+1], return_tensors="tf",padding=True,truncation=True,max_length=512)
150
+ if model == 'BERT' or model == 'DEBERT':
151
+ # for BERT and DEBERT with Tensorflow
152
+ # we extract the CLS and SEP embeddings from the last hidden layer
153
+ output = Model(encoding['input_ids'], token_type_ids=encoding['token_type_ids'], output_hidden_states=True, return_dict=True)
154
+ sep_idx = np.where(encoding['input_ids'][0]==sep_id)[0][0]
155
+ sents.append({'cls': str(output['hidden_states'][-1][:,0,:].numpy().tolist()[0]), 'sep': str(output['hidden_states'][-1][:,sep_idx,:].numpy().tolist()[0])})
156
+ elif model == 'XLNET':
157
+ # for XLNET with Tensorflow
158
+ output = Model(encoding['input_ids'], token_type_ids=encoding['token_type_ids'], return_dict=True)
159
+ sep_idx = np.where(encoding['input_ids'][0]==sep_id)[0][0]
160
+ sents.append({'cls': str(output['last_hidden_state'][:,-1,:].numpy().tolist()[0]), 'sep': str(output['last_hidden_state'][:,sep_idx,:].numpy().tolist()[0])})
161
+ elif model == 'ALBERT':
162
+ # for ALBERT with Tensorflow
163
+ output = Model(encoding['input_ids'], token_type_ids=encoding['token_type_ids'], return_dict=True)
164
+ sep_idx = np.where(encoding['input_ids'][0]==sep_id)[0][0]
165
+ sents.append({'cls': str(output['last_hidden_state'][:,0,:].numpy().tolist()[0]), 'sep': str(output['last_hidden_state'][:,sep_idx,:].numpy().tolist()[0])})
166
+ elif model == 'ROBERTA':
167
+ # for ROBERTA with Tensorflow
168
+ output = Model(encoding['input_ids'], return_dict=True)
169
+ sep_idx = np.where(encoding['input_ids'][0]==sep_id)[0][0]
170
+ sents.append({'cls': str(output['last_hidden_state'][:,0,:].numpy().tolist()[0]), 'sep': str(output['last_hidden_state'][:,sep_idx,:].numpy().tolist()[0])})
171
+ elif not is_tf and model == 'BIOCLINICALBERT':
172
+ # for BIOCLINICALBERT with Pytorch
173
+ encoding = tokenizer(sentences[i],sentences[i+1], return_tensors="pt",padding=True,truncation=True,max_length=512)
174
+ sep_idx = np.where(encoding['input_ids'][0]==sep_id)[0][0]
175
+ if torch.cuda.is_available():
176
+ dev = "cuda:0"
177
+ inputs = assign_GPU_pt(encoding)
178
+ Model = Model.to(dev)
179
+ output = Model(**inputs, output_hidden_states = True, return_dict=True)
180
+ sents.append({'cls': str(output['hidden_states'][-1][:,0,:].cpu().detach().numpy().tolist()[0]), 'sep': str(output['hidden_states'][-1][:,sep_idx,:].cpu().detach().numpy().tolist()[0])})
181
+ else:
182
+ output = Model(**encoding, output_hidden_states = True, return_dict=True)
183
+ sents.append({'cls': str(output['hidden_states'][-1][:,0,:].detach().numpy().tolist()[0]), 'sep': str(output['hidden_states'][-1][:,sep_idx,:].detach().numpy().tolist()[0])})
184
+ elif not is_tf and model == 'BIOMED_ROBERTA':
185
+ # for BIOMED_ROBERTA with Pytorch
186
+ encoding = tokenizer(sentences[i],sentences[i+1], return_tensors="pt",padding=True,truncation=True,max_length=512)
187
+ sep_idx = np.where(encoding['input_ids'][0]==sep_id)[0][0]
188
+ if torch.cuda.is_available():
189
+ dev = "cuda:0"
190
+ inputs = assign_GPU_pt(encoding)
191
+ Model = Model.to(dev)
192
+ output = Model(**inputs)
193
+ sents.append({'cls': str(output.last_hidden_state[:,0,:].cpu().detach().numpy().tolist()[0]), 'sep': str(output.last_hidden_state[:,sep_idx,:].cpu().detach().numpy().tolist()[0])})
194
+ else:
195
+ output = Model(**encoding, output_hidden_states = True, return_dict=True)
196
+ sents.append({'cls': str(output.last_hidden_state[:,0,:].detach().numpy().tolist()[0]), 'sep': str(output.last_hidden_state[:,sep_idx,:].detach().numpy().tolist()[0])})
197
+ processed_data.append(sents)
198
+
199
+ with open(processed_path + fn, 'w+') as f:
200
+ json.dump(processed_data,f)
201
+
202
+ print('Done!')
203
+
204
+ if __name__ == "__main__":
205
+ parser = argparse.ArgumentParser()
206
+ parser.add_argument('--model', help="BERT/BERTL/BLUEBERT/BIOCLINICALBERT/DEBERT")
207
+ parser.add_argument('--corpus', help="en_city/en_disease/de_city/de_disease")
208
+ parser.add_argument('--max_sent', help="maximum number of sentences in a document", default='512')
209
+ parser.add_argument('--test_d_size', help="for testing only: number of data items used to create the output", default='None')
210
+ parser.add_argument('--tf', help="Tensorflow if true; Pytorch if false (T/F)", default='T')
211
+ parser.add_argument('--train',default='T')
212
+ parser.add_argument('--validation',default='T')
213
+ parser.add_argument('--test',default='T')
214
+
215
+ args = parser.parse_args()
216
+
217
+
218
+ assert args.max_sent.isdigit(), 'max_sent should be an integer'
219
+ max_sent = int(args.max_sent)
220
+
221
+ assert args.test_d_size.isdigit() or args.test_d_size == 'None', 'test_d_size should be an integer or None'
222
+
223
+ if args.test_d_size.isdigit():
224
+ test_d_size = int(args.test_d_size)
225
+ else:
226
+ test_d_size = None
227
+
228
+ assert args.tf == 'T' or args.tf == 'F', 'tf can be T or F'
229
+ if args.tf == 'T':
230
+ is_tf = True
231
+ elif args.tf == 'F':
232
+ is_tf = False
233
+
234
+
235
+ prep_data(args.model,args.corpus,max_sent,test_d_size,is_tf,args.train=='T',args.validation=='T',args.test=='T')
236
+
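
A note on the output format (a hedged sketch, not part of the commit): each document becomes a list of sentence-pair records whose 'cls' and 'sep' embeddings are stored as stringified Python lists, which the training scripts below recover with eval. One way to inspect a record, using a path assumed from the naming scheme above:

import json
from ast import literal_eval  # safer stand-in for the eval() used in the training scripts
import numpy as np

# Assumed path: BERT model, en_city corpus, test split.
with open('processed_data/BERT/wikisection_en_city_pairwise_test.json') as f:
    docs = json.load(f)

first_pair = docs[0][0]                              # {'cls': '[...]', 'sep': '[...]'}
cls_vec = np.array(literal_eval(first_pair['cls']))  # shape (hidden_size,)
sep_vec = np.array(literal_eval(first_pair['sep']))
print(cls_vec.shape, sep_vec.shape)
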
data/prepare_single_sentence_embeddings.py ADDED
@@ -0,0 +1,175 @@
1
+ import numpy as np
2
+ import json
3
+ import tensorflow as tf
4
+ from transformers import BertTokenizer, BertModel, AutoTokenizer, AutoModel, TFAutoModel
5
+ import nltk
6
+ from nltk.tokenize import sent_tokenize
7
+ import math
8
+ import argparse
9
+ import torch
10
+ from pathlib import Path
11
+ from tqdm import tqdm
12
+
13
+ nltk.download('punkt')
14
+
15
+
16
+ MASK = '[mask]'
17
+ PAD = '[pad]'
18
+ UNK = '[unk]'
19
+ special_tokens = [MASK,PAD,UNK]
20
+
21
+ corpus = 'en_city' # en_city/en_disease/de_city/de_disease
22
+
23
+ def assign_GPU_pt(Tokenizer_output):
24
+ output = {}
25
+ for i in Tokenizer_output:
26
+ output[i] = Tokenizer_output[i].to('cuda:0')
27
+ return output
28
+
29
+ def prep_data(model = 'BERT', corpus = 'en_city', max_sent = 512, test_d_size = None, is_tf = True,train=True,validation=True,test=True):
30
+ original_path = 'data/wikisection/'
31
+ processed_path = 'processed_data/' + model + '/'
32
+
33
+ Path(processed_path).mkdir(parents=True, exist_ok=True)
34
+ if corpus == 'en_city':
35
+ original_path = original_path + 'wikisection_en_city_'
36
+ processed_path = processed_path + 'wikisection_en_city_'
37
+ elif corpus == 'en_disease':
38
+ original_path = original_path + 'wikisection_en_disease_'
39
+ processed_path = processed_path + 'wikisection_en_disease_'
40
+ elif corpus == 'de_city':
41
+ original_path = original_path + 'wikisection_de_city_'
42
+ processed_path = processed_path + 'wikisection_de_city_'
43
+ elif corpus == 'de_disease':
44
+ original_path = original_path + 'wikisection_de_disease_'
45
+ processed_path = processed_path + 'wikisection_de_disease_'
46
+ else:
47
+ return None
48
+ filenames = []
49
+ if train:
50
+ filenames.append('train.json')
51
+ if validation:
52
+ filenames.append('validation.json')
53
+ if test:
54
+ filenames.append('test.json')
55
+
56
+ model_dict = {
57
+ 'tf': {
58
+ 'ALBERT': 'albert-base-v2',
59
+ 'BERT': 'bert-base-cased',
60
+ 'BERTL': 'bert-large-cased',
61
+ 'BLUEBERT': 'ttumyche/bluebert',
62
+ 'DEBERT': 'bert-base-german-cased',
63
+ 'XLNET': 'xlnet-base-cased'
64
+ },
65
+ 'pytorch': {
66
+ 'BIOCLINICALBERT': 'emilyalsentzer/Bio_ClinicalBERT',
67
+ 'BIOMED_ROBERTA': 'allenai/biomed_roberta_base',
68
+ }
69
+
70
+ }
71
+
72
+ assert (is_tf and model in model_dict['tf']) or (not is_tf and model in model_dict['pytorch']), 'No model found for ' + model + (is_tf and ' on tf' or ' on pytorch')
73
+ model_name = is_tf and model_dict['tf'][model] or model_dict['pytorch'][model]
74
+
75
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
76
+
77
+ if is_tf:
78
+ Model = TFAutoModel.from_pretrained(model_name)
79
+ else:
80
+ Model = AutoModel.from_pretrained(model_name, return_dict=True)
81
+
82
+
83
+ labels = set()
84
+
85
+ for fn in filenames:
86
+ print('Now processing: ' + str(fn))
87
+ with open(original_path + fn, 'r+') as f:
88
+ data = json.load(f)
89
+
90
+ # test
91
+ if test_d_size and test_d_size > 0:
92
+ data = data[:test_d_size]
93
+
94
+ processed_data = []
95
+ for data_idx in tqdm(range(0, len(data))):
96
+ data_item = data[data_idx]
97
+ t = data_item['text']
98
+ a = data_item['annotations']
99
+ sections = []
100
+ for ai in a:
101
+ si = {}
102
+ si['text'] = t[ai['begin']: ai['begin'] + ai['length']]
103
+ si['label'] = ai['sectionLabel']
104
+ sections.append(si)
105
+ labels.add(ai['sectionLabel'])
106
+ sents = []
107
+ current_section = ''
108
+ for si in sections:
109
+ st = sent_tokenize(si['text'])
110
+ for sti in st:
111
+ if len(sents) >= max_sent:
112
+ # truncate
113
+ break
114
+ begin = 0 if (current_section == si['label']) else 1
115
+ current_section = si['label']
116
+ if is_tf:
117
+ if model == 'XLNET':
118
+ inputs = tokenizer(sti, return_tensors="tf",padding=True,truncation=True,max_length=512)
119
+ outputs = Model(inputs)
120
+ # each sentence dictionary has 'cls', 'label' and 'begin' keys; the values are the sentence embedding, the topic label and the begin-of-section indicator
121
+ sents.append({'cls': str(outputs[0][:,-1,:].numpy().tolist()[0]), 'label': si['label'], 'begin': begin})
122
+ else:
123
+ inputs = tokenizer(sti, return_tensors="tf",padding=True,truncation=True,max_length=512)
124
+ outputs = Model(inputs)
125
+ sents.append({'cls': str(outputs[0][:,0,:].numpy().tolist()[0]), 'label': si['label'], 'begin': begin})
126
+ else:
127
+ if torch.cuda.is_available():
128
+ dev = "cuda:0"
129
+ inputs = assign_GPU_pt(tokenizer(sti, return_tensors="pt",padding=True,truncation=True,max_length=512))
130
+ Model = Model.to(dev)
131
+ outputs = Model(**inputs)
132
+ sents.append({'cls': str(outputs.last_hidden_state[:,0,:].cpu().detach().numpy().tolist()[0]), 'label': si['label'], 'begin': begin})
133
+ else:
134
+ dev = "cpu"
135
+ inputs = tokenizer(sti, return_tensors="pt",padding=True,truncation=True,max_length=512)
136
+ outputs = Model(**inputs)
137
+ sents.append({'cls': str(outputs.last_hidden_state[:,0,:].detach().numpy().tolist()[0]), 'label': si['label'], 'begin': begin})
138
+ doc_len = len(sents)
139
+ #for i in range(0,(max_sent-len(sents))):
140
+ # padding
141
+ #sents.append({'cls': str(np.zeros(768).tolist()), 'label': '[pad]', 'begin': '[pad]'})
142
+ processed_data.append({'sent':sents, 'doc_len': doc_len})
143
+
144
+ with open(processed_path + fn, 'w+') as f:
145
+ json.dump(processed_data,f)
146
+
147
+ print('Done!')
148
+
149
+ if __name__ == "__main__":
150
+ parser = argparse.ArgumentParser()
151
+ parser.add_argument('--model', help="BERT/BERTL/BLUEBERT/BIOCLINICALBERT/DEBERT")
152
+ parser.add_argument('--corpus', help="en_city/en_disease/de_city/de_disease")
153
+ parser.add_argument('--max_sent', help="maximum number of sentences in a document", default='512')
154
+ parser.add_argument('--test_d_size', help="for testing only: number of data items used to create the output", default='None')
155
+ parser.add_argument('--tf', help="Tensorflow if true; Pytorch if false (T/F)", default='T')
156
+ parser.add_argument('--train',default='T')
157
+ parser.add_argument('--validation',default='T')
158
+ parser.add_argument('--test',default='T')
159
+
160
+ args = parser.parse_args()
161
+
162
+
163
+ assert args.max_sent.isdigit(), 'max_sent should be an integer'
164
+ max_sent = int(args.max_sent)
165
+
166
+ assert args.test_d_size.isdigit() or args.test_d_size == 'None', 'test_d_size should be an integer or None'
167
+
168
+ if args.test_d_size.isdigit():
169
+ test_d_size = int(args.test_d_size)
170
+ else:
171
+ test_d_size = None
172
+
173
+
174
+ prep_data(args.model,args.corpus,max_sent,test_d_size,args.tf.upper() == 'T',args.train.upper()=='T',args.validation.upper()=='T',args.test.upper()=='T')
175
+
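
For reference, a small hedged sketch of how the single-sentence output can be inspected: each document is stored as {'sent': [...], 'doc_len': n}, and every sentence record carries a stringified 'cls' embedding, its section label and a begin flag (1 marks the first sentence of a section). The path below is assumed from the naming scheme above.

import json
from ast import literal_eval

# Assumed path: BERT model, en_city corpus, test split.
with open('processed_data/BERT/wikisection_en_city_test.json') as f:
    docs = json.load(f)

doc = docs[0]
print(doc['doc_len'], 'sentences')
for sent in doc['sent'][:3]:
    vec = literal_eval(sent['cls'])   # stringified embedding back to a Python list
    print(len(vec), sent['label'], sent['begin'])
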
data/transformer_squared_pairwise_only.py ADDED
@@ -0,0 +1,526 @@
1
+ import tensorflow as tf
2
+ import numpy as np
3
+ import tensorflow.keras.backend as K
4
+ from tensorflow.keras import activations
5
+ import random
6
+ from itertools import product, combinations
7
+ import math
8
+ import json
9
+ import argparse
10
+ from sklearn.utils import shuffle
11
+ import glob
12
+ import os
13
+ import segeval
14
+ from tqdm import tqdm
15
+
16
+ TRAIN_DATA = []
17
+ DEV_DATA = []
18
+ TEST_DATA = []
19
+ PAD = '[pad]'
20
+
21
+ #------------------------model-------------------
22
+ def get_angles(pos, i, d_model):
23
+ angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model))
24
+ return pos * angle_rates
25
+
26
+ def positional_encoding(position, d_model):
27
+ angle_rads = get_angles(np.arange(position)[:, np.newaxis],
28
+ np.arange(d_model)[np.newaxis, :],
29
+ d_model)
30
+ # apply sin to even indices in the array; 2i
31
+ angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
32
+
33
+ # apply cos to odd indices in the array; 2i+1
34
+ angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
35
+
36
+ pos_encoding = angle_rads[np.newaxis, ...]
37
+
38
+ return tf.cast(pos_encoding, dtype=tf.float32)
39
+
40
+ def scaled_dot_product_attention(q, k, v, mask):
41
+ """Calculate the attention weights.
42
+ q, k, v must have matching leading dimensions.
43
+ k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
44
+ The mask has different shapes depending on its type(padding or look ahead)
45
+ but it must be broadcastable for addition.
46
+
47
+ Args:
48
+ q: query shape == (..., seq_len_q, depth)
49
+ k: key shape == (..., seq_len_k, depth)
50
+ v: value shape == (..., seq_len_v, depth_v)
51
+ mask: Float tensor with shape broadcastable
52
+ to (..., seq_len_q, seq_len_k). Defaults to None.
53
+
54
+ Returns:
55
+ output, attention_weights
56
+ """
57
+
58
+ matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
59
+
60
+ # scale matmul_qk
61
+ dk = tf.cast(tf.shape(k)[-1], tf.float32)
62
+ scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
63
+
64
+ # add the mask to the scaled tensor.
65
+ if mask is not None:
66
+ scaled_attention_logits += (mask * -1e9)
67
+
68
+ # softmax is normalized on the last axis (seq_len_k) so that the scores
69
+ # add up to 1.
70
+ attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
71
+
72
+ output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
73
+
74
+ return output, attention_weights
75
+
76
+ class MultiHeadAttention(tf.keras.layers.Layer):
77
+ def __init__(self, d_model, num_heads):
78
+ super(MultiHeadAttention, self).__init__()
79
+ self.num_heads = num_heads
80
+ self.d_model = d_model
81
+
82
+ assert d_model % self.num_heads == 0
83
+
84
+ self.depth = d_model // self.num_heads
85
+
86
+ self.wq = tf.keras.layers.Dense(d_model)
87
+ self.wk = tf.keras.layers.Dense(d_model)
88
+ self.wv = tf.keras.layers.Dense(d_model)
89
+
90
+ self.dense = tf.keras.layers.Dense(d_model)
91
+
92
+ def split_heads(self, x, batch_size):
93
+ """Split the last dimension into (num_heads, depth).
94
+ Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
95
+ """
96
+ x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
97
+ return tf.transpose(x, perm=[0, 2, 1, 3])
98
+
99
+ def call(self, v, k, q, mask):
100
+ batch_size = tf.shape(q)[0]
101
+
102
+ q = self.wq(q) # (batch_size, seq_len, d_model)
103
+ k = self.wk(k) # (batch_size, seq_len, d_model)
104
+ v = self.wv(v) # (batch_size, seq_len, d_model)
105
+
106
+ q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)
107
+ k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)
108
+ v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)
109
+
110
+ # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
111
+ # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
112
+ scaled_attention, attention_weights = scaled_dot_product_attention(
113
+ q, k, v, mask)
114
+
115
+ scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth)
116
+
117
+ concat_attention = tf.reshape(scaled_attention,
118
+ (batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)
119
+
120
+ output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)
121
+
122
+ return output, attention_weights
123
+
124
+ def point_wise_feed_forward_network(d_model, dff):
125
+ return tf.keras.Sequential([
126
+ tf.keras.layers.Dense(dff, activation='relu'), # (batch_size, seq_len, dff)
127
+ tf.keras.layers.Dense(d_model) # (batch_size, seq_len, d_model)
128
+ ])
129
+
130
+ class EncoderLayer(tf.keras.layers.Layer):
131
+ def __init__(self, d_model, num_heads, dff, rate=0.1):
132
+ super(EncoderLayer, self).__init__()
133
+
134
+ self.mha = MultiHeadAttention(d_model, num_heads)
135
+ self.ffn = point_wise_feed_forward_network(d_model, dff)
136
+
137
+ self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
138
+ self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
139
+
140
+ self.dropout1 = tf.keras.layers.Dropout(rate)
141
+ self.dropout2 = tf.keras.layers.Dropout(rate)
142
+
143
+ def call(self, x, training, mask):
144
+
145
+ attn_output, _ = self.mha(x, x, x, mask) # (batch_size, input_seq_len, d_model)
146
+ attn_output = self.dropout1(attn_output, training=training)
147
+ out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model)
148
+
149
+ ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)
150
+ ffn_output = self.dropout2(ffn_output, training=training)
151
+ out2 = self.layernorm2(out1 + ffn_output) # (batch_size, input_seq_len, d_model)
152
+
153
+ return out2
154
+
155
+ class Encoder(tf.keras.layers.Layer):
156
+ def __init__(self, num_layers, d_model, num_heads, dff,
157
+ maximum_position_encoding, rate=0.1):
158
+ super(Encoder, self).__init__()
159
+
160
+ self.d_model = d_model
161
+ self.num_layers = num_layers
162
+
163
+ self.embedding = tf.keras.layers.InputLayer(input_shape=(maximum_position_encoding,self.d_model))
164
+ self.pos_encoding = positional_encoding(maximum_position_encoding,
165
+ self.d_model)
166
+
167
+
168
+ self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate)
169
+ for _ in range(num_layers)]
170
+
171
+ self.dropout = tf.keras.layers.Dropout(rate)
172
+
173
+ def call(self, x, training, mask):
174
+
175
+ seq_len = tf.shape(x)[1]
176
+
177
+ # adding embedding and position encoding.
178
+ x = self.embedding(x) # (batch_size, input_seq_len, d_model)
179
+ x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
180
+ x += self.pos_encoding[:, :seq_len, :]
181
+
182
+ x = self.dropout(x, training=training)
183
+
184
+ for i in range(self.num_layers):
185
+ x = self.enc_layers[i](x, training, mask)
186
+
187
+ return x # (batch_size, input_seq_len, d_model)
188
+
189
+
190
+ class Joint_Tobert_TextSeg(tf.keras.Model):
191
+ def __init__(self, seq_len=150, embed_dim=768, begin_output_dim=4, label_output_dim=4, drop_rate=0.1, attn_head=8, encoder_layers=2, encoder_dff=30):
192
+ super(Joint_Tobert_TextSeg, self).__init__()
193
+ self.encoder_block = Encoder(num_layers=encoder_layers, d_model=embed_dim, num_heads=attn_head,
194
+ dff=encoder_dff,
195
+ maximum_position_encoding=seq_len)
196
+ self.masking1 = tf.keras.layers.Masking()
197
+ self.masking2 = tf.keras.layers.Masking()
198
+ self.dropout1 = tf.keras.layers.Dropout(drop_rate)
199
+ self.dense1 = tf.keras.layers.Dense(begin_output_dim, activation="softmax", name="begin_output")
200
+ self.dense2 = tf.keras.layers.Dense(label_output_dim, activation="softmax", name="label_output")
201
+
202
+ def call(self, inputs, training=False):
203
+ x = self.encoder_block(inputs[0], training=training, mask=inputs[1])
204
+ x1 = self.masking1(tf.math.multiply(x, inputs[2]))
205
+ x1 = self.dropout1(x1,training=training)
206
+ x2 = self.masking2(tf.math.multiply(x, inputs[3]))
207
+ x2 = self.dropout1(x2,training=training)
208
+ return [self.dense1(x1),self.dense2(x2)]
209
+
210
+
211
+ #--------------------data--------------------------
212
+
213
+ def get_begin_dict():
214
+ begin_dict = {
215
+ PAD: 0,
216
+ 0: 1,
217
+ 1: 2
218
+ }
219
+ return begin_dict
220
+
221
+ def get_label_dict(model = 'BERT', corpus='en_city'):
222
+ label_dict = {}
223
+ with open(os.path.join('processed_data', 'wikisection_' + corpus + '_label_dict'), 'r+') as f:
224
+ for l in f.readlines():
225
+ key, value = l.strip().split()
226
+ label_dict[key] = int(value)
227
+ return label_dict
228
+
229
+ def load_train_data(model = 'BERT', corpus='en_city'):
230
+ print('loading train data',end='...')
231
+ with open(os.path.join('processed_data', model, 'wikisection_' + corpus + '_train.json'), 'r+') as f:
232
+ original = json.load(f)
233
+ with open(os.path.join('processed_data', model, 'wikisection_' + corpus + '_pairwise_train.json'), 'r+') as f:
234
+ pairwise = json.load(f)
235
+
236
+ assert len(original) == len(pairwise), 'data sizes do not match'
237
+ train_data = []
238
+ for idx in tqdm(range(len(original))):
239
+ data = original[idx]
240
+ data['pairwise'] = pairwise[idx]
241
+ train_data.append(data)
242
+ TRAIN_DATA.extend(train_data)
243
+
244
+ print('Done!')
245
+
246
+
247
+ def load_dev_data(model = 'BERT', corpus='en_city'):
248
+ print('loading dev data',end='...')
249
+ with open(os.path.join('processed_data', model, 'wikisection_' + corpus + '_validation.json'), 'r+') as f:
250
+ original = json.load(f)
251
+ with open(os.path.join('processed_data', model, 'wikisection_' + corpus + '_pairwise_validation.json'), 'r+') as f:
252
+ pairwise = json.load(f)
253
+
254
+ assert len(original) == len(pairwise), 'data sizes do not match'
255
+ dev_data = []
256
+ for idx in tqdm(range(len(original))):
257
+ data = original[idx]
258
+ data['pairwise'] = pairwise[idx]
259
+ dev_data.append(data)
260
+
261
+ DEV_DATA.extend(dev_data)
262
+ print('Done!')
263
+
264
+ def load_test_data(model = 'BERT', corpus='en_city'):
265
+ print('loading test data',end='...')
266
+ with open(os.path.join('processed_data', model, 'wikisection_' + corpus + '_test.json'), 'r+') as f:
267
+ original = json.load(f)
268
+ with open(os.path.join('processed_data', model, 'wikisection_' + corpus + '_pairwise_test.json'), 'r+') as f:
269
+ pairwise = json.load(f)
270
+
271
+ assert len(original) == len(pairwise), 'data sizes do not match'
272
+ test_data = []
273
+ for idx in tqdm(range(len(original))):
274
+ data = original[idx]
275
+ data['pairwise'] = pairwise[idx]
276
+ test_data.append(data)
277
+
278
+ TEST_DATA.extend(test_data)
279
+ print('Done!')
280
+
281
+ def get_test_true_begin(seq_len = 150, flattened = True):
282
+ batch_output = []
283
+ begin_dict = get_begin_dict()
284
+ if flattened:
285
+ for doc in TEST_DATA:
286
+ for sent in doc['sent'][:seq_len]:
287
+ batch_output.append(begin_dict[sent['begin']])
288
+ for i in range(max(seq_len-len(doc['sent']), 0)):
289
+ batch_output.append(begin_dict[PAD])
290
+ else:
291
+ for doc in TEST_DATA:
292
+ doc_output = []
293
+ for sent in doc['sent'][:seq_len]:
294
+ doc_output.append(begin_dict[sent['begin']])
295
+ for i in range(max(seq_len-len(doc['sent']), 0)):
296
+ doc_output.append(begin_dict[PAD])
297
+ batch_output.append(doc_output)
298
+ return batch_output
299
+
300
+ def get_test_true_label(model='BERT', seq_len = 150, flattened = True, corpus = 'en_city'):
301
+ batch_output = []
302
+ label_dict = get_label_dict(model, corpus)
303
+ if flattened:
304
+ for doc in TEST_DATA:
305
+ for sent in doc['sent'][:seq_len]:
306
+ batch_output.append(label_dict[sent['label']])
307
+ for i in range(max(seq_len-len(doc['sent']), 0)):
308
+ batch_output.append(label_dict[PAD])
309
+ else:
310
+ for doc in TEST_DATA:
311
+ doc_output = []
312
+ for sent in doc['sent'][:seq_len]:
313
+ doc_output.append(label_dict[sent['label']])
314
+ for i in range(max(seq_len-len(doc['sent']), 0)):
315
+ doc_output.append(label_dict[PAD])
316
+ batch_output.append(doc_output)
317
+ return batch_output
318
+
319
+ def gen_batch_input(data, model = 'BERT', seq_len=72, embed_dim=768, mask_b_rate = 1, mask_i_rate = 0.5, corpus = 'en_city'):
320
+ """
321
+ :param mask_b_rate: probability of begin segment remains
322
+ :param mask_i_rate: probability of inner segment remains
323
+ """
324
+ begin_dict = get_begin_dict()
325
+ label_dict = get_label_dict(model, corpus)
326
+ batch_input_cls = []
327
+ batch_input_eval_mask = []
328
+ batch_input_transformer_mask = []
329
+ batch_segment_output = []
330
+ batch_label_output = []
331
+ for doc in data:
332
+ doc_chunk_len = len(doc['sent'])
333
+ input_cls = [[0.0001] * embed_dim] + [eval(sent['cls']) for sent in doc['pairwise']] + [[0.] * embed_dim] * max(seq_len - doc_chunk_len, 0)
334
+ input_cls = input_cls[:seq_len]
335
+ input_eval_mask = []
336
+ input_transformer_mask = []
337
+ for sent in doc['sent']:
338
+ r = np.random.random()
339
+ if (sent['begin'] == 0 and r > mask_i_rate) or (sent['begin'] == 1 and r > mask_b_rate):
340
+ # replace output to 0
341
+ input_eval_mask.append(1.)
342
+ else:
343
+ input_eval_mask.append(0.)
344
+ input_transformer_mask.append(0.)
345
+ input_eval_mask = input_eval_mask + [1.] * max(seq_len - doc_chunk_len, 0)
346
+ input_eval_mask = input_eval_mask[:seq_len]
347
+ input_transformer_mask = input_transformer_mask + [1.] * max(seq_len - doc_chunk_len, 0)
348
+ input_transformer_mask = input_transformer_mask[:seq_len]
349
+ segment_output = [ [begin_dict[sent['begin']]] for sent in doc['sent'] ]
350
+ segment_output = segment_output + [[begin_dict[PAD]]] * max(seq_len - doc_chunk_len, 0)
351
+ segment_output = segment_output[:seq_len]
352
+ label_output = [ [label_dict[sent['label']]] for sent in doc['sent'] ]
353
+ label_output = label_output + [[label_dict[PAD]]] * max(seq_len - doc_chunk_len, 0)
354
+ label_output = label_output[:seq_len]
355
+ batch_input_cls.append(input_cls)
356
+ batch_input_eval_mask.append(input_eval_mask)
357
+ batch_input_transformer_mask.append(input_transformer_mask)
358
+ batch_segment_output.append(segment_output)
359
+ batch_label_output.append(label_output)
360
+
361
+ return [np.asarray(batch_input_cls),np.asarray(batch_input_transformer_mask)[:,np.newaxis,np.newaxis,:],np.not_equal(np.expand_dims(batch_input_eval_mask,axis=-1),1).astype(float),np.not_equal(np.expand_dims(batch_input_transformer_mask,axis=-1),1).astype(float)], [np.asarray(batch_segment_output),np.asarray(batch_label_output)]
362
+
363
+ def train_generator(batch_size, model = 'BERT', seq_len=72, embed_dim=768, mask_b_rate = 1, mask_i_rate = 0.5, corpus = 'en_city'):
364
+ _train_data = TRAIN_DATA
365
+ while True:
366
+ _train_data = shuffle(_train_data)
367
+ for i in range(0,len(_train_data),batch_size):
368
+ yield gen_batch_input(_train_data[i:i+batch_size], model=model, seq_len=seq_len,embed_dim=embed_dim, mask_b_rate = mask_b_rate, mask_i_rate = mask_i_rate, corpus = corpus)
369
+
370
+ def dev_generator(batch_size, model = 'BERT', seq_len=72, embed_dim=768, mask_b_rate = 1, mask_i_rate = 0.5, corpus = 'en_city'):
371
+ while True:
372
+ for i in range(0,len(DEV_DATA),batch_size):
373
+ yield gen_batch_input(DEV_DATA[i:i+batch_size], model=model,seq_len=seq_len,embed_dim=embed_dim, mask_b_rate = mask_b_rate, mask_i_rate = mask_i_rate, corpus = corpus)
374
+
375
+ def test_generator(batch_size, model = 'BERT', seq_len=72, embed_dim=768, corpus = 'en_city'):
376
+ while True:
377
+ for i in range(0,len(TEST_DATA),batch_size):
378
+ #taking every sentence into account
379
+ yield gen_batch_input(TEST_DATA[i:i+batch_size], model=model,seq_len=seq_len,embed_dim=embed_dim, mask_b_rate = 1, mask_i_rate = 1, corpus = corpus)
380
+
381
+
382
+
383
+
384
+ if __name__ == "__main__":
385
+
386
+ parser = argparse.ArgumentParser()
387
+ parser.add_argument('--model', help="BERT/BLUEBERT", default='BERT')
388
+ parser.add_argument('--corpus', help="corpus:en_city/en_disease/de_city/de_disease", default='en_city')
389
+ parser.add_argument('--seq_len', help="maximum number of document chunks", default='150')
390
+ parser.add_argument('--embed_dim', help="input chunk cls dimension", default='768')
391
+ parser.add_argument('--attn_head', help="number of attention heads", default='12')
392
+ parser.add_argument('--encoder_layers', help="number of encoder layers", default='10')
393
+ parser.add_argument('--encoder_dff', help="dimension of feed forward pointwise network", default='768')
394
+ parser.add_argument('--epoch', help="number of epochs in training", default='100')
395
+ parser.add_argument('--batch_size', help="batch size", default='32')
396
+ parser.add_argument('--mask_b_rate', help="probability that a segment-beginning sentence is kept", default='1')
397
+ parser.add_argument('--mask_i_rate', help="probability that an inner sentence is kept", default='0.5')
398
+ parser.add_argument('--train_step', help="number of training steps for each epoch", default='100')
399
+ parser.add_argument('--val_step', help="number of validation steps for each epoch", default='100')
400
+ parser.add_argument('--lr', help="learning rate", default='0.0001')
401
+ parser.add_argument('--patience', help="early stopping patience", default='10')
402
+ parser.add_argument('--output_model', help="output model name", default='model_test')
403
+ parser.add_argument('--output_result', help="output model name prefix", default='result_test')
404
+
405
+ args = parser.parse_args()
406
+
407
+ assert args.corpus in ['en_city','en_disease','de_city','de_disease'], "Invalid corpus"
408
+ assert args.seq_len.isdigit(),'seq_len must be integer'
409
+ assert args.embed_dim.isdigit(),'embed_dim must be integer'
410
+ assert args.attn_head.isdigit(),'attn_head must be integer'
411
+ assert args.epoch.isdigit(),'epoch must be integer'
412
+ assert args.batch_size.isdigit(),'batch_size must be integer'
413
+ assert args.train_step.isdigit(),'train_step must be integer'
414
+ assert args.val_step.isdigit(),'val_step must be integer'
415
+ assert args.encoder_layers.isdigit(),'encoder_layers must be integer'
416
+ assert args.encoder_dff.isdigit(),'encoder_dff must be integer'
417
+ assert args.patience.isdigit(),'patience must be integer'
418
+ try:
419
+ float(args.lr)
420
+ float(args.mask_b_rate)
421
+ float(args.mask_i_rate)
422
+ except ValueError:
423
+ raise ValueError('lr/mask_b_rate/mask_i_rate must be float')
424
+
425
+ print(args)
426
+
427
+ load_train_data(model = args.model, corpus=args.corpus)
428
+ load_dev_data(model = args.model, corpus=args.corpus)
429
+ optimizer = tf.keras.optimizers.Adam(learning_rate=float(args.lr))
430
+
431
+ model = Joint_Tobert_TextSeg(seq_len=int(args.seq_len), embed_dim=int(args.embed_dim), begin_output_dim=len(get_begin_dict()), label_output_dim=len(get_label_dict(model = args.model, corpus=args.corpus)), attn_head=int(args.attn_head), encoder_layers = int(args.encoder_layers),encoder_dff = int(args.encoder_dff))
432
+
433
+ loss = tf.keras.losses.SparseCategoricalCrossentropy()
434
+ print('Compiling model')
435
+ model.compile(loss=loss, optimizer=optimizer,metrics = tf.keras.metrics.SparseCategoricalAccuracy())
436
+ print('Training model')
437
+ model.fit(train_generator(batch_size = int(args.batch_size), model=args.model, seq_len=int(args.seq_len), embed_dim=int(args.embed_dim), mask_b_rate = float(args.mask_b_rate), mask_i_rate = float(args.mask_i_rate), corpus = args.corpus),
438
+ steps_per_epoch=int(args.train_step),
439
+ epochs=int(args.epoch),
440
+ validation_data=dev_generator(batch_size = int(args.batch_size), model=args.model, seq_len=int(args.seq_len), embed_dim=int(args.embed_dim), mask_b_rate = float(args.mask_b_rate), mask_i_rate = float(args.mask_i_rate), corpus = args.corpus),
441
+ validation_steps=int(args.val_step),
442
+ callbacks=[
443
+ tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=int(args.patience))
444
+ ],)
445
+ # print('Saving model')
446
+ # model.save(args.output_model)
447
+
448
+ print('Evaluating model')
449
+
450
+ del train_generator
451
+ del dev_generator
452
+ del TRAIN_DATA
453
+ del DEV_DATA
454
+ load_test_data(model = args.model, corpus=args.corpus)
455
+ true_begin = get_test_true_begin(seq_len=int(args.seq_len))
456
+ true_label = get_test_true_label(model=args.model, seq_len=int(args.seq_len), corpus = args.corpus)
457
+ predictions = model.predict(test_generator(batch_size = int(args.batch_size), model=args.model, seq_len=int(args.seq_len), embed_dim=int(args.embed_dim), corpus = args.corpus),steps=math.ceil(len(TEST_DATA)/int(args.batch_size)))
458
+ begin_predictions_prob = predictions[0]
459
+ begin_predictions = np.argmax(begin_predictions_prob,axis=-1)
460
+ label_predictions = predictions[1]
461
+ #print(begin_predictions.shape)
462
+ label_predictions
463
+ #pred_prob_flattened = []
464
+
465
+ # for j in predictions.tolist():
466
+ # for i in j:
467
+ # pred_prob_flattened.append(i)
468
+ begin_pred_flattened = []
469
+ for i in begin_predictions:
470
+ begin_pred_flattened.extend(i)
471
+
472
+ label_pred_flattened = []
473
+ for i in label_predictions:
474
+ label_pred_flattened.extend(np.argmax(i,axis=-1))
475
+ # pred = np.argmax(np.asarray(predictions),axis= -1)
476
+
477
+ true_pred_begin_pairs = list(zip(get_test_true_begin(seq_len=int(args.seq_len),flattened=False),begin_predictions))
478
+
479
+ pk_scores = []
480
+ for pair in true_pred_begin_pairs:
481
+ # document
482
+ true_seg = []
483
+ pred_seg = []
484
+ true_count = 0
485
+ pred_count = 0
486
+ for j in range(len(pair[0])):
487
+ if pair[0][j] == 0:
488
+ break
489
+ if pair[0][j] == 2:
490
+ if true_count != 0:
491
+ true_seg.append(true_count)
492
+ true_count = 1
493
+ else:
494
+ true_count += 1
495
+ if pair[1][j] == 2:
496
+ if pred_count != 0:
497
+ pred_seg.append(pred_count)
498
+ pred_count = 1
499
+ else:
500
+ pred_count += 1
501
+ true_seg.append(true_count)
502
+ pred_seg.append(pred_count)
503
+ pk_scores.append(segeval.pk(pred_seg,true_seg,window_size=10))
504
+
505
+ # print('Outputting result')
506
+ # with open('results/' + 'tobert_masked_crf_' + args.output_result + '_prob', 'w+') as f:
507
+ # for item in list(zip(true_targets,pred_prob_flattened)):
508
+ # f.write(str(item[0]) + ',' + str(item[1]) + '\n')
509
+ # with open('results/'+ 'tobert_masked_joint_' + args.output_result + '_begin_record', 'w+') as f:
510
+ # for item in list(zip(true_begin,begin_pred_flattened)):
511
+ # f.write(str(item[0]) + ',' + str(item[1]) + '\n')
512
+
513
+ # with open('results/'+ 'tobert_masked_joint_' + args.output_result + '_label_record', 'w+') as f:
514
+ # for item in list(zip(true_label,label_pred_flattened)):
515
+ # f.write(str(item[0]) + ',' + str(item[1]) + '\n')
516
+
517
+ print('outputting metrics')
518
+ print('pk: ' + str(sum(pk_scores)/len(pk_scores)))
519
+ with open('results/' + 'tobert_masked_joint_' + args.output_result + '_metrics.txt', 'w+') as f:
520
+ # eval_metrics = model.evaluate(test_generator(batch_size = int(args.batch_size), seq_len=int(args.seq_len), embed_dim=int(args.embed_dim)),steps=math.ceil(len(TEST_DATA)/int(args.batch_size)))
521
+ # eval_metrics = list(zip(model.metrics_names,eval_metrics))
522
+ f.write(str(args) + '\n')
523
+ f.write('pk: ' + str(sum(pk_scores)/len(pk_scores)) + '\n')
524
+ # for i in eval_metrics:
525
+ # f.write(str(i[0]) + ': ' + str(i[1]) + '\n')
526
+
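
The evaluation above turns per-sentence begin predictions (2 = segment start, 1 = inside, 0 = padding) into segment masses before calling segeval.pk. A toy sketch of that conversion (values made up, not from the dataset) makes the convention easy to check:

import segeval

def begins_to_masses(begins):
    # Mirror the conversion loop in the evaluation code above:
    # stop at padding, start a new segment on label 2, otherwise grow the current one.
    masses, count = [], 0
    for b in begins:
        if b == 0:
            break
        if b == 2:
            if count != 0:
                masses.append(count)
            count = 1
        else:
            count += 1
    masses.append(count)
    return masses

true_seg = begins_to_masses([2, 1, 1, 2, 1, 2, 1, 1, 0, 0])   # [3, 2, 3]
pred_seg = begins_to_masses([2, 1, 2, 1, 1, 2, 1, 1, 0, 0])   # [2, 3, 3]
print(segeval.pk(pred_seg, true_seg, window_size=2))          # the script uses window_size=10 on full documents
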
data/transformer_squared_single_pairwise.py ADDED
@@ -0,0 +1,556 @@
1
+ import tensorflow as tf
2
+ import numpy as np
3
+ import tensorflow.keras.backend as K
4
+ from tensorflow.keras import activations
5
+ import random
6
+ from itertools import product, combinations
7
+ import math
8
+ import json
9
+ import argparse
10
+ from sklearn.metrics import f1_score
11
+ from sklearn.utils import shuffle
12
+ import glob
13
+ import os
14
+ import segeval
15
+ from tqdm import tqdm
16
+
17
+ TRAIN_DATA = []
18
+ DEV_DATA = []
19
+ TEST_DATA = []
20
+ PAD = '[pad]'
21
+
22
+ #------------------------model-------------------
23
+ def get_angles(pos, i, d_model):
24
+ angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model))
25
+ return pos * angle_rates
26
+
27
+ def positional_encoding(position, d_model):
28
+ angle_rads = get_angles(np.arange(position)[:, np.newaxis],
29
+ np.arange(d_model)[np.newaxis, :],
30
+ d_model)
31
+ # apply sin to even indices in the array; 2i
32
+ angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
33
+
34
+ # apply cos to odd indices in the array; 2i+1
35
+ angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
36
+
37
+ pos_encoding = angle_rads[np.newaxis, ...]
38
+
39
+ return tf.cast(pos_encoding, dtype=tf.float32)
40
+
41
+ def scaled_dot_product_attention(q, k, v, mask):
42
+ """Calculate the attention weights.
43
+ q, k, v must have matching leading dimensions.
44
+ k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
45
+ The mask has different shapes depending on its type(padding or look ahead)
46
+ but it must be broadcastable for addition.
47
+
48
+ Args:
49
+ q: query shape == (..., seq_len_q, depth)
50
+ k: key shape == (..., seq_len_k, depth)
51
+ v: value shape == (..., seq_len_v, depth_v)
52
+ mask: Float tensor with shape broadcastable
53
+ to (..., seq_len_q, seq_len_k). Defaults to None.
54
+
55
+ Returns:
56
+ output, attention_weights
57
+ """
58
+
59
+ matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
60
+
61
+ # scale matmul_qk
62
+ dk = tf.cast(tf.shape(k)[-1], tf.float32)
63
+ scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
64
+
65
+ # add the mask to the scaled tensor.
66
+ if mask is not None:
67
+ scaled_attention_logits += (mask * -1e9)
68
+
69
+ # softmax is normalized on the last axis (seq_len_k) so that the scores
70
+ # add up to 1.
71
+ attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
72
+
73
+ output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
74
+
75
+ return output, attention_weights
76
+
77
+ class MultiHeadAttention(tf.keras.layers.Layer):
78
+ def __init__(self, d_model, num_heads):
79
+ super(MultiHeadAttention, self).__init__()
80
+ self.num_heads = num_heads
81
+ self.d_model = d_model
82
+
83
+ assert d_model % self.num_heads == 0
84
+
85
+ self.depth = d_model // self.num_heads
86
+
87
+ self.wq = tf.keras.layers.Dense(d_model)
88
+ self.wk = tf.keras.layers.Dense(d_model)
89
+ self.wv = tf.keras.layers.Dense(d_model)
90
+
91
+ self.dense = tf.keras.layers.Dense(d_model)
92
+
93
+ def split_heads(self, x, batch_size):
94
+ """Split the last dimension into (num_heads, depth).
95
+ Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
96
+ """
97
+ x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
98
+ return tf.transpose(x, perm=[0, 2, 1, 3])
99
+
100
+ def call(self, v, k, q, mask):
101
+ batch_size = tf.shape(q)[0]
102
+
103
+ q = self.wq(q) # (batch_size, seq_len, d_model)
104
+ k = self.wk(k) # (batch_size, seq_len, d_model)
105
+ v = self.wv(v) # (batch_size, seq_len, d_model)
106
+
107
+ q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)
108
+ k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)
109
+ v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)
110
+
111
+ # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
112
+ # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
113
+ scaled_attention, attention_weights = scaled_dot_product_attention(
114
+ q, k, v, mask)
115
+
116
+ scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth)
117
+
118
+ concat_attention = tf.reshape(scaled_attention,
119
+ (batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)
120
+
121
+ output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)
122
+
123
+ return output, attention_weights
124
+
125
+ def point_wise_feed_forward_network(d_model, dff):
126
+ return tf.keras.Sequential([
127
+ tf.keras.layers.Dense(dff, activation='relu'), # (batch_size, seq_len, dff)
128
+ tf.keras.layers.Dense(d_model) # (batch_size, seq_len, d_model)
129
+ ])
130
+
131
+ class EncoderLayer(tf.keras.layers.Layer):
132
+ def __init__(self, d_model, num_heads, dff, rate=0.1):
133
+ super(EncoderLayer, self).__init__()
134
+
135
+ self.mha = MultiHeadAttention(d_model, num_heads)
136
+ self.ffn = point_wise_feed_forward_network(d_model, dff)
137
+
138
+ self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
139
+ self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
140
+
141
+ self.dropout1 = tf.keras.layers.Dropout(rate)
142
+ self.dropout2 = tf.keras.layers.Dropout(rate)
143
+
144
+ def call(self, x, training, mask):
145
+
146
+ attn_output, _ = self.mha(x, x, x, mask) # (batch_size, input_seq_len, d_model)
147
+ attn_output = self.dropout1(attn_output, training=training)
148
+ out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model)
149
+
150
+ ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)
151
+ ffn_output = self.dropout2(ffn_output, training=training)
152
+ out2 = self.layernorm2(out1 + ffn_output) # (batch_size, input_seq_len, d_model)
153
+
154
+ return out2
155
+
156
+ class Encoder(tf.keras.layers.Layer):
157
+ def __init__(self, num_layers, d_model, num_heads, dff,
158
+ maximum_position_encoding, rate=0.1):
159
+ super(Encoder, self).__init__()
160
+
161
+ self.d_model = d_model
162
+ self.num_layers = num_layers
163
+
164
+ self.embedding = tf.keras.layers.InputLayer(input_shape=(maximum_position_encoding,self.d_model))
165
+ self.pos_encoding = positional_encoding(maximum_position_encoding,
166
+ self.d_model)
167
+
168
+
169
+ self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate)
170
+ for _ in range(num_layers)]
171
+
172
+ self.dropout = tf.keras.layers.Dropout(rate)
173
+
174
+ def call(self, x, training, mask):
175
+
176
+ seq_len = tf.shape(x)[1]
177
+
178
+ # adding embedding and position encoding.
179
+ x = self.embedding(x) # (batch_size, input_seq_len, d_model)
180
+ x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
181
+ x += self.pos_encoding[:, :seq_len, :]
182
+
183
+ x = self.dropout(x, training=training)
184
+
185
+ for i in range(self.num_layers):
186
+ x = self.enc_layers[i](x, training, mask)
187
+
188
+ return x # (batch_size, input_seq_len, d_model)
189
+
190
+
191
+ class Joint_Tobert_TextSeg(tf.keras.Model):
192
+ def __init__(self, seq_len=150, embed_dim=768, begin_output_dim=4, label_output_dim=4, drop_rate=0.1, attn_head=8, encoder_layers=2, encoder_dff=30):
193
+ super(Joint_Tobert_TextSeg, self).__init__()
194
+ self.encoder_block = Encoder(num_layers=encoder_layers, d_model=embed_dim*2, num_heads=attn_head,
195
+ dff=encoder_dff,
196
+ maximum_position_encoding=seq_len)
197
+ self.masking1 = tf.keras.layers.Masking()
198
+ self.masking2 = tf.keras.layers.Masking()
199
+ self.masking3 = tf.keras.layers.Masking()
200
+ self.dropout1 = tf.keras.layers.Dropout(drop_rate)
201
+ self.dense1 = tf.keras.layers.Dense(begin_output_dim, activation="softmax", name="begin_output")
202
+ self.dense2 = tf.keras.layers.Dense(label_output_dim, activation="softmax", name="label_output")
203
+ self.dense3 = tf.keras.layers.Dense(embed_dim, activation="tanh")
204
+
205
+ def call(self, inputs, training=False):
206
+ x = self.encoder_block(inputs[0], training=training, mask=inputs[1])
207
+ x1 = self.masking1(tf.math.multiply(x, inputs[2]))
208
+ x1 = self.dropout1(x1,training=training)
209
+ x2 = self.masking2(tf.math.multiply(x, inputs[3]))
210
+ x2 = self.dropout1(x2,training=training)
211
+ # x3 = self.dense3(inputs[0])
212
+ # x3_forward = x1[:,:-1,:]
213
+ # x3_backward = x1[:,1:,:]
214
+ # sim_mat = tf.matmul(x3_forward, tf.transpose(x3_backward,perm=(0,2,1)))
215
+ # sent_sim = tf.linalg.diag_part(sim_mat)
216
+ # sent_sim_score = tf.keras.activations.sigmoid(sent_sim)
217
+ # one_vector = tf.ones([tf.keras.backend.shape(sent_sim_score)[0], 1], dtype=tf.dtypes.float32)
218
+ # sent_sim_score = tf.concat([one_vector,sent_sim_score],-1)
219
+ # sent_sim_score = tf.expand_dims(sent_sim_score,-1)
220
+ # sent_sim_score = self.masking3(tf.math.multiply(sent_sim_score, inputs[3]))
221
+ return [self.dense1(x1),self.dense2(x2)]
222
+
223
+
224
+ #--------------------data--------------------------
225
+
226
+ def get_begin_dict():
227
+ begin_dict = {
228
+ PAD: 0,
229
+ 0: 1,
230
+ 1: 2
231
+ }
232
+ return begin_dict
233
+
234
+ def get_label_dict(model = 'BERT', corpus='en_city'):
235
+ label_dict = {}
236
+ with open(os.path.join('data', 'processed', model, 'wikisection_' + corpus + '_label_dict'), 'r+') as f:
237
+ for l in f.readlines():
238
+ key, value = l.strip().split()
239
+ label_dict[key] = int(value)
240
+ return label_dict
241
+
242
+ def load_train_data(model = 'BERT', corpus='en_city'):
243
+ print('loading train data',end='...')
244
+ with open(os.path.join('data', 'processed', model, 'wikisection_' + corpus + '_train.json'), 'r+') as f:
245
+ original = json.load(f)
246
+ with open(os.path.join('data', 'processed', model, 'wikisection_' + corpus + '_pairwise_train.json'), 'r+') as f:
247
+ pairwise = json.load(f)
248
+
249
+ assert len(original) == len(pairwise), 'data sizes do not match'
250
+ train_data = []
251
+ for idx in tqdm(range(len(original))):
252
+ data = original[idx]
253
+ data['pairwise'] = pairwise[idx]
254
+ train_data.append(data)
255
+ TRAIN_DATA.extend(train_data)
256
+
257
+ print('Done!')
258
+
259
+
260
+ def load_dev_data(model = 'BERT', corpus='en_city'):
261
+ print('loading dev data',end='...')
262
+ with open(os.path.join('data', 'processed', model, 'wikisection_' + corpus + '_validation.json'), 'r+') as f:
263
+ original = json.load(f)
264
+ with open(os.path.join('data', 'processed', model, 'wikisection_' + corpus + '_pairwise_validation.json'), 'r+') as f:
265
+ pairwise = json.load(f)
266
+
267
+ assert len(original) == len(pairwise), 'data sizes do not match'
268
+ dev_data = []
269
+ for idx in tqdm(range(len(original))):
270
+ data = original[idx]
271
+ data['pairwise'] = pairwise[idx]
272
+ dev_data.append(data)
273
+
274
+ DEV_DATA.extend(dev_data)
275
+ print('Done!')
276
+
277
+ def load_test_data(model = 'BERT', corpus='en_city'):
278
+ print('loading test data',end='...')
279
+ with open(os.path.join('data', 'processed', model, 'wikisection_' + corpus + '_test.json'), 'r+') as f:
280
+ original = json.load(f)
281
+ with open(os.path.join('data', 'processed', model, 'wikisection_' + corpus + '_pairwise_test.json'), 'r+') as f:
282
+ pairwise = json.load(f)
283
+
284
+ assert len(original) == len(pairwise), 'data sizes do not match'
285
+ test_data = []
286
+ for idx in tqdm(range(len(original))):
287
+ data = original[idx]
288
+ data['pairwise'] = pairwise[idx]
289
+ test_data.append(data)
290
+
291
+ TEST_DATA.extend(test_data)
292
+ print('Done!')
293
+
294
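+ # flattened=True returns a single flat list of begin labels across all test documents; flattened=False returns one list per document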
+ def get_test_true_begin(seq_len = 150, flattened = True):
295
+ batch_output = []
296
+ begin_dict = get_begin_dict()
297
+ if flattened:
298
+ for doc in TEST_DATA:
299
+ for sent in doc['sent'][:seq_len]:
300
+ batch_output.append(begin_dict[sent['begin']])
301
+ for i in range(max(seq_len-len(doc['sent']), 0)):
302
+ batch_output.append(begin_dict[PAD])
303
+ else:
304
+ for doc in TEST_DATA:
305
+ doc_output = []
306
+ for sent in doc['sent'][:seq_len]:
307
+ doc_output.append(begin_dict[sent['begin']])
308
+ for i in range(max(seq_len-len(doc['sent']), 0)):
309
+ doc_output.append(begin_dict[PAD])
310
+ batch_output.append(doc_output)
311
+ return batch_output
312
+
313
+ def get_test_true_label(model='BERT', seq_len = 150, flattened = True, corpus = 'en_city'):
314
+ batch_output = []
315
+ label_dict = get_label_dict(model, corpus)
316
+ if flattened:
317
+ for doc in TEST_DATA:
318
+ for sent in doc['sent'][:seq_len]:
319
+ batch_output.append(label_dict[sent['label']])
320
+ for i in range(max(seq_len-len(doc['sent']), 0)):
321
+ batch_output.append(label_dict[PAD])
322
+ else:
323
+ for doc in TEST_DATA:
324
+ doc_output = []
325
+ for sent in doc['sent'][:seq_len]:
326
+ doc_output.append(label_dict[sent['label']])
327
+ for i in range(max(seq_len-len(doc['sent']), 0)):
328
+ doc_output.append(label_dict[PAD])
329
+ batch_output.append(doc_output)
330
+ return batch_output
331
+
332
+ def gen_batch_input(data, model = 'BERT', seq_len=72, embed_dim=768, mask_b_rate = 1, mask_i_rate = 0.5, corpus = 'en_city', pairwise_tok = 'cls'):
333
+ """
334
+ :param mask_b_rate: probability that a segment-beginning sentence is kept (not hidden from the begin-prediction head) during training
335
+ :param mask_i_rate: probability that an inner-segment sentence is kept (not hidden from the begin-prediction head) during training
336
+ """
337
+ begin_dict = get_begin_dict()
338
+ label_dict = get_label_dict(model, corpus)
339
+ batch_input_combined = []
340
+ batch_input_eval_mask = []
341
+ batch_input_transformer_mask = []
342
+ batch_segment_output = []
343
+ batch_label_output = []
344
+ for doc in data:
345
+ doc_chunk_len = len(doc['sent'])
346
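+ # embeddings are stored as string representations in the processed JSON, so eval() parses them back into lists of floats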
+ input_cls = [eval(sent['cls']) for sent in doc['sent']] + [[0.] * embed_dim] * max(seq_len - doc_chunk_len, 0)
347
+ input_cls = input_cls[:seq_len]
348
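+ # pairwise embeddings correspond to adjacent sentence pairs, so a small constant placeholder vector stands in for the first sentence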
+ if pairwise_tok == 'cls':
349
+ input_pairwise_emb = [[0.0001] * embed_dim] + [eval(sent['cls']) for sent in doc['pairwise']] + [[0.] * embed_dim] * max(seq_len - doc_chunk_len, 0)
350
+ else:
351
+ input_pairwise_emb = [[0.0001] * embed_dim] + [eval(sent['sep']) for sent in doc['pairwise']] + [[0.] * embed_dim] * max(seq_len - doc_chunk_len, 0)
352
+ input_pairwise_emb = input_pairwise_emb[:seq_len]
353
+ input_combined = np.concatenate([np.asarray(input_cls),np.asarray(input_pairwise_emb)],axis = -1)
354
+ # print(input_combined.shape)
355
+ input_combined = input_combined.tolist()
356
+ input_eval_mask = []
357
+ input_transformer_mask = []
358
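+ # randomly hide sentences from the begin-prediction head during training: inner sentences (begin == 0) are kept with probability mask_i_rate, segment-beginning sentences (begin == 1) with probability mask_b_rate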
+ for sent in doc['sent']:
359
+ r = np.random.random()
360
+ if (sent['begin'] == 0 and r > mask_i_rate) or (sent['begin'] == 1 and r > mask_b_rate):
361
+ # replace output to 0
362
+ input_eval_mask.append(1.)
363
+ else:
364
+ input_eval_mask.append(0.)
365
+ input_transformer_mask.append(0.)
366
+ input_eval_mask = input_eval_mask + [1.] * max(seq_len - doc_chunk_len, 0)
367
+ input_eval_mask = input_eval_mask[:seq_len]
368
+ input_transformer_mask = input_transformer_mask + [1.] * max(seq_len - doc_chunk_len, 0)
369
+ input_transformer_mask = input_transformer_mask[:seq_len]
370
+ segment_output = [ [begin_dict[sent['begin']]] for sent in doc['sent'] ]
371
+ segment_output = segment_output + [[begin_dict[PAD]]] * max(seq_len - doc_chunk_len, 0)
372
+ segment_output = segment_output[:seq_len]
373
+ label_output = [ [label_dict[sent['label']]] for sent in doc['sent'] ]
374
+ label_output = label_output + [[label_dict[PAD]]] * max(seq_len - doc_chunk_len, 0)
375
+ label_output = label_output[:seq_len]
376
+ batch_input_combined.append(input_combined)
377
+ batch_input_eval_mask.append(input_eval_mask)
378
+ batch_input_transformer_mask.append(input_transformer_mask)
379
+ batch_segment_output.append(segment_output)
380
+ batch_label_output.append(label_output)
381
+
382
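+ # returns model inputs [combined embeddings, encoder attention mask, begin-head keep mask, label-head padding mask] and targets [begin labels, section labels]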
+ return ([np.asarray(batch_input_combined),
+ np.asarray(batch_input_transformer_mask)[:, np.newaxis, np.newaxis, :],
+ np.not_equal(np.expand_dims(batch_input_eval_mask, axis=-1), 1).astype(float),
+ np.not_equal(np.expand_dims(batch_input_transformer_mask, axis=-1), 1).astype(float)],
+ [np.asarray(batch_segment_output), np.asarray(batch_label_output)])
383
+
384
+ def train_generator(batch_size, model = 'BERT', seq_len=72, embed_dim=768, mask_b_rate = 1, mask_i_rate = 0.5, corpus = 'en_city'):
385
+ _train_data = TRAIN_DATA
386
+ while True:
387
+ _train_data = shuffle(_train_data)
388
+ for i in range(0,len(_train_data),batch_size):
389
+ yield gen_batch_input(_train_data[i:i+batch_size], model=model, seq_len=seq_len,embed_dim=embed_dim, mask_b_rate = mask_b_rate, mask_i_rate = mask_i_rate, corpus = corpus)
390
+
391
+ def dev_generator(batch_size, model = 'BERT', seq_len=72, embed_dim=768, mask_b_rate = 1, mask_i_rate = 0.5, corpus = 'en_city'):
392
+ while True:
393
+ for i in range(0,len(DEV_DATA),batch_size):
394
+ yield gen_batch_input(DEV_DATA[i:i+batch_size], model=model,seq_len=seq_len,embed_dim=embed_dim, mask_b_rate = mask_b_rate, mask_i_rate = mask_i_rate, corpus = corpus)
395
+
396
+ def test_generator(batch_size, model = 'BERT', seq_len=72, embed_dim=768, corpus = 'en_city'):
397
+ while True:
398
+ for i in range(0,len(TEST_DATA),batch_size):
399
+ #taking every sentence into account
400
+ yield gen_batch_input(TEST_DATA[i:i+batch_size], model=model,seq_len=seq_len,embed_dim=embed_dim, mask_b_rate = 1, mask_i_rate = 1, corpus = corpus)
401
+
402
+
403
+
404
+
405
+ if __name__ == "__main__":
406
+
407
+ parser = argparse.ArgumentParser()
408
+ parser.add_argument('--model', help="BERT/BLUEBERT", default='BERT')
409
+ parser.add_argument('--corpus', help="corpus:en_city/en_disease/de_city/de_disease", default='en_city')
410
+ parser.add_argument('--seq_len', help="maximum number of document chunks", default='150')
411
+ parser.add_argument('--embed_dim', help="input chunk cls dimension", default='768')
412
+ parser.add_argument('--attn_head', help="number of attention heads", default='12')
413
+ parser.add_argument('--encoder_layers', help="number of encoder layers", default='10')
414
+ parser.add_argument('--encoder_dff', help="dimension of feed forward pointwise network", default='768')
415
+ parser.add_argument('--epoch', help="number of epochs in training", default='100')
416
+ parser.add_argument('--batch_size', help="batch size", default='32')
417
+ parser.add_argument('--mask_b_rate', help="probability of begin segment remains", default='1')
418
+ parser.add_argument('--mask_i_rate', help="probability of inner segment remains", default='0.5')
419
+ parser.add_argument('--train_step', help="number of training steps for each epoch", default='100')
420
+ parser.add_argument('--val_step', help="number of validation steps for each epoch", default='100')
421
+ parser.add_argument('--lr', help="learning rate", default='0.0001')
422
+ parser.add_argument('--patience', help="early stopping patience", default='10')
423
+ parser.add_argument('--output_model', help="output model name", default='model_test')
424
+ parser.add_argument('--output_result', help="output result file name prefix", default='result_test')
425
+
426
+ args = parser.parse_args()
427
+
428
+ assert args.corpus in ['en_city','en_disease','de_city','de_disease'], "Invalid corpus"
429
+ assert args.seq_len.isdigit(),'seq_len must be integer'
430
+ assert args.embed_dim.isdigit(),'embed_dim must be integer'
431
+ assert args.attn_head.isdigit(),'attn_head must be integer'
432
+ assert args.epoch.isdigit(),'epoch must be integer'
433
+ assert args.batch_size.isdigit(),'batch_size must be integer'
434
+ assert args.train_step.isdigit(),'train_step must be integer'
435
+ assert args.val_step.isdigit(),'val_step must be integer'
436
+ assert args.encoder_layers.isdigit(),'encoder_layers must be integer'
437
+ assert args.encoder_dff.isdigit(),'encoder_dff must be integer'
438
+ assert args.patience.isdigit(),'patience must be integer'
439
+ try:
440
+ float(args.lr)
441
+ float(args.mask_b_rate)
442
+ float(args.mask_i_rate)
443
+ except ValueError:
444
+ raise "lr/mask_b_rate/mask_i_rate must be float"
445
+
446
+ print(args)
447
+
448
+ load_train_data(model = args.model, corpus=args.corpus)
449
+ load_dev_data(model = args.model, corpus=args.corpus)
450
+ optimizer = tf.keras.optimizers.Adam(learning_rate=float(args.lr))
451
+
452
+ model = Joint_Tobert_TextSeg(seq_len=int(args.seq_len), embed_dim=int(args.embed_dim), begin_output_dim=len(get_begin_dict()), label_output_dim=len(get_label_dict(model = args.model, corpus=args.corpus)), attn_head=int(args.attn_head), encoder_layers = int(args.encoder_layers),encoder_dff = int(args.encoder_dff))
453
+
454
+ loss = [tf.keras.losses.SparseCategoricalCrossentropy(),tf.keras.losses.SparseCategoricalCrossentropy()]
455
+ print('Compiling model')
456
+ model.compile(loss=loss, optimizer=optimizer)
457
+
458
+ print('Training model')
459
+ model.fit(train_generator(batch_size = int(args.batch_size), model=args.model, seq_len=int(args.seq_len), embed_dim=int(args.embed_dim), mask_b_rate = float(args.mask_b_rate), mask_i_rate = float(args.mask_i_rate), corpus = args.corpus),
460
+ steps_per_epoch=int(args.train_step),
461
+ epochs=int(args.epoch),
462
+ validation_data=dev_generator(batch_size = int(args.batch_size), model=args.model, seq_len=int(args.seq_len), embed_dim=int(args.embed_dim), mask_b_rate = float(args.mask_b_rate), mask_i_rate = float(args.mask_i_rate), corpus = args.corpus),
463
+ validation_steps=int(args.val_step),
464
+ callbacks=[
465
+ tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=int(args.patience))
466
+ ],)
467
+ # print('Saving model')
468
+ # model.save(args.output_model)
469
+
470
+ print('Evaluating model')
471
+
472
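+ # release training/validation data before loading the test set to reduce memory use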
+ del train_generator
473
+ del dev_generator
474
+ del TRAIN_DATA
475
+ del DEV_DATA
476
+ load_test_data(model = args.model, corpus=args.corpus)
477
+ true_begin = get_test_true_begin(seq_len=int(args.seq_len))
478
+ true_label = get_test_true_label(model=args.model, seq_len=int(args.seq_len), corpus = args.corpus)
479
+ predictions = model.predict(test_generator(batch_size = int(args.batch_size), model=args.model, seq_len=int(args.seq_len), embed_dim=int(args.embed_dim), corpus = args.corpus),steps=math.ceil(len(TEST_DATA)/int(args.batch_size)))
480
+ begin_predictions_prob = predictions[0]
481
+ begin_predictions = np.argmax(begin_predictions_prob,axis=-1)
482
+ label_predictions = predictions[1]
483
+ #print(begin_predictions.shape)
484
+ #pred_prob_flattened = []
486
+
487
+ # for j in predictions.tolist():
488
+ # for i in j:
489
+ # pred_prob_flattened.append(i)
490
+ begin_pred_flattened = []
491
+ for i in begin_predictions:
492
+ begin_pred_flattened.extend(i)
493
+
494
+ begin_pred_prob_flattened = []
495
+ for i in begin_predictions_prob:
496
+ begin_pred_prob_flattened.extend(i)
497
+
498
+ label_pred_flattened = []
499
+ for i in label_predictions:
500
+ label_pred_flattened.extend(np.argmax(i,axis=-1))
501
+ # pred = np.argmax(np.asarray(predictions),axis= -1)
502
+
503
+ true_pred_begin_pairs = list(zip(get_test_true_begin(seq_len=int(args.seq_len),flattened=False),begin_predictions))
504
+
505
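+ # convert begin labels (2 = segment start, 1 = inner sentence, 0 = padding) into lists of segment lengths and score each document with Pk (window size 10)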
+ pk_scores = []
506
+ for pair in true_pred_begin_pairs:
507
+ # document
508
+ true_seg = []
509
+ pred_seg = []
510
+ true_count = 0
511
+ pred_count = 0
512
+ for j in range(len(pair[0])):
513
+ if pair[0][j] == 0:
514
+ break
515
+ if pair[0][j] == 2:
516
+ if true_count != 0:
517
+ true_seg.append(true_count)
518
+ true_count = 1
519
+ else:
520
+ true_count += 1
521
+ if pair[1][j] == 2:
522
+ if pred_count != 0:
523
+ pred_seg.append(pred_count)
524
+ pred_count = 1
525
+ else:
526
+ pred_count += 1
527
+ true_seg.append(true_count)
528
+ pred_seg.append(pred_count)
529
+ pk_scores.append(segeval.pk(pred_seg,true_seg,window_size=10))
530
+
531
+ # print('Outputting result')
532
+ # with open('results/' + 'tobert_masked_crf_' + args.output_result + '_prob', 'w+') as f:
533
+ # for item in list(zip(true_targets,pred_prob_flattened)):
534
+ # f.write(str(item[0]) + ',' + str(item[1]) + '\n')
535
+ with open('results/'+ 'tobert_pairwise_' + args.output_result + '_begin_record', 'w+') as f:
536
+ for item in list(zip(true_begin,begin_pred_flattened)):
537
+ f.write(str(item[0]) + ',' + str(item[1]) + '\n')
538
+
539
+ with open('results/'+ 'tobert_pairwise_' + args.output_result + '_begin_prob', 'w+') as f:
540
+ for item in list(zip(true_begin,begin_pred_prob_flattened)):
541
+ f.write(str(item[0]) + ',' + str(item[1]) + '\n')
542
+
543
+ # with open('results/'+ 'tobert_masked_joint_' + args.output_result + '_label_record', 'w+') as f:
544
+ # for item in list(zip(true_label,label_pred_flattened)):
545
+ # f.write(str(item[0]) + ',' + str(item[1]) + '\n')
546
+
547
+ print('outputting metrics')
548
+ print('pk: ' + str(sum(pk_scores)/len(pk_scores)))
549
+ with open('results/' + 'tobert_pairwise_' + args.output_result + '_metrics.txt', 'w+') as f:
550
+ # eval_metrics = model.evaluate(test_generator(batch_size = int(args.batch_size), seq_len=int(args.seq_len), embed_dim=int(args.embed_dim)),steps=math.ceil(len(TEST_DATA)/int(args.batch_size)))
551
+ # eval_metrics = list(zip(model.metrics_names,eval_metrics))
552
+ f.write(str(args) + '\n')
553
+ f.write('pk: ' + str(sum(pk_scores)/len(pk_scores)) + '\n')
554
+ # for i in eval_metrics:
555
+ # f.write(str(i[0]) + ': ' + str(i[1]) + '\n')
556
+