UNCANNY69 committed on
Commit 3c5c078 · verified · 1 Parent(s): 8d1cae4

Create model.py

Files changed (1)
  1. model.py +87 -0
model.py ADDED
@@ -0,0 +1,87 @@
+ from transformers import BertModel
+ from transformers.modeling_outputs import SequenceClassifierOutput
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from transformers import PreTrainedModel
+ from transformers import PretrainedConfig
+
+ class BertLSTMConfig(PretrainedConfig):
+     model_type = "bertLSTMForSequenceClassification"
+
+     def __init__(self,
+                  num_classes=2,
+                  hidden_size=768,  # BERT hidden size
+                  num_layers=12,
+                  hidden_dim_lstm=256,  # New parameter for the LSTM head
+                  hidden_dropout_prob=0.1,  # Renamed from dropout_rate to match the BERT parameter name
+                  **kwargs):
+         super().__init__(**kwargs)
+         self.num_classes = num_classes
+         self.hidden_size = hidden_size
+         self.num_layers = num_layers
+         self.hidden_dim_lstm = hidden_dim_lstm  # LSTM hidden dimension
+         self.hidden_dropout_prob = hidden_dropout_prob
+         self.id2label = {
+             0: "fake",
+             1: "true",
+         }
+         self.label2id = {
+             "fake": 0,
+             "true": 1,
+         }
+
+
+ class BertLSTMForSequenceClassification(PreTrainedModel):
+     config_class = BertLSTMConfig
+
+     def __init__(self, config):
+         super(BertLSTMForSequenceClassification, self).__init__(config)
+         self.num_classes = config.num_classes
+         self.embed_dim = config.hidden_size  # BERT hidden size is used as the embedding dimension
+         self.num_layers = config.num_layers
+         self.hidden_dim_lstm = config.hidden_dim_lstm
+         self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+         # Use BertModel (instead of AlbertModel) as the encoder
+         self.bert = BertModel.from_pretrained('bert-base-uncased', output_hidden_states=True, output_attentions=False)
+         print("BERT Model Loaded")
+
+         # LSTM over the BERT token embeddings
+         self.lstm = nn.LSTM(self.embed_dim, self.hidden_dim_lstm, batch_first=True, num_layers=self.num_layers)
+
+         # Linear classification head on top of the LSTM output
+         self.fc = nn.Linear(self.hidden_dim_lstm, self.num_classes)
+
+     def forward(self, input_ids, attention_mask, token_type_ids, labels=None):
+         bert_output = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
+         hidden_states = bert_output["hidden_states"]
+
+         # Take the embeddings from the last BERT layer
+         last_hidden_states = hidden_states[-1]
+
+         # Apply dropout
+         last_hidden_states = self.dropout(last_hidden_states)
+
+         # Pass through the LSTM
+         lstm_output, _ = self.lstm(last_hidden_states, None)
+
+         # Keep only the output of the last time step
+         lstm_output = lstm_output[:, -1, :]
+
+         # Apply dropout
+         lstm_output = self.dropout(lstm_output)
+
+         # Linear layer for classification
+         logits = self.fc(lstm_output)
+
+         loss = None
+         if labels is not None:
+             loss = F.cross_entropy(logits, labels)
+
+         out = SequenceClassifierOutput(
+             loss=loss,
+             logits=logits,
+             hidden_states=bert_output.hidden_states,
+             attentions=bert_output.attentions,
+         )
+         return out
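
For reference, a minimal usage sketch (not part of this commit): it assumes model.py is on the Python path and instantiates the config and model classes directly, without registering them with the Auto* classes. The sentence, the smaller num_layers value, and the variable names are illustrative only; instantiating the model downloads bert-base-uncased inside __init__, and only the LSTM and linear head weights are newly initialized.

import torch
from transformers import AutoTokenizer
from model import BertLSTMConfig, BertLSTMForSequenceClassification

# Hypothetical quick test: build the model from its config and classify one sentence.
config = BertLSTMConfig(num_classes=2, num_layers=2)  # 2 LSTM layers instead of the default 12, to keep the test light
model = BertLSTMForSequenceClassification(config)
model.eval()

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
inputs = tokenizer("An example headline to classify", return_tensors="pt", truncation=True, max_length=128)

with torch.no_grad():
    out = model(input_ids=inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                token_type_ids=inputs["token_type_ids"])

prediction = out.logits.argmax(dim=-1).item()
print(config.id2label[prediction])  # "fake" or "true"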