# MisRoberta-CNN / model.py
from abc import ABCMeta

import torch
import torch.nn.functional as F
from torch import nn

from transformers import PretrainedConfig, PreTrainedModel, RobertaModel
from transformers.modeling_outputs import SequenceClassifierOutput


class RobertaABSAConfig(PretrainedConfig):
    model_type = "robertaCNNForSequenceClassification"

    def __init__(self,
                 num_classes=2,
                 embed_dim=768,
                 conv_out_channels=256,  # output channels of the Conv1d head
                 conv_kernel_size=3,     # kernel size of the Conv1d head
                 fc_hidden=128,          # hidden units reserved for the FC layer
                 dropout_rate=0.1,
                 num_layers=12,
                 **kwargs):
        super().__init__(**kwargs)
        self.num_classes = num_classes
        self.embed_dim = embed_dim
        self.conv_out_channels = conv_out_channels
        self.conv_kernel_size = conv_kernel_size
        self.fc_hidden = fc_hidden
        self.dropout_rate = dropout_rate
        self.num_layers = num_layers
        self.id2label = {
            0: "fake",
            1: "true",
        }
        self.label2id = {
            "fake": 0,
            "true": 1,
        }


class RobertaCNNForSequenceClassification(PreTrainedModel, metaclass=ABCMeta):
    config_class = RobertaABSAConfig

    def __init__(self, config):
        super().__init__(config)
        self.num_classes = config.num_classes
        self.embed_dim = config.embed_dim
        self.num_layers = config.num_layers
        self.conv_out_channels = config.conv_out_channels
        self.conv_kernel_size = config.conv_kernel_size
        self.dropout = nn.Dropout(config.dropout_rate)
        # RoBERTa backbone configured to return the hidden states of every layer,
        # which feed the convolutional head below.
        self.roberta = RobertaModel.from_pretrained('roberta-base',
                                                    output_hidden_states=True,
                                                    output_attentions=False)
        print("RoBERTa Model Loaded")
        # Conv1d over the stack of per-layer [CLS] vectors:
        # channels = embed_dim, sequence length = num_layers.
        self.conv1d = nn.Conv1d(in_channels=self.embed_dim,
                                out_channels=self.conv_out_channels,
                                kernel_size=self.conv_kernel_size)
        self.fc = nn.Linear(self.conv_out_channels, self.num_classes)
    def forward(self, input_ids, attention_mask, labels=None):
        roberta_output = self.roberta(input_ids=input_ids, attention_mask=attention_mask)
        hidden_states = roberta_output["hidden_states"]
        # Stack the [CLS] embedding from the first `num_layers` hidden states:
        # (batch_size, num_layers, embed_dim)
        cls_states = torch.stack([hidden_states[layer_i][:, 0, :]
                                  for layer_i in range(self.num_layers)], dim=1)
        # Conv1d expects (batch_size, channels, length), so use embed_dim as the
        # channel axis and the layer axis as the sequence dimension.
        cls_states = cls_states.permute(0, 2, 1)
        conv_output = self.conv1d(cls_states)
        conv_output = F.relu(conv_output)
        conv_output = F.max_pool1d(conv_output, kernel_size=conv_output.size(2))  # global max pooling
        conv_output = conv_output.squeeze(-1)
        conv_output = self.dropout(conv_output)
        logits = self.fc(conv_output)
        loss = None
        if labels is not None:
            loss = F.cross_entropy(logits, labels)
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=roberta_output.hidden_states,
            attentions=roberta_output.attentions,
        )
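

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file): builds the config and
# model defined above and runs a dummy forward pass. The `roberta-base`
# tokenizer and the example sentence are illustrative assumptions, not fixed
# by this repository.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import RobertaTokenizer

    config = RobertaABSAConfig()
    model = RobertaCNNForSequenceClassification(config)
    model.eval()

    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    batch = tokenizer(["an example headline to classify"],
                      return_tensors="pt",
                      padding=True,
                      truncation=True)

    with torch.no_grad():
        output = model(input_ids=batch["input_ids"],
                       attention_mask=batch["attention_mask"])

    predicted_id = output.logits.argmax(dim=-1).item()
    print("Predicted label:", config.id2label[predicted_id])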