from abc import ABCMeta

import torch
from torch import nn
import torch.nn.functional as F
from transformers import AlbertModel, PretrainedConfig, PreTrainedModel
from transformers.modeling_outputs import SequenceClassifierOutput


class AlbertABSAConfig(PretrainedConfig):
    model_type = "albertCNNForSequenceClassification"

    def __init__(self,
                 num_classes=2,
                 embed_dim=768,
                 conv_out_channels=256,   # Conv1d output channels
                 conv_kernel_size=3,      # Conv1d kernel size
                 fc_hidden=128,           # FC hidden units (not used by the current classification head)
                 dropout_rate=0.1,
                 num_layers=12,
                 **kwargs):
        super().__init__(**kwargs)
        self.num_classes = num_classes
        self.embed_dim = embed_dim
        self.conv_out_channels = conv_out_channels
        self.conv_kernel_size = conv_kernel_size
        self.fc_hidden = fc_hidden
        self.dropout_rate = dropout_rate
        self.num_layers = num_layers
        self.id2label = {0: "fake", 1: "true"}
        self.label2id = {"fake": 0, "true": 1}


class AlbertCNNForSequenceClassification(PreTrainedModel, metaclass=ABCMeta):
    config_class = AlbertABSAConfig

    def __init__(self, config):
        super().__init__(config)
        self.num_classes = config.num_classes
        self.embed_dim = config.embed_dim
        self.num_layers = config.num_layers
        self.conv_out_channels = config.conv_out_channels
        self.conv_kernel_size = config.conv_kernel_size
        self.dropout = nn.Dropout(config.dropout_rate)
        # Backbone: pretrained ALBERT with all hidden states exposed so the
        # per-layer [CLS] representations can be stacked in forward().
        self.albert = AlbertModel.from_pretrained('albert-base-v2',
                                                  output_hidden_states=True,
                                                  output_attentions=False)
        print("ALBERT Model Loaded")
        # Conv1d over the layer axis: channels = embed_dim, sequence length = num_layers.
        self.conv1d = nn.Conv1d(in_channels=self.embed_dim,
                                out_channels=self.conv_out_channels,
                                kernel_size=self.conv_kernel_size)
        self.fc = nn.Linear(self.conv_out_channels, self.num_classes)

    def forward(self, input_ids, attention_mask, token_type_ids=None, labels=None):
        albert_output = self.albert(input_ids=input_ids,
                                    attention_mask=attention_mask,
                                    token_type_ids=token_type_ids)
        hidden_states = albert_output["hidden_states"]
        # Take the [CLS] vector from the first num_layers entries of
        # hidden_states (index 0 is the embedding output).
        cls_per_layer = [hidden_states[layer_i][:, 0, :] for layer_i in range(self.num_layers)]
        hidden_states = torch.stack(cls_per_layer, dim=1)   # (batch, num_layers, embed_dim)
        hidden_states = hidden_states.permute(0, 2, 1)       # (batch, embed_dim, num_layers) for Conv1d
        conv_output = self.conv1d(hidden_states)
        conv_output = F.relu(conv_output)
        conv_output = F.max_pool1d(conv_output, kernel_size=conv_output.size(2))  # global max pooling
        conv_output = conv_output.squeeze(-1)
        conv_output = self.dropout(conv_output)
        logits = self.fc(conv_output)

        loss = None
        if labels is not None:
            loss = F.cross_entropy(logits, labels)

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=albert_output.hidden_states,
            attentions=albert_output.attentions,
        )
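

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal smoke test, assuming the `transformers` AlbertTokenizer and the
# 'albert-base-v2' checkpoint; the sample text, max_length, and label below
# are made up for demonstration only.
if __name__ == "__main__":
    from transformers import AlbertTokenizer

    tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
    config = AlbertABSAConfig(num_classes=2)
    model = AlbertCNNForSequenceClassification(config)
    model.eval()

    encoded = tokenizer(["an example headline to classify"],
                        padding="max_length",
                        truncation=True,
                        max_length=32,
                        return_tensors="pt")
    labels = torch.tensor([1])  # dummy label -> "true"

    with torch.no_grad():
        output = model(input_ids=encoded["input_ids"],
                       attention_mask=encoded["attention_mask"],
                       token_type_ids=encoded["token_type_ids"],
                       labels=labels)

    print(output.logits.shape)  # torch.Size([1, 2])
    print(output.loss)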