import torch
from transformers import BertModel


class LanguageIdentifier(torch.nn.Module):
    """Binary classifier head on top of BERTimbau (Portuguese BERT)."""

    def __init__(self):
        super().__init__()

        # Pretrained Portuguese BERT encoder (BERTimbau large).
        self.portuguese_bert = BertModel.from_pretrained(
            "neuralmind/bert-large-portuguese-cased")

        # Classification head: dropout for regularisation, then a single
        # logit projected from the encoder's hidden size.
        self.linear_layer = torch.nn.Sequential(
            torch.nn.Dropout(p=0.2),
            torch.nn.Linear(self.portuguese_bert.config.hidden_size, 1),
        )

    def forward(self, input_ids, attention_mask):
        # Use the final hidden state of the [CLS] token as the sentence
        # representation, then map it to a single logit.
        outputs = self.portuguese_bert(
            input_ids=input_ids,
            attention_mask=attention_mask).last_hidden_state[:, 0, :]
        outputs = self.linear_layer(outputs)
        return outputs
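

# Example usage (a minimal sketch, not part of the model code). It assumes
# the tokenizer is loaded from the same checkpoint as the encoder above:
#
#     from transformers import BertTokenizer
#     tokenizer = BertTokenizer.from_pretrained(
#         "neuralmind/bert-large-portuguese-cased")
#     batch = tokenizer(["Isto é uma frase de teste."], return_tensors="pt")
#     model = LanguageIdentifier()
#     logits = model(batch["input_ids"], batch["attention_mask"])  # shape (1, 1)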


class Ensembler(torch.nn.Module):
    """Averages the predictions of six domain-specialist models.

    Specialist checkpoints are loaded one at a time and released after use,
    so only a single BERT-large model occupies GPU memory at any moment.
    """

    def __init__(self):
        super().__init__()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def forward(self, input_ids, attention_mask):
        outputs = []

        with torch.no_grad():
            for domain in ['politics', 'news', 'law', 'social_media',
                           'literature', 'web']:
                # Load the fine-tuned checkpoint for this domain,
                # expected at "<domain>.pt" in the working directory.
                specialist = LanguageIdentifier()
                specialist.load_state_dict(
                    torch.load(f"{domain}.pt", map_location=self.device))
                specialist.eval()
                specialist.to(self.device)

                outputs.append(specialist(input_ids, attention_mask))

                # Free GPU memory before loading the next specialist.
                specialist.cpu()
                del specialist
                torch.cuda.empty_cache()

        # Stack the per-domain logits along dim 1 and average them into a
        # single ensemble score per input, shape (batch, 1).
        outputs = torch.cat(outputs, dim=1)
        return torch.mean(outputs, dim=1).unsqueeze(1)
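

if __name__ == "__main__":
    # Smoke test (a sketch, not part of the original code): assumes the six
    # "<domain>.pt" specialist checkpoints exist in the working directory.
    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained(
        "neuralmind/bert-large-portuguese-cased")
    batch = tokenizer(["Isto é uma frase de teste."], return_tensors="pt")

    ensemble = Ensembler()
    score = ensemble(batch["input_ids"].to(ensemble.device),
                     batch["attention_mask"].to(ensemble.device))
    print(score.shape)  # (1, 1): one averaged logit per input sentence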