import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer


class LanguageModel:
    def __init__(self):
        # Load a pretrained BERT checkpoint with a sequence-classification head.
        self.model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
        self.tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

    def predict(self, text, image):
        # For simplicity, the image is not used in this example; only the text
        # is tokenized and classified.
        inputs = self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=512,
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt'
        )
        # Run the classifier without tracking gradients (inference only).
        with torch.no_grad():
            outputs = self.model(inputs['input_ids'], attention_mask=inputs['attention_mask'])
        # Index of the highest-scoring class.
        return torch.argmax(outputs.logits)
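

# --- Usage sketch (illustrative, not part of the original file) ---
# A minimal example of how LanguageModel.predict might be called; it assumes
# network access to download the "bert-base-uncased" weights on first run and
# that the sample sentence below is arbitrary. The image argument is ignored
# by predict, so None is passed here.
if __name__ == "__main__":
    lm = LanguageModel()
    label_id = lm.predict("This is a sample sentence.", image=None)
    print(f"Predicted class id: {label_id.item()}")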