import os
import json

import numpy as np
import torch
from sklearn.preprocessing import OneHotEncoder
from transformers import BertTokenizer
from ts.torch_handler.base_handler import BaseHandler

from model import ImprovedBERTClass


class UICardMappingHandler(BaseHandler):
    def __init__(self):
        super().__init__()
        self.initialized = False

    def initialize(self, context):
        self.manifest = context.manifest
        properties = context.system_properties
        model_dir = properties.get("model_dir")
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Load config
        with open(os.path.join(model_dir, "config.json"), "r") as f:
            self.config = json.load(f)

        # Fit the one-hot encoder over the fixed set of card labels
        self.labels = [
            "Videos", "Unit Conversion", "Translation",
            "Shopping Product Comparison", "Restaurants", "Product",
            "Information", "Images", "Gift", "General Comparison",
            "Flights", "Answer", "Aircraft Seat Map",
        ]
        labels_np = np.array(self.labels).reshape(-1, 1)
        self.encoder = OneHotEncoder(sparse_output=False)
        self.encoder.fit(labels_np)

        # Load model weights onto the selected device
        self.model = ImprovedBERTClass()
        self.model.load_state_dict(
            torch.load(os.path.join(model_dir, "model.pth"), map_location=self.device)
        )
        self.model.to(self.device)
        self.model.eval()

        # Load tokenizer
        self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        self.initialized = True

    def preprocess(self, data):
        body = data[0].get("body", {})
        text = body.get("text", "")
        k = body.get("k", 3)
        inputs = self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=64,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )
        return {
            "ids": inputs["input_ids"].to(self.device, dtype=torch.long),
            "mask": inputs["attention_mask"].to(self.device, dtype=torch.long),
            "token_type_ids": inputs["token_type_ids"].to(self.device, dtype=torch.long),
            "k": k,
        }

    def inference(self, data):
        with torch.no_grad():
            outputs = self.model(data["ids"], data["mask"], data["token_type_ids"])
        probabilities = torch.sigmoid(outputs)
        return probabilities.detach().cpu().numpy().flatten(), data["k"]

    def postprocess(self, inference_output):
        probabilities, k = inference_output

        # Indices of the top-k probabilities, highest first
        top_k_indices = np.argsort(probabilities)[-k:][::-1]
        top_k_probs = probabilities[top_k_indices]

        # One-hot encode each top-k index so it can be decoded back to a label
        top_k_one_hot = np.zeros((k, len(probabilities)))
        for i, idx in enumerate(top_k_indices):
            top_k_one_hot[i, idx] = 1

        # Decode the top-k one-hot rows into label strings
        top_k_cards = [self.decode_vector(one_hot.reshape(1, -1)) for one_hot in top_k_one_hot]

        # Pair each card with its probability
        top_k_predictions = list(zip(top_k_cards, top_k_probs.tolist()))

        # Determine the most likely card; fall back to "Answer" when no
        # label clears the 0.5 threshold
        predicted_labels = (probabilities > 0.5).astype(int)
        if predicted_labels.sum() == 0:
            most_likely_card = "Answer"
        else:
            most_likely_card = self.decode_vector(predicted_labels.reshape(1, -1))

        result = {
            "most_likely_card": most_likely_card,
            "top_k_predictions": top_k_predictions,
        }
        return [result]

    def decode_vector(self, vector):
        # inverse_transform maps a one-hot row back to its original label
        original_label = self.encoder.inverse_transform(vector)
        return original_label[0][0]  # the label as a string

    def handle(self, data, context):
        self.context = context
        data = self.preprocess(data)
        data = self.inference(data)
        data = self.postprocess(data)
        return data
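
# ---------------------------------------------------------------------------
# Minimal local smoke test: a hedged sketch, not part of the TorchServe
# lifecycle. In production TorchServe constructs the context and invokes
# handle(); the mock below only illustrates the request shape this handler
# expects. `_MockContext` and the "." model_dir (assumed to contain
# config.json and model.pth alongside model.py) are illustrative
# assumptions, not part of the handler's contract.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class _MockContext:
        manifest = {}
        system_properties = {"model_dir": "."}  # assumed artifact location

    handler = UICardMappingHandler()
    handler.initialize(_MockContext())
    # JSON body carries the query text and an optional top-k count
    request = [{"body": {"text": "find flights from SFO to JFK", "k": 3}}]
    print(handler.handle(request, _MockContext()))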