from typing import Any, Dict, List

from unsloth import FastLanguageModel
from unsloth.chat_templates import get_chat_template


class EndpointHandler:
    def __init__(self, path: str = ""):
        # Preload the model and tokenizer once so every request reuses them.
        max_seq_length = 2048  # Choose any; Unsloth supports RoPE scaling internally.
        dtype = None  # None for auto detection. Float16 for Tesla T4/V100, Bfloat16 for Ampere+.
        load_in_4bit = True  # Use 4-bit quantization to reduce memory usage. Can be False.

        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name=path,  # The model you used for training.
            max_seq_length=max_seq_length,
            dtype=dtype,
            load_in_4bit=load_in_4bit,
            # token=hf_token,  # Only needed for gated or private repos.
        )
        FastLanguageModel.for_inference(model)  # Enable Unsloth's fast inference path.

        # Apply the ChatML template once at startup instead of on every request.
        tokenizer = get_chat_template(
            tokenizer,
            chat_template="chatml",  # Also supports zephyr, mistral, llama, alpaca, vicuna, vicuna_old, unsloth.
            mapping={"role": "from", "content": "value", "user": "human", "assistant": "gpt"},  # ShareGPT style.
            map_eos_token=True,  # Maps <|im_end|> to </s> instead.
        )

        self.model = model
        self.tokenizer = tokenizer

    def __call__(self, data: Dict[str, Any]) -> List[str]:
        """
        Args:
            data: request payload; ``data["inputs"]`` is a list of ShareGPT-style
                messages, e.g. ``[{"from": "human", "value": "..."}]``.
        Returns:
            A list of decoded generations, which will be serialized and returned.
        """
        messages = data.get("inputs", data)  # Accept {"inputs": [...]} or a bare message list.

        inputs = self.tokenizer.apply_chat_template(
            messages,
            tokenize=True,
            add_generation_prompt=True,  # Must be set for generation.
            return_tensors="pt",
        ).to("cuda")

        # For token-by-token streaming, wrap generation with transformers.TextStreamer:
        #   from transformers import TextStreamer
        #   streamer = TextStreamer(self.tokenizer)
        #   self.model.generate(input_ids=inputs, streamer=streamer, max_new_tokens=128, use_cache=True)
        outputs = self.model.generate(input_ids=inputs, max_new_tokens=64, use_cache=True)
        return self.tokenizer.batch_decode(outputs)
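

# --- Local smoke test: a minimal sketch, not part of the Inference Endpoints contract. ---
# Assumes a CUDA GPU is available and that MODEL_PATH points to an Unsloth-finetuned model
# directory; MODEL_PATH and the sample prompt below are illustrative placeholders, not values
# taken from this repository.
if __name__ == "__main__":
    MODEL_PATH = "path/to/your/unsloth-finetuned-model"  # hypothetical path

    handler = EndpointHandler(path=MODEL_PATH)
    payload = {
        "inputs": [
            # ShareGPT-style message keys, matching the mapping passed to get_chat_template.
            {"from": "human", "value": "Continue the Fibonacci sequence: 1, 1, 2, 3, 5, 8,"},
        ]
    }
    print(handler(payload))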