# handler.py — custom Hugging Face Inference Endpoints handler
# Author: AndresR2909 — commit c3699f2 ("Create handler.py", verified)
from typing import Dict, List, Any
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import torch
class EndpointHandler():
    """Custom Hugging Face Inference Endpoints handler.

    Loads a causal-LM text-generation pipeline from ``path`` and uses it as a
    binary classifier for suicidal ideation in Spanish text: the model is
    prompted to answer with the single tag "1" (ideation/behaviour present)
    or "0" (absent).
    """

    def __init__(self, path: str = ""):
        """Load tokenizer and model from ``path`` and build the pipeline.

        Args:
            path: model directory / repo id supplied by the endpoint runtime.
        """
        tokenizer = AutoTokenizer.from_pretrained(path)
        model = AutoModelForCausalLM.from_pretrained(
            path,
            return_dict=True,
            low_cpu_mem_usage=True,
            torch_dtype=torch.float16,
            device_map="auto",
            trust_remote_code=True,
        )
        # Only a couple of new tokens are needed for the "0"/"1" answer.
        # NOTE(review): with the pipeline's default greedy decoding
        # (do_sample=False), `temperature` has no effect; it is kept
        # unchanged for compatibility — confirm before relying on it.
        self.pipeline = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            max_new_tokens=2,
            temperature=0.1,
            device_map="auto",
        )

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Classify one text for suicidal ideation.

        data args:
            inputs (:obj:`str`): text to classify (falls back to the whole
                payload when the key is absent).
            date (:obj:`str`): optional; accepted but currently unused.
        Return:
            A :obj:`list` with one dict containing the input, the raw
            predicted tag ("1"/"0") and a human-readable label; it will be
            serialized and returned.
        """
        # get inputs
        inputs = data.get("inputs", data)
        date = data.get("date", None)  # accepted but not used yet
        # Prompt kept byte-identical to the original (including the
        # "etiquta" typo) — it may match the fine-tuning format, so do
        # not "fix" the wording here.
        prompt = f"""Clasifica el texto con la etiquta "1" si hay ideación/comportamiento suicida y la etiqueta "0" en otro caso, retorna la respuesta como la correspondiente etiqueta.
texto: {inputs}
etiqueta: """.strip()
        # run normal prediction
        outputs = self.pipeline(prompt)
        # The pipeline echoes the prompt plus the completion; keep only the
        # text generated after the final "etiqueta: " marker.
        pred = outputs[0]["generated_text"].split("etiqueta: ")[-1].strip()
        # Anything other than an exact "1" is mapped to "no ideation".
        label = "intencion_suicida" if pred == "1" else "no_intencion_suicida"
        # "clasiffication" (sic) is preserved for existing consumers;
        # "classification" is the corrected key with the same value.
        return [
            {
                "input": inputs,
                "clasiffication": pred,
                "classification": pred,
                "label": label,
            }
        ]