# File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/async_utils.py
import functools
from typing import Any, Callable, Dict, TypeVar
import anyio
from anyio import Semaphore
from typing_extensions import ParamSpec
MAX_CONCURRENT_THREADS = 1
MAX_THREADS_GUARD = Semaphore(MAX_CONCURRENT_THREADS)
T = TypeVar('T')
P = ParamSpec('P')
async def async_handler_call(handler: Callable[P, T], body: Dict[str, Any]) -> T:
    async with MAX_THREADS_GUARD:
        return await anyio.to_thread.run_sync(functools.partial(handler, body))
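
# Usage sketch (illustrative, not part of the toolkit source): dispatch a blocking
# handler through async_handler_call from an async entry point. The dummy handler
# and payload below are assumptions for demonstration only.
if __name__ == '__main__':
    def dummy_handler(body):
        # Simulate a synchronous pipeline call.
        return {'echo': body['inputs']}

    async def _demo():
        result = await async_handler_call(dummy_handler, {'inputs': 'hello'})
        print(result)

    anyio.run(_demo)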
# File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/const.py
import os
from pathlib import Path
from huggingface_inference_toolkit.env_utils import strtobool
HF_MODEL_DIR = os.environ.get('HF_MODEL_DIR', '/opt/huggingface/model')
HF_MODEL_ID = os.environ.get('HF_MODEL_ID', None)
HF_TASK = os.environ.get('HF_TASK', None)
HF_FRAMEWORK = os.environ.get('HF_FRAMEWORK', None)
HF_REVISION = os.environ.get('HF_REVISION', None)
HF_HUB_TOKEN = os.environ.get('HF_HUB_TOKEN', None)
HF_TRUST_REMOTE_CODE = strtobool(os.environ.get('HF_TRUST_REMOTE_CODE', '0'))
HF_DEFAULT_PIPELINE_NAME = os.environ.get('HF_DEFAULT_PIPELINE_NAME', 'handler.py')
HF_MODULE_NAME = os.environ.get('HF_MODULE_NAME', f'{Path(HF_DEFAULT_PIPELINE_NAME).stem}.EndpointHandler')
# File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/diffusers_utils.py
import importlib.util
from typing import Union
from transformers.utils.import_utils import is_torch_bf16_gpu_available
from huggingface_inference_toolkit.logging import logger
_diffusers = importlib.util.find_spec('diffusers') is not None
def is_diffusers_available():
    return _diffusers


if is_diffusers_available():
    import torch
    from diffusers import AutoPipelineForText2Image, DPMSolverMultistepScheduler, StableDiffusionPipeline


class IEAutoPipelineForText2Image:
    def __init__(self, model_dir: str, device: Union[str, None] = None, **kwargs):
        dtype = torch.float32
        if device == 'cuda':
            dtype = torch.bfloat16 if is_torch_bf16_gpu_available() else torch.float16
        device_map = 'balanced' if device == 'cuda' else None
        self.pipeline = AutoPipelineForText2Image.from_pretrained(model_dir, torch_dtype=dtype, device_map=device_map, **kwargs)
        # Prefer the DPM-Solver multistep scheduler for Stable Diffusion checkpoints when possible.
        if isinstance(self.pipeline, StableDiffusionPipeline):
            try:
                self.pipeline.scheduler = DPMSolverMultistepScheduler.from_config(self.pipeline.scheduler.config)
            except Exception:
                pass

    def __call__(self, prompt, **kwargs):
        if 'num_images_per_prompt' in kwargs:
            kwargs.pop('num_images_per_prompt')
            logger.warning('Sending num_images_per_prompt > 1 to pipeline is not supported. Using default value 1.')
        out = self.pipeline(prompt, num_images_per_prompt=1, **kwargs)
        return out.images[0]
DIFFUSERS_TASKS = {'text-to-image': IEAutoPipelineForText2Image}
def get_diffusers_pipeline(task=None, model_dir=None, device=-1, **kwargs):
    device = 'cuda' if device == 0 else 'cpu'
    pipeline = DIFFUSERS_TASKS[task](model_dir=model_dir, device=device, **kwargs)
    return pipeline
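
# Usage sketch (illustrative): build the text-to-image wrapper against a local
# diffusers checkpoint and save one generated image. The model directory path is
# an assumption; any diffusers-compatible text-to-image model works.
if __name__ == '__main__':
    pipe = get_diffusers_pipeline(task='text-to-image', model_dir='/opt/huggingface/model', device=-1)
    image = pipe('a watercolor painting of a lighthouse at dawn')
    image.save('sample.png')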
# File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/env_utils.py
def strtobool(val: str) -> bool:
    val = val.lower()
    if val in ('y', 'yes', 't', 'true', 'on', '1'):
        return True
    if val in ('n', 'no', 'f', 'false', 'off', '0'):
        return False
    raise ValueError(f"Invalid truth value {val!r}; expected one of 'y'/'yes'/'t'/'true'/'on'/'1' or 'n'/'no'/'f'/'false'/'off'/'0'.")
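
# Usage sketch (illustrative): strtobool mirrors the behaviour of the removed
# distutils.util.strtobool but returns a real bool instead of 1/0.
if __name__ == '__main__':
    assert strtobool('YES') is True
    assert strtobool('off') is False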
# File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/handler.py
import os
from pathlib import Path
from typing import Optional, Union
from huggingface_inference_toolkit.const import HF_TRUST_REMOTE_CODE
from huggingface_inference_toolkit.utils import check_and_register_custom_pipeline_from_directory, get_pipeline
class HuggingFaceHandler:
    def __init__(self, model_dir: Union[str, Path], task=None, framework='pt'):
        self.pipeline = get_pipeline(model_dir=model_dir, task=task, framework=framework, trust_remote_code=HF_TRUST_REMOTE_CODE)

    def __call__(self, data):
        inputs = data.pop('inputs', data)
        parameters = data.pop('parameters', None)
        if parameters is not None:
            prediction = self.pipeline(inputs, **parameters)
        else:
            prediction = self.pipeline(inputs)
        return prediction


class VertexAIHandler(HuggingFaceHandler):
    def __init__(self, model_dir: Union[str, Path], task=None, framework='pt'):
        super().__init__(model_dir, task, framework)

    def __call__(self, data):
        if 'instances' not in data:
            raise ValueError("The request body must contain a key 'instances' with a list of instances.")
        parameters = data.pop('parameters', None)
        predictions = []
        for inputs in data['instances']:
            payload = {'inputs': inputs, 'parameters': parameters}
            predictions.append(super().__call__(payload))
        return {'predictions': predictions}


def get_inference_handler_either_custom_or_default_handler(model_dir: Path, task: Optional[str] = None):
    custom_pipeline = check_and_register_custom_pipeline_from_directory(model_dir)
    if custom_pipeline:
        return custom_pipeline
    elif os.environ.get('AIP_MODE', None) == 'PREDICTION':
        return VertexAIHandler(model_dir=model_dir, task=task)
    else:
        return HuggingFaceHandler(model_dir=model_dir, task=task)
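
# Usage sketch (illustrative): resolve a handler for a local model directory and
# run a single prediction. The directory, task, and parameters are assumptions;
# the payload shape matches what HuggingFaceHandler.__call__ expects.
if __name__ == '__main__':
    handler = get_inference_handler_either_custom_or_default_handler(
        Path('/opt/huggingface/model'), task='text-classification'
    )
    print(handler({'inputs': 'This toolkit is easy to use.', 'parameters': {'top_k': 2}}))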
# File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/logging.py
import logging
import sys
def setup_logging():
    # Remove all handlers attached to the root logger so that basicConfig takes effect.
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S', stream=sys.stdout)
    logging.getLogger('uvicorn').handlers.clear()
    logging.getLogger('uvicorn.access').handlers.clear()
    logging.getLogger('uvicorn.error').handlers.clear()
    logger = logging.getLogger('huggingface_inference_toolkit')
    return logger
logger = setup_logging()
# File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/optimum_utils.py
import importlib.util
import os
from huggingface_inference_toolkit.logging import logger
_optimum_neuron = False
if importlib.util.find_spec('optimum') is not None:
    if importlib.util.find_spec('optimum.neuron') is not None:
        _optimum_neuron = True


def is_optimum_neuron_available():
    return _optimum_neuron
def get_input_shapes(model_dir):
    from transformers import AutoConfig

    input_shapes = {}
    input_shapes_available = False
    try:
        # Try to read static input shapes from the model's Neuron config.
        config = AutoConfig.from_pretrained(model_dir)
        if hasattr(config, 'neuron'):
            if config.neuron.get('static_batch_size', None) and config.neuron.get('static_sequence_length', None):
                input_shapes['batch_size'] = config.neuron['static_batch_size']
                input_shapes['sequence_length'] = config.neuron['static_sequence_length']
                input_shapes_available = True
                logger.info(f"Input shapes found in config file. Using input shapes from config with batch size {input_shapes['batch_size']} and sequence length {input_shapes['sequence_length']}")
            else:
                if os.environ.get('HF_OPTIMUM_BATCH_SIZE', None) is not None:
                    logger.warning('HF_OPTIMUM_BATCH_SIZE environment variable is set. Environment variable will be ignored and input shapes from config file will be used.')
                if os.environ.get('HF_OPTIMUM_SEQUENCE_LENGTH', None) is not None:
                    logger.warning('HF_OPTIMUM_SEQUENCE_LENGTH environment variable is set. Environment variable will be ignored and input shapes from config file will be used.')
    except Exception:
        input_shapes_available = False
    if input_shapes_available:
        return input_shapes
    # Fall back to input shapes provided via environment variables.
    sequence_length = os.environ.get('HF_OPTIMUM_SEQUENCE_LENGTH', None)
    if sequence_length is None:
        raise ValueError('HF_OPTIMUM_SEQUENCE_LENGTH environment variable is not set. Please set HF_OPTIMUM_SEQUENCE_LENGTH to a positive integer.')
    if not int(sequence_length) > 0:
        raise ValueError(f'HF_OPTIMUM_SEQUENCE_LENGTH must be set to a positive integer. Current value is {sequence_length}')
    batch_size = os.environ.get('HF_OPTIMUM_BATCH_SIZE', 1)
    logger.info(f'Using input shapes from environment variables with batch size {batch_size} and sequence length {sequence_length}')
    return {'batch_size': int(batch_size), 'sequence_length': int(sequence_length)}
def get_optimum_neuron_pipeline(task, model_dir):
    logger.info('Getting optimum neuron pipeline.')
    from optimum.neuron.pipelines.transformers.base import NEURONX_SUPPORTED_TASKS, pipeline
    from optimum.neuron.utils import NEURON_FILE_NAME

    if not isinstance(model_dir, str):
        model_dir = str(model_dir)
    if task == 'sentence-embeddings':
        task = 'feature-extraction'
    if task not in NEURONX_SUPPORTED_TASKS:
        raise ValueError(f'Task {task} is not supported by optimum neuron and inf2. Supported tasks are: {list(NEURONX_SUPPORTED_TASKS.keys())}')
    # Export the model at load time unless a compiled Neuron artifact is already present.
    export = True
    if NEURON_FILE_NAME in os.listdir(model_dir):
        export = False
    if export:
        logger.info('Model is not converted. Checking if required environment variables are set and converting model.')
    input_shapes = get_input_shapes(model_dir)
    neuron_pipe = pipeline(task, model=model_dir, export=export, input_shapes=input_shapes)
    return neuron_pipe
# File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/sentence_transformers_utils.py
import importlib.util
_sentence_transformers = importlib.util.find_spec('sentence_transformers') is not None
def is_sentence_transformers_available():
    return _sentence_transformers


if is_sentence_transformers_available():
    from sentence_transformers import CrossEncoder, SentenceTransformer, util


class SentenceSimilarityPipeline:
    def __init__(self, model_dir: str, device: str = None, **kwargs):
        self.model = SentenceTransformer(model_dir, device=device, **kwargs)

    def __call__(self, inputs=None):
        embeddings1 = self.model.encode(inputs['source_sentence'], convert_to_tensor=True)
        embeddings2 = self.model.encode(inputs['sentences'], convert_to_tensor=True)
        similarities = util.pytorch_cos_sim(embeddings1, embeddings2).tolist()[0]
        return {'similarities': similarities}


class SentenceEmbeddingPipeline:
    def __init__(self, model_dir: str, device: str = None, **kwargs):
        self.model = SentenceTransformer(model_dir, device=device, **kwargs)

    def __call__(self, inputs):
        embeddings = self.model.encode(inputs).tolist()
        return {'embeddings': embeddings}


class RankingPipeline:
    def __init__(self, model_dir: str, device: str = None, **kwargs):
        self.model = CrossEncoder(model_dir, device=device, **kwargs)

    def __call__(self, inputs):
        scores = self.model.predict(inputs).tolist()
        return {'scores': scores}
SENTENCE_TRANSFORMERS_TASKS = {'sentence-similarity': SentenceSimilarityPipeline, 'sentence-embeddings': SentenceEmbeddingPipeline, 'sentence-ranking': RankingPipeline}
def get_sentence_transformers_pipeline(task=None, model_dir=None, device=-1, **kwargs):
    device = 'cuda' if device == 0 else 'cpu'
    kwargs.pop('tokenizer', None)
    kwargs.pop('framework', None)
    if task not in SENTENCE_TRANSFORMERS_TASKS:
        raise ValueError(f"Unknown task {task}. Available tasks are: {', '.join(SENTENCE_TRANSFORMERS_TASKS.keys())}")
    return SENTENCE_TRANSFORMERS_TASKS[task](model_dir=model_dir, device=device, **kwargs)
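
# Usage sketch (illustrative): load a sentence-embeddings pipeline from a local
# SentenceTransformer checkpoint and embed two sentences. The model directory
# path is an assumption.
if __name__ == '__main__':
    embedder = get_sentence_transformers_pipeline(
        task='sentence-embeddings', model_dir='/opt/huggingface/model', device=-1
    )
    output = embedder(['The weather is lovely.', 'It is sunny outside.'])
    print(len(output['embeddings']), 'embeddings returned')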
# File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/serialization/base.py
from huggingface_inference_toolkit.serialization.audio_utils import Audioer
from huggingface_inference_toolkit.serialization.image_utils import Imager
from huggingface_inference_toolkit.serialization.json_utils import Jsoner
content_type_mapping = {
    'application/json': Jsoner,
    'application/json; charset=UTF-8': Jsoner,
    'text/csv': None,
    'text/plain': None,
    'image/png': Imager,
    'image/jpeg': Imager,
    'image/jpg': Imager,
    'image/tiff': Imager,
    'image/bmp': Imager,
    'image/gif': Imager,
    'image/webp': Imager,
    'image/x-image': Imager,
    'audio/x-flac': Audioer,
    'audio/flac': Audioer,
    'audio/mpeg': Audioer,
    'audio/x-mpeg-3': Audioer,
    'audio/wave': Audioer,
    'audio/wav': Audioer,
    'audio/x-wav': Audioer,
    'audio/ogg': Audioer,
    'audio/x-audio': Audioer,
    'audio/webm': Audioer,
    'audio/webm;codecs=opus': Audioer,
    'audio/AMR': Audioer,
    'audio/amr': Audioer,
    'audio/AMR-WB': Audioer,
    'audio/AMR-WB+': Audioer,
    'audio/m4a': Audioer,
    'audio/x-m4a': Audioer,
}
class ContentType:
    @staticmethod
    def get_deserializer(content_type):
        if content_type in content_type_mapping:
            return content_type_mapping[content_type]
        else:
            message = f'''
            Content type "{content_type}" not supported.
            Supported content types are:
            {', '.join(list(content_type_mapping.keys()))}
            '''
            raise Exception(message)

    @staticmethod
    def get_serializer(accept):
        if accept in content_type_mapping:
            return content_type_mapping[accept]
        else:
            message = f'''
            Accept type "{accept}" not supported.
            Supported accept types are:
            {', '.join(list(content_type_mapping.keys()))}
            '''
            raise Exception(message)
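
# Usage sketch (illustrative): pick a deserializer from an incoming Content-Type
# header and a serializer from an Accept header; both resolve to Jsoner here.
if __name__ == '__main__':
    deserializer = ContentType.get_deserializer('application/json')
    payload = deserializer.deserialize(b'{"inputs": "hello"}')
    serializer = ContentType.get_serializer('application/json')
    print(serializer.serialize(payload, 'application/json'))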
# File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/serialization/image_utils.py
from io import BytesIO
from PIL import Image
class Imager:
    @staticmethod
    def deserialize(body):
        image = Image.open(BytesIO(body)).convert('RGB')
        return {'inputs': image}

    @staticmethod
    def serialize(image, accept=None):
        if isinstance(image, Image.Image):
            img_byte_arr = BytesIO()
            image.save(img_byte_arr, format=accept.split('/')[-1].upper())
            img_byte_arr = img_byte_arr.getvalue()
            return img_byte_arr
        else:
            raise ValueError(f'Can only serialize PIL.Image.Image, got {type(image)}')
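
# Usage sketch (illustrative): round-trip a small in-memory PNG through the
# Imager helpers.
if __name__ == '__main__':
    buffer = BytesIO()
    Image.new('RGB', (8, 8), color='red').save(buffer, format='PNG')
    decoded = Imager.deserialize(buffer.getvalue())['inputs']
    png_bytes = Imager.serialize(decoded, accept='image/png')
    print(f'Serialized {len(png_bytes)} bytes')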
# File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/serialization/json_utils.py
import base64
from io import BytesIO
import orjson
from PIL import Image
def default(obj):
    # orjson fallback: encode PIL images as base64 PNG strings.
    if isinstance(obj, Image.Image):
        with BytesIO() as out:
            obj.save(out, format='PNG')
            png_string = out.getvalue()
            return base64.b64encode(png_string).decode('utf-8')
    raise TypeError


class Jsoner:
    @staticmethod
    def deserialize(body):
        return orjson.loads(body)

    @staticmethod
    def serialize(body, accept=None):
        return orjson.dumps(body, option=orjson.OPT_SERIALIZE_NUMPY, default=default)
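
# Usage sketch (illustrative): Jsoner handles plain JSON plus numpy arrays (via
# orjson's OPT_SERIALIZE_NUMPY) and PIL images, which the default() hook above
# encodes as base64 PNG strings.
if __name__ == '__main__':
    body = Jsoner.deserialize(b'{"inputs": "hello"}')
    response = {'echo': body['inputs'], 'preview': Image.new('RGB', (4, 4))}
    print(Jsoner.serialize(response)[:80])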
# File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/utils.py
import importlib.util
import sys
from pathlib import Path
from typing import Optional, Union
from huggingface_hub import HfApi, login, snapshot_download
from transformers import WhisperForConditionalGeneration, pipeline
from transformers.file_utils import is_tf_available, is_torch_available
from transformers.pipelines import Pipeline
from huggingface_inference_toolkit.const import HF_DEFAULT_PIPELINE_NAME, HF_MODULE_NAME
from huggingface_inference_toolkit.diffusers_utils import get_diffusers_pipeline, is_diffusers_available
from huggingface_inference_toolkit.logging import logger
from huggingface_inference_toolkit.optimum_utils import get_optimum_neuron_pipeline, is_optimum_neuron_available
from huggingface_inference_toolkit.sentence_transformers_utils import get_sentence_transformers_pipeline, is_sentence_transformers_available
if is_tf_available():
    import tensorflow as tf
if is_torch_available():
    import torch
_optimum_available = importlib.util.find_spec('optimum') is not None


def is_optimum_available():
    # NOTE: always returns False even though _optimum_available is computed above.
    return False
framework2weight = {
    'pytorch': 'pytorch*',
    'tensorflow': 'tf*',
    'tf': 'tf*',
    'pt': 'pytorch*',
    'flax': 'flax*',
    'rust': 'rust*',
    'onnx': '*onnx*',
    'safetensors': '*safetensors',
    'coreml': '*mlmodel',
    'tflite': '*tflite',
    'savedmodel': '*tar.gz',
    'openvino': '*openvino*',
    'ckpt': '*ckpt',
}
def create_artifact_filter(framework):
    ignore_regex_list = list(set(framework2weight.values()))
    pattern = framework2weight.get(framework, None)
    if pattern in ignore_regex_list:
        ignore_regex_list.remove(pattern)
        return ignore_regex_list
    else:
        return []
def _is_gpu_available():
    if is_tf_available():
        return True if len(tf.config.list_physical_devices('GPU')) > 0 else False
    elif is_torch_available():
        return torch.cuda.is_available()
    else:
        raise RuntimeError('At least one of TensorFlow 2.0 or PyTorch should be installed. To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ To install PyTorch, read the instructions at https://pytorch.org/.')


def _get_framework():
    if is_torch_available():
        return 'pytorch'
    elif is_tf_available():
        return 'tensorflow'
    else:
        raise RuntimeError('At least one of TensorFlow 2.0 or PyTorch should be installed. To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ To install PyTorch, read the instructions at https://pytorch.org/.')
def _load_repository_from_hf(repository_id: Optional[str] = None, target_dir: Optional[Union[str, Path]] = None, framework: Optional[str] = None, revision: Optional[str] = None, hf_hub_token: Optional[str] = None):
    if hf_hub_token is not None:
        login(token=hf_hub_token)
    if framework is None:
        framework = _get_framework()
    if isinstance(target_dir, str):
        target_dir = Path(target_dir)
    if not target_dir.exists():
        target_dir.mkdir(parents=True)
    # Prefer safetensors weights when the repository provides them.
    if framework == 'pytorch':
        files = HfApi().model_info(repository_id).siblings
        if any((f.rfilename.endswith('safetensors') for f in files)):
            framework = 'safetensors'
    ignore_regex = create_artifact_filter(framework)
    logger.info(f"Ignore regex pattern for files, which are not downloaded: {', '.join(ignore_regex)}")
    snapshot_download(repo_id=repository_id, revision=revision, local_dir=str(target_dir), local_dir_use_symlinks=False, ignore_patterns=ignore_regex)
    return target_dir
def check_and_register_custom_pipeline_from_directory(model_dir):
    custom_module = Path(model_dir).joinpath(HF_DEFAULT_PIPELINE_NAME)
    legacy_module = Path(model_dir).joinpath('pipeline.py')
    # Default when no handler module can be loaded.
    custom_pipeline = None
    if custom_module.is_file():
        logger.info(f'Found custom pipeline at {custom_module}')
        spec = importlib.util.spec_from_file_location(HF_MODULE_NAME, custom_module)
        if spec:
            # Add the model directory to the path so the handler can import sibling modules.
            sys.path.insert(0, model_dir)
            handler = importlib.util.module_from_spec(spec)
            sys.modules[HF_MODULE_NAME] = handler
            spec.loader.exec_module(handler)
            custom_pipeline = handler.EndpointHandler(model_dir)
    elif legacy_module.is_file():
        logger.warning('You are using a legacy custom pipeline.\n Please update to the new format.\n See documentation for more information.')
        spec = importlib.util.spec_from_file_location('pipeline.PreTrainedPipeline', legacy_module)
        if spec:
            sys.path.insert(0, model_dir)
            pipeline = importlib.util.module_from_spec(spec)
            sys.modules['pipeline.PreTrainedPipeline'] = pipeline
            spec.loader.exec_module(pipeline)
            custom_pipeline = pipeline.PreTrainedPipeline(model_dir)
    else:
        logger.info(f'No custom pipeline found at {custom_module}')
    return custom_pipeline
def get_device():
    gpu = _is_gpu_available()
    if gpu:
        return 0
    else:
        return -1
def get_pipeline(task: str, model_dir: Path, **kwargs) -> Pipeline:
    device = get_device()
    if is_optimum_neuron_available():
        logger.info('Using device Neuron')
    else:
        logger.info(f"Using device {('GPU' if device == 0 else 'CPU')}")
    if task is None:
        raise EnvironmentError('The task for this model is not set: Please set one: https://huggingface.co/docs#how-is-a-models-type-of-inference-api-and-widget-determined')
    # Some tasks need the feature extractor or tokenizer loaded explicitly from the model directory.
    if task in {'automatic-speech-recognition', 'image-segmentation', 'image-classification', 'audio-classification', 'object-detection', 'zero-shot-image-classification'}:
        kwargs['feature_extractor'] = model_dir
    elif task in {'image-to-text', 'text-to-image'}:
        pass
    elif task == 'conversational':
        task = 'text-generation'
    else:
        kwargs['tokenizer'] = model_dir
    if is_optimum_neuron_available():
        hf_pipeline = get_optimum_neuron_pipeline(task=task, model_dir=model_dir)
    elif is_sentence_transformers_available() and task in ['sentence-similarity', 'sentence-embeddings', 'sentence-ranking']:
        hf_pipeline = get_sentence_transformers_pipeline(task=task, model_dir=model_dir, device=device, **kwargs)
    elif is_diffusers_available() and task == 'text-to-image':
        hf_pipeline = get_diffusers_pipeline(task=task, model_dir=model_dir, device=device, **kwargs)
    else:
        hf_pipeline = pipeline(task=task, model=model_dir, device=device, **kwargs)
    if task == 'automatic-speech-recognition' and isinstance(hf_pipeline.model, WhisperForConditionalGeneration):
        # Whisper: enable long-form chunking and force English transcription prompts.
        hf_pipeline._preprocess_params['chunk_length_s'] = 30
        hf_pipeline.model.config.forced_decoder_ids = hf_pipeline.tokenizer.get_decoder_prompt_ids(language='english', task='transcribe')
    return hf_pipeline


def convert_params_to_int_or_bool(params):
    for (k, v) in params.items():
        if v.isnumeric():
            params[k] = int(v)
        if v == 'false':
            params[k] = False
        if v == 'true':
            params[k] = True
    return params
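
# Usage sketch (illustrative): download a model from the Hub into a temporary
# directory and build a pipeline for it. The repository id and task are
# assumptions; any public model with a supported task works.
if __name__ == '__main__':
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        model_dir = _load_repository_from_hf(
            repository_id='distilbert-base-uncased-finetuned-sst-2-english',
            target_dir=tmp_dir,
            framework='pytorch',
        )
        classifier = get_pipeline(task='text-classification', model_dir=str(model_dir))
        print(classifier('The inference toolkit works nicely.'))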
# File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/vertex_ai_utils.py
import re
from pathlib import Path
from typing import Union
from huggingface_inference_toolkit.logging import logger
GCS_URI_PREFIX = 'gs://'
def _load_repository_from_gcs(artifact_uri: str, target_dir: Union[str, Path] = '/tmp') -> str:
    from google.cloud import storage

    logger.info(f'Loading model artifacts from {artifact_uri} to {target_dir}')
    if isinstance(target_dir, str):
        target_dir = Path(target_dir)
    if artifact_uri.startswith(GCS_URI_PREFIX):
        matches = re.match(f'{GCS_URI_PREFIX}(.*?)/(.*)', artifact_uri)
        (bucket_name, prefix) = matches.groups()
        gcs_client = storage.Client()
        blobs = gcs_client.list_blobs(bucket_name, prefix=prefix)
        for blob in blobs:
            name_without_prefix = blob.name[len(prefix):]
            name_without_prefix = name_without_prefix[1:] if name_without_prefix.startswith('/') else name_without_prefix
            file_split = name_without_prefix.split('/')
            directory = target_dir / Path(*file_split[0:-1])
            directory.mkdir(parents=True, exist_ok=True)
            if name_without_prefix and (not name_without_prefix.endswith('/')):
                blob.download_to_filename(target_dir / name_without_prefix)
    return str(target_dir.absolute())
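
# Usage sketch (illustrative): mirror a Vertex AI artifact bucket into a local
# directory. Requires google-cloud-storage credentials; the bucket URI below is
# a placeholder, not a real location.
if __name__ == '__main__':
    local_path = _load_repository_from_gcs('gs://my-bucket/models/my-model', target_dir='/tmp/model')
    print(f'Artifacts downloaded to {local_path}')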
# File: huggingface-inference-toolkit-main/src/huggingface_inference_toolkit/webservice_starlette.py
import os
from pathlib import Path
from time import perf_counter
import orjson
from starlette.applications import Starlette
from starlette.responses import PlainTextResponse, Response
from starlette.routing import Route
from huggingface_inference_toolkit.async_utils import async_handler_call
from huggingface_inference_toolkit.const import HF_FRAMEWORK, HF_HUB_TOKEN, HF_MODEL_DIR, HF_MODEL_ID, HF_REVISION, HF_TASK
from huggingface_inference_toolkit.handler import get_inference_handler_either_custom_or_default_handler
from huggingface_inference_toolkit.logging import logger
from huggingface_inference_toolkit.serialization.base import ContentType
from huggingface_inference_toolkit.serialization.json_utils import Jsoner
from huggingface_inference_toolkit.utils import _load_repository_from_hf, convert_params_to_int_or_bool
from huggingface_inference_toolkit.vertex_ai_utils import _load_repository_from_gcs
async def prepare_model_artifacts():
    global inference_handler
    # 1. Check if model artifacts are already available in HF_MODEL_DIR; otherwise download them.
    if len(list(Path(HF_MODEL_DIR).glob('**/*'))) <= 0:
        if HF_MODEL_ID is not None:
            _load_repository_from_hf(repository_id=HF_MODEL_ID, target_dir=HF_MODEL_DIR, framework=HF_FRAMEWORK, revision=HF_REVISION, hf_hub_token=HF_HUB_TOKEN)
        elif len(os.environ.get('AIP_STORAGE_URI', '')) > 0:
            _load_repository_from_gcs(os.environ['AIP_STORAGE_URI'], target_dir=HF_MODEL_DIR)
        else:
            raise ValueError(f"Can't initialize model.\n Please set env HF_MODEL_DIR or provide a HF_MODEL_ID.\n Provided values are:\n HF_MODEL_DIR: {HF_MODEL_DIR} and HF_MODEL_ID: {HF_MODEL_ID}")
    # 2. Determine the correct inference handler.
    logger.info(f'Initializing model from directory: {HF_MODEL_DIR}')
    inference_handler = get_inference_handler_either_custom_or_default_handler(HF_MODEL_DIR, task=HF_TASK)
    logger.info('Model initialized successfully')
async def health(request):
    return PlainTextResponse('Ok')


async def predict(request):
    try:
        content_type = request.headers.get('content-Type', None)
        deserialized_body = ContentType.get_deserializer(content_type).deserialize(await request.body())
        if 'inputs' not in deserialized_body and 'instances' not in deserialized_body:
            raise ValueError(f"Body needs to provide an 'inputs' key, received: {orjson.dumps(deserialized_body)}")
        # Query parameters are merged in as pipeline parameters unless the body already provides them.
        if request.query_params and 'parameters' not in deserialized_body:
            deserialized_body['parameters'] = convert_params_to_int_or_bool(dict(request.query_params))
        start_time = perf_counter()
        pred = await async_handler_call(inference_handler, deserialized_body)
        logger.info(f'POST {request.url.path} | Duration: {(perf_counter() - start_time) * 1000:.2f} ms')
        # Serialize the response according to the Accept header, defaulting to JSON.
        accept = request.headers.get('accept', None)
        if accept is None or accept == '*/*':
            accept = 'application/json'
        serialized_response_body = ContentType.get_serializer(accept).serialize(pred, accept)
        return Response(serialized_response_body, media_type=accept)
    except Exception as e:
        logger.error(e)
        return Response(Jsoner.serialize({'error': str(e)}), status_code=400, media_type='application/json')
if os.getenv('AIP_MODE', None) == 'PREDICTION':
    logger.info('Running in Vertex AI environment')
    _predict_route = os.getenv('AIP_PREDICT_ROUTE', None)
    _health_route = os.getenv('AIP_HEALTH_ROUTE', None)
    if _predict_route is None or _health_route is None:
        raise ValueError('AIP_PREDICT_ROUTE and AIP_HEALTH_ROUTE need to be set in Vertex AI environment')
    app = Starlette(
        debug=False,
        routes=[
            Route(_health_route, health, methods=['GET']),
            Route(_predict_route, predict, methods=['POST']),
        ],
        on_startup=[prepare_model_artifacts],
    )
else:
    app = Starlette(
        debug=False,
        routes=[
            Route('/', health, methods=['GET']),
            Route('/health', health, methods=['GET']),
            Route('/', predict, methods=['POST']),
            Route('/predict', predict, methods=['POST']),
        ],
        on_startup=[prepare_model_artifacts],
    )
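
# Usage sketch (illustrative): serve the app locally with uvicorn, assuming
# HF_MODEL_ID (or a populated HF_MODEL_DIR) and HF_TASK are set in the
# environment before this module is imported. Once the startup hook has loaded
# the model, POST a JSON body like {"inputs": "..."} to /predict.
if __name__ == '__main__':
    import uvicorn

    uvicorn.run(app, host='0.0.0.0', port=5000)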