# File: text-embeddings-inference-main/backends/python/server/text_embeddings_server/cli.py
import sys
import typer
from pathlib import Path
from loguru import logger
from typing import Optional
from enum import Enum
app = typer.Typer()
class Dtype(str, Enum):
    float32 = 'float32'
    float16 = 'float16'
    bfloat16 = 'bfloat16'
@app.command()
def serve(model_path: Path, dtype: Dtype = 'float32', uds_path: Path = '/tmp/text-embeddings-server', logger_level: str = 'INFO', json_output: bool = False, otlp_endpoint: Optional[str] = None, otlp_service_name: str = 'text-embeddings-inference.server'):
    logger.remove()
    logger.add(sys.stdout, format='{message}', filter='text_embeddings_server', level=logger_level, serialize=json_output, backtrace=True, diagnose=False)
    from text_embeddings_server import server
    from text_embeddings_server.utils.tracing import setup_tracing
    if otlp_endpoint is not None:
        setup_tracing(otlp_endpoint=otlp_endpoint, otlp_service_name=otlp_service_name)
    dtype = None if dtype is None else dtype.value
    server.serve(model_path, dtype, uds_path)
if __name__ == '__main__':
    app()
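# Illustrative only, not part of the repository: since the typer app has a single command,
# running this module directly invokes `serve`. Assuming the module is launched with
# `python -m` and a placeholder model path, an invocation might look like:
#   python -m text_embeddings_server.cli /data/bert-base --dtype float16 --uds-path /tmp/text-embeddings-server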
# File: text-embeddings-inference-main/backends/python/server/text_embeddings_server/models/__init__.py
import torch
from loguru import logger
from pathlib import Path
from typing import Optional
from transformers import AutoConfig
from transformers.models.bert import BertConfig
from text_embeddings_server.models.model import Model
from text_embeddings_server.models.default_model import DefaultModel
__all__ = ['Model']
torch.set_grad_enabled(False)
FLASH_ATTENTION = True
try:
    from text_embeddings_server.models.flash_bert import FlashBert
except ImportError as e:
    logger.warning(f'Could not import Flash Attention enabled models: {e}')
    FLASH_ATTENTION = False
if FLASH_ATTENTION:
    __all__.append('FlashBert')
def get_model(model_path: Path, dtype: Optional[str]):
    if dtype == 'float32':
        dtype = torch.float32
    elif dtype == 'float16':
        dtype = torch.float16
    elif dtype == 'bfloat16':
        dtype = torch.bfloat16
    else:
        raise RuntimeError(f'Unknown dtype {dtype}')
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        if dtype != torch.float32:
            raise ValueError('CPU device only supports float32 dtype')
        device = torch.device('cpu')
    config = AutoConfig.from_pretrained(model_path)
    if config.model_type == 'bert':
        config: BertConfig
        if device.type == 'cuda' and config.position_embedding_type == 'absolute' and dtype in [torch.float16, torch.bfloat16] and FLASH_ATTENTION:
            return FlashBert(model_path, device, dtype)
        else:
            return DefaultModel(model_path, device, dtype)
    raise NotImplementedError
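# Illustrative only -- a minimal sketch of how get_model selects a backend, assuming a
# local BERT checkpoint at the placeholder path /data/bert-base:
#   from pathlib import Path
#   from text_embeddings_server.models import get_model
#   model = get_model(Path('/data/bert-base'), dtype='float16')
#   # FlashBert when a CUDA device, fp16/bf16 dtype, absolute position embeddings and
#   # flash attention are all available; DefaultModel otherwise (CPU requires 'float32').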
# File: text-embeddings-inference-main/backends/python/server/text_embeddings_server/models/default_model.py
import inspect
import torch
from pathlib import Path
from typing import Type, List
from transformers import AutoModel
from opentelemetry import trace
from text_embeddings_server.models import Model
from text_embeddings_server.models.types import PaddedBatch, Embedding
tracer = trace.get_tracer(__name__)
class DefaultModel(Model):
    def __init__(self, model_path: Path, device: torch.device, dtype: torch.dtype):
        model = AutoModel.from_pretrained(model_path).to(dtype).to(device)
        self.hidden_size = model.config.hidden_size
        self.has_position_ids = inspect.signature(model.forward).parameters.get('position_ids', None) is not None
        self.has_token_type_ids = inspect.signature(model.forward).parameters.get('token_type_ids', None) is not None
        super(DefaultModel, self).__init__(model=model, dtype=dtype, device=device)
    @property
    def batch_type(self) -> Type[PaddedBatch]:
        return PaddedBatch
    @tracer.start_as_current_span('embed')
    def embed(self, batch: PaddedBatch) -> List[Embedding]:
        kwargs = {'input_ids': batch.input_ids, 'attention_mask': batch.attention_mask}
        if self.has_token_type_ids:
            kwargs['token_type_ids'] = batch.token_type_ids
        if self.has_position_ids:
            kwargs['position_ids'] = batch.position_ids
        output = self.model(**kwargs)
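        # output[0] is the last hidden state with shape [batch, seq_len, hidden];
        # keeping only the first (CLS) token of each sequence yields one embedding per input.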
        embedding = output[0][:, 0]
        cpu_results = embedding.view(-1).tolist()
        return [Embedding(values=cpu_results[i * self.hidden_size:(i + 1) * self.hidden_size]) for i in range(len(batch))]
# File: text-embeddings-inference-main/backends/python/server/text_embeddings_server/models/flash_bert.py
import torch
from pathlib import Path
from torch import nn
from typing import Type, List
from safetensors import safe_open
from transformers.activations import ACT2FN
from transformers.models.bert import BertConfig
from opentelemetry import trace
import dropout_layer_norm
from text_embeddings_server.models import Model
from text_embeddings_server.models.types import FlashBatch, Embedding
from text_embeddings_server.utils.flash_attn import attention
tracer = trace.get_tracer(__name__)
class FastLayerNorm:
    def __init__(self, prefix, handle, device, dtype, config: BertConfig):
        self.weight = handle.get_tensor(f'{prefix}.weight').to(dtype).to(device)
        self.bias = handle.get_tensor(f'{prefix}.bias').to(dtype).to(device)
        self.variance_epsilon = config.layer_norm_eps
    def forward(self, hidden_states, residual=None):
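        # dropout_add_ln_fwd is the fused dropout + residual-add + LayerNorm CUDA kernel
        # from the flash-attention dropout_layer_norm extension; dropout is disabled here
        # (p=0.0), so the call only adds the residual and normalizes.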
        (normed_hidden_states, res, *rest) = dropout_layer_norm.dropout_add_ln_fwd(hidden_states, residual, self.weight, self.bias, None, None, None, None, 0.0, self.variance_epsilon, 1.0, 0, None, False, False)
        if res is None:
            res = hidden_states
        return (normed_hidden_states, res)
class BertEmbeddings:
    def __init__(self, prefix, handle, device, dtype, config: BertConfig):
        self.word_embeddings_weight = handle.get_tensor(f'{prefix}.word_embeddings.weight').to(dtype).to(device)
        self.token_type_embeddings_weight = handle.get_tensor(f'{prefix}.token_type_embeddings.weight').to(dtype).to(device)
        if config.position_embedding_type == 'absolute':
            self.position_embeddings_weight = handle.get_tensor(f'{prefix}.position_embeddings.weight').to(dtype).to(device)
        else:
            raise NotImplementedError('FlashBert only supports absolute position embeddings')
        self.layer_norm = FastLayerNorm(f'{prefix}.LayerNorm', handle, device, dtype, config)
    def forward(self, input_ids, token_type_ids, position_ids):
        inputs_embeds = nn.functional.embedding(input_ids, self.word_embeddings_weight)
        token_type_embeds = nn.functional.embedding(token_type_ids, self.token_type_embeddings_weight)
        position_embeds = nn.functional.embedding(position_ids, self.position_embeddings_weight)
        inputs_embeds += position_embeds
        (embeddings, _) = self.layer_norm.forward(inputs_embeds, token_type_embeds)
        return embeddings
class BertAttention:
    def __init__(self, prefix, handle, device, dtype, config: BertConfig):
        query_weight = handle.get_tensor(f'{prefix}.self.query.weight')
        query_bias = handle.get_tensor(f'{prefix}.self.query.bias')
        key_weight = handle.get_tensor(f'{prefix}.self.key.weight')
        key_bias = handle.get_tensor(f'{prefix}.self.key.bias')
        value_weight = handle.get_tensor(f'{prefix}.self.value.weight')
        value_bias = handle.get_tensor(f'{prefix}.self.value.bias')
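        # Pack the Q, K and V projections into a single weight and bias so all three
        # projections can be computed with one matmul (torch.addmm) in forward.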
        self.qkv_weight = torch.cat([query_weight, key_weight, value_weight]).T.to(dtype).to(device)
        self.qkv_bias = torch.cat([query_bias, key_bias, value_bias]).to(dtype).to(device)
        self.dense_weight = handle.get_tensor(f'{prefix}.output.dense.weight').T.to(dtype).to(device)
        self.dense_bias = handle.get_tensor(f'{prefix}.output.dense.bias').to(dtype).to(device)
        self.layer_norm = FastLayerNorm(f'{prefix}.output.LayerNorm', handle, device, dtype, config)
        self.head_size = config.hidden_size // config.num_attention_heads
        self.softmax_scale = self.head_size ** (-0.5)
        self.num_heads = config.num_attention_heads
    def forward(self, hidden_states, cu_seqlens, max_s):
        residual = hidden_states
        qkv = torch.addmm(self.qkv_bias, hidden_states, self.qkv_weight)
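        # Reshape the fused projection to [total_tokens, 3 * num_heads, head_size] and
        # split it into per-head q, k, v before calling the variable-length attention kernel.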
        (q, k, v) = qkv.view(-1, self.num_heads * 3, self.head_size).split(self.num_heads, dim=1)
        attn_output = torch.empty_like(q)
        attention(q, k, v, attn_output, cu_seqlens, max_s, self.softmax_scale)
        hidden_states = torch.addmm(self.dense_bias, attn_output.view(-1, self.num_heads * self.head_size), self.dense_weight)
        (hidden_states, _) = self.layer_norm.forward(hidden_states, residual)
        return hidden_states
class BertLayer:
    def __init__(self, prefix, handle, device, dtype, config: BertConfig):
        self.attention = BertAttention(f'{prefix}.attention', handle, device, dtype, config)
        self.intermediate_weight = handle.get_tensor(f'{prefix}.intermediate.dense.weight').T.to(dtype).to(device)
        self.intermediate_bias = handle.get_tensor(f'{prefix}.intermediate.dense.bias').to(dtype).to(device)
        act = config.hidden_act
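        # For gelu variants, call torch's gelu directly: the tanh approximation for
        # 'gelu_fast' / 'gelu_pytorch_tanh', the exact form for plain 'gelu'. Every other
        # activation name is looked up in transformers' ACT2FN table.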
        self.intermediate_act_fn = ACT2FN[act] if 'gelu' not in act else lambda x: torch.nn.functional.gelu(x, approximate='tanh' if act in ['gelu_fast', 'gelu_pytorch_tanh'] else 'none')
        self.output_weight = handle.get_tensor(f'{prefix}.output.dense.weight').T.to(dtype).to(device)
        self.output_bias = handle.get_tensor(f'{prefix}.output.dense.bias').to(dtype).to(device)
        self.layer_norm = FastLayerNorm(f'{prefix}.output.LayerNorm', handle, device, dtype, config)
    def forward(self, hidden_states, cu_seqlens, max_s):
        hidden_states = self.attention.forward(hidden_states, cu_seqlens, max_s)
        residual = hidden_states
        hidden_states = torch.addmm(self.intermediate_bias, hidden_states, self.intermediate_weight)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = torch.addmm(self.output_bias, hidden_states, self.output_weight)
        (hidden_states, _) = self.layer_norm.forward(hidden_states, residual)
        return hidden_states
class BertEncoder:
    def __init__(self, prefix, handle, device, dtype, config: BertConfig):
        self.layers = [BertLayer(f'{prefix}.layer.{i}', handle, device, dtype, config) for i in range(config.num_hidden_layers)]
    def forward(self, hidden_states, cu_seqlens, max_s):
        for layer in self.layers:
            hidden_states = layer.forward(hidden_states, cu_seqlens, max_s)
        return hidden_states
class FlashBertModel:
    def __init__(self, handle, device, dtype, config: BertConfig):
        self.embeddings = BertEmbeddings('embeddings', handle, device, dtype, config)
        self.encoder = BertEncoder('encoder', handle, device, dtype, config)
    def forward(self, input_ids, token_type_ids, position_ids, cu_seqlens, max_s):
        embeddings = self.embeddings.forward(input_ids, token_type_ids, position_ids)
        encoder_outputs = self.encoder.forward(embeddings, cu_seqlens, max_s)
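        # Sequences are concatenated along a single token dimension; cu_seqlens[:-1] holds
        # the start offset of every sequence, so indexing with it gathers each sequence's
        # first (CLS) token as its embedding.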
        return encoder_outputs[cu_seqlens[:-1]]
class FlashBert(Model):
    def __init__(self, model_path: Path, device: torch.device, dtype: torch.dtype):
        config = BertConfig.from_pretrained(model_path)
        with safe_open(model_path / 'model.safetensors', framework='pt') as f:
            model = FlashBertModel(f, device, dtype, config)
        self.hidden_size = config.hidden_size
        super(FlashBert, self).__init__(model=model, dtype=dtype, device=device)
    @property
    def batch_type(self) -> Type[FlashBatch]:
        return FlashBatch
    @tracer.start_as_current_span('embed')
    def embed(self, batch: FlashBatch) -> List[Embedding]:
        embedding = self.model.forward(input_ids=batch.input_ids, token_type_ids=batch.token_type_ids, position_ids=batch.position_ids, cu_seqlens=batch.cu_seqlens, max_s=batch.max_s)
        cpu_results = embedding.view(-1).tolist()
        return [Embedding(values=cpu_results[i * self.hidden_size:(i + 1) * self.hidden_size]) for i in range(len(batch))]
# File: text-embeddings-inference-main/backends/python/server/text_embeddings_server/models/model.py
import torch
from abc import ABC, abstractmethod
from typing import List, TypeVar, Type
from text_embeddings_server.models.types import Batch, Embedding
B = TypeVar('B', bound=Batch)
class Model(ABC):
    def __init__(self, model, dtype: torch.dtype, device: torch.device):
        self.model = model
        self.dtype = dtype
        self.device = device
    @property
    @abstractmethod
    def batch_type(self) -> Type[B]:
        raise NotImplementedError
    @abstractmethod
    def embed(self, batch: B) -> List[Embedding]:
        raise NotImplementedError
# File: text-embeddings-inference-main/backends/python/server/text_embeddings_server/models/types.py
import torch
from abc import ABC, abstractmethod
from dataclasses import dataclass
from opentelemetry import trace
from text_embeddings_server.pb import embed_pb2
from text_embeddings_server.pb.embed_pb2 import Embedding
tracer = trace.get_tracer(__name__)
class Batch(ABC):
    @classmethod
    @abstractmethod
    def from_pb(cls, pb: embed_pb2.EmbedRequest, device: torch.device) -> 'Batch':
        raise NotImplementedError
    @abstractmethod
    def __len__(self):
        raise NotImplementedError
@dataclass
class PaddedBatch(Batch):
    input_ids: torch.Tensor
    token_type_ids: torch.Tensor
    position_ids: torch.Tensor
    attention_mask: torch.Tensor
    @classmethod
    @tracer.start_as_current_span('from_pb')
    def from_pb(cls, pb: embed_pb2.EmbedRequest, device: torch.device) -> 'PaddedBatch':
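        # One [4, batch, max_length] buffer holds input_ids, token_type_ids, position_ids
        # and the attention mask; the request arrives as concatenated sequences, and
        # cu_seq_lengths gives the start/end offset of each one.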
        all_tensors = torch.zeros([4, len(pb.cu_seq_lengths) - 1, pb.max_length], dtype=torch.int32)
        for (i, start_index) in enumerate(pb.cu_seq_lengths[:-1]):
            end_index = pb.cu_seq_lengths[i + 1]
            input_length = end_index - start_index
            all_tensors[0, i, :input_length] = torch.tensor(pb.input_ids[start_index:end_index], dtype=torch.int32)
            all_tensors[1, i, :input_length] = torch.tensor(pb.token_type_ids[start_index:end_index], dtype=torch.int32)
            all_tensors[2, i, :input_length] = torch.tensor(pb.position_ids[start_index:end_index], dtype=torch.int32)
            all_tensors[3, i, :input_length] = 1
        all_tensors = all_tensors.to(device)
        return PaddedBatch(input_ids=all_tensors[0], token_type_ids=all_tensors[1], position_ids=all_tensors[2], attention_mask=all_tensors[3])
    def __len__(self):
        return len(self.input_ids)
@dataclass
class FlashBatch(Batch):
    input_ids: torch.Tensor
    token_type_ids: torch.Tensor
    position_ids: torch.Tensor
    cu_seqlens: torch.Tensor
    max_s: int
    size: int
    @classmethod
    @tracer.start_as_current_span('from_pb')
    def from_pb(cls, pb: embed_pb2.EmbedRequest, device: torch.device) -> 'FlashBatch':
        if device.type != 'cuda':
            raise RuntimeError(f'FlashBatch does not support device {device}')
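        # No padding here: sequences stay concatenated in one flat token dimension and
        # cu_seq_lengths marks the cumulative boundaries, which is the layout the
        # variable-length flash attention kernel expects.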
        batch_input_ids = torch.tensor(pb.input_ids, dtype=torch.int32, device=device)
        batch_token_type_ids = torch.tensor(pb.token_type_ids, dtype=torch.int32, device=device)
        batch_position_ids = torch.tensor(pb.position_ids, dtype=torch.int32, device=device)
        cu_seqlens = torch.tensor(pb.cu_seq_lengths, dtype=torch.int32, device=device)
        return FlashBatch(input_ids=batch_input_ids, token_type_ids=batch_token_type_ids, position_ids=batch_position_ids, cu_seqlens=cu_seqlens, max_s=pb.max_length, size=len(cu_seqlens) - 1)
    def __len__(self):
        return self.size
# File: text-embeddings-inference-main/backends/python/server/text_embeddings_server/server.py
import asyncio
import torch
from grpc import aio
from loguru import logger
from grpc_reflection.v1alpha import reflection
from pathlib import Path
from typing import Optional
from text_embeddings_server.models import Model, get_model
from text_embeddings_server.pb import embed_pb2_grpc, embed_pb2
from text_embeddings_server.utils.tracing import UDSOpenTelemetryAioServerInterceptor
from text_embeddings_server.utils.interceptor import ExceptionInterceptor
class EmbeddingService(embed_pb2_grpc.EmbeddingServiceServicer):
    def __init__(self, model: Model):
        self.model = model
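        # Keep an inference-mode guard alive for the lifetime of the service so every
        # request runs without autograd tracking.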
        self._inference_mode_raii_guard = torch._C._InferenceMode(True)
    async def Health(self, request, context):
        if self.model.device.type == 'cuda':
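            # Allocating a small CUDA tensor exercises the device so the health check
            # fails if the GPU context is broken.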
            torch.zeros((2, 2), device='cuda')
        return embed_pb2.HealthResponse()
    async def Embed(self, request, context):
        batch = self.model.batch_type.from_pb(request, self.model.device)
        embeddings = self.model.embed(batch)
        return embed_pb2.EmbedResponse(embeddings=embeddings)
def serve(model_path: Path, dtype: Optional[str], uds_path: Path):
    async def serve_inner(model_path: Path, dtype: Optional[str] = None):
        unix_socket = f'unix://{uds_path}'
        try:
            model = get_model(model_path, dtype)
        except Exception:
            logger.exception('Error when initializing model')
            raise
        server = aio.server(interceptors=[ExceptionInterceptor(), UDSOpenTelemetryAioServerInterceptor()])
        embed_pb2_grpc.add_EmbeddingServiceServicer_to_server(EmbeddingService(model), server)
        SERVICE_NAMES = (embed_pb2.DESCRIPTOR.services_by_name['EmbeddingService'].full_name, reflection.SERVICE_NAME)
        reflection.enable_server_reflection(SERVICE_NAMES, server)
        server.add_insecure_port(unix_socket)
        await server.start()
        logger.info(f'Server started at {unix_socket}')
        try:
            await server.wait_for_termination()
        except KeyboardInterrupt:
            logger.info('Signal received. Shutting down')
            await server.stop(0)
    asyncio.run(serve_inner(model_path, dtype))