""" |
|
Processor class for Phi3-V. |
|
""" |
|
import re
from typing import List, Optional, Union

import torch

import transformers
from transformers.feature_extraction_utils import BatchFeature
from transformers.image_utils import ImageInput
from transformers.processing_utils import ProcessorMixin
from transformers.tokenization_utils_base import PaddingStrategy, TextInput, TruncationStrategy
from transformers.utils import TensorType

from .image_processing_phi3_v import Phi3VImageProcessor

# Expose the custom image processor on the top-level `transformers` namespace so that
# `ProcessorMixin` can resolve the "Phi3VImageProcessor" class name declared below.
transformers.Phi3VImageProcessor = Phi3VImageProcessor


class Phi3VProcessor(ProcessorMixin):
    r"""
    Constructs a Phi3-V processor which wraps a Phi3-V image processor and a LLaMA tokenizer into a single
    processor.

    [`Phi3VProcessor`] offers all the functionalities of [`Phi3VImageProcessor`] and [`LlamaTokenizerFast`]. See
    [`~Phi3VProcessor.__call__`] and [`~Phi3VProcessor.decode`] for more information.

    Args:
        image_processor ([`Phi3VImageProcessor`]):
            The image processor is a required input.
        tokenizer ([`LlamaTokenizerFast`]):
            The tokenizer is a required input.
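
    Example (illustrative sketch; assumes the `microsoft/Phi-3-vision-128k-instruct` checkpoint, which ships
    this processor as remote code, and an `example.jpg` image on disk):

    ```python
    >>> from PIL import Image
    >>> from transformers import AutoProcessor

    >>> processor = AutoProcessor.from_pretrained(
    ...     "microsoft/Phi-3-vision-128k-instruct", trust_remote_code=True
    ... )
    >>> image = Image.open("example.jpg")
    >>> inputs = processor("<|image_1|> What is shown in this image?", images=image)
    >>> sorted(inputs.keys())
    ['attention_mask', 'image_sizes', 'input_ids', 'pixel_values']
    ```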
    """

attributes = ["image_processor", "tokenizer"] |
|
image_processor_class = "Phi3VImageProcessor" |
|
tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast") |
|
special_image_token = "<|image|>" |
|
|
|
def __init__(self, image_processor, tokenizer): |
|
self.image_processor = image_processor |
|
self.tokenizer = tokenizer |
|
self.num_img_tokens = image_processor.num_img_tokens |
|
self.img_tokens = [f"<|image_{i+1}|>" for i in range(1000000)] |
|
|
|
    def __call__(
        self,
        text: Union[TextInput, List[TextInput]],
        images: Optional[ImageInput] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Optional[Union[bool, str, TruncationStrategy]] = None,
        max_length=None,
        return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
    ) -> BatchFeature:
""" |
|
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` |
|
and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode |
|
the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to |
|
Phi3ImageProcessor's [`~Phi3ImageProcessor.__call__`] if `images` is not `None`. Please refer to the doctsring |
|
of the above two methods for more information. |
|
|
|
Args: |
|
text (`str`, `List[str]`, `List[List[str]]`): |
|
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings |
|
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set |
|
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences). |
|
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): |
|
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch |
|
tensor. Both channels-first and channels-last formats are supported. |
|
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): |
|
Select a strategy to pad the returned sequences (according to the model's padding side and padding |
|
index) among: |
|
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single |
|
sequence if provided). |
|
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum |
|
acceptable input length for the model if that argument is not provided. |
|
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different |
|
lengths). |
|
max_length (`int`, *optional*): |
|
Maximum length of the returned list and optionally padding length (see above). |
|
truncation (`bool`, *optional*): |
|
Activates truncation to cut input sequences longer than `max_length` to `max_length`. |
|
return_tensors (`str` or [`~utils.TensorType`], *optional*): |
|
If set, will return tensors of a particular framework. Acceptable values are: |
|
|
|
- `'tf'`: Return TensorFlow `tf.constant` objects. |
|
- `'pt'`: Return PyTorch `torch.Tensor` objects. |
|
- `'np'`: Return NumPy `np.ndarray` objects. |
|
- `'jax'`: Return JAX `jnp.ndarray` objects. |
|
|
|
Returns: |
|
[`BatchFeature`]: A [`BatchFeature`] with the following fields: |
|
|
|
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. |
|
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when |
|
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not |
|
`None`). |
|
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. |
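
        Example (illustrative; `processor` and `image` as in the class-level example):

        ```python
        >>> prompt = "<|image_1|> Describe the image."
        >>> inputs = processor(prompt, images=[image])  # returns PyTorch tensors by default
        >>> inputs["input_ids"].shape[0]  # batch dimension
        1
        ```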
        """
        if images is not None:
            image_inputs = self.image_processor(images, return_tensors=return_tensors)
        else:
            image_inputs = {}
        inputs = self._convert_images_texts_to_inputs(
            image_inputs,
            text,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            return_tensors=return_tensors,
        )
        return inputs

    def calc_num_image_tokens(self, images: ImageInput):
        """
        Calculate the number of image tokens for each image.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to
                255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
        """
        return self.image_processor.calc_num_image_tokens(images)

    def calc_num_image_tokens_from_image_size(self, width, height):
        """
        Calculate the number of image tokens for an image with the given width and height.

        Args:
            width (`int`):
                Width of the image.
            height (`int`):
                Height of the image.
        """
        return self.image_processor.calc_num_image_tokens_from_image_size(width, height)
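
    # Usage sketch (illustrative): the two helpers above can be used to budget prompt length
    # before preprocessing, e.g.
    #
    #   n_img = processor.calc_num_image_tokens_from_image_size(1344, 1008)
    #   n_txt = len(processor.tokenizer(prompt).input_ids)
    #   assert n_img + n_txt <= model_max_length  # `model_max_length` supplied by the caller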

    @property
    def special_image_token_id(self):
        # Token id of the generic "<|image|>" special token.
        return self.tokenizer.convert_tokens_to_ids(self.special_image_token)

    def get_special_image_token_id(self):
        return self.tokenizer.convert_tokens_to_ids(self.special_image_token)
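
    # How `_convert_images_texts_to_inputs` lays out `input_ids` (with illustrative token ids):
    # the text around each "<|image_N|>" tag is tokenized normally, and each tag is replaced by
    # a run of negative placeholder ids, -N repeated `num_img_tokens[N - 1]` times, e.g.
    #
    #   "<|image_1|> hi"  ->  [1, -1, -1, ..., -1, 7251]
    #
    # Downstream, the model can locate image positions via `input_ids < 0` and substitute the
    # projected image features there.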

    def _convert_images_texts_to_inputs(
        self, images, texts, padding=False, truncation=None, max_length=None, return_tensors=None
    ):
        # Text-only case: fall back to plain tokenization.
        if not len(images):
            model_inputs = self.tokenizer(
                texts, return_tensors=return_tensors, padding=padding, truncation=truncation, max_length=max_length
            )
            return BatchFeature(data={**model_inputs})

        pattern = r"<\|image_\d+\|>"
        if isinstance(texts, str):
            texts = [texts]

        # Split each prompt on the image tags; tokenize the text chunks and remember the tags.
        prompt_chunks = []
        image_tags = []
        for text in texts:
            prompt_chunks.append([self.tokenizer(chunk).input_ids for chunk in re.split(pattern, text)])
            image_tags.append(re.findall(pattern, text))

        # Determine how many placeholder tokens each image needs, either directly from the image
        # processor output or from the number of crops per image.
        if "num_img_tokens" in images:
            num_img_tokens = images["num_img_tokens"]
        else:
            assert "num_crops" in images, "num_crops must be provided in images if num_img_tokens is not provided"
            num_crops = images["num_crops"]
            num_img_tokens = [_num_crops * self.num_img_tokens for _num_crops in num_crops]

        images, image_sizes = images["pixel_values"], images["image_sizes"]

        # Parse "<|image_N|>" tags into integer ids; id N refers to the N-th input image.
        image_ids = [[int(s.split("|")[1].split("_")[-1]) for s in tags] for tags in image_tags]
        unique_image_ids = sorted(set(iid for ids in image_ids for iid in ids))
        assert unique_image_ids == list(range(1, len(unique_image_ids) + 1)), (
            f"image ids must be contiguous integers starting from 1, e.g. [1, 2, 3], got {unique_image_ids}"
        )
        assert len(unique_image_ids) == len(images), (
            f"the number of images must match the number of unique image tags, "
            f"got {len(unique_image_ids)} image tags and {len(images)} images"
        )

        # For tag N, build a run of -N placeholder ids of the appropriate length.
        image_ids_pad = [[[-iid] * num_img_tokens[iid - 1] for iid in ids] for ids in image_ids]

        def insert_separator(X, sep_list):
            # Interleave text chunks with image placeholder runs; pad `sep_list` with an empty
            # separator so that `zip` keeps the final text chunk.
            if len(X) > len(sep_list):
                sep_list.append([])
            return [ele for sublist in zip(X, sep_list) for ele in sublist]
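
        # e.g. insert_separator([c0, c1, c2], [s0, s1]) -> [c0, s0, c1, s1, c2, []]
        # (illustrative: c* are token-id lists, s* are placeholder-id runs)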

        input_ids = []
        for sub_prompt_chunks, sub_image_ids_pad in zip(prompt_chunks, image_ids_pad):
            input_ids.append([])
            for x in insert_separator(sub_prompt_chunks, sub_image_ids_pad):
                input_ids[-1].extend(x)

        # Left-pad every sequence to the longest one in the batch (the `padding`, `truncation`
        # and `max_length` arguments only apply to the text-only path above).
        batch_max_len = max(len(ids) for ids in input_ids)
        for i in range(len(input_ids)):
            if len(input_ids[i]) < batch_max_len:
                input_ids[i] = [self.tokenizer.pad_token_id] * (batch_max_len - len(input_ids[i])) + input_ids[i]

        input_ids = torch.tensor(input_ids, dtype=torch.long)
        # Attend to every position, including the negative image placeholders, but not padding.
        attention_mask = (input_ids > -1000000).to(torch.long)
        attention_mask[input_ids == self.tokenizer.pad_token_id] = 0

        return BatchFeature(
            data={
                "input_ids": input_ids,
                "attention_mask": attention_mask,
                "pixel_values": images,
                "image_sizes": image_sizes,
            }
        )

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`].
        Please refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # Preserve order while removing duplicates across the two components.
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))