How to Run the ONNX Model
Can you please provide working code for running the ONNX model?
Hey! You can take a look here (https://huggingface.co/llava-hf/llava-interleave-qwen-0.5b-hf/discussions/5). I also adapted the script from the linked discussion a bit for generation with a batch size of one; for bigger batches you'd need to handle padding accordingly (there's a short batching sketch after the script below).
import onnxruntime as ort
from PIL import Image
import requests
import numpy as np
from transformers import AutoTokenizer, AutoProcessor
# Load the tokenizer and processor
tokenizer = AutoTokenizer.from_pretrained("llava-hf/llava-interleave-qwen-0.5b-hf")
processor = AutoProcessor.from_pretrained("llava-hf/llava-interleave-qwen-0.5b-hf")
# Download and load the image
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg"
image = Image.open(requests.get(url, stream=True).raw)
# Prepare text input
conversation = [
    {
        "role": "system",
        "content": "You are a helpful assistant that answers questions about images."
    },
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud"},
            {"type": "image"},
        ],
    },
]
# Apply chat template
prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
# Preprocess the image and text
inputs = processor(images=image, text=prompt, return_tensors="np")
# Load the ONNX sessions (paths below are local copies of the repo's onnx/ exports; adjust them to your machine)
vision_encoder_session = ort.InferenceSession("/raid/raushan/llava-interleave-qwen-0.5b-hf/onnx/vision_encoder.onnx")
decoder_session = ort.InferenceSession("/raid/raushan/llava-interleave-qwen-0.5b-hf/onnx/decoder_model_merged.onnx")
embed_tokens_session = ort.InferenceSession("/raid/raushan/llava-interleave-qwen-0.5b-hf/onnx/embed_tokens.onnx")
# Run vision encoder
vision_input_name = vision_encoder_session.get_inputs()[0].name
vision_output_name = vision_encoder_session.get_outputs()[0].name
vision_features = vision_encoder_session.run([vision_output_name], {vision_input_name: inputs["pixel_values"]})[0]
# Tokens for the prompt
input_ids, attention_mask = inputs["input_ids"], inputs["attention_mask"]
# Prepare inputs
sequence_length = input_ids.shape[1]
batch_size = 1
num_layers = 24
head_dim = 64
num_heads = 16
pad_token_id = tokenizer.pad_token_id
past_sequence_length = 0 # Set to 0 for the initial pass
special_image_token_id = 151646
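# id of the image placeholder token in the prompt; its positions get expanded into image patch features below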
# Position IDs
position_ids = np.arange(sequence_length, dtype=np.int64).reshape(1, -1)
# Past Key Values
past_key_values = {
    f"past_key_values.{i}.key": np.zeros((batch_size, num_heads, past_sequence_length, head_dim), dtype=np.float32)
    for i in range(num_layers)
}
past_key_values.update({
    f"past_key_values.{i}.value": np.zeros((batch_size, num_heads, past_sequence_length, head_dim), dtype=np.float32)
    for i in range(num_layers)
})
# Run embed tokens
embed_input_name = embed_tokens_session.get_inputs()[0].name
embed_output_name = embed_tokens_session.get_outputs()[0].name
token_embeddings = embed_tokens_session.run([embed_output_name], {embed_input_name: input_ids})[0]
def merge_input_ids_with_image_features(image_features, inputs_embeds, input_ids, attention_mask):
    num_images, num_image_patches, embed_dim = image_features.shape
    batch_size, sequence_length = input_ids.shape
    left_padding = not np.sum(input_ids[:, -1] == pad_token_id)
    # 1. Create a mask to know where special image tokens are
    special_image_token_mask = input_ids == special_image_token_id
    num_special_image_tokens = np.sum(special_image_token_mask, axis=-1)
    # Compute the maximum embed dimension
    max_embed_dim = (num_special_image_tokens.max() * (num_image_patches - 1)) + sequence_length
    batch_indices, non_image_indices = np.where(input_ids != special_image_token_id)
    # 2. Compute the positions where text should be written
    # Calculate new positions for text tokens in merged image-text sequence.
    # `special_image_token_mask` identifies image tokens. Each image token will be replaced by `nb_text_tokens_per_images - 1` text tokens.
    # `np.cumsum` computes how each image token shifts subsequent text token positions.
    # - 1 to adjust for zero-based indexing, as `cumsum` inherently increases indices by one.
    new_token_positions = np.cumsum((special_image_token_mask * (num_image_patches - 1) + 1), -1) - 1
    nb_image_pad = max_embed_dim - 1 - new_token_positions[:, -1]
    if left_padding:
        new_token_positions += nb_image_pad[:, None]  # offset for left padding
    text_to_overwrite = new_token_positions[batch_indices, non_image_indices]
    # 3. Create the full embedding, already padded to the maximum position
    final_embedding = np.zeros((batch_size, max_embed_dim, embed_dim), dtype=np.float32)
    final_attention_mask = np.zeros((batch_size, max_embed_dim), dtype=np.int64)
    # 4. Fill the embeddings based on the mask. If we have ["hey" "<image>", "how", "are"]
    # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the image features
    final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[batch_indices, non_image_indices]
    final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[batch_indices, non_image_indices]
    # 5. Fill the embeddings corresponding to the images. Anything that is not `text_positions` needs filling (#29835)
    image_to_overwrite = np.full((batch_size, max_embed_dim), True)
    image_to_overwrite[batch_indices, text_to_overwrite] = False
    image_to_overwrite &= image_to_overwrite.cumsum(-1) - 1 >= nb_image_pad[:, None]
    final_embedding[image_to_overwrite] = image_features.reshape(-1, embed_dim)
    final_attention_mask = np.logical_or(final_attention_mask, image_to_overwrite).astype(final_attention_mask.dtype)
    position_ids = final_attention_mask.cumsum(axis=-1) - 1
    position_ids = np.where(final_attention_mask == 0, 1, position_ids)
    # 6. Mask out the embedding at padding positions, as we later use the past_key_value value to determine the non-attended tokens.
    batch_indices, pad_indices = np.where(input_ids == pad_token_id)
    indices_to_mask = new_token_positions[batch_indices, pad_indices]
    final_embedding[batch_indices, indices_to_mask] = 0
    return final_embedding, final_attention_mask, position_ids
# Combine token embeddings and vision features
combined_embeddings, attention_mask, position_ids = merge_input_ids_with_image_features(vision_features, token_embeddings, input_ids, attention_mask)
combined_len = combined_embeddings.shape[1]
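# length of the merged sequence: text tokens plus the expanded image patch positions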
# Combine all inputs
decoder_inputs = {
    "attention_mask": attention_mask,
    "position_ids": position_ids,
    "inputs_embeds": combined_embeddings,
    **past_key_values
}
# Print input shapes
for name, value in decoder_inputs.items():
    print(f"{name} shape: {value.shape} dtype {value.dtype}")
# Run the decoder
decoder_input_names = [inp.name for inp in decoder_session.get_inputs()]
decoder_output_name = decoder_session.get_outputs()[0].name
names = [n.name for n in decoder_session.get_outputs()]
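# output 0 holds the logits; the remaining outputs are the present key/value tensors for the cache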
outputs = decoder_session.run(names, {name: decoder_inputs[name] for name in decoder_input_names if name in decoder_inputs})
print(f"Outputs shape: {outputs[0].shape}")
print(f"Outputs type: {outputs[0].dtype}")
# Process outputs (decode tokens to text)
generated_tokens = []
eos_token_id = tokenizer.eos_token_id
max_new_tokens = 50
for i in range(max_new_tokens):
    logits = outputs[0]
    past_kv = outputs[1:]
    logits_next_token = logits[:, -1]
    token_id = np.argmax(logits_next_token)
    if token_id == eos_token_id:
        break
    generated_tokens.append(token_id)
    # Prepare input for next token generation
    new_input_embeds = embed_tokens_session.run([embed_output_name], {embed_input_name: np.array([[token_id]])})[0]
    past_key_values = {name.replace("present", "past_key_values"): value for name, value in zip(names[1:], outputs[1:])}
    attention_mask = np.ones((1, combined_len + i + 1), dtype=np.int64)
    position_ids = np.arange(combined_len + i + 1, dtype=np.int64).reshape(1, -1)[:, -1:]
    decoder_inputs = {
        "attention_mask": attention_mask,
        "position_ids": position_ids,
        "inputs_embeds": new_input_embeds,
        **past_key_values
    }
    outputs = decoder_session.run(names, {name: decoder_inputs[name] for name in decoder_input_names if name in decoder_inputs})
# Convert to list of integers
token_ids = [int(token) for token in generated_tokens]
print(f"Generated token IDs: {token_ids}")
# Decode tokens one by one
decoded_tokens = [tokenizer.decode([token]) for token in token_ids]
print(f"Decoded tokens: {decoded_tokens}")
# Full decoded output
decoded_output = tokenizer.decode(token_ids, skip_special_tokens=True)
print(f"Full decoded output: {decoded_output}")
This is working properly, but it is even slower than the non-quantized version from transformers. Can you please tell me how to make it faster?
I am using q4f16 for all of the ONNX files.
Hmm, indeed the quantized model is slower. I tried with the original precision and got almost the same latency for HF and ONNX. Let me see if there's any way to optimize the ONNX generation.
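In the meantime, if you're on CPU, a couple of generic ONNX Runtime settings are worth double-checking. A small sketch using standard SessionOptions (nothing specific to this export; the thread count is only an illustrative value):
# Generic CPU-side settings: enable all graph optimizations and size the
# intra-op thread pool explicitly (tune to your physical core count).
sess_options = ort.SessionOptions()
sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
sess_options.intra_op_num_threads = 8  # illustrative value
decoder_session = ort.InferenceSession(
    "/raid/raushan/llava-interleave-qwen-0.5b-hf/onnx/decoder_model_merged.onnx",
    sess_options=sess_options,
)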
q4 and q4f16 are typically faster on GPU, and I assume you are currently running on CPU?
See https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#samples for more information and example code.
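For reference, switching the sessions to the CUDA execution provider is roughly a one-line change per session; a sketch, assuming the onnxruntime-gpu package and a compatible CUDA/cuDNN setup (paths as in the script above):
# Prefer the CUDA execution provider and fall back to CPU for unsupported ops.
print(ort.get_available_providers())  # sanity check that CUDAExecutionProvider is listed
providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
vision_encoder_session = ort.InferenceSession("onnx/vision_encoder.onnx", providers=providers)
decoder_session = ort.InferenceSession("onnx/decoder_model_merged.onnx", providers=providers)  # or the q4f16 file
embed_tokens_session = ort.InferenceSession("onnx/embed_tokens.onnx", providers=providers)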