# app.py: Gradio demo for facebook/chameleon-7b (Hugging Face Space)
import time
from threading import Thread

import gradio as gr
import spaces
import torch
from gradio import FileData
from PIL import Image
from transformers import (
    BitsAndBytesConfig,
    ChameleonForConditionalGeneration,
    ChameleonProcessor,
    TextIteratorStreamer,
)

# Load the processor and the fp16 model once at startup; bot_streaming below
# runs on GPU via the @spaces.GPU decorator.
processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")
model = ChameleonForConditionalGeneration.from_pretrained(
    "facebook/chameleon-7b", torch_dtype=torch.float16
).to("cuda")
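
# Optional: BitsAndBytesConfig (imported above) can load the model in 4-bit to
# cut VRAM use. A minimal sketch, assuming the bitsandbytes package is
# installed (not enabled in this demo):
#
#   quantization_config = BitsAndBytesConfig(
#       load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16
#   )
#   model = ChameleonForConditionalGeneration.from_pretrained(
#       "facebook/chameleon-7b",
#       quantization_config=quantization_config,
#       device_map="auto",  # let accelerate place layers; skip the manual .to("cuda")
#   )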

@spaces.GPU
def bot_streaming(message, history):
    if message.files:
        # One or more uploads in this turn: collect their paths
        # (interleaved images or video).
        image = [f.path for f in message.files]
    else:
        # No upload in this turn: fall back to images attached to earlier
        # turns, up to the most recent text-only message.
        def has_file_data(lst):
            return any(isinstance(item, FileData) for sublist in lst if isinstance(sublist, tuple) for item in sublist)

        def extract_paths(lst):
            return [item.path for sublist in lst if isinstance(sublist, tuple) for item in sublist if isinstance(item, FileData)]

        latest_text_only_index = -1
        for i, item in enumerate(history):
            if all(isinstance(sub_item, str) for sub_item in item):
                latest_text_only_index = i

        image = [path for i, item in enumerate(history) if i < latest_text_only_index and has_file_data(item) for path in extract_paths(item)]

    if not image:
        raise gr.Error("You need to upload an image or video for Chameleon to work.")

    if len(image) == 1:
        image = Image.open(image[0]).convert("RGB")
        prompt = f"{message.text}<image>"
    elif len(image) > 1:
        # Interleave: one "<image>" placeholder per uploaded image.
        image = [Image.open(img).convert("RGB") for img in image]
        prompt = message.text + "<image>" * len(image)

    inputs = processor(prompt, image, return_tensors="pt").to("cuda", torch.float16)
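
    # The processor pairs each "<image>" placeholder in the prompt with one
    # image, in order. A minimal sketch of the same call outside this app
    # (img_a and img_b assumed to be PIL images):
    #
    #   processor("Compare <image> and <image>", [img_a, img_b], return_tensors="pt")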

    # Stream tokens as they are generated: model.generate runs in a background
    # thread and pushes decoded text through the streamer.
    streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=250)

    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    buffer = ""
    for new_text in streamer:
        buffer += new_text
        time.sleep(0.01)
        yield buffer

demo = gr.ChatInterface(
    fn=bot_streaming,
    title="Chameleon 🦎",
    examples=[
        {"text": "Where can I find this monument? Can you give me other recommendations around the area?", "files": ["./wat_arun.jpg"]},
        {"text": "Do these two pieces belong to the same era and if so, which era is it?", "files": ["./rococo_1.jpg", "./rococo_2.jpg"]},
        {"text": "What art style is this and which century?", "files": ["./rococo_1.jpg"]},
        {"text": "What is on the flower?", "files": ["./bee.jpg"]},
    ],
    textbox=gr.MultimodalTextbox(file_count="multiple"),
    description="Try [Chameleon-7B](https://huggingface.co/facebook/chameleon-7b) by Meta with transformers in this demo. Upload an image or several images and start chatting about them, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
    stop_btn="Stop Generation",
    multimodal=True,
)
demo.launch(debug=True)