# Ligeng-Zhu's picture
# update
# 0676163
# raw
# history blame
# 6 kB
import gradio as gr
import os, os.path as osp
import time
import glob
import cv2
from PIL import Image
import hashlib
import shutil
import os, sys, os.path as osp
import csv
import random
import json
from huggingface_hub import HfApi, repo_exists, file_exists
from huggingface_hub.hf_api import CommitOperationAdd
def calc_file_md5(fpath, max_digits=6):
    """Return the first *max_digits* hex characters of the MD5 of file *fpath*.

    The file is streamed in 8 KiB chunks so large files are hashed without
    being loaded fully into memory.
    """
    digest = hashlib.md5()
    with open(fpath, "rb") as stream:
        for block in iter(lambda: stream.read(8192), b""):
            digest.update(block)
    return digest.hexdigest()[:max_digits]
def string_to_md5(string, max_digits=6):
    """Return the first *max_digits* hex characters of the MD5 of *string*.

    The string is encoded with the default (UTF-8) codec before hashing.
    """
    full_hex = hashlib.md5(string.encode()).hexdigest()
    return full_hex[:max_digits]
# Per-model caption files: each JSON maps a sample key to a caption record.
# Index 4 in get_random_caption() stands for the original alt-text
# ("orig_text") of the first file rather than a fifth entry here.
# NOTE(review): index 3 re-loads the f3 file — looks like a typo for
# "f4/coyo25m-0-000000.tar.json"; confirm whether a fourth model output
# directory was intended.
finfo = [
    json.load(open("f1/coyo25m-0-000000.tar.json")),
    json.load(open("f2/coyo25m-0-000000.tar.json")),
    json.load(open("f3/coyo25m-0-000000.tar.json")),
    json.load(open("f3/coyo25m-0-000000.tar.json")),
]
# Cache the sample keys to keys.txt on first run so the key ordering stays
# stable across restarts; subsequent runs read the cached list back.
keys = list(finfo[0].keys())
if not os.path.exists("keys.txt"):
    with open("keys.txt", "w") as f:
        f.write("\n".join(keys))
else:
    with open("keys.txt", "r") as f:
        keys = list(f.read().split("\n"))
# Shared Hugging Face Hub client, used by save_labeling() to upload results.
api = HfApi()
def get_random_caption(k):
    """Pick two distinct caption sources for sample *k*.

    Source indices 0-3 select the model outputs loaded in ``finfo``; index 4
    stands for the sample's original alt-text (``orig_text`` of the first
    file).

    Returns a pair ``(captions, source_ids)``: the two caption strings, and
    the model indices that produced them (the orig-text source is
    deliberately not recorded in ``source_ids``).
    """
    chosen = random.sample(list(range(5)), k=2)
    captions = []
    source_ids = []
    for src in chosen:
        if src == 4:
            captions.append(finfo[0][k]["orig_text"])
        else:
            captions.append(finfo[src][k]["output"])
            source_ids.append(src)
    return captions, source_ids
def load_image(idx):
    """Gradio callback for the index slider: show sample *idx*.

    *idx* is coerced to int (gradio sliders may deliver floats) and clamped
    into range — the slider's maximum is ``len(keys)``, one past the last
    valid index, so the original ``keys[idx]`` could raise IndexError.

    Returns the tuple bound to slider.change outputs: (image, logging text,
    caption 1, caption 2, source-id string, rating-1 reset, rating-2 reset).
    """
    idx = min(max(int(idx), 0), len(keys) - 1)
    k = keys[idx]
    infos, indexs = get_random_caption(k)
    # Trailing Nones reset the two rating radios so stale choices don't carry over.
    return k, f"{k}", infos[0], infos[1], str(indexs), None, None
def random_image(idx):
    """Gradio callback for the "Random Image" button.

    *idx* is the button component's value and is unused. A sample index is
    drawn uniformly with randrange — unlike the random.choice + list.index
    pair, this avoids an O(n) scan and stays correct even if ``keys``
    contains duplicate entries.

    Returns (image, slider position, logging text, caption 1, caption 2,
    source-id string, rating-1 reset, rating-2 reset).
    """
    index = random.randrange(len(keys))
    k = keys[index]
    infos, indexs = get_random_caption(k)
    return k, index, f"{k}", infos[0], infos[1], str(indexs), None, None
def save_labeling(url, cap1, cap2, labeler, caption_source, rate1, rate2):
    """Persist one labeling result locally and, on Spaces, push it to the Hub.

    Args mirror the gradio inputs: *url* is the sample key shown in the
    logging box, *cap1*/*cap2* the two anonymous captions, *labeler* the
    free-form labeler id, *caption_source* the hidden source-index string,
    and *rate1*/*rate2* the two radio ratings.

    Returns a human-readable summary (save location + the JSON record) shown
    in the results textbox.
    """
    os.makedirs("flagged", exist_ok=True)
    output_info = {
        "url": url,
        "cap1": cap1,
        "cap2": cap2,
        "rate-details": rate1,
        # Key spelling kept as-is ("halluication"): downstream readers expect it.
        "rate-halluication": rate2,
        "caption_source": caption_source,
        "labeler": labeler,
    }
    # Sanitize the labeler id so it is safe to embed in a filename.
    lid = (
        labeler.replace(" ", "_").replace("@", "_").replace(".", "_").replace("/", "-")
    )
    # Hash the url so the filename stays short and filesystem-safe.
    output_path = osp.join(
        "flagged", "md5-" + string_to_md5(url, max_digits=12) + f".{lid}.json"
    )
    with open(output_path, "w") as fp:
        json.dump(output_info, fp, indent=2)
    if "RUNNING_ON_SPACE" in os.environ:
        # Lazily create the (private) results dataset on first upload.
        if not api.repo_exists(
            "Efficient-Large-Model/VILA-S-Human-Test", repo_type="dataset"
        ):
            api.create_repo(
                "Efficient-Large-Model/VILA-S-Human-Test",
                repo_type="dataset",
                private=True,
            )
        operation = CommitOperationAdd(
            path_or_fileobj=output_path,
            path_in_repo=osp.basename(output_path),
        )
        print("uploading ", output_path)
        commit_info = api.create_commit(
            repo_id="Efficient-Large-Model/VILA-S-Human-Test",
            repo_type="dataset",
            operations=[
                operation,
            ],
            commit_message=f"update {output_path}",
        )
        # BUG FIX: create_commit returns a CommitInfo object; the original
        # assigned it to output_path and the str concatenation below then
        # raised TypeError. Stringify it before building the summary.
        output_path = str(commit_info)
    return output_path + "\n" + json.dumps(output_info, indent=2)
# --- UI layout and event wiring ------------------------------------------
# NOTE(review): the slider maximum is len(keys), one past the last valid
# index into `keys` — load_image would raise IndexError at the top end;
# confirm whether maximum=len(keys) - 1 was intended.
with gr.Blocks(
    title="VILA Video Benchmark",
) as demo:
    with gr.Row():
        with gr.Column(scale=2):
            # Preview pane; shows a placeholder logo until a sample is picked.
            image_input = gr.Image(
                label="Video Preview ",
                # height=320,
                # width=480,
                value="https://github.com/NVlabs/VILA/raw/main/demo_images/vila-logo.jpg",
            )
        with gr.Column(scale=1):
            slider = gr.Slider(maximum=len(keys), label="Video Index", value=0)
            gr.Markdown("## Step-0, put in your name")
            labeler = gr.Text(
                value="placeholder",
                label="Labeler ID (your name or email)",
                interactive=True,
            )
            # Shows the current sample key; also doubles as the "url" input
            # of save_labeling in the submit wiring below.
            logging = gr.Markdown(label="Logging info")
    with gr.Row():
        with gr.Column():
            gr.Markdown("## Step-1, randomly pick a image")
            random_img = gr.Button(value="Random Image", variant="primary")
        with gr.Column(scale=3):
            gr.Markdown("## Step-2, randomly pick a image")
            with gr.Row():
                # Pairwise preference ratings for the two anonymous captions.
                r1 = gr.Radio(
                    choices=["Left better", "Tie", "Right better"], label="Detailness"
                )
                r2 = gr.Radio(
                    choices=["Left better", "Tie", "Right better"], label="Halluciation"
                )
        with gr.Column():
            gr.Markdown("## Step-3, submit the results")
            submit = gr.Button(value="submit", variant="stop")
    with gr.Row():
        gr.Markdown(
            "### Warning: if you find two caption identical, please skip and evaluate next"
        )
    with gr.Row():
        vcap1 = gr.Textbox(label="Anoymous Caption 1")
        vcap2 = gr.Textbox(label="Anoymous Caption 2")
    cap_res = gr.Textbox(label="Caption Saving Results")
    # Hidden textbox carrying which model indices produced the captions.
    caption_source = gr.Textbox(label="Temp Info", visible=False)
    from functools import partial  # NOTE(review): unused import
    submit.click(
        save_labeling,
        inputs=[logging, vcap1, vcap2, labeler, caption_source, r1, r2],
        outputs=[cap_res],
    )
    slider.change(
        load_image,
        inputs=[slider],
        outputs=[image_input, logging, vcap1, vcap2, caption_source, r1, r2],
    )
    # The button component itself is passed as the input; its value is
    # unused by random_image.
    random_img.click(
        random_image,
        inputs=[random_img],
        outputs=[image_input, slider, logging, vcap1, vcap2, caption_source, r1, r2],
    )
    # btn_save.click(
    #     save_labeling,
    #     inputs=[video_path, _vtag, _vcap, vtag, vcap, uid],
    #     outputs=[
    #         cap_res,
    #     ],
    # )
# Enable request queuing so concurrent labelers' events are processed in order.
demo.queue()
if __name__ == "__main__":
    demo.launch()