from typing import List
from PIL import Image
from playwright.sync_api import sync_playwright
import os
import gradio as gr
from gradio_client.client import DEFAULT_TEMP_DIR
from transformers import AutoProcessor, AutoModelForCausalLM
API_TOKEN = os.getenv("HF_AUTH_TOKEN")
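# Model/processor loading is currently disabled (commented out below); the demo
# returns a hard-coded example page instead of real IDEFICS output.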
# PROCESSOR = AutoProcessor.from_pretrained(
#     "HuggingFaceM4/img2html",
#     token=API_TOKEN,
# )
IMAGE_GALLERY_PATHS = [
    f"example_images/{ex_image}"
    for ex_image in os.listdir("example_images")
]
def add_file_gallery(selected_state: gr.SelectData, gallery_list: List[str]):
    # Copy the selected template from the gallery into the image input component.
    # return (
    #     f"example_images/{gallery_list.root[selected_state.index].image.orig_name}",
    #     "",
    # )
    return f"example_images/{gallery_list.root[selected_state.index].image.orig_name}"
def expand_layout():
    return gr.Column(scale=2), gr.Textbox()
def render_webpage(
    html_css_code,
):
    # Render the generated HTML/CSS in headless Chromium and return a
    # full-page screenshot as a PIL image.
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=True)
        context = browser.new_context(
            user_agent=(
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0"
                " Safari/537.36"
            )
        )
        page = context.new_page()
        page.set_content(html_css_code)
        page.wait_for_load_state("networkidle")
        os.makedirs(DEFAULT_TEMP_DIR, exist_ok=True)  # make sure the temp dir exists
        output_path_screenshot = f"{DEFAULT_TEMP_DIR}/{hash(html_css_code)}.png"
        page.screenshot(path=output_path_screenshot, full_page=True)
        context.close()
        browser.close()
    return Image.open(output_path_screenshot)
def model_inference(
    image,
):
    # Placeholder inference: the model call is disabled, so we return a
    # hard-coded example page (wrapped in minimal HTML so it renders)
    # instead of HTML generated from `image`.
    CAR_COMPANY = """<html>
<body>
    <h1>XYZ Car Company</h1>
    <h2>Our Cars</h2>
    <h3>Model A</h3>
    <p>Description of Model A.</p>
    <h3>Model B</h3>
    <p>Description of Model B.</p>
</body>
</html>"""
    rendered_page = render_webpage(CAR_COMPANY)
    return CAR_COMPANY, rendered_page
# textbox = gr.Textbox(
#     placeholder="Upload an image and ask the AI to create a meme!",
#     show_label=False,
#     value="Write a meme about this image.",
#     visible=True,
#     container=False,
#     label="Text input",
#     scale=8,
#     max_lines=5,
# )
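# Output components, rendered into the layout below with .render().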
generated_html = gr.Textbox(
    label="IDEFICS Generated HTML",
    elem_id="generated_html",
)
rendered_html = gr.Image()
css = """
.gradio-container{max-width: 1000px!important}
h1{display: flex;align-items: center;justify-content: center;gap: .25em}
*{transition: width 0.5s ease, flex-grow 0.5s ease}
"""
with gr.Blocks(title="Img2html", theme=gr.themes.Base(), css=css) as demo:
    with gr.Row(equal_height=True):
        # scale=2 when expanded
        with gr.Column(scale=4, min_width=250) as upload_area:
            imagebox = gr.Image(
                type="filepath",
                label="Image to HTML",
                height=272,
                visible=True,
                sources=["upload", "clipboard"],
            )
            with gr.Group():
                with gr.Row():
                    submit_btn = gr.Button(
                        value="▶️ Submit", visible=True, min_width=120
                    )
                    clear_btn = gr.ClearButton(
                        [imagebox, generated_html, rendered_html],
                        value="🧹 Clear",
                        min_width=120,
                    )
                    regenerate_btn = gr.Button(
                        value="🔄 Regenerate", visible=True, min_width=120
                    )
        with gr.Column(scale=5) as result_area:
            rendered_html.render()
            with gr.Row():
                generated_html.render()
    with gr.Row(equal_height=True):
        template_gallery = gr.Gallery(
            value=IMAGE_GALLERY_PATHS,
            label="Templates Gallery",
            allow_preview=False,
            columns=4,
            elem_id="gallery",
            show_share_button=False,
            height=400,
        )
    # Run inference when an image is uploaded, submitted, or a template is
    # selected. The regenerate button has its own handler below, so it is not
    # listed here again.
    gr.on(
        triggers=[
            imagebox.upload,
            submit_btn.click,
            template_gallery.select,
        ],
        fn=model_inference,
        inputs=[
            imagebox,
        ],
        outputs=[generated_html, rendered_html],
        queue=False,
    )
    regenerate_btn.click(
        fn=model_inference,
        inputs=[
            imagebox,
        ],
        outputs=[generated_html, rendered_html],
        queue=False,
    )
    # Selecting a template also fills the image input with that template file.
    template_gallery.select(
        fn=add_file_gallery,
        inputs=[template_gallery],
        outputs=[imagebox],
        queue=False,
    )
    demo.load(
        # fn=choose_gallery,
        # inputs=[gallery_type_choice],
        # outputs=[template_gallery],
        queue=False,
    )
demo.queue(max_size=40, api_open=False)
demo.launch(max_threads=400)