import gradio as gr
import requests
import time
import os

from utils.gradio_helpers import parse_outputs, process_outputs

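# Input components for the consistent-character model, in the same order as
# the `names` list that predict() uses to build the request payload.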
inputs = []
inputs.append(gr.Textbox(
    label="Prompt", info='''Describe the subject. Include clothes and hairstyle for more consistency.'''
))

inputs.append(gr.Textbox(
    label="Negative Prompt", info='''Things you do not want to see in your image'''
))

inputs.append(gr.Image(
    label="Subject", type="filepath"
))

inputs.append(gr.Slider(
    label="Number Of Outputs", info='''The number of images to generate.''', value=3,
    minimum=1, maximum=20, step=1,
))

inputs.append(gr.Slider(
    label="Number Of Images Per Pose", info='''The number of images to generate for each pose.''', value=1,
    minimum=1, maximum=4, step=1,
))

inputs.append(gr.Checkbox(
    label="Randomise Poses", info='''Randomise the poses used.''', value=True
))

inputs.append(gr.Dropdown(
    choices=['webp', 'jpg', 'png'], label="Output Format", info='''Format of the output images''', value="webp"
))

inputs.append(gr.Number(
    label="Output Quality", info='''Quality of the output images, from 0 to 100. 100 is best quality, 0 is lowest quality.''', value=80
))

inputs.append(gr.Number(
    label="Seed", info='''Set a seed for reproducibility. Random by default.''', value=None
))

names = ['prompt', 'negative_prompt', 'subject', 'number_of_outputs', 'number_of_images_per_pose', 'randomise_poses', 'output_format', 'output_quality', 'seed']

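# Up to five output images are shown; predict() hides any slots a run does not fill.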
outputs = []
outputs.append(gr.Image())
outputs.append(gr.Image())
outputs.append(gr.Image())
outputs.append(gr.Image())
outputs.append(gr.Image())

expected_outputs = len(outputs)

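# Forward the form values to the local Cog server, poll until the prediction
# finishes, then map the returned files onto the output components.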
def predict(request: gr.Request, *args, progress=gr.Progress(track_tqdm=True)):
    headers = {'Content-Type': 'application/json'}
    payload = {"input": {}}

    # Local file inputs are exposed to the Cog server via Gradio's /file= route.
    base_url = "http://0.0.0.0:7860"
    for i, key in enumerate(names):
        value = args[i]
        if value and os.path.exists(str(value)):
            value = f"{base_url}/file=" + value
        if value is not None and value != "":
            payload["input"][key] = value

    response = requests.post("http://0.0.0.0:5000/predictions", headers=headers, json=payload)

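    # A 201 means the Cog server accepted the prediction; poll its "get" URL
    # until the status is "succeeded" or "failed".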
    if response.status_code == 201:
        follow_up_url = response.json()["urls"]["get"]
        response = requests.get(follow_up_url, headers=headers)
        while response.json()["status"] != "succeeded":
            if response.json()["status"] == "failed":
                raise gr.Error("The submission failed!")
            time.sleep(1)
            response = requests.get(follow_up_url, headers=headers)

    if response.status_code == 200:
        json_response = response.json()
        # If the first output component is JSON, return the raw output as-is.
        if outputs[0].get_config()["name"] == "json":
            return json_response["output"]
        predict_outputs = parse_outputs(json_response["output"])
        processed_outputs = process_outputs(predict_outputs)
        difference_outputs = expected_outputs - len(processed_outputs)

        # Hide any output slots the prediction did not fill.
        if difference_outputs > 0:
            extra_outputs = [gr.update(visible=False)] * difference_outputs
            processed_outputs.extend(extra_outputs)
        # Drop any extra results beyond the available output slots.
        elif difference_outputs < 0:
            processed_outputs = processed_outputs[:difference_outputs]

        return tuple(processed_outputs) if len(processed_outputs) > 1 else processed_outputs[0]
    else:
        if response.status_code == 409:
            raise gr.Error("Sorry, the Cog image is still processing. Try again in a bit.")
        raise gr.Error(f"The submission failed! Error: {response.status_code}")

title = "Demo for consistent-character cog image by fofr" |
|
model_description = "Create images of a given character in different poses" |
|
|
|
app = gr.Interface( |
|
fn=predict, |
|
inputs=inputs, |
|
outputs=outputs, |
|
title=title, |
|
description=model_description, |
|
allow_flagging="never", |
|
) |
|
app.launch(share=True) |
|
|
|
|