import gradio as gr
import requests
import io
import random
import os
import time
from PIL import Image
from deep_translator import GoogleTranslator
import json
# Project by Nymbo
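# Serverless Inference API endpoint for the FLUX.1-schnell text-to-image model.
# The access token is read from the HF_READ_TOKEN environment variable / Space secret.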
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100
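# Translate the prompt from Burmese to English, call the Inference API,
# and return the result as a PIL image (or None / a gr.Error on failure).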
def query(prompt, negative_prompt, steps, cfg_scale, sampler, seed, strength, width, height):
    if prompt == "" or prompt is None:
        return None

    # Random key used only to tag the log lines of this generation
    key = random.randint(0, 999)

    # Translate the prompt from Burmese ('my') to English before sending it to the model
    prompt = GoogleTranslator(source='my', target='en').translate(prompt)
    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')

    # Append quality boosters to the translated prompt
    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')

    payload = {
        "inputs": prompt,
        "parameters": {
            "negative_prompt": negative_prompt,
            "steps": steps,
            "cfg_scale": cfg_scale,
            "sampler": sampler,
            "seed": seed if seed != -1 else random.randint(1, 1000000000),
            "strength": strength,
            "width": width,
            "height": height
        }
    }

    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        print(f"Error: Failed to get image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")

    # The API returns raw image bytes; decode them into a PIL Image
    try:
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
        return image
    except Exception as e:
        print(f"Error when trying to open the image: {e}")
        return None
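# CSS to center the app column and cap its width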
css = """
#app-container {
max-width: 600px;
margin-left: auto;
margin-right: auto;
}
"""
with gr.Blocks(theme='Nymbo/Nymbo_Theme', css=css) as app:
    gr.HTML("<center><h1>Walone AI Image Stable</h1></center>")
    with gr.Column(elem_id="app-container"):
        with gr.Row():
            with gr.Column(elem_id="prompt-container"):
                with gr.Row():
                    # Label/placeholder are in Burmese: "Write a prompt" / "Write your prompt here"
                    text_prompt = gr.Textbox(label="Prompt ရေးရန်", placeholder="ဒီနေရာမှာ prompt ရေးပါ", lines=2, elem_id="prompt-text-input")
                with gr.Row():
                    # "အဆင့်မြင့် Settings" is Burmese for "Advanced Settings"
                    with gr.Accordion("အဆင့်မြင့် Settings", open=False):
                        negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos", lines=3, elem_id="negative-prompt-text-input")
                        steps = gr.Slider(label="Sampling steps", value=4, minimum=1, maximum=100, step=1)
                        cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
                        method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
                        strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
                        seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
                        width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=64)
                        height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=64)

        with gr.Row():
            text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
        with gr.Row():
            image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")

        # Wire the Run button to query(); the inputs map onto its parameters in order
        text_button.click(query, inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=image_output)

app.launch(show_api=False, share=True)