import gradio as gr
import requests
import io
import random
import os
import time
from PIL import Image
from deep_translator import GoogleTranslator
import json

# Project by Nymbo

API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/SD3.5-Turbo-Realism-2.0-LoRA"
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100
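
# Optional sanity check (a minimal sketch): the HF_READ_TOKEN secret must be set for the
# Space, otherwise the Inference API will likely reject every request with 401/403.
if not API_TOKEN:
    print("Warning: HF_READ_TOKEN is not set; requests to the Inference API may fail.")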

# Function to query the API and return the generated image
def query(prompt, negative_prompt, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
    if not prompt:
        raise gr.Error("Prompt cannot be empty.")

    key = random.randint(0, 999)

    # Translate the prompt from Burmese (Myanmar) to English if necessary
    prompt = GoogleTranslator(source='my', target='en').translate(prompt)
    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')

    # Add some extra flair to the prompt
    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')

    # Prepare the payload for the API call, including width and height
    payload = {
        "inputs": prompt,
        "parameters": {
            "negative_prompt": negative_prompt,
            "steps": steps,
            "cfg_scale": cfg_scale,
            "sampler": sampler,
            "seed": seed if seed != -1 else random.randint(1, 1000000000),
            "strength": strength,
            "width": width,
            "height": height
        }
    }
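
    # Note: these parameter names follow Stable Diffusion WebUI conventions ("steps",
    # "cfg_scale", "sampler", "strength"). The Hugging Face Inference API documents
    # "num_inference_steps" and "guidance_scale" for diffusion models, so some of these
    # keys may be ignored by the backend; negative_prompt, seed, width and height map directly.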

    # Send the request to the API and handle the response
    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        error_message = f"Error: Failed to get image. Response status: {response.status_code}"
        if response.status_code == 503:
            error_message += " - The model is being loaded"
        raise gr.Error(error_message)

    try:
        # Convert the response content into an image
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
        return image
    except Exception as e:
        raise gr.Error(f"Error when trying to open the image: {e}")

# CSS to style the app
css = """
#app-container {
    max-width: 800px;
    margin-left: auto;
    margin-right: auto;
}
"""

# Build the Gradio UI with Blocks
with gr.Blocks(theme='Nymbo/Nymbo_Theme', css=css) as app:
    # Add a title to the app
    gr.HTML("<center><h1>Walone AI Image Pro Large</h1></center>")

    # Container for all the UI elements
    with gr.Column(elem_id="app-container"):
        # Add a text input for the main prompt
        with gr.Row():
            with gr.Column(elem_id="prompt-container"):
                with gr.Row():
                    # The placeholder text is Burmese for "write the prompt text".
                    text_prompt = gr.Textbox(label="Prompt", placeholder="Prompt စာသားရေးပါ", lines=2, elem_id="prompt-text-input")

                # Accordion for advanced settings
                with gr.Row():
                    with gr.Accordion("Advanced Settings", open=False):
                        negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos", lines=3, elem_id="negative-prompt-text-input")
                        with gr.Row():
                            width = gr.Slider(label="Width", value=1024, minimum=64, maximum=1216, step=32)
                            height = gr.Slider(label="Height", value=1024, minimum=64, maximum=1216, step=32)
                        steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
                        cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
                        strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
                        seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1) # Setting the seed to -1 will make it random
                        method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])

        # Add a button to trigger the image generation
        with gr.Row():
            text_button = gr.Button("Run", variant='primary', elem_id="gen-button")

        # Image output area to display the generated image
        with gr.Row():
            image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")

        # Bind the button to the query function with the added width and height inputs
        text_button.click(query, inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=image_output)

# Launch the Gradio app
app.launch(show_api=False, share=False)