Commit ddfadf6
Parent(s): cacfcbe
This PR simplifies the display, uses uppercase and highlights the prompts (#1)
(2f25a45d3a4585e4469198c6dd9b8189a47e88c3)
Co-authored-by: Fabrice TIERCELIN <[email protected]>
app.py
CHANGED
@@ -1,80 +1,77 @@
 import gradio as gr
 import sys
 from utils import load_lora_model
 from bg_alpha import adjust_transparency
 from lineart import get_pipe
 import os
 from PIL import Image
 import spaces
 
 path = os.getcwd()
 output_dir = f"{path}/output"
 lora_dir = f"{path}/models/lora"
 
 load_lora_model(lora_dir)
 
 pipe = get_pipe(lora_dir)
 
 @spaces.GPU()
 def generate(prompt, negative_prompt):
     default_pos = "((white background)), lineart, <lora:sdxl_BWLine:1.0>, monochrome, "
     default_neg = ""
     prompt = default_pos + prompt
     negative_prompt = default_neg + negative_prompt
 
     width, height = 1024, 1024
     color = (255, 255, 255)
     white_bg = Image.new("RGB", (width, height), color)
 
     image = pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
         image=[white_bg],
         num_inference_steps=50,
         controlnet_conditioning_scale=[0.1]
     ).images[0]
 
     return image
 
 
 class webui:
     def __init__(self):
         self.demo = gr.Blocks()
 
     def process(self, pos_prompt, neg_prompt):
         image = generate(pos_prompt, neg_prompt)
         image = adjust_transparency(image)
         return [image]
 
     def launch(self, share):
         with self.demo:
             with gr.Column():
-    [old lines 52-80 were not rendered in the diff view; only fragments remain: an if/else block in which both branches call ui.launch(share=False)]
+                pos_prompt = gr.Textbox(value="1girl, cute, kawaii, medium breasts, medium hair, smile, mini skirt, best quality, very aesthetic,", max_lines=1000, label="Positive prompt")
+                neg_prompt = gr.Textbox(value="bold line, multiple people,", max_lines=1000, label="Negative prompt")
+
+                submit = gr.Button(value="Start", variant="primary")
+
+                output_0 = gr.Image(label="Output", format="png")
+
+                submit.click(
+                    self.process,
+                    inputs=[pos_prompt, neg_prompt],  # [input_image, pos_prompt, neg_prompt, alpha_th, thickness, reference_image],
+                    outputs=[output_0]
+                )
+
+        self.demo.queue()
+        self.demo.launch(share=share)
+
+
+if __name__ == "__main__":
+    ui = webui()
+    if len(sys.argv) > 1:
+        if sys.argv[1] == "share":
+            ui.launch(share=True)
+        else:
+            ui.launch(share=False)
+    else:
+        ui.launch(share=False)
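Note: lineart.get_pipe is not part of this diff, so the pipeline construction is not visible here. The sketch below is only a guess at what such a helper could look like, assuming a diffusers SDXL ControlNet pipeline with the sdxl_BWLine LoRA loaded from lora_dir; the checkpoint IDs and the LoRA file name are placeholders, not taken from the repository.

import torch
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline

def get_pipe(lora_dir: str):
    # Placeholder checkpoint IDs -- the actual models used by the Space are not
    # shown in this commit.
    controlnet = ControlNetModel.from_pretrained(
        "some-org/controlnet-lineart-sdxl",
        torch_dtype=torch.float16,
    )
    pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        controlnet=controlnet,
        torch_dtype=torch.float16,
    )
    # Assumed LoRA file name inside lora_dir.
    pipe.load_lora_weights(lora_dir, weight_name="sdxl_BWLine.safetensors")
    return pipe.to("cuda")

A pipeline built this way accepts the call made in generate(): a prompt pair, a list containing the white control image, and a matching list for controlnet_conditioning_scale.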
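Likewise, bg_alpha.adjust_transparency (called in webui.process) is not included in this commit. A minimal sketch of what a helper with that name might do, assuming it simply knocks the white canvas out to transparency so only the line art remains; the threshold value is an illustrative assumption.

from PIL import Image

def adjust_transparency(image: Image.Image, threshold: int = 240) -> Image.Image:
    # Assumed behaviour: make near-white pixels fully transparent while keeping
    # the dark line work opaque.
    rgba = image.convert("RGBA")
    pixels = [
        (r, g, b, 0) if min(r, g, b) >= threshold else (r, g, b, a)
        for r, g, b, a in rgba.getdata()
    ]
    rgba.putdata(pixels)
    return rgba

As the new __main__ block shows, the app is started with "python app.py" for a local session, or "python app.py share" to request a public Gradio link.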