Deadmon committed on
Commit 69dc758
1 parent: b8d4b4e

Update app.py

Files changed (1)
  1. app.py +95 -179
app.py CHANGED
@@ -34,6 +34,7 @@ function refresh() {
}
}
"""
+
def nms(x, t, s):
    x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s)

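Reviewer note: the `nms` helper whose first lines appear in this hunk's context (its full body is visible in the removed block of the last hunk) thins a soft edge map by keeping only pixels that survive dilation with four directional 3×3 kernels, then binarizes at threshold `t`. A self-contained sketch of the same routine on a synthetic input; the blob example is illustrative, not from app.py:

```python
import cv2
import numpy as np

def nms(x, t, s):
    # Blur, then keep pixels that are local maxima along one of four directions.
    x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s)
    f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)  # horizontal
    f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)  # vertical
    f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)  # diagonal
    f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)  # anti-diagonal
    y = np.zeros_like(x)
    for f in [f1, f2, f3, f4]:
        # A pixel survives if dilating along f leaves it unchanged, i.e. it is
        # the maximum within that directional neighborhood.
        np.putmask(y, cv2.dilate(x, kernel=f) == x, x)
    z = np.zeros_like(y, dtype=np.uint8)
    z[y > t] = 255  # binarize: surviving pixels above threshold become white
    return z

# Synthetic example: a filled disk collapses to thin directional ridges.
blob = np.zeros((64, 64), dtype=np.uint8)
cv2.circle(blob, (32, 32), 12, 255, -1)
thin = nms(blob, 127, 3.0)
print(thin.shape, thin.dtype, thin.max())
```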
@@ -69,7 +70,8 @@ def HWC3(x):
    y = y.clip(0, 255).astype(np.uint8)
    return y

-DESCRIPTION = '''
+DESCRIPTION = '''# Scribble SDXL 🖋️🌄
+sketch to image with SDXL, using [@xinsir](https://huggingface.co/xinsir) [scribble sdxl controlnet](https://huggingface.co/xinsir/controlnet-scribble-sdxl-1.0), [sdxl controlnet canny](https://huggingface.co/xinsir/controlnet-canny-sdxl-1.0)
'''

if not torch.cuda.is_available():
@@ -132,17 +134,14 @@ styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
STYLE_NAMES = list(styles.keys())
DEFAULT_STYLE_NAME = "(No style)"

-
def apply_style(style_name: str, positive: str, negative: str = "") -> tuple[str, str]:
    p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    return p.replace("{prompt}", positive), n + negative

-
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

eulera_scheduler = EulerAncestralDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler")

-
controlnet = ControlNetModel.from_pretrained(
    "xinsir/controlnet-scribble-sdxl-1.0",
    torch_dtype=torch.float16
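Reviewer note: this hunk's context loads the scribble ControlNet; the `pipe_canny` named in the next hunk header is presumably built the same way from `xinsir/controlnet-canny-sdxl-1.0` (linked in the new DESCRIPTION). A hedged sketch of how such a pair is typically assembled with diffusers — the exact arguments used in app.py are not visible in this diff:

```python
import torch
from diffusers import (ControlNetModel, EulerAncestralDiscreteScheduler,
                       StableDiffusionXLControlNetPipeline)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
eulera_scheduler = EulerAncestralDiscreteScheduler.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler")

# One ControlNet per conditioning type, as named in DESCRIPTION.
controlnet = ControlNetModel.from_pretrained(
    "xinsir/controlnet-scribble-sdxl-1.0", torch_dtype=torch.float16)
controlnet_canny = ControlNetModel.from_pretrained(
    "xinsir/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16)

# Each ControlNet gets its own SDXL pipeline sharing the same scheduler.
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet,
    torch_dtype=torch.float16, scheduler=eulera_scheduler).to(device)
pipe_canny = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet_canny,
    torch_dtype=torch.float16, scheduler=eulera_scheduler).to(device)
```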
@@ -174,181 +173,98 @@ pipe_canny = StableDiffusionXLControlNetPipeline.from_pretrained(

pipe_canny.to(device)

-MAX_SEED = np.iinfo(np.int32).max
-processor = HEDdetector.from_pretrained('lllyasviel/Annotators')
-def nms(x, t, s):
-    x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s)
-
-    f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
-    f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
-    f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
-    f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)
-
-    y = np.zeros_like(x)
-
-    for f in [f1, f2, f3, f4]:
-        np.putmask(y, cv2.dilate(x, kernel=f) == x, x)
-
-    z = np.zeros_like(y, dtype=np.uint8)
-    z[y > t] = 255
-    return z
-
-def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-    return seed
-
-@spaces.GPU
-def run(
-    image: PIL.Image.Image,
-    prompt: str,
-    negative_prompt: str,
-    style_name: str = DEFAULT_STYLE_NAME,
-    num_steps: int = 25,
-    guidance_scale: float = 5,
-    controlnet_conditioning_scale: float = 1.0,
-    seed: int = 0,
-    use_hed: bool = False,
-    use_canny: bool = False,
-    progress=gr.Progress(track_tqdm=True),
-) -> PIL.Image.Image:
-    width, height = image['composite'].size
-    ratio = np.sqrt(1024. * 1024. / (width * height))
-    new_width, new_height = int(width * ratio), int(height * ratio)
-    image = image['composite'].resize((new_width, new_height))
-
-    if use_canny:
-        controlnet_img = np.array(image)
-        controlnet_img = cv2.Canny(controlnet_img, 100, 200)
-        controlnet_img = HWC3(controlnet_img)
-        image = Image.fromarray(controlnet_img)
-
-    elif not use_hed:
-        controlnet_img = image
-    else:
-        controlnet_img = processor(image, scribble=False)
-        # the following processing simulates a human sketch; different thresholds give different line widths
-        controlnet_img = np.array(controlnet_img)
-        controlnet_img = nms(controlnet_img, 127, 3)
-        controlnet_img = cv2.GaussianBlur(controlnet_img, (0, 0), 3)
-
-        # higher threshold, thinner line
-        random_val = int(round(random.uniform(0.01, 0.10), 2) * 255)
-        controlnet_img[controlnet_img > random_val] = 255
-        controlnet_img[controlnet_img < 255] = 0
-        image = Image.fromarray(controlnet_img)
-
-
-    prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)
-
-    generator = torch.Generator(device=device).manual_seed(seed)
-    if use_canny:
-        out = pipe_canny(
-            prompt=prompt,
-            negative_prompt=negative_prompt,
-            image=image,
-            num_inference_steps=num_steps,
-            generator=generator,
-            controlnet_conditioning_scale=controlnet_conditioning_scale,
-            guidance_scale=guidance_scale,
-            width=new_width,
-            height=new_height,
-        ).images[0]
-    else:
-        out = pipe(
-            prompt=prompt,
-            negative_prompt=negative_prompt,
-            image=image,
-            num_inference_steps=num_steps,
-            generator=generator,
-            controlnet_conditioning_scale=controlnet_conditioning_scale,
-            guidance_scale=guidance_scale,
-            width=new_width,
-            height=new_height,).images[0]
-
-    return (controlnet_img, out)
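Reviewer note on the randomized binarization above: `round(random.uniform(0.01, 0.10), 2) * 255` yields thresholds between roughly 2.55 and 25.5, so `int(...)` lands in 2–25; pixels above it become pure white and everything else black, which slightly randomizes stroke width on each run. A quick check:

```python
import random

random.seed(0)  # illustrative seeding; the removed code does not seed here
for _ in range(3):
    random_val = int(round(random.uniform(0.01, 0.10), 2) * 255)
    print(random_val)  # always falls in the 2..25 range
```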
-
-
-with gr.Blocks(css="style.css", js=js_func) as demo:
-    gr.Markdown(DESCRIPTION, elem_id="description")
-    gr.DuplicateButton(
-        value="Duplicate Space for private use",
-        elem_id="duplicate-button",
-        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
-    )
-
-    with gr.Row():
-        with gr.Column():
-            with gr.Group():
-                image = gr.ImageEditor(type="pil", image_mode="L", crop_size=(512, 512))
-                prompt = gr.Textbox(label="Prompt")
-                style = gr.Dropdown(label="Style", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
-                use_hed = gr.Checkbox(label="use HED detector", value=False, info="check this box if you upload an image and want to turn it to a sketch")
-                use_canny = gr.Checkbox(label="use Canny", value=False, info="check this to use ControlNet canny instead of scribble")
-                run_button = gr.Button("Run")
-            with gr.Accordion("Advanced options", open=False):
-                negative_prompt = gr.Textbox(
-                    label="Negative prompt",
-                    value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
-                )
-                num_steps = gr.Slider(
-                    label="Number of steps",
-                    minimum=1,
-                    maximum=50,
-                    step=1,
-                    value=25,
-                )
-                guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=0.1,
-                    maximum=10.0,
-                    step=0.1,
-                    value=5,
-                )
-                controlnet_conditioning_scale = gr.Slider(
-                    label="controlnet conditioning scale",
-                    minimum=0.5,
-                    maximum=5.0,
-                    step=0.1,
-                    value=0.9,
-                )
-                seed = gr.Slider(
-                    label="Seed",
-                    minimum=0,
-                    maximum=MAX_SEED,
-                    step=1,
-                    value=0,
-                )
-                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-        with gr.Column():
-            with gr.Group():
-                image_slider = ImageSlider(position=0.5)
-
-
-    inputs = [
-        image,
-        prompt,
-        negative_prompt,
-        style,
-        num_steps,
-        guidance_scale,
-        controlnet_conditioning_scale,
-        seed,
-        use_hed,
-        use_canny
-    ]
-    outputs = [image_slider]
-    run_button.click(
-        fn=randomize_seed_fn,
-        inputs=[seed, randomize_seed],
-        outputs=seed,
-        queue=False,
-        api_name=False,
-    ).then(lambda x: None, inputs=None, outputs=image_slider).then(
-        fn=run, inputs=inputs, outputs=outputs
-    )
-
-
-
-demo.queue().launch()
+MAX_IMAGE_PIXELS = 100000000  # Adjust if needed.
+
+def resize_image(image, max_pixels=MAX_IMAGE_PIXELS):
+    """Resize an image to have at most max_pixels, maintaining aspect ratio."""
+    width, height = image.size
+    if width * height > max_pixels:
+        scale_factor = (max_pixels / (width * height)) ** 0.5
+        new_size = (int(width * scale_factor), int(height * scale_factor))
+        return image.resize(new_size, Image.ANTIALIAS)
+    return image
+
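Reviewer note on this hunk: `Image.ANTIALIAS` was deprecated in Pillow 9.1 and removed in Pillow 10, so this line raises `AttributeError` on current Pillow; `Image.LANCZOS` (or `Image.Resampling.LANCZOS`) is the drop-in replacement. A sketch of the same helper on modern Pillow:

```python
from PIL import Image

MAX_IMAGE_PIXELS = 100000000  # same pixel budget as in the commit

def resize_image(image: Image.Image, max_pixels: int = MAX_IMAGE_PIXELS) -> Image.Image:
    """Downscale so width*height <= max_pixels, keeping aspect ratio."""
    width, height = image.size
    if width * height > max_pixels:
        # Uniform scale s with (w*s)*(h*s) = max_pixels  =>  s = sqrt(max/(w*h))
        scale_factor = (max_pixels / (width * height)) ** 0.5
        new_size = (int(width * scale_factor), int(height * scale_factor))
        return image.resize(new_size, Image.LANCZOS)  # ANTIALIAS removed in Pillow 10
    return image
```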
+def process(image, prompt, style, detector_name):
+    # Convert image to RGB mode if it's not already
+    if image.mode != 'RGB':
+        image = image.convert('RGB')
+    image = resize_image(image)
+
+    width, height = image.size
+
+    prompt, negative_prompt = apply_style(style, prompt)
+
+    if detector_name == "hed":
+        image = HWC3(np.array(image, dtype=np.uint8))
+        with torch.no_grad():
+            detected_map = hed(image, scribble=True)
+        detected_map = HWC3(detected_map)
+        image = Image.fromarray(detected_map)
+        images = pipe(prompt, negative_prompt=negative_prompt, image=image, height=height, width=width).images
+        return images[0]
+    elif detector_name == "scribble":
+        image = HWC3(np.array(image, dtype=np.uint8))
+        with torch.no_grad():
+            detected_map = nms(image, 127, 3.0)
+        detected_map = HWC3(detected_map)
+        image = Image.fromarray(detected_map)
+        images = pipe(prompt, negative_prompt=negative_prompt, image=image, height=height, width=width).images
+        return images[0]
+    elif detector_name == "canny":
+        image = np.array(image, dtype=np.uint8)
+        image = cv2.Canny(image, 100, 200)
+        image = image[:, :, None]
+        image = np.concatenate([image, image, image], axis=2)
+        detected_map = image
+        image = Image.fromarray(detected_map)
+        images = pipe_canny(prompt, negative_prompt=negative_prompt, image=image, height=height, width=width).images
+        return images[0]
+
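A minimal usage sketch of the new `process` entry point, assuming the module-level `pipe`, `pipe_canny`, `hed`, `nms`, and `HWC3` from app.py are in scope; the file names here are hypothetical:

```python
from PIL import Image

# Any RGB or grayscale image works: process() converts to RGB and
# resizes internally before running the chosen detector.
sketch = Image.open("my_sketch.png")
result = process(sketch, "a cozy cabin in the woods", "(No style)", "scribble")
result.save("generated.png")
```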
+block_css = (
+    code := """
+#image_upload {
+    height: 100% !important;
+}
+#prompt_input {
+    height: 100% !important;
+}
+#select_style {
+    height: 100% !important;
+}
+#detect_method {
+    height: 100% !important;
+}
+#submit_button {
+    height: 100% !important;
+}
+"""
+)
+
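Stepping back to `process` above: it forwards the resized image's exact dimensions as `height`/`width`, but diffusers' SDXL ControlNet pipelines expect dimensions divisible by 8 (the VAE downscale factor), so arbitrary sizes from `resize_image` can raise an error at generation time. A small hypothetical helper that snaps sizes to a safe multiple:

```python
def snap_to_multiple_of_8(size: int) -> int:
    """Round a pixel dimension down to the nearest multiple of 8 (minimum 8)."""
    return max(8, (size // 8) * 8)

# Hypothetical use inside process(), before the pipeline call:
#   width, height = snap_to_multiple_of_8(width), snap_to_multiple_of_8(height)
print(snap_to_multiple_of_8(1023))  # -> 1016
```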
+def create_demo():
+    """Create Gradio demo."""
+
+    with gr.Blocks(css=block_css) as demo:
+        gr.Markdown(DESCRIPTION)
+        with gr.Row():
+            with gr.Column():
+                input_image = gr.Image(source='upload', elem_id="image_upload", tool='editor', type="pil")
+                prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt", elem_id="prompt_input")
+                style = gr.Dropdown(STYLE_NAMES, value=DEFAULT_STYLE_NAME, label="Select style", elem_id="select_style")
+                detect_method = gr.Dropdown(choices=["scribble", "hed", "canny"], value="scribble", label="Select Detect Method", elem_id="detect_method")
+                submit_btn = gr.Button("Generate", elem_id="submit_button")
+            with gr.Column():
+                gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=2, height="auto")
+
+        submit_btn.click(process, inputs=[input_image, prompt, style, detect_method], outputs=[gallery])
+
+        # Refresh button to apply the dark theme
+        refresh_btn = gr.Button("Refresh for Dark Theme")
+        refresh_btn.click(None, None, None, _js=js_func)
+
+    return demo
+
+hed = HEDdetector.from_pretrained('lllyasviel/ControlNet')
+
+if __name__ == "__main__":
+    demo = create_demo()
+    demo.launch(debug=True)
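Reviewer note on `create_demo`: `gr.Image(source='upload', tool='editor')`, `Gallery.style(grid=...)`, and the `_js=` keyword are Gradio 3.x APIs that Gradio 4 removed, so this block only runs against a pinned 3.x install. A hedged sketch of the same column under Gradio 4 conventions, assuming `block_css` and `js_func` are defined as elsewhere in app.py:

```python
import gradio as gr

# Sketch only: Gradio 4.x equivalents of the 3.x calls in this commit.
with gr.Blocks(css=block_css) as demo:
    input_image = gr.Image(type="pil", elem_id="image_upload")  # 'source'/'tool' kwargs were removed
    gallery = gr.Gallery(label="Generated images", show_label=False,
                         elem_id="gallery", columns=2)  # .style(grid=...) became columns=
    refresh_btn = gr.Button("Refresh for Dark Theme")
    refresh_btn.click(None, None, None, js=js_func)  # '_js' was renamed to 'js'
```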