Spaces:
Runtime error
Upload 3 files
- README.md +8 -9
- app.py +117 -0
- requirements.txt +6 -0
README.md
CHANGED
@@ -1,13 +1,12 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: floorplan generation
+emoji: 📏
+colorFrom: blue
+colorTo: green
 sdk: gradio
-sdk_version:
+sdk_version: 3.12.0
+python_version: 3.9.13
 app_file: app.py
-pinned:
+pinned: true
 license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+---
app.py
ADDED
@@ -0,0 +1,117 @@
+import gradio
+import cv2
+from PIL import Image
+import numpy as np
+
+from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
+import torch
+from random import randrange
+
+# Run on the GPU when available, otherwise fall back to the CPU.
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+base_model_id = "runwayml/stable-diffusion-v1-5"
+model_id = "LuyangZ/controlnet_Neufert4_64_100"
+
+# Load the trained ControlNet and attach it to the Stable Diffusion base model.
+controlnet = ControlNetModel.from_pretrained(model_id, torch_dtype=torch.float32)
+controlnet.to(device)
+torch.cuda.empty_cache()
+
+pipeline = StableDiffusionControlNetPipeline.from_pretrained(base_model_id, controlnet=controlnet, torch_dtype=torch.float32)
+pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
+pipeline = pipeline.to(device)
+torch.cuda.empty_cache()
+
+# Fix one random seed per session so repeated generations are reproducible.
+seed = randrange(500)
+generator = torch.Generator(device=device).manual_seed(seed)
+
+
+def expand2square(ol_img, background_color):
+    """Pad a PIL image to a square canvas with a 20% margin."""
+    width, height = ol_img.size
+
+    if width == height:
+        pad = int(width * 0.2)
+        width_new = width + pad
+        halfpad = int(pad / 2)
+
+        ol_result = Image.new(ol_img.mode, (width_new, width_new), background_color)
+        ol_result.paste(ol_img, (halfpad, halfpad))
+        return ol_result
+
+    elif width > height:
+        pad = int(width * 0.2)
+        width_new = width + pad
+        halfpad = int(pad / 2)
+
+        ol_result = Image.new(ol_img.mode, (width_new, width_new), background_color)
+        ol_result.paste(ol_img, (halfpad, (width_new - height) // 2))
+        return ol_result
+
+    else:
+        pad = int(height * 0.2)
+        height_new = height + pad
+        halfpad = int(pad / 2)
+
+        ol_result = Image.new(ol_img.mode, (height_new, height_new), background_color)
+        ol_result.paste(ol_img, ((height_new - width) // 2, halfpad))
+        return ol_result
+
+
+def clean_img(image, mask):
+    """White out everything in the generated image that falls outside the outline mask."""
+    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
+    mask = cv2.threshold(mask, 250, 255, cv2.THRESH_BINARY_INV)[1]
+
+    image[mask < 250] = (255, 255, 255)
+    image = Image.fromarray(image).convert('RGB')
+    return image
+
+
+def floorplan_generation(outline, num_of_rooms):
+    new_width = 512
+    new_height = 512
+
+    outline = cv2.cvtColor(outline, cv2.COLOR_RGB2BGR)
+    outline_original = outline.copy()
+
+    # Crop to the bounding box of the drawn outline, then pad to a square and resize.
+    gray = cv2.cvtColor(outline, cv2.COLOR_BGR2GRAY)
+    thresh = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY_INV)[1]
+
+    x, y, w, h = cv2.boundingRect(thresh)
+    n_outline = outline_original[y:y + h, x:x + w]
+    n_outline = cv2.cvtColor(n_outline, cv2.COLOR_BGR2RGB)
+    n_outline = Image.fromarray(n_outline).convert('RGB')
+    n_outline = expand2square(n_outline, (255, 255, 255))
+    n_outline = n_outline.resize((new_width, new_height))
+
+    validation_prompt = "floor plan," + str(num_of_rooms) + " bedrooms"
+    validation_image = n_outline
+
+    image = pipeline(validation_prompt,
+                     validation_image,
+                     num_inference_steps=20,
+                     generator=generator).images[0]
+
+    # Mask the generated plan with the original outline before returning it.
+    image = np.array(image)
+    mask = np.array(n_outline)
+    mask = cv2.cvtColor(mask, cv2.COLOR_RGB2BGR)
+    image = clean_img(image, mask)
+    return image
+
+
+gradio_interface = gradio.Interface(
+    fn=floorplan_generation,
+    inputs=[gradio.Image(label="Floor Plan Outline, Entrance"),
+            gradio.Textbox(type="text", label="number of rooms", placeholder="number of rooms")],
+    outputs=gradio.Image(label="Generated Floor Plan"),
+    title="floorplan generation")
+
+gradio_interface.launch(enable_queue=False)
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+numpy==1.25.0
+opencv-python==4.8.0.74
+diffusers==0.24.0
+torch==2.0.1
+accelerate==0.25.0
+transformers
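
For quick local testing outside the Space, floorplan_generation can be called directly. Below is a minimal sketch, assuming the pinned packages from requirements.txt are installed and that an outline drawing exists as outline.png (a hypothetical file); note that importing app also loads both models and launches the Gradio interface, so for scripted use the launch call would need to be guarded or removed first:

import cv2
from app import floorplan_generation  # assumption: app.py is on the import path

# Gradio's Image component hands the function an RGB numpy array,
# so the same contract is reproduced here by hand.
outline = cv2.imread("outline.png")                 # hypothetical input file, read as BGR
outline = cv2.cvtColor(outline, cv2.COLOR_BGR2RGB)  # convert to the RGB layout Gradio supplies

plan = floorplan_generation(outline, 3)  # request a three-bedroom plan
plan.save("generated_plan.png")          # the function returns a PIL.Image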