Update README.md
Browse files
README.md
CHANGED
@@ -48,7 +48,7 @@ pip install diffusers==0.30.2 hf_hub_download
|
|
48 |
```python
|
49 |
from huggingface_hub import hf_hub_download
|
50 |
import os
|
51 |
-
|
52 |
try:
|
53 |
local_dir = os.path.dirname(__file__)
|
54 |
except:
|
@@ -62,27 +62,49 @@ hf_hub_download(repo_id="briaai/BRIA-4B-Adapt-ControlNet-Union", filename='contr
|
|
62 |
|
63 |
import torch
|
64 |
from diffusers.utils import load_image
|
65 |
-
from controlnet_bria import BriaControlNetModel
|
66 |
from pipeline_bria_controlnet import BriaControlNetPipeline
|
67 |
import PIL.Image as Image
|
68 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
69 |
base_model = 'briaai/BRIA-4B-Adapt'
|
70 |
controlnet_model = 'briaai/BRIA-4B-Adapt-ControlNet-Union'
|
71 |
-
|
72 |
controlnet = BriaControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)
|
73 |
-
|
74 |
-
|
75 |
|
76 |
control_image_canny = load_image("https://huggingface.co/briaai/BRIA-4B-Adapt-ControlNet-Union/resolve/main/images/canny.jpg")
|
77 |
controlnet_conditioning_scale = 1.0
|
78 |
control_mode = 1
|
79 |
-
|
80 |
width, height = control_image_canny.size
|
81 |
|
82 |
prompt = 'In a serene living room, someone rests on a sapphire blue couch, diligently drawing in a rose-tinted notebook, with a sleek black coffee table, a muted green wall, an elegant geometric lamp, and a lush potted palm enhancing the peaceful ambiance.'
|
83 |
|
84 |
generator = torch.Generator(device="cuda").manual_seed(555)
|
85 |
-
image = pipe(
|
86 |
prompt,
|
87 |
control_image=control_image_canny,
|
88 |
control_mode=control_mode,
|
@@ -92,8 +114,10 @@ image = pipe(
|
|
92 |
num_inference_steps=50,
|
93 |
max_sequence_length=128,
|
94 |
guidance_scale=5,
|
95 |
-
generator=generator
|
|
|
96 |
).images[0]
|
|
|
97 |
```
|
98 |
|
99 |
# Multi-Controls Inference
|
|
|
48 |
```python
|
49 |
from huggingface_hub import hf_hub_download
|
50 |
import os
|
51 |
+
import pandas as pd
|
52 |
try:
|
53 |
local_dir = os.path.dirname(__file__)
|
54 |
except:
|
|
|
62 |
|
63 |
import torch
|
64 |
from diffusers.utils import load_image
|
65 |
+
from controlnet_bria import BriaControlNetModel
|
66 |
from pipeline_bria_controlnet import BriaControlNetPipeline
|
67 |
import PIL.Image as Image
|
68 |
|
69 |
+
RATIO_CONFIGS_1024 = {
|
70 |
+
0.6666666666666666: {"width": 832, "height": 1248},
|
71 |
+
0.7432432432432432: {"width": 880, "height": 1184},
|
72 |
+
0.8028169014084507: {"width": 912, "height": 1136},
|
73 |
+
1.0: {"width": 1024, "height": 1024},
|
74 |
+
1.2456140350877194: {"width": 1136, "height": 912},
|
75 |
+
1.3454545454545455: {"width": 1184, "height": 880},
|
76 |
+
1.4339622641509433: {"width": 1216, "height": 848},
|
77 |
+
1.5: {"width": 1248, "height": 832},
|
78 |
+
1.5490196078431373: {"width": 1264, "height": 816},
|
79 |
+
1.62: {"width": 1296, "height": 800},
|
80 |
+
1.7708333333333333: {"width": 1360, "height": 768},
|
81 |
+
}
|
82 |
+
|
83 |
+
def resize_img(control_image):
|
84 |
+
image_ratio = control_image.width / control_image.height
|
85 |
+
ratio = min(RATIO_CONFIGS_1024.keys(), key=lambda k: abs(k - image_ratio))
|
86 |
+
to_height = RATIO_CONFIGS_1024[ratio]["height"]
|
87 |
+
to_width = RATIO_CONFIGS_1024[ratio]["width"]
|
88 |
+
resized_image = control_image.resize((to_width, to_height), resample=Image.Resampling.LANCZOS)
|
89 |
+
return resized_image
|
90 |
+
|
91 |
+
|
92 |
base_model = 'briaai/BRIA-4B-Adapt'
|
93 |
controlnet_model = 'briaai/BRIA-4B-Adapt-ControlNet-Union'
|
|
|
94 |
controlnet = BriaControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)
|
95 |
+
pipeline = BriaControlNetPipeline.from_pretrained(base_model, controlnet=controlnet, torch_dtype=torch.bfloat16, trust_remote_code=True)
|
96 |
+
pipeline = pipeline.to(device="cuda", dtype=torch.bfloat16)
|
97 |
|
98 |
control_image_canny = load_image("https://huggingface.co/briaai/BRIA-4B-Adapt-ControlNet-Union/resolve/main/images/canny.jpg")
|
99 |
controlnet_conditioning_scale = 1.0
|
100 |
control_mode = 1
|
101 |
+
control_image_canny = resize_img(control_image_canny)
|
102 |
width, height = control_image_canny.size
|
103 |
|
104 |
prompt = 'In a serene living room, someone rests on a sapphire blue couch, diligently drawing in a rose-tinted notebook, with a sleek black coffee table, a muted green wall, an elegant geometric lamp, and a lush potted palm enhancing the peaceful ambiance.'
|
105 |
|
106 |
generator = torch.Generator(device="cuda").manual_seed(555)
|
107 |
+
image = pipeline(
|
108 |
prompt,
|
109 |
control_image=control_image_canny,
|
110 |
control_mode=control_mode,
|
|
|
114 |
num_inference_steps=50,
|
115 |
max_sequence_length=128,
|
116 |
guidance_scale=5,
|
117 |
+
generator=generator,
|
118 |
+
negative_prompt="Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate"
|
119 |
).images[0]
|
120 |
+
print(image)
|
121 |
```
|
122 |
|
123 |
# Multi-Controls Inference
|