---
license: other
license_name: bria-legal-lobby
license_link: https://bria.ai/legal-lobby
---

# BRIA 3.0 ControlNet Union Model Card

BRIA-3.0 ControlNet-Union, trained on the foundation of the BRIA-4B-Adapt Text-to-Image model, supports 6 control modes: depth (0), canny (1), colorgrid (2), recolor (3), tile (4), and pose (5). The model can be used jointly with other ControlNets.

This model combines technological innovation with ethical responsibility and legal security, setting a new standard in the AI industry. Bria AI licenses the foundation model with full legal liability coverage. Our dataset does not contain copyrighted materials, such as fictional characters, logos, trademarks, public figures, harmful content, or privacy-infringing content.

CLICK HERE FOR A DEMO

For more information, please visit our website.

Join our Discord community for more information, tutorials, tools, and to connect with other users!

![ControlNet pose examples](controlnet_pose_showoff.png)

## Get Access

Interested in BRIA-3.0 ControlNet-Union? A purchased license is required for access, ensuring royalty management with our data partners and full liability coverage for commercial use.

Are you a startup or a student? We encourage you to apply for our Startup Program to request access. This program is designed to support emerging businesses and academic pursuits with our cutting-edge technology.

Contact us today to unlock the potential of BRIA-3.0 ControlNet-Union! By submitting the form above, you agree to BRIA's Privacy policy and Terms & conditions.

## Key Features

- **Legally Compliant:** Offers full legal liability coverage for copyright and privacy infringements. Thanks to training on 100% licensed data from leading data partners, we ensure the ethical use of content.
- **Patented Attribution Engine:** Our attribution engine compensates our data partners, powered by our proprietary and patented algorithms.
- **Enterprise-Ready:** Specifically designed for business applications, BRIA-4B-Adapt delivers high-quality fine-tuning capabilities for generating compliant imagery for a variety of commercial needs.
- **Customizable Technology:** Provides access to source code and weights for extensive customization, catering to specific business requirements.
- **Fully Automated:** Provides fully automatic, no-code fine-tuning capabilities on Bria's platform: https://platform.bria.ai/console/tailored-generation.

## Model Description

- **Developed by:** BRIA AI
- **Model type:** Latent Flow-Matching Text-to-Image Model
- **License:** Commercial licensing terms & conditions; purchase is required to license and access the model.
- **Model Description:** ControlNet Union for the BRIA 3.0 Text-to-Image model. The model generates images guided by text and a conditioning image.
- **Resources for more information:** BRIA AI

## Control Mode

| Control Mode | Description |
|:------------:|:-----------:|
| 0            | depth       |
| 1            | canny       |
| 2            | colorgrid   |
| 3            | recolor     |
| 4            | tile        |
| 5            | pose        |
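
To keep mode indices readable in your own code, a small name-to-index mapping can stand in for the magic numbers above (a convenience sketch, not part of the BRIA API):

```python
# Convenience mapping from mode name to the integer index the pipeline
# expects in control_mode; the values mirror the table above.
CONTROL_MODES = {"depth": 0, "canny": 1, "colorgrid": 2, "recolor": 3, "tile": 4, "pose": 5}

control_mode = CONTROL_MODES["canny"]  # -> 1
```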

## Inference

```bash
pip install diffusers==0.30.2 huggingface_hub
```
```python
# Download the custom pipeline and model code from the Hub.
from huggingface_hub import hf_hub_download
import os

try:
    local_dir = os.path.dirname(__file__)
except NameError:  # __file__ is not defined in notebooks / interactive sessions
    local_dir = '.'

hf_hub_download(repo_id="briaai/BRIA-4B-Adapt", filename='pipeline_bria.py', local_dir=local_dir)
hf_hub_download(repo_id="briaai/BRIA-4B-Adapt", filename='transformer_bria.py', local_dir=local_dir)
hf_hub_download(repo_id="briaai/BRIA-4B-Adapt", filename='bria_utils.py', local_dir=local_dir)
hf_hub_download(repo_id="briaai/BRIA-3.0-ControlNet-Union", filename='pipeline_bria_controlnet.py', local_dir=local_dir)
hf_hub_download(repo_id="briaai/BRIA-3.0-ControlNet-Union", filename='controlnet_bria.py', local_dir=local_dir)

import torch
from diffusers.utils import load_image
from controlnet_bria import BriaControlNetModel
from pipeline_bria_controlnet import BriaControlNetPipeline

base_model = 'briaai/BRIA-4B-Adapt'
controlnet_model = 'briaai/BRIA-3.0-ControlNet-Union'

# Load the ControlNet and build the pipeline on top of the base model.
controlnet = BriaControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)
pipe = BriaControlNetPipeline.from_pretrained(base_model, controlnet=controlnet, torch_dtype=torch.bfloat16, trust_remote_code=True)
pipe.to("cuda")

# Canny control image and its mode index (see the table above).
control_image_canny = load_image("https://huggingface.co/briaai/BRIA-3.0-ControlNet-Union/resolve/main/canny.jpg")
controlnet_conditioning_scale = 1.0
control_mode = 1

width, height = control_image_canny.size

prompt = 'In a serene living room, someone rests on a sapphire blue couch, diligently drawing in a rose-tinted notebook, with a sleek black coffee table, a muted green wall, an elegant geometric lamp, and a lush potted palm enhancing the peaceful ambiance.'

generator = torch.Generator(device="cuda").manual_seed(555)
image = pipe(
    prompt,
    control_image=control_image_canny,
    control_mode=control_mode,
    width=width,
    height=height,
    controlnet_conditioning_scale=controlnet_conditioning_scale,
    num_inference_steps=50,
    max_sequence_length=128,
    guidance_scale=5,
    generator=generator,
).images[0]
```
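
The example conditions on a ready-made canny map hosted in this repo, and `pipe(...)` returns a standard PIL image, so the result can be saved with `image.save("result.png")`. If you want to condition on your own photo instead, below is a minimal sketch for producing a canny control image; it assumes `opencv-python`, `numpy`, and `Pillow` are installed, and the input path and thresholds are illustrative rather than values from BRIA:

```python
# Build a canny control image from an arbitrary photo (sketch).
import cv2
import numpy as np
from PIL import Image

source = Image.open("my_photo.jpg").convert("RGB")         # hypothetical input path
gray = cv2.cvtColor(np.array(source), cv2.COLOR_RGB2GRAY)  # Canny expects 8-bit single-channel
edges = cv2.Canny(gray, 100, 200)                          # illustrative thresholds
control_image_canny = Image.fromarray(np.stack([edges] * 3, axis=-1))  # back to 3-channel RGB
```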

## Multi-Controls Inference

```python
# Download the custom pipeline and model code from the Hub.
from huggingface_hub import hf_hub_download
import os

try:
    local_dir = os.path.dirname(__file__)
except NameError:  # __file__ is not defined in notebooks / interactive sessions
    local_dir = '.'

hf_hub_download(repo_id="briaai/BRIA-4B-Adapt", filename='pipeline_bria.py', local_dir=local_dir)
hf_hub_download(repo_id="briaai/BRIA-4B-Adapt", filename='transformer_bria.py', local_dir=local_dir)
hf_hub_download(repo_id="briaai/BRIA-4B-Adapt", filename='bria_utils.py', local_dir=local_dir)
hf_hub_download(repo_id="briaai/BRIA-3.0-ControlNet-Union", filename='pipeline_bria_controlnet.py', local_dir=local_dir)
hf_hub_download(repo_id="briaai/BRIA-3.0-ControlNet-Union", filename='controlnet_bria.py', local_dir=local_dir)

import torch
from diffusers.utils import load_image
from controlnet_bria import BriaControlNetModel, BriaMultiControlNetModel
from pipeline_bria_controlnet import BriaControlNetPipeline

base_model = 'briaai/BRIA-4B-Adapt'
controlnet_model = 'briaai/BRIA-3.0-ControlNet-Union'

# Wrap the single ControlNet so the pipeline can apply it once per control input.
controlnet = BriaControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)
controlnet = BriaMultiControlNetModel([controlnet])

pipe = BriaControlNetPipeline.from_pretrained(base_model, controlnet=controlnet, torch_dtype=torch.bfloat16, trust_remote_code=True)
pipe.to("cuda")

control_image_colorgrid = load_image("https://huggingface.co/briaai/BRIA-3.0-ControlNet-Union/resolve/main/colorgrid.jpg")
control_image_pose = load_image("https://huggingface.co/briaai/BRIA-3.0-ControlNet-Union/resolve/main/pose.jpg")

# One entry per control: images, per-control scales, and mode indices (colorgrid=2, pose=5).
control_image = [control_image_colorgrid, control_image_pose]
controlnet_conditioning_scale = [0.5, 0.5]
control_mode = [2, 5]

width, height = control_image[0].size

prompt = 'Two kids in jackets play near a tent in a forest with trees.'

generator = torch.Generator(device="cuda").manual_seed(555)
image = pipe(
    prompt,
    control_image=control_image,
    control_mode=control_mode,
    width=width,
    height=height,
    controlnet_conditioning_scale=controlnet_conditioning_scale,
    num_inference_steps=50,
    max_sequence_length=128,
    guidance_scale=5,
    generator=generator,
).images[0]
```
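
As before, the pipeline returns a PIL image. Each entry in `controlnet_conditioning_scale` scales the influence of the matching control input, so `[0.5, 0.5]` blends colorgrid and pose evenly, while skewing the values (e.g. `[0.8, 0.3]`) should let one condition dominate. A brief usage note with an illustrative file name:

```python
# Save the blended result (file name is illustrative).
image.save("multi_control_result.png")
```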