Bria AI model weights are open source for non-commercial use only, per the provided license.

BRIA-3.2 ControlNet Union Model Card

BRIA-3.2 ControlNet-Union, trained on top of the BRIA-3.2 Text-to-Image foundation model, supports six control modes: depth (0), canny (1), colorgrid (2), recolor (3), tile (4), and pose (5). It can also be used jointly with other ControlNets.

Built with a strong commitment to legal compliance and responsible AI practices, this model ensures safe and scalable generative image capabilities for commercial use.

CLICK HERE FOR A DEMO

For more information, please visit our website.

Join our Discord community for more information, tutorials, tools, and to connect with other users!

Get Access

BRIA-3.2-ControlNet-Union requires access to BRIA-3.2 Text-to-Image. For more information, click here.

Model Description

  • Developed by: BRIA AI
  • Model type: Latent Flow-Matching Text-to-Image Model
  • License: Open weights for non-commercial use, per the provided license; commercial use is governed by BRIA's commercial licensing terms & conditions.
  • Purchase is required to license and access the model for commercial use.
  • Model Description: ControlNet Union for the BRIA-3.2 Text-to-Image model. It generates images guided by a text prompt and a conditioning image.
  • Resources for more information: BRIA AI

Control Mode

Control Mode    Description
0               depth
1               canny
2               colorgrid
3               recolor
4               tile
5               pose
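
For convenience when scripting, the mode indices above can be collected in a small lookup table; CONTROL_MODES below is an illustrative helper, not part of the BRIA API:

# Hypothetical mapping from human-readable mode names to the integer
# control_mode values the pipeline expects.
CONTROL_MODES = {
    "depth": 0,
    "canny": 1,
    "colorgrid": 2,
    "recolor": 3,
    "tile": 4,
    "pose": 5,
}

control_mode = CONTROL_MODES["canny"]  # 1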

Installation

pip install -qr https://huggingface.co/briaai/BRIA-3.2/resolve/main/requirements.txt
pip install diffusers==0.30.2 huggingface_hub
from huggingface_hub import hf_hub_download
import os

# Resolve this script's directory; fall back to the current working
# directory when __file__ is undefined (e.g., in a notebook).
try:
    local_dir = os.path.dirname(__file__)
except NameError:
    local_dir = '.'

# Download the custom pipeline and model files next to this script so the
# inference examples below can import them as local modules.
hf_hub_download(repo_id="briaai/BRIA-3.2", revision="pre_diffusers_support", filename='pipeline_bria.py', local_dir=local_dir)
hf_hub_download(repo_id="briaai/BRIA-3.2", revision="pre_diffusers_support", filename='transformer_bria.py', local_dir=local_dir)
hf_hub_download(repo_id="briaai/BRIA-3.2", revision="pre_diffusers_support", filename='bria_utils.py', local_dir=local_dir)
hf_hub_download(repo_id="briaai/BRIA-3.2-ControlNet-Union", filename='pipeline_bria_controlnet.py', local_dir=local_dir)
hf_hub_download(repo_id="briaai/BRIA-3.2-ControlNet-Union", filename='controlnet_bria.py', local_dir=local_dir)

Inference

import torch
from diffusers.utils import load_image
from controlnet_bria import BriaControlNetModel
from pipeline_bria_controlnet import BriaControlNetPipeline
import PIL.Image as Image

# Supported ~1MP resolutions, keyed by aspect ratio (width / height)
RATIO_CONFIGS_1024 = {
    0.6666666666666666: {"width": 832, "height": 1248},
    0.7432432432432432: {"width": 880, "height": 1184},
    0.8028169014084507: {"width": 912, "height": 1136},
    1.0: {"width": 1024, "height": 1024},
    1.2456140350877194: {"width": 1136, "height": 912},
    1.3454545454545455: {"width": 1184, "height": 880},
    1.4339622641509433: {"width": 1216, "height": 848},
    1.5: {"width": 1248, "height": 832},
    1.5490196078431373: {"width": 1264, "height": 816},
    1.62: {"width": 1296, "height": 800},
    1.7708333333333333: {"width": 1360, "height": 768},
}

def resize_img(control_image):
    # Snap the control image to the closest supported aspect ratio above.
    image_ratio = control_image.width / control_image.height
    ratio = min(RATIO_CONFIGS_1024.keys(), key=lambda k: abs(k - image_ratio))
    to_height = RATIO_CONFIGS_1024[ratio]["height"]
    to_width = RATIO_CONFIGS_1024[ratio]["width"]
    resized_image = control_image.resize((to_width, to_height), resample=Image.Resampling.LANCZOS)
    return resized_image


base_model = 'briaai/BRIA-3.2'
controlnet_model = 'briaai/BRIA-3.2-ControlNet-Union'
controlnet = BriaControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)
pipeline = BriaControlNetPipeline.from_pretrained(base_model, revision="pre_diffusers_support", controlnet=controlnet, torch_dtype=torch.bfloat16, trust_remote_code=True)
pipeline = pipeline.to(device="cuda", dtype=torch.bfloat16)

control_image_canny = load_image("https://huggingface.co/briaai/BRIA-3.2-ControlNet-Union/resolve/main/images/canny.jpg")
controlnet_conditioning_scale = 1.0
control_mode = 1
control_image_canny = resize_img(control_image_canny)
width, height = control_image_canny.size

prompt = 'In a serene living room, someone rests on a sapphire blue couch, diligently drawing in a rose-tinted notebook, with a sleek black coffee table, a muted green wall, an elegant geometric lamp, and a lush potted palm enhancing the peaceful ambiance.'

generator = torch.Generator(device="cuda").manual_seed(555)
image = pipeline(
    prompt, 
    control_image=control_image_canny,
    control_mode=control_mode,
    width=width,
    height=height,
    controlnet_conditioning_scale=controlnet_conditioning_scale,
    num_inference_steps=50, 
    max_sequence_length=128,
    guidance_scale=5,
    generator=generator,
    negative_prompt="Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate"
).images[0]
image.save("canny_result.png")  # `image` is a PIL.Image
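
The example above loads a ready-made canny map. To condition on your own photo instead, a common approach is to extract edges with OpenCV; the sketch below is illustrative (make_canny_condition, my_photo.jpg, and the 100/200 thresholds are assumptions, not part of the BRIA API):

import cv2
import numpy as np
from PIL import Image

def make_canny_condition(image, low_threshold=100, high_threshold=200):
    # Extract Canny edges and replicate the single edge channel to RGB,
    # the layout ControlNet conditioning images use.
    arr = np.array(image.convert("RGB"))
    edges = cv2.Canny(arr, low_threshold, high_threshold)
    return Image.fromarray(np.stack([edges] * 3, axis=-1))

control_image_canny = resize_img(make_canny_condition(Image.open("my_photo.jpg")))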

Multi-Controls Inference

import torch
from diffusers.utils import load_image
from controlnet_bria import BriaControlNetModel, BriaMultiControlNetModel
from pipeline_bria_controlnet import BriaControlNetPipeline
import PIL.Image as Image

base_model = 'briaai/BRIA-3.2'
controlnet_model = 'briaai/BRIA-3.2-ControlNet-Union'

controlnet = BriaControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)
controlnet = BriaMultiControlNetModel([controlnet])

pipe = BriaControlNetPipeline.from_pretrained(base_model, revision="pre_diffusers_support", controlnet=controlnet, torch_dtype=torch.bfloat16, trust_remote_code=True)
pipe.to("cuda")

control_image_colorgrid = load_image("https://huggingface.co/briaai/BRIA-3.2-ControlNet-Union/resolve/main/images/colorgrid.jpg")
control_image_pose = load_image("https://huggingface.co/briaai/BRIA-3.2-ControlNet-Union/resolve/main/images/pose.jpg")

control_image = [control_image_colorgrid, control_image_pose]
controlnet_conditioning_scale = [0.5, 0.5]
control_mode = [2, 5]

width, height = control_image[0].size

prompt = 'Two kids in jackets play near a tent in a forest.'

generator = torch.Generator(device="cuda").manual_seed(555)
image = pipe(
    prompt, 
    control_image=control_image,
    control_mode=control_mode,
    width=width,
    height=height,
    controlnet_conditioning_scale=controlnet_conditioning_scale,
    num_inference_steps=50, 
    max_sequence_length=128,
    guidance_scale=5,
    generator=generator,
    negative_prompt="Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate"
).images[0]
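image.save("multi_control_result.png")  # save the generated image

The colorgrid and pose maps above are pre-made samples. To derive a pose map from your own reference photo, the OpenPose detector from the controlnet_aux package is one option; the sketch below assumes controlnet_aux is installed (pip install controlnet_aux) and uses a placeholder reference.jpg:

from controlnet_aux import OpenposeDetector
from diffusers.utils import load_image

# Download the OpenPose annotator weights and run the detector on a
# reference photo to obtain a pose conditioning image.
open_pose = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
control_image_pose = open_pose(load_image("reference.jpg"))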