Generated with:

import torch 
from diffusers import AutoencoderKL, FluxTransformer2DModel, FlowMatchEulerDiscreteScheduler, FluxPipeline
from transformers import (
    CLIPTextConfig, 
    CLIPTextModel,
    CLIPTokenizer,
    T5Config,
    T5EncoderModel, 
    AutoTokenizer,
)


def get_dummy_components(num_layers: int = 1, num_single_layers: int = 1):
    """Build the tiny randomly-initialized components for a test FluxPipeline.

    Every model is seeded with ``torch.manual_seed(0)`` immediately before
    construction so the generated weights are reproducible. The returned dict
    maps each FluxPipeline constructor argument name to its component.
    """
    # Tiny transformer backbone; seed first so its init is deterministic.
    torch.manual_seed(0)
    transformer = FluxTransformer2DModel(
        patch_size=1,
        in_channels=4,
        num_layers=num_layers,
        num_single_layers=num_single_layers,
        attention_head_dim=16,
        num_attention_heads=2,
        joint_attention_dim=32,
        pooled_projection_dim=32,
        axes_dims_rope=[4, 4, 8],
    )

    # Minimal CLIP text-encoder configuration.
    clip_config = CLIPTextConfig(
        bos_token_id=0,
        eos_token_id=2,
        hidden_size=32,
        intermediate_size=37,
        layer_norm_eps=1e-05,
        num_attention_heads=4,
        num_hidden_layers=5,
        pad_token_id=1,
        vocab_size=1000,
        hidden_act="gelu",
        projection_dim=32,
    )

    torch.manual_seed(0)
    text_encoder = CLIPTextModel(clip_config)

    torch.manual_seed(0)
    # Tiny dummy T5 encoder using a gated-silu feed-forward projection.
    t5_config = T5Config(
        vocab_size=1103,
        d_model=32,
        d_ff=38,
        d_kv=8,
        num_layers=2,
        num_heads=4,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        bos_token_id=0,
        pad_token_id=0,
        is_encoder_decoder=True,
        feed_forward_proj="gated-silu",
        is_gated_act=True,
        dense_act_fn="gelu_new",
        tie_word_embeddings=False,
    )
    text_encoder_2 = T5EncoderModel(config=t5_config)

    # Tokenizers come from existing tiny test repos on the Hub.
    tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
    tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

    # Tiny VAE without quant convs, matching Flux's shift/scale factors.
    torch.manual_seed(0)
    vae = AutoencoderKL(
        sample_size=32,
        in_channels=3,
        out_channels=3,
        block_out_channels=(4,),
        layers_per_block=1,
        latent_channels=1,
        norm_num_groups=1,
        use_quant_conv=False,
        use_post_quant_conv=False,
        shift_factor=0.0609,
        scaling_factor=1.5035,
    )

    scheduler = FlowMatchEulerDiscreteScheduler()

    return dict(
        scheduler=scheduler,
        text_encoder=text_encoder,
        text_encoder_2=text_encoder_2,
        tokenizer=tokenizer,
        tokenizer_2=tokenizer_2,
        transformer=transformer,
        vae=vae,
        image_encoder=None,
        feature_extractor=None,
    )

if __name__ == "__main__":
    # Assemble the tiny dummy pipeline and publish it to the Hub test org.
    pipe = FluxPipeline(**get_dummy_components())
    pipe.push_to_hub("hf-internal-testing/tiny-flux-pipe-gated-silu")
from huggingface_hub import hf_hub_download, upload_file
import os

# Copy the sharded transformer weight files from the source test repo into
# the `transformer/` folder of the gated-silu pipeline repo.
REPO_SRC = "hf-internal-testing/tiny-flux-sharded"
REPO_DST = "hf-internal-testing/tiny-flux-pipe-gated-silu"
DST_FOLDER = "transformer"

filenames = [
    "transformer/config.json",
    "transformer/diffusion_pytorch_model-00001-of-00002.safetensors",
    "transformer/diffusion_pytorch_model-00002-of-00002.safetensors",
    "transformer/diffusion_pytorch_model.safetensors",
    "transformer/diffusion_pytorch_model.safetensors.index.json"
]

for file in filenames:
    local_path = hf_hub_download(repo_id=REPO_SRC, filename=file)
    upload_file(
        path_or_fileobj=local_path,
        # Hub repo paths always use forward slashes; os.path.join would emit
        # backslashes on Windows, so build the path explicitly with "/".
        path_in_repo=f"{DST_FOLDER}/{os.path.basename(file)}",
        repo_id=REPO_DST,
        repo_type="model",
        commit_message=f"Copied {file} from {REPO_SRC}"
    )
Downloads last month
858
Inference Providers NEW
This model isn't deployed by any Inference Provider. 🙋 Ask for provider support