This tiny model is for debugging. It is randomly initialized, with its config adapted from Qwen/Qwen-Image.

File sizes:

  • ~10MB text_encoder/model.safetensors
  • ~200KB transformer/diffusion_pytorch_model.safetensors
  • ~5MB vae/diffusion_pytorch_model.safetensors
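
To double-check these numbers against the repo, one option is to query the Hub for per-file metadata. A minimal sketch using huggingface_hub (assumes the repo id tiny-random/Qwen-Image):

from huggingface_hub import HfApi

# files_metadata=True populates the size (in bytes) of every file in the repo
info = HfApi().model_info("tiny-random/Qwen-Image", files_metadata=True)
for sibling in info.siblings:
    if sibling.rfilename.endswith(".safetensors"):
        print(f"{sibling.rfilename}: {sibling.size / 1e6:.1f} MB")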

Example usage:

import torch
from diffusers import DiffusionPipeline

model_id = "tiny-random/Qwen-Image"
torch_dtype = torch.bfloat16
device = "cuda"
pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch_dtype)
pipe = pipe.to(device)

positive_magic = {
    "en": "Ultra HD, 4K, cinematic composition.",  # for English prompts
    "zh": "超清,4K,电影级构图",  # for Chinese prompts
}
prompt = '''A coffee shop entrance features a chalkboard sign reading "Qwen Coffee 😊 $2 per cup," with a neon light beside it displaying "通义千问". Next to it hangs a poster showing a beautiful Chinese woman, and beneath the poster is written "π≈3.1415926-53589793-23846264-33832795-02384197". Ultra HD, 4K, cinematic composition.'''
prompt += 'Some dummy text to make the prompt long enough. ' * 10  # pad the prompt for this tiny debug model
negative_prompt = " "

# Generate with different aspect ratios
aspect_ratios = {
    "1:1": (1328, 1328),
    "16:9": (1664, 928),
    "9:16": (928, 1664),
    "4:3": (1472, 1140),
    "3:4": (1140, 1472)
}

for width, height in aspect_ratios.values():
    image = pipe(
        prompt=prompt + positive_magic["en"],
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        num_inference_steps=4,
        true_cfg_scale=4.0,
        generator=torch.Generator(device=device).manual_seed(42)
    ).images[0]
    print(image)
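
Since every component is tiny and randomly initialized, the pipeline also runs on CPU for quick smoke tests. A minimal sketch (assumes float32 on CPU and an arbitrary small resolution; the prompt is padded the same way as above):

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("tiny-random/Qwen-Image", torch_dtype=torch.float32)
prompt = "A quick smoke test. " * 10  # pad the prompt, mirroring the example above
image = pipe(
    prompt=prompt,
    negative_prompt=" ",
    width=64,
    height=64,
    num_inference_steps=2,
    true_cfg_scale=4.0,
    generator=torch.Generator().manual_seed(0),
).images[0]
print(image.size)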

Code used to create this repo:

import json
from pathlib import Path

import torch
from diffusers import (
    AutoencoderKLQwenImage,
    DiffusionPipeline,
    FlowMatchEulerDiscreteScheduler,
    QwenImagePipeline,
    QwenImageTransformer2DModel,
)
from huggingface_hub import hf_hub_download
from transformers import AutoConfig, AutoTokenizer, Qwen2_5_VLForConditionalGeneration
from transformers.generation import GenerationConfig

source_model_id = "Qwen/Qwen-Image"
save_folder = "/tmp/tiny-random/Qwen-Image"

torch.set_default_dtype(torch.bfloat16)
scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(source_model_id, subfolder='scheduler')
tokenizer = AutoTokenizer.from_pretrained(source_model_id, subfolder='tokenizer')

def save_json(path, obj):
    # Write a config dict as pretty-printed JSON, creating parent dirs as needed.
    Path(path).parent.mkdir(parents=True, exist_ok=True)
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(obj, f, indent=2, ensure_ascii=False)

def init_weights(model):
    # Deterministically re-initialize every parameter with small random values.
    torch.manual_seed(42)
    with torch.no_grad():
        for name, p in sorted(model.named_parameters()):
            torch.nn.init.normal_(p, 0, 0.1)
            print(name, p.shape, p.dtype, p.device)

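# Text encoder: shrink the Qwen2.5-VL config (both the text and vision towers)
# to two tiny layers each, then build the model and randomly re-initialize it.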
with open(hf_hub_download(source_model_id, filename='text_encoder/config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config = json.load(f)
    config.update({
        'hidden_size': 32,
        'intermediate_size': 64,
        'max_window_layers': 1,
        'num_attention_heads': 2,
        'num_hidden_layers': 2,
        'num_key_value_heads': 1,
        'sliding_window': 64,
        'tie_word_embeddings': True,
        'use_sliding_window': True,
    })
    del config['torch_dtype']
    config['rope_scaling']['mrope_section'] = [4, 2, 2]
    config['text_config'].update({
        'hidden_size': 32,
        'intermediate_size': 64,
        'num_attention_heads': 2,
        'num_hidden_layers': 2,
        'num_key_value_heads': 1,
        'sliding_window': 64,
        'tie_word_embeddings': True,
        'max_window_layers': 1,
        'use_sliding_window': True,
        'layer_types': ['full_attention', 'sliding_attention']
    })
    del config['text_config']['torch_dtype']
    config['text_config']['rope_scaling']['mrope_section'] = [4, 2, 2]
    config['vision_config'].update(
        {
            'depth': 2,
            'fullatt_block_indexes': [0],
            'hidden_size': 32,
            'intermediate_size': 64,
            'num_heads': 2,
            'out_hidden_size': 32,
        }
    )
    del config['vision_config']['torch_dtype']
    save_json(f'{save_folder}/text_encoder/config.json', config)
    text_encoder_config = AutoConfig.from_pretrained(f'{save_folder}/text_encoder')
    text_encoder = Qwen2_5_VLForConditionalGeneration(text_encoder_config).to(torch.bfloat16)
    generation_config = GenerationConfig.from_pretrained(source_model_id, subfolder='text_encoder')
    text_encoder.generation_config = generation_config
    init_weights(text_encoder)

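# Transformer: shrink QwenImageTransformer2DModel to 2 layers with a single
# 32-dim attention head, then build it from the edited config and re-initialize.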
with open(hf_hub_download(source_model_id, filename='transformer/config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config = json.load(f)
    config.update({
        'attention_head_dim': 32,
        'axes_dims_rope': [8, 12, 12],
        'joint_attention_dim': 32,
        'num_attention_heads': 1,
        'num_layers': 2,
    })
    del config['pooled_projection_dim']  # not used
    save_json(f'{save_folder}/transformer/config.json', config)
    transformer_config = QwenImageTransformer2DModel.load_config(f'{save_folder}/transformer')
    transformer = QwenImageTransformer2DModel.from_config(transformer_config)
    init_weights(transformer)

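# VAE: shrink AutoencoderKLQwenImage (num_res_blocks=1, base_dim=16), then
# build it from the edited config and re-initialize.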
with open(hf_hub_download(source_model_id, filename='vae/config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config = json.load(f)
    config.update({
        'num_res_blocks': 1,
        'base_dim': 16,
        'dim_mult': [1, 2, 4, 4],
    })
    del config['latents_mean']  # not used
    del config['latents_std']  # not used
    save_json(f'{save_folder}/vae/config.json', config)
    vae_config = AutoencoderKLQwenImage.load_config(f'{save_folder}/vae')
    vae = AutoencoderKLQwenImage.from_config(vae_config)
    init_weights(vae)

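# Assemble the tiny components into a QwenImagePipeline, cast everything to
# bfloat16, and save the repo in safetensors format.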
pipeline = QwenImagePipeline(
    scheduler=scheduler,
    text_encoder=text_encoder,
    tokenizer=tokenizer,
    transformer=transformer,
    vae=vae,
)
pipeline = pipeline.to(torch.bfloat16)
pipeline.save_pretrained(save_folder, safe_serialization=True)
print(pipeline)
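
A quick way to sanity-check the saved repo is to reload the pipeline and count the parameters of each component. A minimal sketch (the path matches save_folder above):

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("/tmp/tiny-random/Qwen-Image")
for name in ("text_encoder", "transformer", "vae"):
    module = getattr(pipe, name)
    n_params = sum(p.numel() for p in module.parameters())
    print(f"{name}: {n_params / 1e6:.2f}M parameters")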