Spaces:
Running
on
Zero
Running
on
Zero
rizavelioglu
committed on
Commit
·
984afb9
1
Parent(s):
778f222
add flux.1 kontext vae
Browse files- app.py +7 -3
- requirements.txt +0 -3
app.py
CHANGED
@@ -1,7 +1,7 @@
|
|
1 |
import spaces
|
2 |
import gradio as gr
|
3 |
import torch
|
4 |
-
from diffusers import AutoencoderKL
|
5 |
from diffusers.utils.remote_utils import remote_decode
|
6 |
import torchvision.transforms.v2 as transforms
|
7 |
from torchvision.io import read_image
|
@@ -63,6 +63,8 @@ class VAETester:
|
|
63 |
"FLUX.1": AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae").to(self.device),
|
64 |
"CogView4-6B": AutoencoderKL.from_pretrained("THUDM/CogView4-6B", subfolder="vae").to(self.device),
|
65 |
"playground-v2.5": AutoencoderKL.from_pretrained("playgroundai/playground-v2.5-1024px-aesthetic", subfolder="vae").to(self.device),
|
|
|
|
|
66 |
}
|
67 |
# Define the desired order of models
|
68 |
order = [
|
@@ -78,6 +80,8 @@ class VAETester:
|
|
78 |
"FLUX.1",
|
79 |
#"FLUX.1 (remote)",
|
80 |
"CogView4-6B",
|
|
|
|
|
81 |
]
|
82 |
|
83 |
# Construct the vae_models dictionary in the specified order
|
@@ -142,7 +146,7 @@ class VAETester:
|
|
142 |
results[name] = self.process_image(img, model_config, tolerance)
|
143 |
return results
|
144 |
|
145 |
-
@spaces.GPU(duration=
|
146 |
def test_all_vaes(image_path: str, tolerance: float, img_size: int):
|
147 |
"""Gradio interface function to test all VAEs"""
|
148 |
tester = VAETester(img_size=img_size)
|
@@ -198,7 +202,7 @@ with gr.Blocks(title="VAE Performance Tester", css=".monospace-text {font-family
|
|
198 |
with gr.Row():
|
199 |
diff_gallery = gr.Gallery(label="Difference Maps", columns=4, height=512)
|
200 |
recon_gallery = gr.Gallery(label="Reconstructed Images", columns=4, height=512)
|
201 |
-
scores_output = gr.Textbox(label="Sum of differences (lower is better) | Processing time (lower is faster)", lines=
|
202 |
|
203 |
if examples:
|
204 |
with gr.Row():
|
|
|
1 |
import spaces
|
2 |
import gradio as gr
|
3 |
import torch
|
4 |
+
from diffusers import AutoencoderKL, AutoencoderDC
|
5 |
from diffusers.utils.remote_utils import remote_decode
|
6 |
import torchvision.transforms.v2 as transforms
|
7 |
from torchvision.io import read_image
|
|
|
63 |
"FLUX.1": AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae").to(self.device),
|
64 |
"CogView4-6B": AutoencoderKL.from_pretrained("THUDM/CogView4-6B", subfolder="vae").to(self.device),
|
65 |
"playground-v2.5": AutoencoderKL.from_pretrained("playgroundai/playground-v2.5-1024px-aesthetic", subfolder="vae").to(self.device),
|
66 |
+
"dc-ae-f32c32-sana-1.0": AutoencoderDC.from_pretrained("mit-han-lab/dc-ae-f32c32-sana-1.0-diffusers").to(self.device),
|
67 |
+
"FLUX.1-Kontext": AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", subfolder="vae").to(self.device),
|
68 |
}
|
69 |
# Define the desired order of models
|
70 |
order = [
|
|
|
80 |
"FLUX.1",
|
81 |
#"FLUX.1 (remote)",
|
82 |
"CogView4-6B",
|
83 |
+
"dc-ae-f32c32-sana-1.0",
|
84 |
+
"FLUX.1-Kontext",
|
85 |
]
|
86 |
|
87 |
# Construct the vae_models dictionary in the specified order
|
|
|
146 |
results[name] = self.process_image(img, model_config, tolerance)
|
147 |
return results
|
148 |
|
149 |
+
@spaces.GPU(duration=20)
|
150 |
def test_all_vaes(image_path: str, tolerance: float, img_size: int):
|
151 |
"""Gradio interface function to test all VAEs"""
|
152 |
tester = VAETester(img_size=img_size)
|
|
|
202 |
with gr.Row():
|
203 |
diff_gallery = gr.Gallery(label="Difference Maps", columns=4, height=512)
|
204 |
recon_gallery = gr.Gallery(label="Reconstructed Images", columns=4, height=512)
|
205 |
+
scores_output = gr.Textbox(label="Sum of differences (lower is better) | Processing time (lower is faster)", lines=12, elem_classes="monospace-text")
|
206 |
|
207 |
if examples:
|
208 |
with gr.Row():
|
requirements.txt
CHANGED
@@ -2,8 +2,5 @@ torch
|
|
2 |
torchvision
|
3 |
accelerate
|
4 |
safetensors
|
5 |
-
huggingface_hub
|
6 |
-
spaces>=0.34.2
|
7 |
-
gradio>=5.25.2
|
8 |
torchao # latest diffusers request this
|
9 |
git+https://github.com/huggingface/diffusers@main
|
|
|
2 |
torchvision
|
3 |
accelerate
|
4 |
safetensors
|
|
|
|
|
|
|
5 |
torchao # latest diffusers request this
|
6 |
git+https://github.com/huggingface/diffusers@main
|