Update app.py
app.py CHANGED

@@ -53,7 +53,7 @@ good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfold
 #).to(device)
 
 dtype = torch.bfloat16
-base_model = "AlekseyCalvin/
+base_model = "AlekseyCalvin/Flux-Krea-Blaze_byMintLab_fp8_Diffusers"
 pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to("cuda")
 #pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.float16).to("cuda")
 torch.cuda.empty_cache()
@@ -216,7 +216,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
     with gr.Accordion("Advanced Settings", open=True):
         with gr.Column():
             with gr.Row():
-                cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=2.
+                cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=2.2)
                 steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=8)
 
             with gr.Row():
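For context, a minimal sketch of how the new base model and slider defaults come together at inference time, assuming the standard diffusers DiffusionPipeline API; the prompt, seed, and output path below are illustrative and not part of this commit:

import torch
from diffusers import DiffusionPipeline

# Base model swapped in by this commit (an fp8 Diffusers-format FLUX Krea checkpoint).
base_model = "AlekseyCalvin/Flux-Krea-Blaze_byMintLab_fp8_Diffusers"

# As in app.py: load in bfloat16 and move to the GPU.
dtype = torch.bfloat16
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to("cuda")

# Illustrative generation call: the app's updated UI defaults map to
# guidance_scale=2.2 (the "CFG Scale" slider) and num_inference_steps=8
# (the "Steps" slider).
image = pipe(
    prompt="a lighthouse at dusk, film photograph",  # example prompt, not from the commit
    guidance_scale=2.2,
    num_inference_steps=8,
    generator=torch.Generator("cuda").manual_seed(0),
).images[0]
image.save("sample.png")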