Upload folder using huggingface_hub
- README.md +2 -8
- media.py +152 -0
- requirements.txt +9 -0
README.md CHANGED
@@ -1,12 +1,6 @@
 ---
-title:
-
-colorFrom: blue
-colorTo: red
+title: Generative_Suite
+app_file: media.py
 sdk: gradio
 sdk_version: 5.38.0
-app_file: app.py
-pinned: false
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
media.py ADDED
@@ -0,0 +1,152 @@
# --- LIBRARIES ---
import torch
import gradio as gr
import random
import time
from diffusers import AutoPipelineForText2Image, TextToVideoSDPipeline
import gc
import os
import imageio

# --- AUTHENTICATION FOR HUGGING FACE SPACES ---
# Reads the token from a "Secret" set in the Space's settings; this is the
# secure, recommended way to handle tokens on HF Spaces.
try:
    from huggingface_hub import login
    HF_TOKEN = os.environ.get('HF_TOKEN')
    if HF_TOKEN:
        login(token=HF_TOKEN)
        print("✅ Hugging Face authentication successful.")
    else:
        print("⚠️ Hugging Face token not found in Space Secrets. Gated models may not be available.")
except ImportError:
    print("Could not import huggingface_hub. Please ensure it's in requirements.txt.")

# --- CONFIGURATION & STATE ---
available_models = {
    "Fast Image (SDXL Turbo)": "stabilityai/sdxl-turbo",
    "Quality Image (SDXL)": "stabilityai/stable-diffusion-xl-base-1.0",
    "Video (Zeroscope)": "cerspense/zeroscope-v2-576w"
}
# Cache for the currently loaded pipeline, so switching models frees VRAM first.
model_state = {"current_pipe": None, "loaded_model_name": None}


# --- CORE GENERATION FUNCTION ---
# A generator function: each `yield` streams a partial update to the UI.
def generate_media(model_key, prompt, negative_prompt, steps, cfg_scale, width, height, seed, num_frames):
    # --- Model Loading Logic ---
    # If the requested model isn't the one currently loaded, swap them.
    if model_state.get("loaded_model_name") != model_key:
        print(f"Switching to {model_key}. Unloading previous model...")
        yield {status_textbox: "Unloading previous model..."}  # UI update
        if model_state.get("current_pipe"):
            del model_state["current_pipe"]
            gc.collect()
            torch.cuda.empty_cache()

        model_id = available_models[model_key]
        print(f"Loading {model_id}...")
        yield {status_textbox: f"Loading {model_id}... This can take a minute."}  # UI update

        # Load the correct pipeline class for the model type
        if "Image" in model_key:
            pipe = AutoPipelineForText2Image.from_pretrained(model_id, torch_dtype=torch.float16, variant="fp16")
        elif "Video" in model_key:
            pipe = TextToVideoSDPipeline.from_pretrained(model_id, torch_dtype=torch.float16)

        # Offload larger models to save VRAM, but keep fast models fully on the GPU.
        # enable_model_cpu_offload() manages device placement itself, so .to("cuda")
        # is only called when offloading is not used.
        if "Turbo" not in model_key and "Video" not in model_key:
            pipe.enable_model_cpu_offload()
        else:
            pipe.to("cuda")

        model_state["current_pipe"] = pipe
        model_state["loaded_model_name"] = model_key
        print("✅ Model loaded successfully.")

    pipe = model_state["current_pipe"]
    generator = torch.Generator("cuda").manual_seed(int(seed))  # gr.Number delivers floats; the seed must be an int
    yield {status_textbox: f"Generating with {model_key}..."}  # UI update

    # --- Generation Logic ---
    if "Image" in model_key:
        print("Generating image...")
        if "Turbo" in model_key:  # SDXL Turbo is distilled for single-step, guidance-free sampling
            num_steps, guidance_scale = 1, 0.0
        else:
            num_steps, guidance_scale = int(steps), float(cfg_scale)

        image = pipe(
            prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=num_steps,
            guidance_scale=guidance_scale, width=int(width), height=int(height), generator=generator
        ).images[0]
        print("✅ Image generation complete.")
        yield {output_image: image, output_video: None, status_textbox: f"Seed used: {seed}"}

    elif "Video" in model_key:
        print("Generating video...")
        # .frames carries a leading batch dimension in recent diffusers releases
        video_frames = pipe(prompt=prompt, num_inference_steps=int(steps), height=320, width=576, num_frames=int(num_frames), generator=generator).frames[0]

        video_path = f"/tmp/video_{seed}.mp4"
        # Frames come back as float arrays in [0, 1]; convert to uint8 for imageio
        imageio.mimsave(video_path, [(frame * 255).astype("uint8") for frame in video_frames], fps=12)
        print(f"✅ Video saved to {video_path}")
        yield {output_image: None, output_video: video_path, status_textbox: f"Seed used: {seed}"}


# --- GRADIO USER INTERFACE ---
with gr.Blocks(theme='gradio/soft') as demo:
    gr.Markdown("# The Generative Media Suite")
    gr.Markdown("Create fast images, high-quality images, or short videos. Created by cheeseman182.")
    seed_state = gr.State(-1)

    with gr.Row():
        with gr.Column(scale=2):
            model_selector = gr.Radio(label="Select Model", choices=list(available_models.keys()), value=list(available_models.keys())[0])
            prompt_input = gr.Textbox(label="Prompt", lines=4, placeholder="An astronaut riding a horse on Mars, cinematic...")
            negative_prompt_input = gr.Textbox(label="Negative Prompt", lines=2, value="ugly, blurry, deformed, watermark, text")

            with gr.Accordion("Settings", open=True):
                steps_slider = gr.Slider(1, 100, 30, step=1, label="Inference Steps")
                cfg_slider = gr.Slider(0.0, 15.0, 7.5, step=0.5, label="Guidance Scale (CFG)")
                with gr.Row():
                    width_slider = gr.Slider(256, 1024, 768, step=64, label="Width")
                    height_slider = gr.Slider(256, 1024, 768, step=64, label="Height")
                num_frames_slider = gr.Slider(12, 48, 24, step=4, label="Video Frames", visible=False)
                seed_input = gr.Number(-1, label="Seed (-1 for random)")

            generate_button = gr.Button("Generate", variant="primary")

        with gr.Column(scale=3):
            output_image = gr.Image(label="Image Result", interactive=False, height="60vh", visible=True)
            output_video = gr.Video(label="Video Result", interactive=False, height="60vh", visible=False)
            status_textbox = gr.Textbox(label="Status", interactive=False)

    # --- UI Logic ---
    # Show/hide and enable/disable controls to match the selected model.
    def update_ui_on_model_change(model_key):
        is_video = "Video" in model_key
        is_turbo = "Turbo" in model_key
        return {
            steps_slider: gr.update(interactive=not is_turbo, value=1 if is_turbo else 30),
            cfg_slider: gr.update(interactive=not is_turbo, value=0.0 if is_turbo else 7.5),
            width_slider: gr.update(visible=not is_video),
            height_slider: gr.update(visible=not is_video),
            num_frames_slider: gr.update(visible=is_video),
            output_image: gr.update(visible=not is_video),
            output_video: gr.update(visible=is_video)
        }
    model_selector.change(update_ui_on_model_change, model_selector, [steps_slider, cfg_slider, width_slider, height_slider, num_frames_slider, output_image, output_video])

    # --- Button Logic ---
    # The chain first resolves the seed (drawing a random one for -1), then calls
    # the main generation function with the resolved value.
    click_event = generate_button.click(
        fn=lambda s: (s if s != -1 else random.randint(0, 2**32 - 1)),
        inputs=seed_input,
        outputs=seed_state,
        queue=False
    ).then(
        fn=generate_media,
        inputs=[model_selector, prompt_input, negative_prompt_input, steps_slider, cfg_slider, width_slider, height_slider, seed_state, num_frames_slider],
        outputs=[output_image, output_video, status_textbox]
    )

# A plain demo.launch() is all that's needed on Hugging Face Spaces
demo.launch()
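generate_media above is a generator, so each yield streams a partial update to the UI: Gradio applies a yielded dict of {component: value} pairs to just those output components. A minimal self-contained sketch of that pattern (a hypothetical two-textbox app, not part of this commit; assumes gradio 4.x):

import time
import gradio as gr

with gr.Blocks() as sketch:
    status = gr.Textbox(label="Status")
    result = gr.Textbox(label="Result")

    def run():
        # First step: update only the status box
        yield {status: "working..."}
        time.sleep(1)
        # Final step: update both outputs at once
        yield {status: "done", result: "42"}

    # Dict-style updates only apply to components listed in `outputs`
    gr.Button("Run").click(run, inputs=None, outputs=[status, result])

sketch.launch()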
requirements.txt ADDED
@@ -0,0 +1,9 @@
--find-links https://download.pytorch.org/whl/cu121
torch==2.3.1+cu121
torchvision==0.18.1+cu121
bitsandbytes==0.43.1
transformers==4.41.2
accelerate==0.31.0
diffusers==0.29.0
gradio==4.36.1
imageio[ffmpeg]
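The --find-links line points pip at the CUDA 12.1 wheel index so the +cu121 torch/torchvision pins can resolve. A quick post-install sanity check (a sketch; the expected version string assumes the pins above):

import torch

print(torch.__version__)           # expected: "2.3.1+cu121" with the pins above
print(torch.cuda.is_available())   # expected: True on a GPU Space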