rahul7star committed
Commit 938a714 · verified · 1 Parent(s): b21ecc9

Update app.py

Files changed (1)
  1. app.py +89 -70
app.py CHANGED
@@ -1,11 +1,7 @@
 import os
 import sys
-
 sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
 
-#import subprocess
-#subprocess.run('pip install flash-attn==2.7.4.post1 --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
-
 # wan2.2-main/gradio_ti2v.py
 import gradio as gr
 import torch
@@ -26,26 +22,31 @@ import gc
 print("Starting Gradio App for Wan 2.2 TI2V-5B...")
 
 # Download model snapshots from Hugging Face Hub
-repo_id = "Wan-AI/Wan2.2-TI2V-5B-Diffusers"
+repo_id = "Wan-AI/Wan2.2-TI2V-5B"
 print(f"Downloading/loading checkpoints for {repo_id}...")
 ckpt_dir = snapshot_download(repo_id, local_dir_use_symlinks=False)
 print(f"Using checkpoints from {ckpt_dir}")
 
 # Load the model configuration
-TASK_NAME = 'ti2v-5B-Diff'
+TASK_NAME = 'ti2v-5B'
 cfg = WAN_CONFIGS[TASK_NAME]
 FIXED_FPS = 24
 MIN_FRAMES_MODEL = 8
 MAX_FRAMES_MODEL = 121
 
+# Dimension calculation constants
+MOD_VALUE = 32
+DEFAULT_H_SLIDER_VALUE = 704
+DEFAULT_W_SLIDER_VALUE = 1280
+NEW_FORMULA_MAX_AREA = 1280.0 * 704.0
+
+SLIDER_MIN_H, SLIDER_MAX_H = 128, 1280
+SLIDER_MIN_W, SLIDER_MAX_W = 128, 1280
+
 # Instantiate the pipeline in the global scope
 print("Initializing WanTI2V pipeline...")
 device = "cuda" if torch.cuda.is_available() else "cpu"
 device_id = 0 if torch.cuda.is_available() else -1
-
-
-
-
 pipeline = wan.WanTI2V(
     config=cfg,
     checkpoint_dir=ckpt_dir,
@@ -60,44 +61,52 @@ pipeline = wan.WanTI2V(
 )
 print("Pipeline initialized and ready.")
 
-# --- Helper Functions ---
-def select_best_size_for_image(image, available_sizes):
-    """Select the size option with aspect ratio closest to the input image."""
-    if image is None:
-        return available_sizes[0] # Return first option if no image
-
-    img_width, img_height = image.size
-    img_aspect_ratio = img_height / img_width
-
-    best_size = available_sizes[0]
-    best_diff = float('inf')
-
-    for size_str in available_sizes:
-        # Parse size string like "704*1280"
-        height, width = map(int, size_str.split('*'))
-        size_aspect_ratio = height / width
-        diff = abs(img_aspect_ratio - size_aspect_ratio)
-
-        if diff < best_diff:
-            best_diff = diff
-            best_size = size_str
-
-    return best_size
-
-def handle_image_upload(image):
-    """Handle image upload and return the best matching size."""
-    if image is None:
-        return gr.update()
-
-    pil_image = Image.fromarray(image).convert("RGB")
-    available_sizes = list(SUPPORTED_SIZES[TASK_NAME])
-    best_size = select_best_size_for_image(pil_image, available_sizes)
-
-    return gr.update(value=best_size)
+# --- Helper Functions (from Wan 2.1 Fast demo) ---
+def _calculate_new_dimensions_wan(pil_image, mod_val, calculation_max_area,
+                                  min_slider_h, max_slider_h,
+                                  min_slider_w, max_slider_w,
+                                  default_h, default_w):
+    orig_w, orig_h = pil_image.size
+    if orig_w <= 0 or orig_h <= 0:
+        return default_h, default_w
+
+    aspect_ratio = orig_h / orig_w
+
+    calc_h = round(np.sqrt(calculation_max_area * aspect_ratio))
+    calc_w = round(np.sqrt(calculation_max_area / aspect_ratio))
+
+    calc_h = max(mod_val, (calc_h // mod_val) * mod_val)
+    calc_w = max(mod_val, (calc_w // mod_val) * mod_val)
+
+    new_h = int(np.clip(calc_h, min_slider_h, (max_slider_h // mod_val) * mod_val))
+    new_w = int(np.clip(calc_w, min_slider_w, (max_slider_w // mod_val) * mod_val))
+
+    return new_h, new_w
+
+def handle_image_upload_for_dims_wan(uploaded_pil_image, current_h_val, current_w_val):
+    if uploaded_pil_image is None:
+        return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
+    try:
+        # Convert numpy array to PIL Image if needed
+        if hasattr(uploaded_pil_image, 'shape'): # numpy array
+            pil_image = Image.fromarray(uploaded_pil_image).convert("RGB")
+        else: # already PIL Image
+            pil_image = uploaded_pil_image
+
+        new_h, new_w = _calculate_new_dimensions_wan(
+            pil_image, MOD_VALUE, NEW_FORMULA_MAX_AREA,
+            SLIDER_MIN_H, SLIDER_MAX_H, SLIDER_MIN_W, SLIDER_MAX_W,
+            DEFAULT_H_SLIDER_VALUE, DEFAULT_W_SLIDER_VALUE
+        )
+        return gr.update(value=new_h), gr.update(value=new_w)
+    except Exception as e:
+        gr.Warning("Error attempting to calculate new dimensions")
+        return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
 
 def get_duration(image,
                  prompt,
-                 size,
+                 height,
+                 width,
                  duration_seconds,
                  sampling_steps,
                  guide_scale,
@@ -105,8 +114,10 @@ def get_duration(image,
                  seed,
                  progress):
     """Calculate dynamic GPU duration based on parameters."""
-    if sampling_steps > 35 and duration_seconds >= 2:
-        return 120
+    if duration_seconds >= 3:
+        return 220
+    elif sampling_steps > 35 and duration_seconds >= 2:
+        return 180
     elif sampling_steps < 35 or duration_seconds < 2:
         return 105
     else:
@@ -117,33 +128,40 @@
 def generate_video(
     image,
     prompt,
-    size,
+    height,
+    width,
     duration_seconds,
-    sampling_steps,
-    guide_scale,
-    shift,
-    seed,
+    sampling_steps=38,
+    guide_scale=cfg.sample_guide_scale,
+    shift=cfg.sample_shift,
+    seed=42,
     progress=gr.Progress(track_tqdm=True)
 ):
     """The main function to generate video, called by the Gradio interface."""
     if seed == -1:
         seed = random.randint(0, sys.maxsize)
 
+    # Ensure dimensions are multiples of MOD_VALUE
+    target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
+    target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
+
     input_image = None
     if image is not None:
         input_image = Image.fromarray(image).convert("RGB")
-        # Resize image to match selected size
-        target_height, target_width = map(int, size.split('*'))
-        input_image = input_image.resize((target_width, target_height))
+        # Resize image to match target dimensions
+        input_image = input_image.resize((target_w, target_h))
 
     # Calculate number of frames based on duration
    num_frames = np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
 
+    # Create size string for the pipeline
+    size_str = f"{target_h}*{target_w}"
+
     video_tensor = pipeline.generate(
         input_prompt=prompt,
         img=input_image, # Pass None for T2V, Image for I2V
-        size=SIZE_CONFIGS[size],
-        max_area=MAX_AREA_CONFIGS[size],
+        size=SIZE_CONFIGS.get(size_str, (target_h, target_w)),
+        max_area=MAX_AREA_CONFIGS.get(size_str, target_h * target_w),
         frame_num=num_frames, # Use calculated frames instead of cfg.frame_num
         shift=shift,
         sample_solver='unipc',
@@ -175,7 +193,7 @@ with gr.Blocks(css=css, theme=gr.themes.Soft(), delete_cache=(60, 900)) as demo:
 
     with gr.Row():
         with gr.Column(scale=2):
-            image_input = gr.Image(type="numpy", label="Input Image (Optional)", elem_id="input_image")
+            image_input = gr.Image(type="numpy", label="Optional (blank = text-to-image)", elem_id="input_image")
             prompt_input = gr.Textbox(label="Prompt", value="A beautiful waterfall in a lush jungle, cinematic.", lines=3)
             duration_input = gr.Slider(
                 minimum=round(MIN_FRAMES_MODEL/FIXED_FPS, 1),
@@ -185,48 +203,49 @@ with gr.Blocks(css=css, theme=gr.themes.Soft(), delete_cache=(60, 900)) as demo:
                 label="Duration (seconds)",
                 info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps."
             )
-            size_input = gr.Dropdown(label="Output Resolution", choices=list(SUPPORTED_SIZES[TASK_NAME]), value="704*1280")
-        with gr.Column(scale=2):
-            video_output = gr.Video(label="Generated Video", elem_id="output_video")
 
-
             with gr.Accordion("Advanced Settings", open=False):
+                with gr.Row():
+                    height_input = gr.Slider(minimum=SLIDER_MIN_H, maximum=SLIDER_MAX_H, step=MOD_VALUE, value=DEFAULT_H_SLIDER_VALUE, label=f"Output Height (multiple of {MOD_VALUE})")
+                    width_input = gr.Slider(minimum=SLIDER_MIN_W, maximum=SLIDER_MAX_W, step=MOD_VALUE, value=DEFAULT_W_SLIDER_VALUE, label=f"Output Width (multiple of {MOD_VALUE})")
                 steps_input = gr.Slider(label="Sampling Steps", minimum=10, maximum=50, value=38, step=1)
                 scale_input = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, value=cfg.sample_guide_scale, step=0.1)
                 shift_input = gr.Slider(label="Sample Shift", minimum=1.0, maximum=20.0, value=cfg.sample_shift, step=0.1)
                 seed_input = gr.Number(label="Seed (-1 for random)", value=-1, precision=0)
 
+        with gr.Column(scale=2):
+            video_output = gr.Video(label="Generated Video", elem_id="output_video")
             run_button = gr.Button("Generate Video", variant="primary")
 
     # Add image upload handler
     image_input.upload(
-        fn=handle_image_upload,
-        inputs=[image_input],
-        outputs=[size_input]
+        fn=handle_image_upload_for_dims_wan,
+        inputs=[image_input, height_input, width_input],
+        outputs=[height_input, width_input]
     )
 
     image_input.clear(
-        fn=handle_image_upload,
-        inputs=[image_input],
-        outputs=[size_input]
+        fn=handle_image_upload_for_dims_wan,
+        inputs=[image_input, height_input, width_input],
+        outputs=[height_input, width_input]
    )
 
     example_image_path = os.path.join(os.path.dirname(__file__), "examples/i2v_input.JPG")
     gr.Examples(
         examples=[
-            [example_image_path, "The cat removes the glasses from its eyes.", "1280*704", 1.5],
-            [None, "A cinematic shot of a boat sailing on a calm sea at sunset.", "1280*704", 2.0],
-            [None, "Drone footage flying over a futuristic city with flying cars.", "1280*704", 2.0],
+            [example_image_path, "The cat removes the glasses from its eyes.", 1088, 800, 1.5],
+            [None, "A cinematic shot of a boat sailing on a calm sea at sunset.", 704, 1280, 2.0],
+            [None, "Drone footage flying over a futuristic city with flying cars.", 704, 1280, 2.0],
         ],
-        inputs=[image_input, prompt_input, size_input, duration_input],
+        inputs=[image_input, prompt_input, height_input, width_input, duration_input],
        outputs=video_output,
        fn=generate_video,
-        cache_examples=False,
+        cache_examples="lazy",
     )
 
     run_button.click(
         fn=generate_video,
-        inputs=[image_input, prompt_input, size_input, duration_input, steps_input, scale_input, shift_input, seed_input],
+        inputs=[image_input, prompt_input, height_input, width_input, duration_input, steps_input, scale_input, shift_input, seed_input],
         outputs=video_output
     )
 
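For reference, a minimal standalone sketch of the dimension-snapping math that the new `_calculate_new_dimensions_wan` helper applies on image upload. The function name `snap_dims` and the sample input size are illustrative only; the constants mirror `MOD_VALUE`, `NEW_FORMULA_MAX_AREA`, and the slider bounds added in this commit.

```python
import numpy as np

# Constants mirroring the values added in this commit (assumed unchanged here)
MOD_VALUE = 32                 # output sides must be multiples of 32
MAX_AREA = 1280.0 * 704.0      # target pixel budget for the output resolution
SLIDER_MIN, SLIDER_MAX = 128, 1280

def snap_dims(orig_w, orig_h):
    """Scale the input aspect ratio to roughly MAX_AREA pixels, then snap
    both sides down to multiples of MOD_VALUE and clamp to the slider range."""
    aspect_ratio = orig_h / orig_w
    calc_h = round(np.sqrt(MAX_AREA * aspect_ratio))
    calc_w = round(np.sqrt(MAX_AREA / aspect_ratio))
    calc_h = max(MOD_VALUE, (calc_h // MOD_VALUE) * MOD_VALUE)
    calc_w = max(MOD_VALUE, (calc_w // MOD_VALUE) * MOD_VALUE)
    new_h = int(np.clip(calc_h, SLIDER_MIN, (SLIDER_MAX // MOD_VALUE) * MOD_VALUE))
    new_w = int(np.clip(calc_w, SLIDER_MIN, (SLIDER_MAX // MOD_VALUE) * MOD_VALUE))
    return new_h, new_w

# A 3:4 portrait upload (e.g. 1536x2048) maps to (1088, 800) -- the same
# height/width values the first Examples row above uses.
print(snap_dims(1536, 2048))  # -> (1088, 800)
```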