jbilcke-hf HF Staff commited on
Commit
9d13d0d
·
1 Parent(s): ab4cfa8

remove some logs

Browse files
Files changed (2) hide show
  1. app.py +5 -5
  2. demo_utils/vae_block3.py +1 -1
app.py CHANGED
@@ -322,7 +322,7 @@ def video_generation_handler(prompt, seed=42, fps=15, width=DEFAULT_WIDTH, heigh
322
  seed = random.randint(0, 2**32 - 1)
323
 
324
 
325
- print(f"🎬 video_generation_handler called, seed: {seed}, duration: {duration}s, fps: {fps}, width: {width}, height: {height}")
326
 
327
  # Setup
328
  conditional_dict = text_encoder(text_prompts=[prompt])
@@ -361,7 +361,7 @@ def video_generation_handler(prompt, seed=42, fps=15, width=DEFAULT_WIDTH, heigh
361
 
362
  # Generation loop
363
  for idx, current_num_frames in enumerate(all_num_frames):
364
- print(f"πŸ“¦ Processing block {idx+1}/{num_blocks}")
365
 
366
  noisy_input = noise[:, current_start_frame : current_start_frame + current_num_frames]
367
 
@@ -408,7 +408,7 @@ def video_generation_handler(prompt, seed=42, fps=15, width=DEFAULT_WIDTH, heigh
408
  elif APP_STATE["current_use_taehv"] and idx > 0:
409
  pixels = pixels[:, 12:]
410
 
411
- print(f"πŸ” DEBUG Block {idx}: Pixels shape after skipping: {pixels.shape}")
412
 
413
  # Process all frames from this block and add to main collection
414
  for frame_idx in range(pixels.shape[1]):
@@ -422,13 +422,13 @@ def video_generation_handler(prompt, seed=42, fps=15, width=DEFAULT_WIDTH, heigh
422
  all_frames.append(frame_np)
423
  total_frames_generated += 1
424
 
425
- print(f"πŸ“¦ Block {idx+1}/{num_blocks}, Frame {frame_idx+1}/{pixels.shape[1]} - Total frames: {total_frames_generated}")
426
 
427
  current_start_frame += current_num_frames
428
 
429
  # Generate final MP4 as base64 data URI
430
  if all_frames:
431
- print(f"πŸ“Ή Encoding final MP4 with {len(all_frames)} frames")
432
 
433
  try:
434
  base64_data_uri = frames_to_mp4_base64(all_frames, fps)
 
322
  seed = random.randint(0, 2**32 - 1)
323
 
324
 
325
+ #print(f"🎬 video_generation_handler called, seed: {seed}, duration: {duration}s, fps: {fps}, width: {width}, height: {height}")
326
 
327
  # Setup
328
  conditional_dict = text_encoder(text_prompts=[prompt])
 
361
 
362
  # Generation loop
363
  for idx, current_num_frames in enumerate(all_num_frames):
364
+ #print(f"πŸ“¦ Processing block {idx+1}/{num_blocks}")
365
 
366
  noisy_input = noise[:, current_start_frame : current_start_frame + current_num_frames]
367
 
 
408
  elif APP_STATE["current_use_taehv"] and idx > 0:
409
  pixels = pixels[:, 12:]
410
 
411
+ #print(f"πŸ” DEBUG Block {idx}: Pixels shape after skipping: {pixels.shape}")
412
 
413
  # Process all frames from this block and add to main collection
414
  for frame_idx in range(pixels.shape[1]):
 
422
  all_frames.append(frame_np)
423
  total_frames_generated += 1
424
 
425
+ #print(f"πŸ“¦ Block {idx+1}/{num_blocks}, Frame {frame_idx+1}/{pixels.shape[1]} - Total frames: {total_frames_generated}")
426
 
427
  current_start_frame += current_num_frames
428
 
429
  # Generate final MP4 as base64 data URI
430
  if all_frames:
431
+ #print(f"πŸ“Ή Encoding final MP4 with {len(all_frames)} frames")
432
 
433
  try:
434
  base64_data_uri = frames_to_mp4_base64(all_frames, fps)
demo_utils/vae_block3.py CHANGED
@@ -153,7 +153,7 @@ class VAEDecoderWrapper(nn.Module):
153
  # to [batch_size, num_channels, num_frames, height, width]
154
  z = z.permute(0, 2, 1, 3, 4)
155
  feat_cache = list(feat_cache)
156
- print("Length of feat_cache: ", len(feat_cache))
157
 
158
  device, dtype = z.device, z.dtype
159
  scale = [self.mean.to(device=device, dtype=dtype),
 
153
  # to [batch_size, num_channels, num_frames, height, width]
154
  z = z.permute(0, 2, 1, 3, 4)
155
  feat_cache = list(feat_cache)
156
+ #print("Length of feat_cache: ", len(feat_cache))
157
 
158
  device, dtype = z.device, z.dtype
159
  scale = [self.mean.to(device=device, dtype=dtype),