[core] improve VAE encode/decode framewise batching (#9684)
* update

* apply suggestions from review

---------

Co-authored-by: Sayak Paul <[email protected]>
a-r-r-o-w and sayakpaul committed Dec 23, 2024
1 parent 5f88292 commit 6796ef0
Showing 1 changed file with 5 additions and 3 deletions.
src/diffusers/models/autoencoders/autoencoder_kl_cogvideox.py (8 changes: 5 additions & 3 deletions)
@@ -1182,7 +1182,8 @@ def _encode(self, x: torch.Tensor) -> torch.Tensor:
 
         frame_batch_size = self.num_sample_frames_batch_size
         # Note: We expect the number of frames to be either `1` or `frame_batch_size * k` or `frame_batch_size * k + 1` for some k.
-        num_batches = num_frames // frame_batch_size if num_frames > 1 else 1
+        # As the extra single frame is handled inside the loop, it is not required to round up here.
+        num_batches = max(num_frames // frame_batch_size, 1)
         conv_cache = None
         enc = []

@@ -1330,7 +1331,8 @@ def tiled_encode(self, x: torch.Tensor) -> torch.Tensor:
             row = []
             for j in range(0, width, overlap_width):
                 # Note: We expect the number of frames to be either `1` or `frame_batch_size * k` or `frame_batch_size * k + 1` for some k.
-                num_batches = num_frames // frame_batch_size if num_frames > 1 else 1
+                # As the extra single frame is handled inside the loop, it is not required to round up here.
+                num_batches = max(num_frames // frame_batch_size, 1)
                 conv_cache = None
                 time = []

@@ -1409,7 +1411,7 @@ def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[Decod
         for i in range(0, height, overlap_height):
             row = []
             for j in range(0, width, overlap_width):
-                num_batches = num_frames // frame_batch_size
+                num_batches = max(num_frames // frame_batch_size, 1)
                 conv_cache = None
                 time = []

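For readers skimming the change, here is a minimal standalone sketch of the frame-batching arithmetic that the new comment describes. The `split_frames` helper, its parameters, and the `min` clamp are illustrative assumptions rather than the library's actual loop; the point is that with `num_batches = max(num_frames // frame_batch_size, 1)`, the single extra frame in the `frame_batch_size * k + 1` case is absorbed by a batch inside the loop, so the batch count never needs to round up.

```python
# Illustrative sketch only (hypothetical helper, not the diffusers implementation).
# Shows how num_frames of 1, k * B, or k * B + 1 split into batches without rounding up.

def split_frames(num_frames: int, frame_batch_size: int):
    num_batches = max(num_frames // frame_batch_size, 1)
    remaining = num_frames % frame_batch_size  # 0 or 1 in the expected cases
    batches = []
    for i in range(num_batches):
        # The first batch absorbs the extra frame; later batches start past it.
        start = frame_batch_size * i + (0 if i == 0 else remaining)
        end = min(frame_batch_size * (i + 1) + remaining, num_frames)
        batches.append((start, end))
    return batches

for n in (1, 8, 9, 17):  # with frame_batch_size = 8
    print(n, split_frames(n, frame_batch_size=8))
# 1 [(0, 1)]
# 8 [(0, 8)]
# 9 [(0, 9)]
# 17 [(0, 9), (9, 17)]
```

Before this change, `tiled_decode` used a bare `num_frames // frame_batch_size`, which presumably yields zero batches for a single-frame input; the `max(..., 1)` guard keeps at least one batch in that case.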
