Skip to content

Commit

Permalink
[LTX-Video] fix attribute adjustment for ltx. (#10426)
Browse files Browse the repository at this point in the history
fix attribute adjustment for ltx.
  • Loading branch information
sayakpaul authored Jan 2, 2025
1 parent 68bd693 commit 3cb6686
Show file tree
Hide file tree
Showing 2 changed files with 22 additions and 10 deletions.
16 changes: 11 additions & 5 deletions src/diffusers/pipelines/ltx/pipeline_ltx.py
Original file line number Diff line number Diff line change
Expand Up @@ -186,16 +186,22 @@ def __init__(
scheduler=scheduler,
)

self.vae_spatial_compression_ratio = self.vae.spatial_compression_ratio if hasattr(self, "vae") else 32
self.vae_temporal_compression_ratio = self.vae.temporal_compression_ratio if hasattr(self, "vae") else 8
self.transformer_spatial_patch_size = self.transformer.config.patch_size if hasattr(self, "transformer") else 1
self.vae_spatial_compression_ratio = (
self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 32
)
self.vae_temporal_compression_ratio = (
self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 8
)
self.transformer_spatial_patch_size = (
self.transformer.config.patch_size if getattr(self, "transformer", None) is not None else 1
)
self.transformer_temporal_patch_size = (
self.transformer.config.patch_size_t if hasattr(self, "transformer") else 1
            self.transformer.config.patch_size_t if getattr(self, "transformer", None) is not None else 1
)

self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio)
self.tokenizer_max_length = (
self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 128
self.tokenizer.model_max_length if getattr(self, "tokenizer", None) is not None else 128
)

def _get_t5_prompt_embeds(
Expand Down
16 changes: 11 additions & 5 deletions src/diffusers/pipelines/ltx/pipeline_ltx_image2video.py
Original file line number Diff line number Diff line change
Expand Up @@ -205,16 +205,22 @@ def __init__(
scheduler=scheduler,
)

self.vae_spatial_compression_ratio = self.vae.spatial_compression_ratio if hasattr(self, "vae") else 32
self.vae_temporal_compression_ratio = self.vae.temporal_compression_ratio if hasattr(self, "vae") else 8
self.transformer_spatial_patch_size = self.transformer.config.patch_size if hasattr(self, "transformer") else 1
self.vae_spatial_compression_ratio = (
self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 32
)
self.vae_temporal_compression_ratio = (
self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 8
)
self.transformer_spatial_patch_size = (
self.transformer.config.patch_size if getattr(self, "transformer", None) is not None else 1
)
self.transformer_temporal_patch_size = (
self.transformer.config.patch_size_t if hasattr(self, "transformer") else 1
            self.transformer.config.patch_size_t if getattr(self, "transformer", None) is not None else 1
)

self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio)
self.tokenizer_max_length = (
self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 128
self.tokenizer.model_max_length if getattr(self, "tokenizer", None) is not None else 128
)

self.default_height = 512
Expand Down

0 comments on commit 3cb6686

Please sign in to comment.