Make lumina model work with any latent resolution.
comfyanonymous committed Feb 10, 2025
1 parent 095d867 commit 4027466
Showing 1 changed file with 4 additions and 1 deletion.
5 changes: 4 additions & 1 deletion comfy/ldm/lumina/model.py
@@ -6,6 +6,7 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
+import comfy.ldm.common_dit
 
 from comfy.ldm.modules.diffusionmodules.mmdit import TimestepEmbedder, RMSNorm
 from comfy.ldm.modules.attention import optimized_attention_masked
@@ -594,6 +595,8 @@ def forward(self, x, timesteps, context, num_tokens, attention_mask=None, **kwargs):
         t = 1.0 - timesteps
         cap_feats = context
         cap_mask = attention_mask
+        bs, c, h, w = x.shape
+        x = comfy.ldm.common_dit.pad_to_patch_size(x, (self.patch_size, self.patch_size))
         """
         Forward pass of NextDiT.
         t: (N,) tensor of diffusion timesteps
@@ -613,7 +616,7 @@ def forward(self, x, timesteps, context, num_tokens, attention_mask=None, **kwargs):
             x = layer(x, mask, freqs_cis, adaln_input)
 
         x = self.final_layer(x, adaln_input)
-        x = self.unpatchify(x, img_size, cap_size, return_tensor=x_is_tensor)
+        x = self.unpatchify(x, img_size, cap_size, return_tensor=x_is_tensor)[:,:,:h,:w]
 
         return -x
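
For context, the change follows a common pad-then-crop pattern: the incoming latent is padded up to the next multiple of the patch size before patchification, and the unpatchified output is cropped back to the original height and width, so any latent resolution round-trips cleanly. Below is a minimal, self-contained sketch of that pattern; the exact signature and padding mode of comfy.ldm.common_dit.pad_to_patch_size are assumptions, and forward_with_any_resolution is a hypothetical wrapper for illustration, not code from the repository.

import torch
import torch.nn.functional as F

def pad_to_patch_size(img: torch.Tensor, patch_size=(2, 2)) -> torch.Tensor:
    # Pad H and W (the last two dims) up to the next multiple of the patch size.
    pad_h = (patch_size[0] - img.shape[-2] % patch_size[0]) % patch_size[0]
    pad_w = (patch_size[1] - img.shape[-1] % patch_size[1]) % patch_size[1]
    # F.pad orders pads from the last dim inward: (left, right, top, bottom).
    # "replicate" is an assumed padding mode, chosen only for illustration.
    return F.pad(img, (0, pad_w, 0, pad_h), mode="replicate")

def forward_with_any_resolution(model, x: torch.Tensor, patch_size: int = 2) -> torch.Tensor:
    # Remember the original latent size, as the diff does with bs, c, h, w = x.shape.
    bs, c, h, w = x.shape
    x = pad_to_patch_size(x, (patch_size, patch_size))
    out = model(x)  # stand-in for patchify -> transformer blocks -> unpatchify
    # Crop the padding back off so the output matches the input resolution.
    return out[:, :, :h, :w]

# Usage: a 1x4x31x45 latent is padded to 32x46 for patch_size=2, then cropped back.
dummy = torch.randn(1, 4, 31, 45)
out = forward_with_any_resolution(lambda t: t, dummy)  # identity as a hypothetical model
assert out.shape == dummy.shape

Cropping with [:,:,:h,:w] after unpatchify is what removes the padded rows and columns introduced before patchification, which is why the diff records bs, c, h, w = x.shape before calling pad_to_patch_size.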
