Commit 1cd6cd6
Disable pytorch attention in VAE for AMD.
comfyanonymous committed Feb 14, 2025
1 parent d7b4bf2 commit 1cd6cd6
Showing 2 changed files with 6 additions and 1 deletion.
2 changes: 1 addition & 1 deletion comfy/ldm/modules/diffusionmodules/model.py
@@ -297,7 +297,7 @@ def vae_attention():
     if model_management.xformers_enabled_vae():
         logging.info("Using xformers attention in VAE")
         return xformers_attention
-    elif model_management.pytorch_attention_enabled():
+    elif model_management.pytorch_attention_enabled_vae():
         logging.info("Using pytorch attention in VAE")
         return pytorch_attention
     else:
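
The VAE previously consulted the generic pytorch_attention_enabled() gate, so AMD builds still selected pytorch attention here. A minimal sketch of the fallback order this selector implements, with boolean flags and string return values standing in for ComfyUI's real checks and attention callables (illustrative stand-ins, not the actual API):

    import logging

    def vae_attention_sketch(xformers_ok: bool, pytorch_ok: bool) -> str:
        # Mirrors vae_attention()'s fallback order; the strings are
        # placeholders for the real attention functions, and the final
        # branch (elided in the hunk above) stands in for whatever
        # fallback the file defines.
        if xformers_ok:
            logging.info("Using xformers attention in VAE")
            return "xformers_attention"
        elif pytorch_ok:
            logging.info("Using pytorch attention in VAE")
            return "pytorch_attention"
        else:
            return "fallback_attention"
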
5 changes: 5 additions & 0 deletions comfy/model_management.py
@@ -912,6 +912,11 @@ def pytorch_attention_enabled():
     global ENABLE_PYTORCH_ATTENTION
     return ENABLE_PYTORCH_ATTENTION
 
+def pytorch_attention_enabled_vae():
+    if is_amd():
+        return False # enabling pytorch attention on AMD currently causes crash when doing high res
+    return pytorch_attention_enabled()
+
 def pytorch_attention_flash_attention():
     global ENABLE_PYTORCH_ATTENTION
     if ENABLE_PYTORCH_ATTENTION:
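
Net effect: only the VAE loses the pytorch attention path on AMD; everything else still follows pytorch_attention_enabled(). A self-contained sketch of the new gate, assuming is_amd() amounts to a ROCm/HIP build check (that stub is an assumption for illustration, not necessarily ComfyUI's exact implementation):

    import torch

    ENABLE_PYTORCH_ATTENTION = True

    def is_amd() -> bool:
        # Assumption for this sketch: a ROCm/HIP torch build means AMD.
        return torch.version.hip is not None

    def pytorch_attention_enabled() -> bool:
        return ENABLE_PYTORCH_ATTENTION

    def pytorch_attention_enabled_vae() -> bool:
        # Pytorch attention on AMD currently crashes at high resolutions,
        # so the VAE is forced onto a different attention implementation.
        if is_amd():
            return False
        return pytorch_attention_enabled()

    print(pytorch_attention_enabled_vae())  # False on ROCm builds, True otherwise
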
