Merge pull request #153 from seruva19/dev
fix #143
seruva19 authored Oct 24, 2023
2 parents 03fabf4 + 9586288 commit 2729850
Showing 1 changed file with 8 additions and 2 deletions.
src/models/model_diffusers22/model_22_init.py

@@ -227,6 +227,7 @@ def prepare_weights_for_task(model, task):
                     attention_op=MemoryEfficientAttentionOp
                 )
                 applied_optimizations.append("xformers for prior")
+                model.pipe_info["xformers_prior"] = True
             except:
                 k_log("failed to apply xformers for prior")

@@ -235,13 +236,16 @@ def prepare_weights_for_task(model, task):
                     attention_op=MemoryEfficientAttentionOp
                 )
                 applied_optimizations.append("xformers for decoder")
+                model.pipe_info["xformers_decoder"] = True
             except:
                 k_log("failed to apply xformers for decoder")
         else:
             k_log("xformers use requested, but no xformers installed")
     else:
-        current_prior.disable_xformers_memory_efficient_attention()
-        current_decoder.disable_xformers_memory_efficient_attention()
+        if model.pipe_info["xformers_prior"]:
+            current_prior.disable_xformers_memory_efficient_attention()
+        if model.pipe_info["xformers_decoder"]:
+            current_decoder.disable_xformers_memory_efficient_attention()

     if enable_sdp_attention:
         current_decoder.unet.set_attn_processor(AttnAddedKVProcessor2_0())

@@ -323,6 +327,8 @@ def clear_pipe_info(model):
         "sequential_decoder_offload": False,
         "full_prior_offload": False,
         "full_decoder_offload": False,
+        "xformers_prior": False,
+        "xformers_decoder": False,
         "cnet_depth_estimator": None,
         "cnet_dmap_type": None,
     }
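In short, the commit records whether xformers memory-efficient attention was actually enabled on the prior and decoder pipelines (via "xformers_prior" / "xformers_decoder" flags in model.pipe_info, initialized to False in clear_pipe_info) and only calls disable_xformers_memory_efficient_attention() on a pipeline whose flag is set. Below is a minimal standalone sketch of that guard pattern; DummyPipe, apply_xformers, and the module-level pipe_info dict are hypothetical stand-ins for illustration, not the project's actual objects.

# Sketch of the flag-guarded enable/disable pattern from this commit
# (assumed names: DummyPipe, apply_xformers; pipe_info mirrors model.pipe_info).

class DummyPipe:
    def enable_xformers_memory_efficient_attention(self):
        print("xformers enabled")

    def disable_xformers_memory_efficient_attention(self):
        print("xformers disabled")


pipe_info = {"xformers_prior": False, "xformers_decoder": False}
prior = DummyPipe()


def apply_xformers(use_xformers: bool):
    if use_xformers:
        try:
            prior.enable_xformers_memory_efficient_attention()
            # Remember that enabling succeeded, so a later disable is safe.
            pipe_info["xformers_prior"] = True
        except Exception:
            print("failed to apply xformers for prior")
    else:
        # Only disable what was actually enabled earlier (the point of fix #143).
        if pipe_info["xformers_prior"]:
            prior.disable_xformers_memory_efficient_attention()
            pipe_info["xformers_prior"] = False


apply_xformers(True)   # enables and sets the flag
apply_xformers(False)  # disables only because the flag was set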
